/// <summary>
/// Fire the events registered for this event type synchronously
/// </summary>
/// <param name="capsEvent">Capability name</param>
/// <param name="body">Decoded event body</param>
/// <param name="simulator">Reference to the simulator that
/// generated this event</param>
internal void RaiseEvent(string capsEvent, StructuredData.LLSD body, Simulator simulator)
{
    bool specialHandler = false;
    Caps.EventQueueCallback callback;

    // Default handler first, if one exists (registered under the empty string key)
    if (_EventTable.TryGetValue(String.Empty, out callback))
    {
        if (callback != null)
        {
            try { callback(capsEvent, body, simulator); }
            catch (Exception ex)
            {
                Logger.Log("CAPS Event Handler: " + ex.ToString(), Helpers.LogLevel.Error, Client);
            }
        }
    }

    // Generic parser next
    if (body.Type == StructuredData.LLSDType.Map)
    {
        StructuredData.LLSDMap map = (StructuredData.LLSDMap)body;
        Packet packet = Packet.BuildPacket(capsEvent, map);
        if (packet != null)
        {
            NetworkManager.IncomingPacket incomingPacket;
            incomingPacket.Simulator = simulator;
            incomingPacket.Packet = packet;

            Logger.DebugLog("Serializing " + packet.Type.ToString() + " capability with generic handler", Client);

            Client.Network.PacketInbox.Enqueue(incomingPacket);
            specialHandler = true;
        }
    }

    // Explicit handler next
    if (_EventTable.TryGetValue(capsEvent, out callback) && callback != null)
    {
        try { callback(capsEvent, body, simulator); }
        catch (Exception ex)
        {
            Logger.Log("CAPS Event Handler: " + ex.ToString(), Helpers.LogLevel.Error, Client);
        }

        specialHandler = true;
    }

    if (!specialHandler)
        Logger.Log("Unhandled CAPS event " + capsEvent, Helpers.LogLevel.Warning, Client);
}
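// Hedged usage sketch (not part of the original source): registering a CAPS handler so RaiseEvent
// above can find it in _EventTable. RegisterEventCallback is assumed to exist on NetworkManager,
// as in several libopenmetaverse builds; adjust the name and callback signature to your version.
void HookParcelProperties(GridClient client)
{
    Caps.EventQueueCallback handler = delegate(string capsEvent, StructuredData.LLSD body, Simulator simulator)
    {
        Logger.DebugLog("Got CAPS event " + capsEvent + " from " + simulator.ToString(), client);
    };

    // An explicit handler is keyed by the capability name. Registering under String.Empty
    // (if the build supports it) would act as the catch-all default handler checked first.
    client.Network.RegisterEventCallback("ParcelProperties", handler);
}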
/// <summary>
/// Return bytes read from the local asset cache, null if it does not exist
/// </summary>
/// <param name="assetID">UUID of the asset we want to get</param>
/// <returns>Raw bytes of the asset, or null on failure</returns>
public byte[] GetCachedAssetBytes(UUID assetID)
{
    if (!Operational())
        return null;

    try
    {
        Logger.DebugLog("Reading " + FileName(assetID) + " from asset cache.");
        byte[] data = File.ReadAllBytes(FileName(assetID));
        return data;
    }
    catch (Exception ex)
    {
        Logger.Log("Failed reading asset from cache (" + ex.Message + ")", Helpers.LogLevel.Warning, Client);
        return null;
    }
}
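// Hedged usage sketch: checking the local cache before requesting an asset over the network.
// It assumes the cache is reachable as client.Assets.Cache, as in mainline libopenmetaverse
// builds; treat that property path as an assumption if your build differs.
byte[] TryLoadFromCache(GridClient client, UUID assetID)
{
    byte[] data = client.Assets.Cache.GetCachedAssetBytes(assetID);
    if (data == null)
    {
        Logger.DebugLog("Asset " + assetID.ToString() + " is not cached locally", client);
    }
    return data;
}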
/// <summary>
/// Process an incoming event and check whether we have a message decoder registered for it
/// </summary>
/// <param name="eventName">Name of the CAPS event</param>
/// <param name="body">Decoded event body</param>
private void EventQueueEventHandler(string eventName, OSDMap body)
{
    IMessage message = Messages.MessageUtils.DecodeEvent(eventName, body);
    if (message != null)
    {
        Simulator.Client.Network.CapsEvents.BeginRaiseEvent(eventName, message, Simulator);

        #region Stats Tracking
        if (Simulator.Client.Settings.TRACK_UTILIZATION)
        {
            Simulator.Client.Stats.Update(eventName, OpenMetaverse.Stats.Type.Message, 0, body.ToString().Length);
        }
        #endregion
    }
    else
    {
        Logger.Log("No Message handler exists for event " + eventName + ". Unable to decode. Will try Generic Handler next", Helpers.LogLevel.Warning);
        Logger.Log("Please report this information to http://jira.openmetaverse.co/: \n" + body, Helpers.LogLevel.Debug);

        // Try the generic decoder next, which takes a CAPS event and tries to match it to an existing packet
        if (body.Type == OSDType.Map)
        {
            OSDMap map = (OSDMap)body;
            Packet packet = Packet.BuildPacket(eventName, map);
            if (packet != null)
            {
                NetworkManager.IncomingPacket incomingPacket;
                incomingPacket.Simulator = Simulator;
                incomingPacket.Packet = packet;

                Logger.DebugLog("Serializing " + packet.Type.ToString() + " capability with generic handler", Simulator.Client);

                Simulator.Client.Network.PacketInbox.Enqueue(incomingPacket);
            }
            else
            {
                Logger.Log("No Packet or Message handler exists for " + eventName, Helpers.LogLevel.Warning);
            }
        }
    }
}
/// <summary>
/// Bind the UDP socket and begin receiving data asynchronously
/// </summary>
public void Start()
{
    if (!shutdownFlag)
        return;

    const int SIO_UDP_CONNRESET = -1744830452;

    IPEndPoint ipep = new IPEndPoint(Settings.BIND_ADDR, udpPort);

    udpSocket = new Socket(
        AddressFamily.InterNetwork,
        SocketType.Dgram,
        ProtocolType.Udp);

    try
    {
        // This UDP socket flag is not supported under Mono,
        // so we'll catch the exception and continue
        udpSocket.IOControl(SIO_UDP_CONNRESET, new byte[] { 0 }, null);
    }
    catch (Exception)
    {
        Logger.DebugLog("UDP SIO_UDP_CONNRESET flag not supported on this platform");
    }

    // On at least Mono 3.2.8, multiple UDP sockets can bind to the same port by default. This means that
    // when running multiple connections, two can occasionally bind to the same port, leading to unexpected
    // errors as they intercept each other's messages. We need to prevent this. This is not allowed by
    // default on Windows.
    udpSocket.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.ReuseAddress, false);

    udpSocket.Bind(ipep);

    // We're not shutting down, we're starting up
    shutdownFlag = false;

    // Kick off an async receive. The Start() method will return; the
    // actual receives will occur asynchronously and will be caught in
    // AsyncEndRecieve().
    AsyncBeginReceive();
}
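// Standalone sketch of the SIO_UDP_CONNRESET trick used above: on Windows a UDP socket can raise
// a ConnectionReset SocketException on a later receive when a previous send hit an unreachable
// port; this IOControl call disables that behavior. On Mono/non-Windows the call throws, so it is
// wrapped in a try/catch exactly as in Start(). Requires System.Net and System.Net.Sockets.
static Socket CreateQuietUdpSocket(IPEndPoint bindTo)
{
    const int SIO_UDP_CONNRESET = -1744830452;

    Socket socket = new Socket(AddressFamily.InterNetwork, SocketType.Dgram, ProtocolType.Udp);

    try { socket.IOControl(SIO_UDP_CONNRESET, new byte[] { 0 }, null); }
    catch (Exception) { /* not supported on this platform; safe to ignore */ }

    // Refuse to share the port with another socket, mirroring the Mono note above
    socket.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.ReuseAddress, false);
    socket.Bind(bindTo);
    return socket;
}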
private byte[] GetCachedAssetBytes0(UUID assetID, AssetType assetType)
{
    if (CogbotHelpers.IsNullOrZero(assetID))
        return null;
    if (!Operational())
        return null;

    try
    {
        byte[] data = null;
        string fileName = FileName(assetID, assetType);
        bool exists = File.Exists(fileName);

        if (!exists)
        {
            string sfn = StaticFileName(assetID);
            //Logger.DebugLog("Reading " + fileName + " from asset cache. (missing) null");
            if (Exists(sfn))
            {
                data = File.ReadAllBytes(sfn);
                File.Copy(sfn, fileName);
            }
        }
        else
        {
            Logger.DebugLog("Reading " + fileName + " from asset cache.");
            data = File.ReadAllBytes(fileName);
        }

        return data;
    }
    catch (FileNotFoundException)
    {
        return null;
    }
    catch (Exception ex)
    {
        Logger.Log("Failed reading asset from cache (" + ex.Message + ")", Helpers.LogLevel.Warning, Client);
        return null;
    }
}
private void UpdateLoginStatus(LoginStatus status, string message)
{
    InternalStatusCode = status;
    InternalLoginMessage = message;

    Logger.DebugLog("Login status: " + status.ToString() + ": " + message, Client);

    // If we reached a login resolution trigger the event
    if (status == LoginStatus.Success || status == LoginStatus.Failed)
    {
        CurrentContext = null;
        LoginEvent.Set();
    }

    // Fire the login status callback
    if (OnLogin != null)
    {
        try { OnLogin(status, message); }
        catch (Exception e) { Logger.Log(e.Message, Helpers.LogLevel.Error, Client, e); }
    }
}
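// Hedged usage sketch: subscribing to the OnLogin callback that UpdateLoginStatus fires. The
// delegate signature (LoginStatus, string) matches the invocation above; how the event is exposed
// publicly (and its exact name) varies between library versions, so treat this as illustrative.
void WatchLogin(GridClient client)
{
    client.Network.OnLogin += delegate(LoginStatus status, string message)
    {
        if (status == LoginStatus.Success)
            Console.WriteLine("Logged in: " + message);
        else if (status == LoginStatus.Failed)
            Console.WriteLine("Login failed: " + message);
        // Intermediate statuses (ConnectingToLogin, Redirecting, etc.) report progress only
    };
}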
private void SeedRequestCompleteHandler(CapsClient client, OSD result, Exception error)
{
    if (result != null && result.Type == OSDType.Map)
    {
        OSDMap respTable = (OSDMap)result;

        foreach (string cap in respTable.Keys)
        {
            _Caps[cap] = respTable[cap].AsUri();
        }

        if (_Caps.ContainsKey("EventQueueGet"))
        {
            Logger.DebugLog("Starting event queue for " + Simulator.ToString(), Simulator.Client);

            _EventQueueCap = new EventQueueClient(_Caps["EventQueueGet"]);
            _EventQueueCap.OnConnected += EventQueueConnectedHandler;
            _EventQueueCap.OnEvent += EventQueueEventHandler;
            _EventQueueCap.Start();
        }

        OnCapabilitiesReceived(Simulator);
    }
    else if (
        error != null &&
        error is WebException &&
        ((WebException)error).Response != null &&
        ((HttpWebResponse)((WebException)error).Response).StatusCode == HttpStatusCode.NotFound)
    {
        // 404 error
        Logger.Log("Seed capability returned a 404, capability system is aborting", Helpers.LogLevel.Error);
    }
    else
    {
        // The initial CAPS connection failed, try again
        MakeSeedRequest();
    }
}
/// <summary>
/// Sends a packet
/// </summary>
/// <param name="packet">Packet to be sent</param>
public void SendPacket(Packet packet)
{
    // DEBUG: This can go away after we are sure nothing in the library is trying to do this
    if (packet.Header.AppendedAcks || (packet.Header.AckList != null && packet.Header.AckList.Length > 0))
        Logger.Log("Attempting to send packet " + packet.Type + " with ACKs appended before serialization", Helpers.LogLevel.Error);

    if (packet.HasVariableBlocks)
    {
        byte[][] datas;
        try { datas = packet.ToBytesMultiple(); }
        catch (NullReferenceException)
        {
            Logger.Log("Failed to serialize " + packet.Type + " packet to one or more payloads due to a missing block or field. StackTrace: " +
                Environment.StackTrace, Helpers.LogLevel.Error);
            return;
        }

        int packetCount = datas.Length;

        if (packetCount > 1)
            Logger.DebugLog("Split " + packet.Type + " packet into " + packetCount + " packets");

        for (int i = 0; i < packetCount; i++)
        {
            byte[] data = datas[i];
            SendPacketData(data, data.Length, packet.Type, packet.Header.Zerocoded);
        }
    }
    else
    {
        byte[] data = packet.ToBytes();
        SendPacketData(data, data.Length, packet.Type, packet.Header.Zerocoded);
    }
}
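// Hedged usage sketch: callers hand SendPacket a freshly built packet and let the send path
// append ACKs during serialization (hence the DEBUG warning above). CompletePingCheckPacket is
// used here only as a small, well-known packet type; the block and field names are from memory,
// so verify them against the generated packet classes in your build.
void AnswerPing(Simulator simulator, byte pingId)
{
    CompletePingCheckPacket ping = new CompletePingCheckPacket();
    ping.PingID.PingID = pingId;
    ping.Header.Reliable = false;   // ping replies do not need ACK tracking
    simulator.SendPacket(ping);
}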
/// <summary>
/// Loads an inventory cache file into the inventory structure. Note: only valid to call after login has been successful.
/// </summary>
/// <param name="filename">Name of the cache file to load</param>
/// <returns>The number of inventory items successfully reconstructed into the inventory node tree</returns>
public int RestoreFromDisk(string filename)
{
    List<InventoryNode> nodes = new List<InventoryNode>();
    int item_count = 0;

    try
    {
        if (!File.Exists(filename))
            return -1;

        using (Stream stream = File.Open(filename, FileMode.Open))
        {
            BinaryFormatter bformatter = new BinaryFormatter();

            while (stream.Position < stream.Length)
            {
                OpenMetaverse.InventoryNode node = (InventoryNode)bformatter.Deserialize(stream);
                nodes.Add(node);
                item_count++;
            }
        }
    }
    catch (Exception e)
    {
        Logger.Log("Error accessing inventory cache file: " + e.Message, Helpers.LogLevel.Error);
        return -1;
    }

    Logger.Log("Read " + item_count.ToString() + " items from inventory cache file", Helpers.LogLevel.Info);

    item_count = 0;
    List<InventoryNode> del_nodes = new List<InventoryNode>(); // Nodes that we have processed and will delete
    List<UUID> dirty_folders = new List<UUID>();               // Tainted folders that we will not restore items into

    // Because we could get child nodes before parents we must iterate around and only add nodes that have
    // a parent already in the list, because we must update both child and parent to link them together.
    // But sometimes we have seen orphan nodes due to bad/incomplete data when caching, so we have an emergency abort route.
    int stuck = 0;

    while (nodes.Count != 0 && stuck < 5)
    {
        foreach (InventoryNode node in nodes)
        {
            InventoryNode pnode;

            if (node.ParentID == UUID.Zero)
            {
                // We don't need the root nodes ("My Inventory" etc.) as they will already exist for the correct
                // user of this cache.
                del_nodes.Add(node);
                item_count--;
            }
            else if (Items.TryGetValue(node.Data.UUID, out pnode))
            {
                // We already have this; it must be a folder
                if (node.Data is InventoryFolder)
                {
                    InventoryFolder cache_folder = (InventoryFolder)node.Data;
                    InventoryFolder server_folder = (InventoryFolder)pnode.Data;

                    if (cache_folder.Version != server_folder.Version)
                    {
                        Logger.DebugLog("Inventory Cache/Server version mismatch on " + node.Data.Name + " " +
                            cache_folder.Version.ToString() + " vs " + server_folder.Version.ToString());
                        pnode.NeedsUpdate = true;
                        dirty_folders.Add(node.Data.UUID);
                    }
                    else
                    {
                        pnode.NeedsUpdate = false;
                    }

                    del_nodes.Add(node);
                }
            }
            else if (Items.TryGetValue(node.ParentID, out pnode))
            {
                if (node.Data != null)
                {
                    // If node is a folder, and it does not exist in the skeleton, mark it as
                    // dirty and don't process nodes that belong to it
                    if (node.Data is InventoryFolder && !(Items.ContainsKey(node.Data.UUID)))
                    {
                        dirty_folders.Add(node.Data.UUID);
                    }

                    // Only add new items; this is most likely to be run at login time before any inventory
                    // nodes other than the root are populated. Don't add non-existing folders.
                    if (!Items.ContainsKey(node.Data.UUID) && !dirty_folders.Contains(pnode.Data.UUID) && !(node.Data is InventoryFolder))
                    {
                        Items.Add(node.Data.UUID, node);
                        node.Parent = pnode;                   // Update this node with its parent
                        pnode.Nodes.Add(node.Data.UUID, node); // Add to the parent's child list
                        item_count++;
                    }
                }

                del_nodes.Add(node);
            }
        }

        if (del_nodes.Count == 0)
            stuck++;
        else
            stuck = 0;

        // Clean up processed nodes this loop around
        foreach (InventoryNode node in del_nodes)
            nodes.Remove(node);

        del_nodes.Clear();
    }

    Logger.Log("Reassembled " + item_count.ToString() + " items from inventory cache file", Helpers.LogLevel.Info);
    return item_count;
}
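// Hedged usage sketch: RestoreFromDisk is only meaningful after login has populated the root
// inventory skeleton, so it is typically called from a login-success handler. The Store property
// (and a matching SaveToDisk counterpart) are assumed to exist as in mainline libopenmetaverse
// builds; treat those names as assumptions if your build differs.
void ReloadInventoryCache(GridClient client, string cacheFile)
{
    int restored = client.Inventory.Store.RestoreFromDisk(cacheFile);

    if (restored < 0)
        Logger.Log("Inventory cache could not be read from " + cacheFile, Helpers.LogLevel.Warning);
    else
        Logger.Log("Restored " + restored + " cached inventory items", Helpers.LogLevel.Info);
}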
/// <summary>Check the queue for pending work</summary>
private void EnqueuePending()
{
    lock (queue)
    {
        if (queue.Count > 0)
        {
            int nr = 0;
            lock (activeDownloads) { nr = activeDownloads.Count; }

            Logger.DebugLog(nr.ToString() + " active downloads. Queued textures: " + queue.Count.ToString());

            for (int i = nr; i < ParallelDownloads && queue.Count > 0; i++)
            {
                DownloadRequest item = queue.Dequeue();
                lock (activeDownloads)
                {
                    string addr = item.Address.ToString();
                    if (activeDownloads.ContainsKey(addr))
                    {
                        activeDownloads[addr].CompletedHandlers.Add(item.CompletedCallback);
                        if (item.DownloadProgressCallback != null)
                        {
                            activeDownloads[addr].ProgresHadlers.Add(item.DownloadProgressCallback);
                        }
                    }
                    else
                    {
                        ActiveDownload activeDownload = new ActiveDownload();
                        activeDownload.CompletedHandlers.Add(item.CompletedCallback);
                        if (item.DownloadProgressCallback != null)
                        {
                            activeDownload.ProgresHadlers.Add(item.DownloadProgressCallback);
                        }

                        Logger.DebugLog("Requesting " + item.Address.ToString());
                        activeDownload.Request = SetupRequest(item.Address, item.ContentType);
                        CapsBase.DownloadDataAsync(
                            activeDownload.Request,
                            item.MillisecondsTimeout,
                            (HttpWebRequest request, HttpWebResponse response, int bytesReceived, int totalBytesToReceive) =>
                            {
                                foreach (CapsBase.DownloadProgressEventHandler handler in activeDownload.ProgresHadlers)
                                {
                                    handler(request, response, bytesReceived, totalBytesToReceive);
                                }
                            },
                            (HttpWebRequest request, HttpWebResponse response, byte[] responseData, Exception error) =>
                            {
                                lock (activeDownloads) activeDownloads.Remove(addr);

                                if (error == null || item.Attempt >= item.Retries || (error != null && error.Message.Contains("404")))
                                {
                                    foreach (CapsBase.RequestCompletedEventHandler handler in activeDownload.CompletedHandlers)
                                    {
                                        handler(request, response, responseData, error);
                                    }
                                }
                                else
                                {
                                    item.Attempt++;
                                    Logger.Log(string.Format("Texture {0} HTTP download failed, trying again retry {1}/{2}",
                                        item.Address, item.Attempt, item.Retries), Helpers.LogLevel.Warning);
                                    lock (queue) queue.Enqueue(item);
                                }

                                EnqueuePending();
                            }
                        );

                        activeDownloads[addr] = activeDownload;
                    }
                }
            }
        }
    }
}
protected override void PacketReceived(UDPPacketBuffer buffer)
{
    Packet packet = null;

    // Check if this packet came from the server we expected it to come from
    if (!remoteEndPoint.Address.Equals(((IPEndPoint)buffer.RemoteEndPoint).Address))
    {
        Logger.Log("Received " + buffer.DataLength + " bytes of data from unrecognized source " +
            ((IPEndPoint)buffer.RemoteEndPoint).ToString(), Helpers.LogLevel.Warning, Client);
        return;
    }

    // Update the disconnect flag so this sim doesn't time out
    DisconnectCandidate = false;

    #region Packet Decoding

    int packetEnd = buffer.DataLength - 1;

    try
    {
        packet = Packet.BuildPacket(buffer.Data, ref packetEnd,
            // Only allocate a buffer for zerodecoding if the packet is zerocoded
            ((buffer.Data[0] & Helpers.MSG_ZEROCODED) != 0) ? new byte[8192] : null);
    }
    catch (MalformedDataException)
    {
        Logger.Log(String.Format("Malformed data, cannot parse packet:\n{0}",
            Utils.BytesToHexString(buffer.Data, buffer.DataLength, null)), Helpers.LogLevel.Error);
    }

    // Fail-safe check
    if (packet == null)
    {
        Logger.Log("Couldn't build a message from the incoming data", Helpers.LogLevel.Warning, Client);
        return;
    }

    Interlocked.Add(ref Stats.RecvBytes, buffer.DataLength);
    Interlocked.Increment(ref Stats.RecvPackets);

    #endregion Packet Decoding

    if (packet.Header.Resent)
        Interlocked.Increment(ref Stats.ReceivedResends);

    #region ACK Receiving

    // Handle appended ACKs
    if (packet.Header.AppendedAcks && packet.Header.AckList != null)
    {
        lock (NeedAck)
        {
            for (int i = 0; i < packet.Header.AckList.Length; i++)
                NeedAck.Remove(packet.Header.AckList[i]);
        }
    }

    // Handle PacketAck packets
    if (packet.Type == PacketType.PacketAck)
    {
        PacketAckPacket ackPacket = (PacketAckPacket)packet;

        lock (NeedAck)
        {
            for (int i = 0; i < ackPacket.Packets.Length; i++)
                NeedAck.Remove(ackPacket.Packets[i].ID);
        }
    }

    #endregion ACK Receiving

    if (packet.Header.Reliable)
    {
        #region ACK Sending

        // Add this packet to the list of ACKs that need to be sent out
        uint sequence = (uint)packet.Header.Sequence;
        PendingAcks.Enqueue(sequence);

        // Send out ACKs if we have a lot of them
        if (PendingAcks.Count >= Client.Settings.MAX_PENDING_ACKS)
            SendAcks();

        #endregion ACK Sending

        // Check the archive of received packet IDs to see whether we already received this packet
        if (!PacketArchive.TryEnqueue(packet.Header.Sequence))
        {
            if (packet.Header.Resent)
                Logger.DebugLog("Received a resend of already processed packet #" + packet.Header.Sequence + ", type: " + packet.Type);
            else
                Logger.Log("Received a duplicate (not marked as resend) of packet #" + packet.Header.Sequence +
                    ", type: " + packet.Type, Helpers.LogLevel.Warning);

            // Avoid firing a callback twice for the same packet
            return;
        }
    }

    #region Inbox Insertion

    NetworkManager.IncomingPacket incomingPacket;
    incomingPacket.Simulator = this;
    incomingPacket.Packet = packet;

    Network.PacketInbox.Enqueue(incomingPacket);

    #endregion Inbox Insertion

    #region Stats Tracking
    if (Client.Settings.TRACK_UTILIZATION)
    {
        Client.Stats.Update(packet.Type.ToString(), OpenMetaverse.Stats.Type.Packet, 0, packet.Length);
    }
    #endregion
}
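// Minimal sketch of the duplicate-detection idea behind PacketArchive.TryEnqueue above: a fixed
// size ring of recently seen sequence numbers. TryEnqueue returns false when the sequence was
// already seen, which is how the handler avoids firing callbacks twice for the same packet. This
// illustrates the technique only; it is not the library's actual archive implementation.
class SeenSequenceRing
{
    private readonly uint[] ring;                         // eviction order, oldest first
    private readonly HashSet<uint> seen = new HashSet<uint>(); // fast membership test
    private int next;

    public SeenSequenceRing(int capacity) { ring = new uint[capacity]; }

    public bool TryEnqueue(uint sequence)
    {
        lock (seen)
        {
            if (!seen.Add(sequence))
                return false;            // duplicate: already in the window

            seen.Remove(ring[next]);     // evict the oldest tracked sequence
            ring[next] = sequence;
            next = (next + 1) % ring.Length;
            return true;
        }
    }
}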
/// <summary>
/// Sends a packet directly to the simulator without queuing
/// </summary>
/// <param name="packet">Packet to be sent</param>
/// <param name="setSequence">True to set the sequence number, false to
/// leave it as is</param>
public void SendPacketUnqueued(Packet packet, bool setSequence)
{
    byte[] buffer;
    int bytes;

    // Set sequence implies that this is not a resent packet
    if (setSequence)
    {
        // Reset to zero if we've hit the upper sequence number limit
        Interlocked.CompareExchange(ref Sequence, 0, Settings.MAX_SEQUENCE);
        // Increment and fetch the current sequence number
        packet.Header.Sequence = (uint)Interlocked.Increment(ref Sequence);

        if (packet.Header.Reliable)
        {
            // Wrap this packet in a struct to track timeouts and resends
            NetworkManager.OutgoingPacket outgoing = new NetworkManager.OutgoingPacket(this, packet, true);
            // Keep track of when this packet was first sent out (right now)
            outgoing.TickCount = Environment.TickCount;

            // Add this packet to the list of ACK responses we are waiting on from the server
            lock (NeedAck)
            {
                NeedAck[packet.Header.Sequence] = outgoing;
            }

            if (packet.Header.Resent)
            {
                // This packet has already been sent out once, strip any appended ACKs
                // off it and reinsert them into the outgoing ACK queue under the
                // assumption that this packet will continually be rejected from the
                // server or that the appended ACKs are possibly making the delivery fail
                if (packet.Header.AckList.Length > 0)
                {
                    Logger.DebugLog(String.Format("Purging ACKs from packet #{0} ({1}) which will be resent.",
                        packet.Header.Sequence, packet.GetType()));

                    lock (PendingAcks)
                    {
                        foreach (uint sequence in packet.Header.AckList)
                        {
                            if (!PendingAcks.ContainsKey(sequence))
                                PendingAcks[sequence] = sequence;
                        }
                    }

                    packet.Header.AppendedAcks = false;
                    packet.Header.AckList = new uint[0];
                }

                // Update the sent time for this packet
                SetResentTime(packet.Header.Sequence);
            }
            else
            {
                // This packet is not a resend, check if the conditions are favorable
                // to ACK appending
                if (packet.Type != PacketType.PacketAck &&
                    packet.Type != PacketType.LogoutRequest)
                {
                    lock (PendingAcks)
                    {
                        if (PendingAcks.Count > 0 &&
                            PendingAcks.Count < Client.Settings.MAX_APPENDED_ACKS)
                        {
                            // Append all of the queued up outgoing ACKs to this packet
                            packet.Header.AckList = new uint[PendingAcks.Count];

                            for (int i = 0; i < PendingAcks.Count; i++)
                                packet.Header.AckList[i] = PendingAcks.Values[i];

                            PendingAcks.Clear();
                            packet.Header.AppendedAcks = true;
                        }
                    }
                }
            }
        }
        else if (packet.Header.AckList.Length > 0)
        {
            // Sanity check for ACKs appended on an unreliable packet, this is bad form
            Logger.Log("Sending appended ACKs on an unreliable packet", Helpers.LogLevel.Warning);
        }
    }

    // Serialize the packet
    buffer = packet.ToBytes();
    bytes = buffer.Length;
    Stats.SentBytes += (ulong)bytes;
    ++Stats.SentPackets;

    UDPPacketBuffer buf = new UDPPacketBuffer(ipEndPoint);

    // Zerocode if needed
    if (packet.Header.Zerocoded)
        bytes = Helpers.ZeroEncode(buffer, bytes, buf.Data);
    else
        Buffer.BlockCopy(buffer, 0, buf.Data, 0, bytes);

    buf.DataLength = bytes;

    AsyncBeginSend(buf);
}
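// Standalone sketch of the sequence-number pattern used at the top of SendPacketUnqueued:
// CompareExchange resets the counter to zero once it reaches the cap, and Increment hands each
// caller a unique value atomically. MAX_SEQUENCE here stands in for Settings.MAX_SEQUENCE; treat
// the exact constant as an assumption about the wire protocol's 24-bit sequence space.
static class SequenceCounter
{
    private const int MAX_SEQUENCE = 0xFFFFFF; // assumed 24-bit wrap point
    private static int sequence;

    public static uint Next()
    {
        // If sequence == MAX_SEQUENCE, swap it back to 0 before incrementing
        Interlocked.CompareExchange(ref sequence, 0, MAX_SEQUENCE);
        return (uint)Interlocked.Increment(ref sequence);
    }
}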
/// <summary>
/// Updates the state of the InventoryNode and inventory data structure that
/// is responsible for the InventoryObject. If the item was previously not added to inventory,
/// it adds the item and updates the structure accordingly. If it was, it updates the
/// InventoryNode, changing the parent node if <code>item.parentUUID</code> does
/// not match <code>node.Parent.Data.UUID</code>.
///
/// You cannot set the inventory root folder using this method.
/// </summary>
/// <param name="item">The InventoryObject to store</param>
public void UpdateNodeFor(InventoryBase item)
{
    lock (Items)
    {
        InventoryNode itemParent = null;
        if (item.ParentUUID != UUID.Zero && !Items.TryGetValue(item.ParentUUID, out itemParent))
        {
            // OK, we have no data on the parent, let's create a fake one.
            InventoryFolder fakeParent = new InventoryFolder(item.ParentUUID);
            fakeParent.DescendentCount = 1; // Dear god, please forgive me.
            itemParent = new InventoryNode(fakeParent);
            Items[item.ParentUUID] = itemParent;
            // Unfortunately, this breaks the nice unified tree
            // while we're waiting for the parent's data to come in.
            // As soon as we get the parent, the tree repairs itself.
            Logger.DebugLog("Attempting to update inventory child of " +
                item.ParentUUID.ToString() + " when we have no local reference to that folder", Client);

            if (Client.Settings.FETCH_MISSING_INVENTORY)
            {
                // Fetch the parent
                List<UUID> fetchreq = new List<UUID>(1);
                fetchreq.Add(item.ParentUUID);
            }
        }

        InventoryNode itemNode;
        if (Items.TryGetValue(item.UUID, out itemNode)) // We're updating.
        {
            InventoryNode oldParent = itemNode.Parent;

            // Handle parent change
            if (oldParent == null || itemParent == null || itemParent.Data.UUID != oldParent.Data.UUID)
            {
                if (oldParent != null)
                {
                    lock (oldParent.Nodes.SyncRoot)
                        oldParent.Nodes.Remove(item.UUID);
                }
                if (itemParent != null)
                {
                    lock (itemParent.Nodes.SyncRoot)
                        itemParent.Nodes[item.UUID] = itemNode;
                }
            }

            itemNode.Parent = itemParent;

            if (m_InventoryObjectUpdated != null)
            {
                // Raised with the old data still in itemNode.Data and the new data in item
                OnInventoryObjectUpdated(new InventoryObjectUpdatedEventArgs(itemNode.Data, item));
            }

            itemNode.Data = item;
        }
        else // We're adding.
        {
            itemNode = new InventoryNode(item, itemParent);
            Items.Add(item.UUID, itemNode);

            if (m_InventoryObjectAdded != null)
            {
                OnInventoryObjectAdded(new InventoryObjectAddedEventArgs(item));
            }
        }
    }
}
public void Parse(OSDMap reply)
{
    try
    {
        AgentID = ParseUUID("agent_id", reply);
        SessionID = ParseUUID("session_id", reply);
        SecureSessionID = ParseUUID("secure_session_id", reply);
        FirstName = ParseString("first_name", reply).Trim('"');
        LastName = ParseString("last_name", reply).Trim('"');
        StartLocation = ParseString("start_location", reply);
        AgentAccess = ParseString("agent_access", reply);
        LookAt = ParseVector3("look_at", reply);
    }
    catch (OSDException e)
    {
        Logger.DebugLog("Login server returned (some) invalid data: " + e.Message);
    }

    // Home
    OSDMap home = null;
    OSD osdHome = OSDParser.DeserializeLLSDNotation(reply["home"].AsString());

    if (osdHome.Type == OSDType.Map)
    {
        home = (OSDMap)osdHome;

        OSD homeRegion;
        if (home.TryGetValue("region_handle", out homeRegion) && homeRegion.Type == OSDType.Array)
        {
            OSDArray homeArray = (OSDArray)homeRegion;
            if (homeArray.Count == 2)
                HomeRegion = Utils.UIntsToLong((uint)homeArray[0].AsInteger(), (uint)homeArray[1].AsInteger());
            else
                HomeRegion = 0;
        }

        HomePosition = ParseVector3("position", home);
        HomeLookAt = ParseVector3("look_at", home);
    }
    else
    {
        HomeRegion = 0;
        HomePosition = Vector3.Zero;
        HomeLookAt = Vector3.Zero;
    }

    CircuitCode = ParseUInt("circuit_code", reply);
    RegionX = ParseUInt("region_x", reply);
    RegionY = ParseUInt("region_y", reply);
    SimPort = (ushort)ParseUInt("sim_port", reply);
    string simIP = ParseString("sim_ip", reply);
    IPAddress.TryParse(simIP, out SimIP);
    SeedCapability = ParseString("seed_capability", reply);

    // Buddy list
    OSD buddyLLSD;
    if (reply.TryGetValue("buddy-list", out buddyLLSD) && buddyLLSD.Type == OSDType.Array)
    {
        OSDArray buddyArray = (OSDArray)buddyLLSD;
        BuddyList = new FriendInfo[buddyArray.Count];

        for (int i = 0; i < buddyArray.Count; i++)
        {
            if (buddyArray[i].Type == OSDType.Map)
            {
                OSDMap buddy = (OSDMap)buddyArray[i];
                BuddyList[i] = new FriendInfo(
                    ParseUUID("buddy_id", buddy),
                    (FriendRights)ParseUInt("buddy_rights_given", buddy),
                    (FriendRights)ParseUInt("buddy_rights_has", buddy));
            }
        }
    }

    SecondsSinceEpoch = Utils.UnixTimeToDateTime(ParseUInt("seconds_since_epoch", reply));

    InventoryRoot = ParseMappedUUID("inventory-root", "folder_id", reply);
    InventorySkeleton = ParseInventoryFolders("inventory-skeleton", AgentID, reply);
    LibraryRoot = ParseMappedUUID("inventory-lib-root", "folder_id", reply);
    LibraryOwner = ParseMappedUUID("inventory-lib-owner", "agent_id", reply);
    LibrarySkeleton = ParseInventoryFolders("inventory-skel-lib", LibraryOwner, reply);
}
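// Hedged sketch of the region-handle packing used for HomeRegion above: the handle is the two
// global region coordinates packed into a single ulong via Utils.UIntsToLong, and
// Utils.LongToUInts reverses it. Both helpers exist in OpenMetaverse.Utils in mainline builds.
static void DescribeHomeRegion(ulong homeRegionHandle)
{
    uint globalX, globalY;
    Utils.LongToUInts(homeRegionHandle, out globalX, out globalY);
    Console.WriteLine("Home region is at global coordinates " + globalX + ", " + globalY);

    // Re-packing the coordinates yields the original handle
    ulong repacked = Utils.UIntsToLong(globalX, globalY);
    System.Diagnostics.Debug.Assert(repacked == homeRegionHandle);
}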
/// <summary>Check the queue for pending work</summary>
private void EnqueuePending()
{
    lock (queue)
    {
        if (queue.Count > 0)
        {
            int nr = 0;
            lock (activeDownloads) { nr = activeDownloads.Count; }

            for (int i = nr; i < ParallelDownloads && queue.Count > 0; i++)
            {
                DownloadRequest item = queue.Dequeue();
                lock (activeDownloads)
                {
                    string addr = item.Address.ToString();
                    if (activeDownloads.ContainsKey(addr))
                    {
                        activeDownloads[addr].CompletedHandlers.Add(item.CompletedCallback);
                        if (item.DownloadProgressCallback != null)
                        {
                            activeDownloads[addr].ProgresHadlers.Add(item.DownloadProgressCallback);
                        }
                    }
                    else
                    {
                        ActiveDownload activeDownload = new ActiveDownload();
                        activeDownload.CompletedHandlers.Add(item.CompletedCallback);
                        if (item.DownloadProgressCallback != null)
                        {
                            activeDownload.ProgresHadlers.Add(item.DownloadProgressCallback);
                        }

                        Logger.DebugLog("Requesting " + item.Address.ToString());
                        activeDownload.Request = SetupRequest(item.Address, item.ContentType);
                        CapsBase.DownloadDataAsync(
                            activeDownload.Request,
                            item.MillisecondsTimeout,
                            (HttpWebRequest request, HttpWebResponse response, int bytesReceived, int totalBytesToReceive) =>
                            {
                                foreach (CapsBase.DownloadProgressEventHandler handler in activeDownload.ProgresHadlers)
                                {
                                    handler(request, response, bytesReceived, totalBytesToReceive);
                                }
                            },
                            (HttpWebRequest request, HttpWebResponse response, byte[] responseData, Exception error) =>
                            {
                                lock (activeDownloads) activeDownloads.Remove(addr);

                                foreach (CapsBase.RequestCompletedEventHandler handler in activeDownload.CompletedHandlers)
                                {
                                    handler(request, response, responseData, error);
                                }

                                EnqueuePending();
                            }
                        );

                        activeDownloads[addr] = activeDownload;
                    }
                }
            }
        }
    }
}