private void Process()
            {
                Debug.Assert(!Monitor.IsEntered(_incomingRequests), "No locks should be held while invoking Process");
                Debug.Assert(_runningWorker != null && _runningWorker.Id == Task.CurrentId, "This is the worker, so it must be running");
                Debug.Assert(_wakeupRequestedPipeFd != 0, "Should have a valid pipe for wake ups");

                // Create the multi handle to use for this round of processing.  This one handle will be used
                // to service all easy requests currently available and all those that come in while
                // we're processing other requests.  Once the work quiesces and there are no more requests
                // to process, this multi handle will be released as the worker goes away.  The next
                // time a request arrives and a new worker is spun up, a new multi handle will be created.
                SafeCurlMultiHandle multiHandle = Interop.libcurl.curl_multi_init();
                if (multiHandle.IsInvalid)
                {
                    throw CreateHttpRequestException();
                }

                // Clear our active operations table.  This should already be clear, either because
                // all previous operations completed without unexpected exception, or in the case of an
                // unexpected exception we should have cleaned up gracefully anyway.  But just in case...
                Debug.Assert(_activeOperations.Count == 0, "We shouldn't have any active operations when starting processing.");
                _activeOperations.Clear();

                bool endingSuccessfully = false;
                try
                {
                    // Continue processing as long as there are any active operations
                    while (true)
                    {
                        // First handle any requests in the incoming requests queue.
                        while (true)
                        {
                            IncomingRequest request;
                            lock (_incomingRequests)
                            {
                                if (_incomingRequests.Count == 0) break;
                                request = _incomingRequests.Dequeue();
                            }
                            HandleIncomingRequest(multiHandle, request);
                        }

                        // If we have no active operations, we're done.
                        if (_activeOperations.Count == 0)
                        {
                            endingSuccessfully = true;
                            return;
                        }

                        // We have one or more active operations. Run any work that needs to be run.
                        int running_handles;
                        ThrowIfCURLMError(Interop.libcurl.curl_multi_perform(multiHandle, out running_handles));

                        // Complete and remove any requests that have finished being processed.
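                        // (CURLINFO_PRIVATE on each completing easy handle yields the IntPtr of a GCHandle,
                        // presumably stored via CURLOPT_PRIVATE when the request was activated; that pointer
                        // is the key used to look the operation back up in _activeOperations below.)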
                        int pendingMessages;
                        IntPtr messagePtr;
                        while ((messagePtr = Interop.libcurl.curl_multi_info_read(multiHandle, out pendingMessages)) != IntPtr.Zero)
                        {
                            Interop.libcurl.CURLMsg message = Marshal.PtrToStructure<Interop.libcurl.CURLMsg>(messagePtr);
                            Debug.Assert(message.msg == Interop.libcurl.CURLMSG.CURLMSG_DONE, "CURLMSG_DONE is supposed to be the only message type");

                            IntPtr gcHandlePtr;
                            ActiveRequest completedOperation;
                            if (message.msg == Interop.libcurl.CURLMSG.CURLMSG_DONE)
                            {
                                int getInfoResult = Interop.libcurl.curl_easy_getinfo(message.easy_handle, CURLINFO.CURLINFO_PRIVATE, out gcHandlePtr);
                                Debug.Assert(getInfoResult == CURLcode.CURLE_OK, "Failed to get info on a completing easy handle");
                                if (getInfoResult == CURLcode.CURLE_OK)
                                {
                                    bool gotActiveOp = _activeOperations.TryGetValue(gcHandlePtr, out completedOperation);
                                    Debug.Assert(gotActiveOp, "Expected to find GCHandle ptr in active operations table");
                                    if (gotActiveOp)
                                    {
                                        DeactivateActiveRequest(multiHandle, completedOperation.Easy, gcHandlePtr, completedOperation.CancellationRegistration);
                                        FinishRequest(completedOperation.Easy, message.result);
                                    }
                                }
                            }
                        }

                        // Wait for more things to do.  Even with our cancellation mechanism, we specify a timeout so that
                        // just in case something goes wrong we can recover gracefully.  This timeout is relatively long.
                        // Note, though, that libcurl has its own internal timeout, which can be requested separately
                        // via curl_multi_timeout, but which is used implicitly by curl_multi_wait if it's shorter
                        // than the value we provide.
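                        // For illustration only (hypothetical binding, not used in this code): libcurl's own
                        // suggested timeout could also be queried explicitly, e.g.
                        //     long suggestedMs;
                        //     curl_multi_timeout(multiHandle, out suggestedMs); // -1 means no suggestion
                        //     int waitMs = suggestedMs >= 0 ? (int)Math.Min(suggestedMs, FailsafeTimeoutMilliseconds) : FailsafeTimeoutMilliseconds;
                        // but since curl_multi_wait already honors the shorter internal timeout implicitly,
                        // we simply pass the failsafe value below.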
                        const int FailsafeTimeoutMilliseconds = 1000;
                        int numFds;
                        unsafe
                        {
                            Interop.libcurl.curl_waitfd extraFds = new Interop.libcurl.curl_waitfd {
                                fd = _wakeupRequestedPipeFd,
                                events = Interop.libcurl.CURL_WAIT_POLLIN,
                                revents = 0
                            };
                            ThrowIfCURLMError(Interop.libcurl.curl_multi_wait(multiHandle, &extraFds, 1, FailsafeTimeoutMilliseconds, out numFds));
                            if ((extraFds.revents & Interop.libcurl.CURL_WAIT_POLLIN) != 0)
                            {
                                // We woke up (at least in part) because a wake-up was requested.  
                                // Read the data out of the pipe to clear it.
                                Debug.Assert(numFds >= 1, "numFds should have been at least one, as the extraFd was set");
                                VerboseTrace("curl_multi_wait wake-up notification");
                                ReadFromWakeupPipeWhenKnownToContainData();
                            }
                            VerboseTraceIf(numFds == 0, "curl_multi_wait timeout");
                        }

                        // PERF NOTE: curl_multi_wait uses poll (assuming it's available), which is O(N) in terms of the number of fds 
                        // being waited on. If this ends up being a scalability bottleneck, we can look into using the curl_multi_socket_* 
                        // APIs, which would let us switch to using epoll by being notified when socket file descriptors are added or
                        // removed and configuring the epoll context with EPOLL_CTL_ADD/DEL, which at the expense of a lot of additional
                        // complexity would let us turn the O(N) operation into an O(1) operation.  The additional complexity would come
                        // not only in the form of additional callbacks and managing the socket collection, but also in the form of timer
                        // management, which is necessary when using the curl_multi_socket_* APIs and which we avoid by using just
                        // curl_multi_wait/perform.
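                        // For reference, a rough shape of that alternative (hypothetical bindings, not present
                        // in this codebase): register callbacks with
                        //     curl_multi_setopt(multiHandle, CURLMOPT_SOCKETFUNCTION, onSocketChange);
                        //     curl_multi_setopt(multiHandle, CURLMOPT_TIMERFUNCTION, onTimerChange);
                        // maintain an epoll set from onSocketChange via EPOLL_CTL_ADD/DEL, arm a timer from
                        // onTimerChange, and then drive transfers with
                        //     curl_multi_socket_action(multiHandle, readySocketFd, eventBitmask, out runningHandles);
                        // (or with CURL_SOCKET_TIMEOUT as the fd when the timer fires) instead of the
                        // curl_multi_perform/curl_multi_wait pair used here.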
                    }
                }
                finally
                {
                    // If we got an unexpected exception, something very bad happened. We may have some 
                    // operations that we initiated but that weren't completed. Make sure to clean up any 
                    // such operations, failing them and releasing their resources.
                    if (_activeOperations.Count > 0)
                    {
                        Debug.Assert(!endingSuccessfully, "We should only have remaining operations if we got an unexpected exception");
                        foreach (KeyValuePair<IntPtr, ActiveRequest> pair in _activeOperations)
                        {
                            ActiveRequest failingOperation = pair.Value;
                            IntPtr failingOperationGcHandle = pair.Key;

                            DeactivateActiveRequest(multiHandle, failingOperation.Easy, failingOperationGcHandle, failingOperation.CancellationRegistration);

                            // Complete the operation's task and clean up any of its resources
                            failingOperation.Easy.FailRequest(CreateHttpRequestException());
                            failingOperation.Easy.Cleanup(); // no active processing remains, so cleanup
                        }

                        // Clear the table.
                        _activeOperations.Clear();
                    }

                    // Finally, dispose of the multi handle.
                    multiHandle.Dispose();
                }
            }
Example #2
            private void Process()
            {
                Debug.Assert(!Monitor.IsEntered(_incomingRequests), "No locks should be held while invoking Process");
                Debug.Assert(_workerRunning, "This is the worker, so it must be running");
                Debug.Assert(_wakeupRequestedPipeFd != 0, "Should have a valid pipe for wake ups");

                // Create the multi handle to use for this round of processing.  This one handle will be used
                // to service all easy requests currently available and all those that come in while
                // we're processing other requests.  Once the work quiesces and there are no more requests
                // to process, this multi handle will be released as the worker goes away.  The next
                // time a request arrives and a new worker is spun up, a new multi handle will be created.
                SafeCurlMultiHandle multiHandle = Interop.libcurl.curl_multi_init();

                if (multiHandle.IsInvalid)
                {
                    throw CreateHttpRequestException();
                }

                // Clear our active operations table.  This should already be clear, either because
                // all previous operations completed without unexpected exception, or in the case of an
                // unexpected exception we should have cleaned up gracefully anyway.  But just in case...
                Debug.Assert(_activeOperations.Count == 0, "We shouldn't have any active operations when starting processing.");
                _activeOperations.Clear();

                bool endingSuccessfully = false;

                try
                {
                    // Continue processing as long as there are any active operations
                    while (true)
                    {
                        // Activate any new operations that were submitted, and cancel any operations
                        // that should no longer be around.
                        lock (_incomingRequests)
                        {
                            while (_incomingRequests.Count > 0)
                            {
                                EasyRequest easy = _incomingRequests.Dequeue();
                                Debug.Assert(easy._associatedMultiAgent == null || easy._associatedMultiAgent == this, "An incoming request must be associated with either no agent or this agent");
                                if (easy._associatedMultiAgent == null)
                                {
                                    // Handle new request
                                    ActivateNewRequest(multiHandle, easy);
                                }
                                else
                                {
                                    // Handle cancellation request.
                                    Debug.Assert(easy.CancellationToken.IsCancellationRequested, "_associatedMultiAgent should only be non-null if cancellation was requested");
                                    IntPtr        gcHandlePtr;
                                    ActiveRequest activeRequest;
                                    if (FindActiveRequest(easy, out gcHandlePtr, out activeRequest))
                                    {
                                        DeactivateActiveRequest(multiHandle, easy, gcHandlePtr, activeRequest.CancellationRegistration);
                                        easy.FailRequest(new OperationCanceledException(easy.CancellationToken));
                                        easy.Cleanup(); // no active processing remains, so we can cleanup
                                    }
                                    else
                                    {
                                        Debug.Assert(easy.Task.IsCompleted, "We should only not be able to find the request if it was already completed.");
                                    }
                                }
                            }
                        }

                        // If we have no active operations, we're done.
                        if (_activeOperations.Count == 0)
                        {
                            endingSuccessfully = true;
                            return;
                        }

                        // We have one or more active operations. Run any work that needs to be run.
                        int running_handles;
                        ThrowIfCURLMError(Interop.libcurl.curl_multi_perform(multiHandle, out running_handles));

                        // Complete and remove any requests that have finished being processed.
                        int    pendingMessages;
                        IntPtr messagePtr;
                        while ((messagePtr = Interop.libcurl.curl_multi_info_read(multiHandle, out pendingMessages)) != IntPtr.Zero)
                        {
                            Interop.libcurl.CURLMsg message = Marshal.PtrToStructure <Interop.libcurl.CURLMsg>(messagePtr);
                            IntPtr        gcHandlePtr;
                            ActiveRequest completedOperation;
                            if (message.msg == Interop.libcurl.CURLMSG.CURLMSG_DONE &&
                                Interop.libcurl.curl_easy_getinfo(message.easy_handle, CURLINFO.CURLINFO_PRIVATE, out gcHandlePtr) == CURLcode.CURLE_OK &&
                                _activeOperations.TryGetValue(gcHandlePtr, out completedOperation))
                            {
                                DeactivateActiveRequest(multiHandle, completedOperation.Easy, gcHandlePtr, completedOperation.CancellationRegistration);
                                FinishRequest(completedOperation.Easy, message.result);
                            }
                        }

                        // Wait for more things to do.  Even with our cancellation mechanism, we specify a timeout so that
                        // just in case something goes wrong we can recover gracefully.  This timeout is relatively long.
                        // Note, though, that libcurl has its own internal timeout, which can be requested separately
                        // via curl_multi_timeout, but which is used implicitly by curl_multi_wait if it's shorter
                        // than the value we provide.
                        const int FailsafeTimeoutMilliseconds = 1000;
                        int       numFds;
                        unsafe
                        {
                            Interop.libcurl.curl_waitfd extraFds = new Interop.libcurl.curl_waitfd {
                                fd      = _wakeupRequestedPipeFd,
                                events  = Interop.libcurl.CURL_WAIT_POLLIN,
                                revents = 0
                            };
                            ThrowIfCURLMError(Interop.libcurl.curl_multi_wait(multiHandle, &extraFds, 1, FailsafeTimeoutMilliseconds, out numFds));
                            if ((extraFds.revents & Interop.libcurl.CURL_WAIT_POLLIN) != 0)
                            {
                                // We woke up (at least in part) because a wake-up was requested.  Read the data out of the pipe
                                // to clear it. It's possible but unlikely that there will be tons of extra data in the pipe,
                                // more than we end up reading out here (it's unlikely because we only write a byte to the pipe when
                                // transitioning from 0 to 1 incoming request or when cancellation is requested, and that would
                                // need to happen many times in a single iteration).  In that unlikely case, we'll simply loop
                                // around again as normal and end up waking up spuriously from the next curl_multi_wait.  For now,
                                // this is preferable to making additional syscalls to poll and read from the pipe.
                                const int ClearBufferSize = 4096; // some sufficiently large size to clear the pipe in any normal case
                                byte *    clearBuf        = stackalloc byte[ClearBufferSize];
                                while (Interop.CheckIo((long)Interop.libc.read(_wakeupRequestedPipeFd, clearBuf, (IntPtr)ClearBufferSize)))
                                {
                                    ;
                                }
                            }
                        }

                        // PERF NOTE: curl_multi_wait uses poll (assuming it's available), which is O(N) in terms of the number of fds
                        // being waited on. If this ends up being a scalability bottleneck, we can look into using the curl_multi_socket_*
                        // APIs, which would let us switch to using epoll by being notified when socket file descriptors are added or
                        // removed and configuring the epoll context with EPOLL_CTL_ADD/DEL, which at the expense of a lot of additional
                        // complexity would let us turn the O(N) operation into an O(1) operation.  The additional complexity would come
                        // not only in the form of additional callbacks and managing the socket collection, but also in the form of timer
                        // management, which is necessary when using the curl_multi_socket_* APIs and which we avoid by using just
                        // curl_multi_wait/perform.
                    }
                }
                finally
                {
                    // If we got an unexpected exception, something very bad happened. We may have some
                    // operations that we initiated but that weren't completed. Make sure to clean up any
                    // such operations, failing them and releasing their resources.
                    if (_activeOperations.Count > 0)
                    {
                        Debug.Assert(!endingSuccessfully, "We should only have remaining operations if we got an unexpected exception");
                        foreach (KeyValuePair <IntPtr, ActiveRequest> pair in _activeOperations)
                        {
                            ActiveRequest failingOperation         = pair.Value;
                            IntPtr        failingOperationGcHandle = pair.Key;

                            DeactivateActiveRequest(multiHandle, failingOperation.Easy, failingOperationGcHandle, failingOperation.CancellationRegistration);

                            // Complete the operation's task and clean up any of its resources
                            failingOperation.Easy.FailRequest(CreateHttpRequestException());
                            failingOperation.Easy.Cleanup(); // no active processing remains, so cleanup
                        }

                        // Clear the table.
                        _activeOperations.Clear();
                    }

                    // Finally, dispose of the multi handle.
                    multiHandle.Dispose();
                }
            }
            private void Process()
            {
                Debug.Assert(!Monitor.IsEntered(_incomingRequests), "No locks should be held while invoking Process");
                Debug.Assert(_workerRunning, "This is the worker, so it must be running");
                Debug.Assert(_wakeupRequestedPipeFd != 0, "Should have a valid pipe for wake ups");

                // Create the multi handle to use for this round of processing.  This one handle will be used
                // to service all easy requests currently available and all those that come in while
                // we're processing other requests.  Once the work quiesces and there are no more requests
                // to process, this multi handle will be released as the worker goes away.  The next
                // time a request arrives and a new worker is spun up, a new multi handle will be created.
                SafeCurlMultiHandle multiHandle = Interop.libcurl.curl_multi_init();
                if (multiHandle.IsInvalid)
                {
                    throw CreateHttpRequestException();
                }

                // Clear our active operations table.  This should already be clear, either because
                // all previous operations completed without unexpected exception, or in the case of an
                // unexpected exception we should have cleaned up gracefully anyway.  But just in case...
                Debug.Assert(_activeOperations.Count == 0, "We shouldn't have any active operations when starting processing.");
                _activeOperations.Clear();

                bool endingSuccessfully = false;
                try
                {
                    // Continue processing as long as there are any active operations
                    while (true)
                    {
                        // Activate any new operations that were submitted, and cancel any operations
                        // that should no longer be around.
                        lock (_incomingRequests)
                        {
                            while (_incomingRequests.Count > 0)
                            {
                                EasyRequest easy = _incomingRequests.Dequeue();
                                Debug.Assert(easy._associatedMultiAgent == null || easy._associatedMultiAgent == this, "An incoming request must be associated with either no agent or this agent");
                                if (easy._associatedMultiAgent == null)
                                {
                                    // Handle new request
                                    ActivateNewRequest(multiHandle, easy);
                                }
                                else
                                {
                                    // Handle cancellation request.
                                    Debug.Assert(easy.CancellationToken.IsCancellationRequested, "_associatedMultiAgent should only be non-null if cancellation was requested");
                                    IntPtr gcHandlePtr;
                                    ActiveRequest activeRequest;
                                    if (FindActiveRequest(easy, out gcHandlePtr, out activeRequest))
                                    {
                                        DeactivateActiveRequest(multiHandle, easy, gcHandlePtr, activeRequest.CancellationRegistration);
                                        easy.FailRequest(new OperationCanceledException(easy.CancellationToken));
                                        easy.Cleanup(); // no active processing remains, so we can cleanup
                                    }
                                    else
                                    {
                                        Debug.Assert(easy.Task.IsCompleted, "We should only not be able to find the request if it was already completed.");
                                    }
                                }
                            }
                        }

                        // If we have no active operations, we're done.
                        if (_activeOperations.Count == 0)
                        {
                            endingSuccessfully = true;
                            return;
                        }

                        // We have one or more active operations. Run any work that needs to be run.
                        int running_handles;
                        ThrowIfCURLMError(Interop.libcurl.curl_multi_perform(multiHandle, out running_handles));

                        // Complete and remove any requests that have finished being processed.
                        int pendingMessages;
                        IntPtr messagePtr;
                        while ((messagePtr = Interop.libcurl.curl_multi_info_read(multiHandle, out pendingMessages)) != IntPtr.Zero)
                        {
                            Interop.libcurl.CURLMsg message = Marshal.PtrToStructure<Interop.libcurl.CURLMsg>(messagePtr);
                            IntPtr gcHandlePtr;
                            ActiveRequest completedOperation;
                            if (message.msg == Interop.libcurl.CURLMSG.CURLMSG_DONE &&
                                Interop.libcurl.curl_easy_getinfo(message.easy_handle, CURLINFO.CURLINFO_PRIVATE, out gcHandlePtr) == CURLcode.CURLE_OK &&
                                _activeOperations.TryGetValue(gcHandlePtr, out completedOperation))
                            {
                                DeactivateActiveRequest(multiHandle, completedOperation.Easy, gcHandlePtr, completedOperation.CancellationRegistration);
                                FinishRequest(completedOperation.Easy, message.result);
                            }
                        }

                        // Wait for more things to do.  Even with our cancellation mechanism, we specify a timeout so that
                        // just in case something goes wrong we can recover gracefully.  This timeout is relatively long.
                        // Note, though, that libcurl has its own internal timeout, which can be requested separately
                        // via curl_multi_timeout, but which is used implicitly by curl_multi_wait if it's shorter
                        // than the value we provide.
                        const int FailsafeTimeoutMilliseconds = 1000;
                        int numFds;
                        unsafe
                        {
                            Interop.libcurl.curl_waitfd extraFds = new Interop.libcurl.curl_waitfd {
                                fd = _wakeupRequestedPipeFd,
                                events = Interop.libcurl.CURL_WAIT_POLLIN,
                                revents = 0
                            };
                            ThrowIfCURLMError(Interop.libcurl.curl_multi_wait(multiHandle, &extraFds, 1, FailsafeTimeoutMilliseconds, out numFds));
                            if ((extraFds.revents & Interop.libcurl.CURL_WAIT_POLLIN) != 0)
                            {
                                // We woke up (at least in part) because a wake-up was requested.  Read the data out of the pipe 
                                // to clear it. It's possible but unlikely that there will be tons of extra data in the pipe, 
                                // more than we end up reading out here (it's unlikely because we only write a byte to the pipe when 
                                // transitioning from 0 to 1 incoming request or when cancellation is requested, and that would
                                // need to happen many times in a single iteration).  In that unlikely case, we'll simply loop 
                                // around again as normal and end up waking up spuriously from the next curl_multi_wait.  For now, 
                                // this is preferable to making additional syscalls to poll and read from the pipe.
                                const int ClearBufferSize = 4096; // some sufficiently large size to clear the pipe in any normal case
                                byte* clearBuf = stackalloc byte[ClearBufferSize];
                                while (Interop.CheckIo((long)Interop.libc.read(_wakeupRequestedPipeFd, clearBuf, (IntPtr)ClearBufferSize)));
                            }
                        }

                        // PERF NOTE: curl_multi_wait uses poll (assuming it's available), which is O(N) in terms of the number of fds 
                        // being waited on. If this ends up being a scalability bottleneck, we can look into using the curl_multi_socket_* 
                        // APIs, which would let us switch to using epoll by being notified when socket file descriptors are added or 
                        // removed and configuring the epoll context with EPOLL_CTL_ADD/DEL, which at the expense of a lot of additional
                        // complexity would let us turn the O(N) operation into an O(1) operation.  The additional complexity would come
                        // not only in the form of additional callbacks and managing the socket collection, but also in the form of timer
                        // management, which is necessary when using the curl_multi_socket_* APIs and which we avoid by using just
                        // curl_multi_wait/perform.
                    }
                }
                finally
                {
                    // If we got an unexpected exception, something very bad happened. We may have some 
                    // operations that we initiated but that weren't completed. Make sure to clean up any 
                    // such operations, failing them and releasing their resources.
                    if (_activeOperations.Count > 0)
                    {
                        Debug.Assert(!endingSuccessfully, "We should only have remaining operations if we got an unexpected exception");
                        foreach (KeyValuePair<IntPtr, ActiveRequest> pair in _activeOperations)
                        {
                            ActiveRequest failingOperation = pair.Value;
                            IntPtr failingOperationGcHandle = pair.Key;

                            DeactivateActiveRequest(multiHandle, failingOperation.Easy, failingOperationGcHandle, failingOperation.CancellationRegistration);

                            // Complete the operation's task and clean up any of its resources
                            failingOperation.Easy.FailRequest(CreateHttpRequestException());
                            failingOperation.Easy.Cleanup(); // no active processing remains, so cleanup
                        }

                        // Clear the table.
                        _activeOperations.Clear();
                    }

                    // Finally, dispose of the multi handle.
                    multiHandle.Dispose();
                }
            }
Example #4
            private void WorkerLoop()
            {
                Debug.Assert(!Monitor.IsEntered(_incomingRequests), "No locks should be held while invoking Process");
                Debug.Assert(_runningWorker != null && _runningWorker.Id == Task.CurrentId, "This is the worker, so it must be running");
                Debug.Assert(_wakeupRequestedPipeFd != 0, "Should have a valid pipe for wake ups");

                // Create the multi handle to use for this round of processing.  This one handle will be used
                // to service all easy requests currently available and all those that come in while
                // we're processing other requests.  Once the work quiesces and there are no more requests
                // to process, this multi handle will be released as the worker goes away.  The next
                // time a request arrives and a new worker is spun up, a new multi handle will be created.
                SafeCurlMultiHandle multiHandle = Interop.libcurl.curl_multi_init();

                if (multiHandle.IsInvalid)
                {
                    throw CreateHttpRequestException();
                }

                // Clear our active operations table.  This should already be clear, either because
                // all previous operations completed without unexpected exception, or in the case of an
                // unexpected exception we should have cleaned up gracefully anyway.  But just in case...
                Debug.Assert(_activeOperations.Count == 0, "We shouldn't have any active operations when starting processing.");
                _activeOperations.Clear();

                bool endingSuccessfully = false;

                try
                {
                    // Continue processing as long as there are any active operations
                    while (true)
                    {
                        // First handle any requests in the incoming requests queue.
                        while (true)
                        {
                            IncomingRequest request;
                            lock (_incomingRequests)
                            {
                                if (_incomingRequests.Count == 0)
                                {
                                    break;
                                }
                                request = _incomingRequests.Dequeue();
                            }
                            HandleIncomingRequest(multiHandle, request);
                        }

                        // If we have no active operations, we're done.
                        if (_activeOperations.Count == 0)
                        {
                            endingSuccessfully = true;
                            return;
                        }

                        // We have one or more active operations. Run any work that needs to be run.
                        int running_handles;
                        ThrowIfCURLMError(Interop.libcurl.curl_multi_perform(multiHandle, out running_handles));

                        // Complete and remove any requests that have finished being processed.
                        int    pendingMessages;
                        IntPtr messagePtr;
                        while ((messagePtr = Interop.libcurl.curl_multi_info_read(multiHandle, out pendingMessages)) != IntPtr.Zero)
                        {
                            Interop.libcurl.CURLMsg message = Marshal.PtrToStructure <Interop.libcurl.CURLMsg>(messagePtr);
                            Debug.Assert(message.msg == Interop.libcurl.CURLMSG.CURLMSG_DONE, "CURLMSG_DONE is supposed to be the only message type");

                            IntPtr        gcHandlePtr;
                            ActiveRequest completedOperation;
                            if (message.msg == Interop.libcurl.CURLMSG.CURLMSG_DONE)
                            {
                                int getInfoResult = Interop.libcurl.curl_easy_getinfo(message.easy_handle, CURLINFO.CURLINFO_PRIVATE, out gcHandlePtr);
                                Debug.Assert(getInfoResult == CURLcode.CURLE_OK, "Failed to get info on a completing easy handle");
                                if (getInfoResult == CURLcode.CURLE_OK)
                                {
                                    bool gotActiveOp = _activeOperations.TryGetValue(gcHandlePtr, out completedOperation);
                                    Debug.Assert(gotActiveOp, "Expected to find GCHandle ptr in active operations table");
                                    if (gotActiveOp)
                                    {
                                        DeactivateActiveRequest(multiHandle, completedOperation.Easy, gcHandlePtr, completedOperation.CancellationRegistration);
                                        FinishRequest(completedOperation.Easy, message.result);
                                    }
                                }
                            }
                        }

                        // Wait for more things to do.  Even with our cancellation mechanism, we specify a timeout so that
                        // just in case something goes wrong we can recover gracefully.  This timeout is relatively long.
                        // Note, though, that libcurl has its own internal timeout, which can be requested separately
                        // via curl_multi_timeout, but which is used implicitly by curl_multi_wait if it's shorter
                        // than the value we provide.
                        const int FailsafeTimeoutMilliseconds = 1000;
                        int       numFds;
                        unsafe
                        {
                            Interop.libcurl.curl_waitfd extraFds = new Interop.libcurl.curl_waitfd {
                                fd      = _wakeupRequestedPipeFd,
                                events  = Interop.libcurl.CURL_WAIT_POLLIN,
                                revents = 0
                            };
                            ThrowIfCURLMError(Interop.libcurl.curl_multi_wait(multiHandle, &extraFds, 1, FailsafeTimeoutMilliseconds, out numFds));
                            if ((extraFds.revents & Interop.libcurl.CURL_WAIT_POLLIN) != 0)
                            {
                                // We woke up (at least in part) because a wake-up was requested.
                                // Read the data out of the pipe to clear it.
                                Debug.Assert(numFds >= 1, "numFds should have been at least one, as the extraFd was set");
                                VerboseTrace("curl_multi_wait wake-up notification");
                                ReadFromWakeupPipeWhenKnownToContainData();
                            }
                            VerboseTraceIf(numFds == 0, "curl_multi_wait timeout");
                        }

                        // PERF NOTE: curl_multi_wait uses poll (assuming it's available), which is O(N) in terms of the number of fds
                        // being waited on. If this ends up being a scalability bottleneck, we can look into using the curl_multi_socket_*
                        // APIs, which would let us switch to using epoll by being notified when socket file descriptors are added or
                        // removed and configuring the epoll context with EPOLL_CTL_ADD/DEL, which at the expense of a lot of additional
                        // complexity would let us turn the O(N) operation into an O(1) operation.  The additional complexity would come
                        // not only in the form of additional callbacks and managing the socket collection, but also in the form of timer
                        // management, which is necessary when using the curl_multi_socket_* APIs and which we avoid by using just
                        // curl_multi_wait/perform.
                    }
                }
                finally
                {
                    // If we got an unexpected exception, something very bad happened. We may have some
                    // operations that we initiated but that weren't completed. Make sure to clean up any
                    // such operations, failing them and releasing their resources.
                    if (_activeOperations.Count > 0)
                    {
                        Debug.Assert(!endingSuccessfully, "We should only have remaining operations if we got an unexpected exception");
                        foreach (KeyValuePair <IntPtr, ActiveRequest> pair in _activeOperations)
                        {
                            ActiveRequest failingOperation         = pair.Value;
                            IntPtr        failingOperationGcHandle = pair.Key;

                            DeactivateActiveRequest(multiHandle, failingOperation.Easy, failingOperationGcHandle, failingOperation.CancellationRegistration);

                            // Complete the operation's task and clean up any of its resources
                            failingOperation.Easy.FailRequest(CreateHttpRequestException());
                            failingOperation.Easy.Cleanup(); // no active processing remains, so cleanup
                        }

                        // Clear the table.
                        _activeOperations.Clear();
                    }

                    // Finally, dispose of the multi handle.
                    multiHandle.Dispose();
                }
            }
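
All of the variants above rely on the same self-pipe wake-up pattern: the side that queues or cancels a request writes a byte to a pipe whose read end (_wakeupRequestedPipeFd) is handed to curl_multi_wait as an extra fd, and the worker drains that pipe once it wakes so the next wait doesn't fire spuriously. Below is a minimal, self-contained sketch of just that pattern, independent of the handler code above; the libc P/Invoke declarations and the WakeupPipeSketch/Demo names are illustrative assumptions, not part of the original source.

using System;
using System.Runtime.InteropServices;

internal static class WakeupPipeSketch
{
    // Illustrative libc imports (assumed Linux/glibc signatures; size_t/ssize_t marshaled as IntPtr,
    // matching the (IntPtr)ClearBufferSize usage in the code above).
    [DllImport("libc", SetLastError = true)]
    private static extern int pipe(int[] fds);

    [DllImport("libc", SetLastError = true)]
    private static extern unsafe IntPtr write(int fd, byte* buf, IntPtr count);

    [DllImport("libc", SetLastError = true)]
    private static extern unsafe IntPtr read(int fd, byte* buf, IntPtr count);

    public static unsafe void Demo()
    {
        // fds[0] is the read end (the analogue of _wakeupRequestedPipeFd); fds[1] is the write end
        // used to request a wake-up.
        int[] fds = new int[2];
        if (pipe(fds) != 0)
        {
            throw new InvalidOperationException("pipe() failed");
        }

        // Requesting side: write a single byte, e.g. when the incoming-request queue transitions
        // from empty to non-empty or when a cancellation needs to be noticed.
        byte wakeByte = 1;
        write(fds[1], &wakeByte, (IntPtr)1);

        // Worker side: once curl_multi_wait reports CURL_WAIT_POLLIN on the read end, drain the
        // pipe so the next wait doesn't wake up spuriously.  A short read means the pipe is empty;
        // with a single byte written above, this loop runs exactly once.
        const int ClearBufferSize = 4096;
        byte* clearBuf = stackalloc byte[ClearBufferSize];
        IntPtr bytesRead;
        do
        {
            bytesRead = read(fds[0], clearBuf, (IntPtr)ClearBufferSize);
        }
        while ((long)bytesRead == ClearBufferSize);

        Console.WriteLine($"Last read returned {(long)bytesRead} byte(s); wake-up pipe drained.");
    }
}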