Example #1
        public static Checksum Create<T>(WellKnownSynchronizationKind kind, T value, ISerializerService serializer)
        {
            using var stream = SerializableBytes.CreateWritableStream();

            using (var objectWriter = new ObjectWriter(stream, leaveOpen: true))
            {
                objectWriter.WriteInt32((int)kind);
                serializer.Serialize(value, objectWriter, CancellationToken.None);
            }

            stream.Position = 0;
            return Create(stream);
        }
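Examples #1-#6 all follow the same pattern: write a kind marker and a payload into a SerializableBytes writable stream through an object writer, rewind the stream, and hash its contents into a Checksum. Below is a minimal caller-side sketch of that pattern, assuming access to the same internal Roslyn helpers shown in these examples (SerializableBytes, ObjectWriter, Checksum), that ObjectWriter exposes WriteString the way StreamObjectWriter does in Examples #5 and #6, and that the Create(stream) overload used above is reachable as Checksum.Create; the helper name CreateStringChecksum is purely illustrative.

internal static class ChecksumSketch
{
    // Hypothetical helper mirroring Example #1: write a kind marker plus a payload,
    // rewind the stream, then hash it into a Checksum.
    public static Checksum CreateStringChecksum(WellKnownSynchronizationKind kind, string value)
    {
        using var stream = SerializableBytes.CreateWritableStream();

        // leaveOpen: true so disposing the writer flushes it without closing the stream.
        using (var writer = new ObjectWriter(stream, leaveOpen: true))
        {
            writer.WriteInt32((int)kind);
            writer.WriteString(value);
        }

        // Rewind before hashing, as Examples #1 and #2 do.
        stream.Position = 0;
        return Checksum.Create(stream);
    }
}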
Example #2
        public static Checksum Create(WellKnownSynchronizationKind kind, IObjectWritable @object)
        {
            using var stream = SerializableBytes.CreateWritableStream();

            using (var objectWriter = new ObjectWriter(stream, leaveOpen: true))
            {
                objectWriter.WriteInt32((int)kind);
                @object.WriteTo(objectWriter);
            }

            stream.Position = 0;
            return Create(stream);
        }
Example #3
        public static Checksum Create(WellKnownSynchronizationKind kind, IEnumerable<Checksum> checksums)
        {
            using var stream = SerializableBytes.CreateWritableStream();
            using var writer = new ObjectWriter(stream);
            writer.WriteInt32((int)kind);

            foreach (var checksum in checksums)
            {
                checksum.WriteTo(writer);
            }

            return Create(stream);
        }
Example #4
        public static Checksum Create(WellKnownSynchronizationKind kind, ImmutableArray<byte> bytes)
        {
            using var stream = SerializableBytes.CreateWritableStream();
            using var writer = new ObjectWriter(stream);
            writer.WriteInt32((int)kind);

            for (var i = 0; i < bytes.Length; i++)
            {
                writer.WriteByte(bytes[i]);
            }

            return Create(stream);
        }
Example #5
        public static Checksum Create(string kind, ImmutableArray<byte> bytes)
        {
            using (var stream = SerializableBytes.CreateWritableStream())
            using (var writer = new StreamObjectWriter(stream))
            {
                writer.WriteString(kind);

                for (var i = 0; i < bytes.Length; i++)
                {
                    writer.WriteByte(bytes[i]);
                }

                return Create(stream);
            }
        }
Example #6
        public static Checksum Create(string kind, IEnumerable<Checksum> checksums)
        {
            using (var stream = SerializableBytes.CreateWritableStream())
            using (var writer = new StreamObjectWriter(stream))
            {
                writer.WriteString(kind);

                foreach (var checksum in checksums)
                {
                    checksum.WriteTo(writer);
                }

                return Create(stream);
            }
        }
Example #7
        public static MetadataOnlyImage Create(Workspace workspace, ITemporaryStorageService service, Compilation compilation, CancellationToken cancellationToken)
        {
            cancellationToken.ThrowIfCancellationRequested();

            try
            {
                workspace.LogTestMessage($"Beginning to create a skeleton assembly for {compilation.AssemblyName}...");

                using (Logger.LogBlock(FunctionId.Workspace_SkeletonAssembly_EmitMetadataOnlyImage, cancellationToken))
                {
                    // TODO: make it use SerializableBytes.WritableStream rather than MemoryStream so that
                    //       we don't allocate anything for the skeleton assembly.
                    using (var stream = SerializableBytes.CreateWritableStream())
                    {
                        // note: cloning the compilation so we don't retain all the generated symbols after it is emitted.
                        // REVIEW: does cloning also clone the p2p reference compilations?
                        var emitResult = compilation.Clone().Emit(stream, options: s_emitOptions, cancellationToken: cancellationToken);

                        if (emitResult.Success)
                        {
                            workspace.LogTestMessage($"Successfully emitted a skeleton assembly for {compilation.AssemblyName}");
                            var storage = service.CreateTemporaryStreamStorage(cancellationToken);

                            stream.Position = 0;
                            storage.WriteStream(stream, cancellationToken);

                            return new MetadataOnlyImage(storage, compilation.AssemblyName);
                        }
                        else
                        {
                            workspace.LogTestMessage($"Failed to create a skeleton assembly for {compilation.AssemblyName}:");

                            foreach (var diagnostic in emitResult.Diagnostics)
                            {
                                workspace.LogTestMessage("  " + diagnostic.GetMessage());
                            }
                        }
                    }
                }
            }
            finally
            {
                workspace.LogTestMessage($"Done trying to create a skeleton assembly for {compilation.AssemblyName}");
            }

            return Empty;
        }
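Example #7 emits a metadata-only "skeleton" image of a compilation into the writable stream before persisting it to temporary storage. The following sketch shows just the emit step using Roslyn's public EmitOptions API, assuming s_emitOptions in the example is configured for metadata-only output; the class name, method name, and the plain MemoryStream are illustrative simplifications.

using System.IO;
using System.Threading;
using Microsoft.CodeAnalysis;
using Microsoft.CodeAnalysis.Emit;

internal static class SkeletonEmitSketch
{
    // Sketch of the emit step from Example #7: produce a metadata-only ("skeleton") image.
    public static MemoryStream EmitSkeleton(Compilation compilation, CancellationToken cancellationToken)
    {
        var options = new EmitOptions(metadataOnly: true);
        var stream = new MemoryStream();

        // Clone so the original compilation does not retain the symbols generated during emit.
        var result = compilation.Clone().Emit(stream, options: options, cancellationToken: cancellationToken);
        if (!result.Success)
        {
            stream.Dispose();
            return null;
        }

        stream.Position = 0;
        return stream;
    }
}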
Example #8
        private async Task SaveGraphAsync(ProjectDependencyGraph graph, CancellationToken cancellationToken)
        {
            Contract.ThrowIfFalse(graph.Solution.BranchId == graph.Solution.Workspace.PrimaryBranchId);

            using (var stream = SerializableBytes.CreateWritableStream())
            using (var writer = new ObjectWriter(stream))
            {
                graph.WriteTo(writer);
                stream.Position = 0;

                var persistenceService = WorkspaceService.GetService<IPersistentStorageService>(graph.Solution.Workspace);
                using (var storage = persistenceService.GetStorage(graph.Solution))
                {
                    await storage.WriteStreamAsync(PersistenceName, stream, cancellationToken).ConfigureAwait(false);
                }
            }
        }
Example #9
        /// <summary>
        /// Load a text and a version of the document in the workspace.
        /// </summary>
        /// <exception cref="IOException"></exception>
        public override async Task<TextAndVersion> LoadTextAndVersionAsync(Workspace workspace, DocumentId documentId, CancellationToken cancellationToken)
        {
            DateTime prevLastWriteTime = FileUtilities.GetFileTimeStamp(_path);

            TextAndVersion textAndVersion;

            using (var stream = FileUtilities.OpenAsyncRead(_path))
            {
                System.Diagnostics.Debug.Assert(stream.IsAsync);
                var version = VersionStamp.Create(prevLastWriteTime);

                Contract.Requires(stream.Position == 0);

                // we do this so that we read from the file asynchronously. this should also allocate less in the IDE case,
                // but probably not in the command-line case, which doesn't use more sophisticated services.
                using (var readStream = await SerializableBytes.CreateReadableStreamAsync(stream, cancellationToken: cancellationToken).ConfigureAwait(false))
                {
                    var text = CreateText(readStream, workspace);
                    textAndVersion = TextAndVersion.Create(text, version, _path);
                }
            }

            // this has the potential to return corrupted text if someone changed the file while we were reading it.
            // previously, we attempted to detect such a case and return an empty string with a workspace-failed event,
            // but that is no better (and possibly worse) than returning what we have read so far.
            //
            // so I am letting it return what we have read so far, and hopefully a file change event will let us re-read this file.
            // (but again, there is still a chance the file change event fires even before the write has finished, which would
            //    leave us in a corrupted state)
            DateTime newLastWriteTime = FileUtilities.GetFileTimeStamp(_path);

            if (!newLastWriteTime.Equals(prevLastWriteTime))
            {
                // TODO: remove this once we know how often this can happen.
                //       I am leaving this here for now for diagnostic purpose.
                var message = string.Format(WorkspacesResources.FileWasExternallyModified, _path);
                workspace.OnWorkspaceFailed(new DocumentDiagnostic(WorkspaceDiagnosticKind.Failure, message, documentId));
            }

            return textAndVersion;
        }
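The comments in Example #9 describe the torn-read problem: if the file is modified while it is being read, the returned text may be corrupted, so the loader compares the file timestamp before and after the read. Below is a standalone sketch of that check using only standard System.IO APIs; the class name, method name, and return shape are illustrative.

using System.IO;
using System.Threading.Tasks;

internal static class StalenessCheckSketch
{
    // Sketch of the timestamp-compare pattern from Example #9: remember the write time,
    // read the file, then detect whether it was modified while we were reading.
    public static async Task<(string text, bool possiblyStale)> ReadWithStalenessCheckAsync(string path)
    {
        var before = File.GetLastWriteTimeUtc(path);

        string text;
        using (var stream = new FileStream(path, FileMode.Open, FileAccess.Read, FileShare.ReadWrite, bufferSize: 4096, useAsync: true))
        using (var reader = new StreamReader(stream))
        {
            text = await reader.ReadToEndAsync().ConfigureAwait(false);
        }

        // If the timestamp changed during the read, the text may reflect a partially written file.
        var after = File.GetLastWriteTimeUtc(path);
        return (text, !after.Equals(before));
    }
}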
Example #10
        public override async Task<TextAndVersion> LoadTextAndVersionAsync(Workspace workspace, DocumentId documentId, CancellationToken cancellationToken)
        {
            ValidateFileLength(workspace, Path);

            var prevLastWriteTime = FileUtilities.GetFileTimeStamp(Path);

            TextAndVersion textAndVersion;

            // In many .NET Framework versions (specifically the 4.5.* series, but probably much earlier
            // and also later) there is this particularly interesting bit in FileStream.BeginReadAsync:
            //
            //     // [ed: full comment clipped for brevity]
            //     //
            //     // If we did a sync read to fill the buffer, we could avoid the
            //     // problem, and any async read less than 64K gets turned into a
            //     // synchronous read by NT anyways...
            //     if (numBytes < _bufferSize)
            //     {
            //         if (_buffer == null) _buffer = new byte[_bufferSize];
            //         IAsyncResult bufferRead = BeginReadCore(_buffer, 0, _bufferSize, null, null, 0);
            //         _readLen = EndRead(bufferRead);
            //
            // In English, this means that if you do an asynchronous read of fewer than _bufferSize bytes,
            // this is implemented by the framework by starting an asynchronous read, and then
            // blocking your thread until that read is completed. The comment implies this is "fine"
            // because the asynchronous read will actually be synchronous and thus EndRead won't do
            // any blocking -- it'll be an effective no-op. In theory, everything is fine here.
            //
            // In reality, this can end very poorly. That read in fact can be asynchronous, which means the
            // EndRead will enter a wait and block the thread. If we are running that call to ReadAsync on a
            // thread pool thread that completed a previous piece of IO, it means there has to be another
            // thread available to service the completion of that request in order for our thread to make
            // progress. Why is this worse than the claim about the operating system turning an
            // asynchronous read into a synchronous one? If the underlying native ReadFile completes
            // synchronously, that would mean just our thread is being blocked, and will be unblocked once
            // the kernel gets done with our work. In this case, if the OS does do the read asynchronously
            // we are now dependent on another thread being available to unblock us.
            //
            // So how does this manifest itself? We have seen dumps from customers reporting hangs where
            // we have over a hundred thread pool threads all blocked on EndRead() calls as we read this stream.
            // In these cases, the user had just completed a build that had a bunch of XAML files, and
            // this resulted in many .g.i.cs files being written and updated. As a result, Roslyn is trying to
            // re-read them to provide a new compilation to the XAML language service that is asking for it.
            // Inspecting these dumps and sampling some of the threads made some notable discoveries:
            //
            // 1. When a read was blocked, it was the _last_ chunk of the file that we were reading. This
            //    leads me to believe that it isn't simply very slow IO
            //    (like a network drive), because in that case I'd expect to see some threads in different
            //    places than others.
            // 2. Some stacks were started by the continuation of a ReadAsync, and some were the first read
            //    of a file from the background parser. In the first case, all of those threads were for files
            //    over 4K in size. The ones with the BackgroundParser still on the stack were for files
            //    less than 4K in size.
            // 3. The "time unresponsive" in seconds correlated with roughly the number of threads we had
            //    blocked, which makes me think we were impacted by the once-per-second hill climbing algorithm
            //    used by the thread pool.
            //
            // So what's my analysis? When the XAML language service updated all the files, we kicked off
            // background parses for all of them. If the file was over 4K the asynchronous read actually did
            // happen (see point #2), but we'd eventually block the thread pool reading the last chunk.
            // Point #1 confirms that it was always the last chunk. And in small file cases, we'd block on
            // the first chunk. But in either case, we'd be blocking off a thread pool thread until another
            // thread pool thread was available. Since we had enough requests going (over a hundred),
            // sometimes the user got unlucky and all the threads got blocked. At this point, the CLR
            // started slowly kicking off more threads, but each time it'd start a new thread rather than
            // starting work that would be needed to unblock a thread, it just handled an IO that resulted
            // in another file read hitting the end of the file and another thread would get blocked. The
            // CLR then must kick off another thread, rinse, repeat. Eventually it'll make progress once
            // there's no more pending IO requests, everything will complete, and life then continues.
            //
            // To work around this issue, we set bufferSize to 1, which means that all reads should bypass
            // this logic. This is tracked by https://github.com/dotnet/corefx/issues/6007, at least in
            // corefx. We also open the file for reading with FileShare mode read/write/delete so that
            // we do not lock this file.
            using (var stream = FileUtilities.RethrowExceptionsAsIOException(() => new FileStream(Path, FileMode.Open, FileAccess.Read, FileShare.ReadWrite | FileShare.Delete, bufferSize: 1, useAsync: true)))
            {
                var version = VersionStamp.Create(prevLastWriteTime);

                // we do this so that we read from the file asynchronously. this should also allocate less in the IDE case,
                // but probably not in the command-line case, which doesn't use more sophisticated services.
                using var readStream = await SerializableBytes.CreateReadableStreamAsync(stream, cancellationToken: cancellationToken).ConfigureAwait(false);

                var text = CreateText(readStream, workspace);
                textAndVersion = TextAndVersion.Create(text, version, Path);
            }

            // Check if the file was definitely modified and closed while we were reading. In this case, we know the read we got was
            // probably invalid, so throw an IOException which indicates to our caller that we should automatically attempt a re-read.
            // If the file hasn't been closed yet and there's another writer, we will rely on file change notifications to notify us
            // and reload the file.
            var newLastWriteTime = FileUtilities.GetFileTimeStamp(Path);

            if (!newLastWriteTime.Equals(prevLastWriteTime))
            {
                var message = string.Format(WorkspacesResources.File_was_externally_modified_colon_0, Path);
                throw new IOException(message);
            }

            return textAndVersion;
        }
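The long comment in Example #10 explains why small asynchronous reads can be silently turned into blocking reads by FileStream's internal buffering, and why the code opens the file with bufferSize: 1 and permissive sharing to avoid both the blocking and file locking. Below is a minimal sketch of opening a file in that mode using only standard System.IO; the class and method names are illustrative.

using System.IO;

internal static class UnbufferedAsyncReadSketch
{
    // Sketch of the workaround described in Example #10: bufferSize: 1 effectively bypasses
    // FileStream's internal buffer so ReadAsync is not turned into a blocking read, and
    // ReadWrite | Delete sharing avoids locking the file against other writers.
    public static FileStream OpenForRead(string path)
        => new FileStream(
            path,
            FileMode.Open,
            FileAccess.Read,
            FileShare.ReadWrite | FileShare.Delete,
            bufferSize: 1,
            useAsync: true);
}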
Example #11
        protected override Stream GetSourceStream(CancellationToken cancellationToken)
        {
            return SerializableBytes.CreateReadableStream(_xmlDocCommentBytes);
        }