/// <summary>
/// Synchronously loads the document text, retrying transient IO failures up to
/// <c>MaxRetries</c> times (sleeping <c>RetryDelay</c> between attempts) before
/// reporting a workspace failure and substituting an empty document.
/// </summary>
protected static TextAndVersion LoadTextSynchronously(TextLoader loader, DocumentId documentId, SolutionServices services, bool reportInvalidDataException, CancellationToken cancellationToken)
{
    for (var attempt = 1; ; attempt++)
    {
        try
        {
            return loader.LoadTextAndVersionSynchronously(services.Workspace, documentId, cancellationToken);
        }
        catch (OperationCanceledException)
        {
            // Cancellation is not a load failure; propagate it to the caller.
            throw;
        }
        catch (IOException e)
        {
            if (attempt > MaxRetries)
            {
                services.Workspace.OnWorkspaceFailed(new DocumentDiagnostic(WorkspaceDiagnosticKind.Failure, e.Message, documentId));
                return TextAndVersion.Create(SourceText.From(string.Empty, Encoding.UTF8), VersionStamp.Default, documentId.GetDebuggerDisplay());
            }

            // Otherwise fall through and retry after a short delay.
        }
        catch (InvalidDataException e)
        {
            // Invalid data is never retried.
            // TODO: Adjust this behavior in the future if we add support for non-text additional files.
            if (reportInvalidDataException)
            {
                services.Workspace.OnWorkspaceFailed(new DocumentDiagnostic(WorkspaceDiagnosticKind.Failure, e.Message, documentId));
            }

            return TextAndVersion.Create(SourceText.From(string.Empty, Encoding.UTF8), VersionStamp.Default, documentId.GetDebuggerDisplay());
        }

        // Wait a bit before the next attempt.
        Thread.Sleep(RetryDelay);
    }
}
/// <summary>
/// Load a text and a version of the document in the workspace.
/// </summary>
/// <exception cref="IOException">Thrown when the underlying file cannot be read.</exception>
public override async Task<TextAndVersion> LoadTextAndVersionAsync(Workspace workspace, DocumentId documentId, CancellationToken cancellationToken)
{
    // Capture the timestamp before reading so a concurrent write can be detected afterwards.
    var writeTimeBeforeRead = FileUtilities.GetFileTimeStamp(_path);

    TextAndVersion result;
    using (var fileStream = FileUtilities.OpenAsyncRead(_path))
    {
        var version = VersionStamp.Create(writeTimeBeforeRead);
        Contract.Requires(fileStream.Position == 0);

        // Copy into an in-memory readable stream so the file read itself is asynchronous;
        // this should also allocate less for the IDE case (but probably not for the
        // command-line case, which doesn't use more sophisticated services).
        using (var bufferedStream = await SerializableBytes.CreateReadableStreamAsync(fileStream, cancellationToken: cancellationToken).ConfigureAwait(false))
        {
            result = TextAndVersion.Create(CreateText(bufferedStream, workspace), version, _path);
        }
    }

    // This can return corrupted text if someone changed the file while we were reading it.
    // We previously attempted to detect that case and return an empty string with a
    // workspace-failed event, but that is no better (arguably worse) than returning what
    // we read so far. So we return what we have and rely on the file change event to
    // trigger a re-read. (There is still a window where the change event fires before the
    // write finishes, which can leave us in a corrupted state.)
    var writeTimeAfterRead = FileUtilities.GetFileTimeStamp(_path);
    if (!writeTimeAfterRead.Equals(writeTimeBeforeRead))
    {
        // TODO: remove once we know how often this can happen; kept for diagnostic purposes.
        var message = string.Format(WorkspacesResources.FileWasExternallyModified, _path);
        workspace.OnWorkspaceFailed(new DocumentDiagnostic(WorkspaceDiagnosticKind.Failure, message, documentId));
    }

    return result;
}
/// <summary>
/// Adds a new document with the given name and text to the specified project.
/// </summary>
/// <exception cref="ArgumentNullException">If any argument is null.</exception>
public Document AddDocument(ProjectId projectId, string name, SourceText text)
{
    // Guard clauses: all three arguments are required.
    if (projectId is null)
    {
        throw new ArgumentNullException(nameof(projectId));
    }

    if (name is null)
    {
        throw new ArgumentNullException(nameof(name));
    }

    if (text is null)
    {
        throw new ArgumentNullException(nameof(text));
    }

    // Create a fresh id and wrap the supplied text in a loader that snapshots it
    // with a new version stamp.
    var documentId = DocumentId.CreateNewId(projectId);
    var textLoader = TextLoader.From(TextAndVersion.Create(text, VersionStamp.Create()));
    return this.AddDocument(DocumentInfo.Create(documentId, name, loader: textLoader));
}
/// <summary>
/// Asynchronously loads the document text via the given loader. IO failures are
/// reported on the workspace and replaced with an empty document; cancellation
/// propagates to the caller.
/// </summary>
private static async Task<TextAndVersion> LoadTextAsync(TextLoader loader, DocumentId documentId, SolutionServices services, CancellationToken cancellationToken)
{
    try
    {
        // Suppress fail-fast so an exception escaping the loader is handled below
        // instead of tearing down the process.
        using (ExceptionHelpers.SuppressFailFast())
        {
            return await loader.LoadTextAndVersionAsync(services.Workspace, documentId, cancellationToken).ConfigureAwait(false);
        }
    }
    catch (OperationCanceledException)
    {
        // Cancellation is not a load failure; propagate it to the caller.
        throw;
    }
    catch (IOException e)
    {
        // Report the IO failure and fall back to an empty document.
        services.Workspace.OnWorkspaceFailed(new DocumentDiagnostic(WorkspaceDiagnosticKind.FileAccessFailure, e.Message, documentId));
        return TextAndVersion.Create(SourceText.From(string.Empty, Encoding.UTF8), VersionStamp.Default, documentId.GetDebuggerDisplay());
    }
}
/// <summary>
/// Adds a fake document (backed by the given text) to the workspace's first project,
/// creating a fake project first when the solution has none. Returns the workspace
/// to allow call chaining.
/// </summary>
public static AdhocWorkspace WithFakeDocument(this AdhocWorkspace workspace, string name, string filePath, string text)
{
    // Make sure there is at least one project to attach the document to.
    if (workspace.CurrentSolution.ProjectIds.Count == 0)
    {
        workspace.WithFakeProject("FakeProject", "FakeProjectAssembly");
    }

    var firstProjectId = workspace.CurrentSolution.ProjectIds[0];
    var loader = TextLoader.From(TextAndVersion.Create(version: VersionStamp.Default, text: SourceText.From(text)));

    workspace.AddDocument(DocumentInfo.Create(
        id: DocumentId.CreateNewId(firstProjectId),
        name: name,
        filePath: filePath,
        loader: loader));

    return workspace;
}
// Transitions an additional (non-source) document to the "open" state: associates its
// text buffer with the document, updates the current solution, and raises the
// AdditionalDocumentChanged event. Work is done under the serialization lock; the
// final RegisterText call happens outside it.
protected internal void OnAdditionalDocumentOpened(DocumentId documentId, SourceTextContainer textContainer, bool isCurrentContext = true)
{
    CheckAdditionalDocumentIsInCurrentSolution(documentId);
    CheckDocumentIsClosed(documentId);

    using (_serializationLock.DisposableWait())
    {
        var oldSolution = this.CurrentSolution;
        var oldDocument = oldSolution.GetAdditionalDocument(documentId);
        // NOTE(review): synchronous wait on the async text accessor; presumably safe
        // because we hold the serialization lock here — confirm.
        var oldText = oldDocument.GetTextAsync(CancellationToken.None).WaitAndGetResult(CancellationToken.None);

        // keep open document text alive by using PreserveIdentity
        var newText = textContainer.CurrentText;
        var currentSolution = oldSolution;

        if (oldText == newText || oldText.ContentEquals(newText))
        {
            // if the supplied text is the same as the previous text, then also use same version
            var version = oldDocument.GetTextVersionAsync(CancellationToken.None).WaitAndGetResult(CancellationToken.None);
            var newTextAndVersion = TextAndVersion.Create(newText, version, oldDocument.FilePath);
            currentSolution = oldSolution.WithAdditionalDocumentText(documentId, newTextAndVersion, PreservationMode.PreserveIdentity);
        }
        else
        {
            currentSolution = oldSolution.WithAdditionalDocumentText(documentId, newText, PreservationMode.PreserveIdentity);
        }

        var newSolution = this.SetCurrentSolution(currentSolution);

        // Hook up text-change notifications so subsequent buffer edits flow back
        // into the workspace.
        SignupForTextChanges(documentId, textContainer, isCurrentContext, (w, id, text, mode) => w.OnAdditionalDocumentTextChanged(id, text, mode));

        this.RaiseWorkspaceChangedEventAsync(WorkspaceChangeKind.AdditionalDocumentChanged, oldSolution, newSolution, documentId: documentId);
    }

    // register outside of lock since it may call user code.
    this.RegisterText(textContainer);
}
/// <summary>
/// Loads the document text from disk along with a version stamp derived from the
/// file's last-write time.
/// </summary>
/// <exception cref="IOException">Thrown when the file cannot be read.</exception>
public override async Task<TextAndVersion> LoadTextAndVersionAsync(Workspace workspace, DocumentId documentId, CancellationToken cancellationToken)
{
    // Record the timestamp before reading so a concurrent write can be detected afterwards.
    var timestampBeforeRead = FileUtilities.GetFileTimeStamp(this.path);

    // Open for reading with FileShare mode read/write/delete so that we do not lock the
    // file. Allowing other threads/processes to write or delete the file is essential for
    // scenarios such as Rename refactoring, where the File.Replace API is invoked to
    // update the modified file.
    TextAndVersion textAndVersion;
    using (var fileStream = FileUtilities.OpenRead(this.path))
    {
        var version = VersionStamp.Create(timestampBeforeRead);
        var bufferedStream = await this.ReadStreamAsync(fileStream, cancellationToken: cancellationToken).ConfigureAwait(false);
        textAndVersion = TextAndVersion.Create(CreateText(bufferedStream, workspace), version, path);
    }

    // This can return corrupted text if someone changed the file while we were reading.
    // We previously attempted to detect that case and return an empty string with a
    // workspace-failed event, but that is no better (arguably worse) than returning what
    // we read so far. So we return what we have and rely on the file change event to
    // trigger a re-read. (There is still a window where the change event fires before
    // the write finishes, which can leave us in a corrupted state.)
    var timestampAfterRead = FileUtilities.GetFileTimeStamp(this.path);
    if (!timestampAfterRead.Equals(timestampBeforeRead))
    {
        // TODO: remove once we know how often this can happen; kept for diagnostic purposes.
        var message = string.Format(WorkspacesResources.FileWasExternallyModified, this.path);
        workspace.OnWorkspaceFailed(new DocumentDiagnostic(WorkspaceDiagnosticKind.FileAccessFailure, message, documentId));
    }

    return textAndVersion;
}
/// <summary>
/// Adds a document to the workspace.
/// </summary>
/// <param name="projectId">The project the new document is added to. Must not be null.</param>
/// <param name="name">The name of the new document. Must not be null.</param>
/// <param name="text">The initial text of the document. Must not be null.</param>
/// <returns>The id of the newly created document.</returns>
/// <exception cref="ArgumentNullException">If any argument is null.</exception>
public DocumentId AddDocument(ProjectId projectId, string name, string text)
{
    // Use nameof instead of string literals for the parameter names so they stay
    // correct under rename refactorings (consistent with the other AddDocument
    // overload in this file).
    if (projectId == null)
    {
        throw new ArgumentNullException(nameof(projectId));
    }

    if (name == null)
    {
        throw new ArgumentNullException(nameof(name));
    }

    if (text == null)
    {
        throw new ArgumentNullException(nameof(text));
    }

    var id = DocumentId.CreateNewId(projectId);
    this.AddDocument(
        DocumentInfo.Create(id, name, loader: TextLoader.From(TextAndVersion.Create(SourceText.From(text), VersionStamp.Create()))));
    return id;
}
// Loads the document text from disk, then throws IOException if the file's timestamp
// changed during the read so the caller can automatically retry. The unusual
// bufferSize:1 FileStream configuration works around a thread-pool starvation hazard
// in FileStream.BeginReadAsync, explained at length below.
public override async Task<TextAndVersion> LoadTextAndVersionAsync(Workspace workspace, DocumentId documentId, CancellationToken cancellationToken)
{
    ValidateFileLength(workspace, Path);

    // Capture the timestamp before reading so a concurrent write can be detected afterwards.
    var prevLastWriteTime = FileUtilities.GetFileTimeStamp(Path);

    TextAndVersion textAndVersion;

    // In many .NET Framework versions (specifically the 4.5.* series, but probably much
    // earlier and also later) there is this particularly interesting bit in
    // FileStream.BeginReadAsync:
    //
    //     // [ed: full comment clipped for brevity]
    //     //
    //     // If we did a sync read to fill the buffer, we could avoid the
    //     // problem, and any async read less than 64K gets turned into a
    //     // synchronous read by NT anyways...
    //     if (numBytes < _bufferSize)
    //     {
    //         if (_buffer == null) _buffer = new byte[_bufferSize];
    //         IAsyncResult bufferRead = BeginReadCore(_buffer, 0, _bufferSize, null, null, 0);
    //         _readLen = EndRead(bufferRead);
    //
    // In English, this means that if you do an asynchronous read for smaller than
    // _bufferSize, this is implemented by the framework by starting an asynchronous read,
    // and then blocking your thread until that read is completed. The comment implies this
    // is "fine" because the asynchronous read will actually be synchronous and thus
    // EndRead won't do any blocking -- it'll be an effective no-op. In theory, everything
    // is fine here.
    //
    // In reality, this can end very poorly. That read in fact can be asynchronous, which
    // means the EndRead will enter a wait and block the thread. If we are running that
    // call to ReadAsync on a thread pool thread that completed a previous piece of IO, it
    // means there has to be another thread available to service the completion of that
    // request in order for our thread to make progress. Why is this worse than the claim
    // about the operating system turning an asynchronous read into a synchronous one? If
    // the underlying native ReadFile completes synchronously, that would mean just our
    // thread is being blocked, and will be unblocked once the kernel gets done with our
    // work. In this case, if the OS does do the read asynchronously we are now dependent
    // on another thread being available to unblock us.
    //
    // So how does this manifest itself? We have seen dumps from customers reporting hangs
    // where we have over a hundred thread pool threads all blocked on EndRead() calls as
    // we read this stream. In these cases, the user had just completed a build that had a
    // bunch of XAML files, and this resulted in many .g.i.cs files being written and
    // updated. As a result, Roslyn is trying to re-read them to provide a new compilation
    // to the XAML language service that is asking for it. Inspecting these dumps and
    // sampling some of the threads made some notable discoveries:
    //
    // 1. When there was a read blocked, it was the _last_ chunk that we were reading in
    //    the file that we were reading. This leads me to believe that it isn't simply
    //    very slow IO (like a network drive), because in that case I'd expect to see some
    //    threads in different places than others.
    // 2. Some stacks were starting by the continuation of a ReadAsync, and some were the
    //    first read of a file from the background parser. In the first case, all of those
    //    threads were reading files over 4K in size. The ones with the BackgroundParser
    //    still on the stack were files less than 4K in size.
    // 3. The "time unresponsive" in seconds correlated with roughly the number of threads
    //    we had blocked, which makes me think we were impacted by the once-per-second
    //    hill climbing algorithm used by the thread pool.
    //
    // So what's my analysis? When the XAML language service updated all the files, we
    // kicked off background parses for all of them. If the file was over 4K the
    // asynchronous read actually did happen (see point #2), but we'd eventually block the
    // thread pool reading the last chunk. Point #1 confirms that it was always the last
    // chunk. And in small file cases, we'd block on the first chunk. But in either case,
    // we'd be blocking off a thread pool thread until another thread pool thread was
    // available. Since we had enough requests going (over a hundred), sometimes the user
    // got unlucky and all the threads got blocked. At this point, the CLR started slowly
    // kicking off more threads, but each time it'd start a new thread rather than
    // starting work that would be needed to unblock a thread, it just handled an IO that
    // resulted in another file read hitting the end of the file and another thread would
    // get blocked. The CLR then must kick off another thread, rinse, repeat. Eventually
    // it'll make progress once there's no more pending IO requests, everything will
    // complete, and life then continues.
    //
    // To work around this issue, we set bufferSize to 1, which means that all reads
    // should bypass this logic. This is tracked by
    // https://github.com/dotnet/corefx/issues/6007, at least in corefx. We also open the
    // file for reading with FileShare mode read/write/delete so that we do not lock this
    // file.
    using (var stream = FileUtilities.RethrowExceptionsAsIOException(() => new FileStream(Path, FileMode.Open, FileAccess.Read, FileShare.ReadWrite | FileShare.Delete, bufferSize: 1, useAsync: true)))
    {
        var version = VersionStamp.Create(prevLastWriteTime);

        // we do this so that we asynchronously read from file. and this should allocate
        // less for IDE case. but probably not for command line case where it doesn't use
        // more sophisticated services.
        using var readStream = await SerializableBytes.CreateReadableStreamAsync(stream, cancellationToken: cancellationToken).ConfigureAwait(false);
        var text = CreateText(readStream, workspace);
        textAndVersion = TextAndVersion.Create(text, version, Path);
    }

    // Check if the file was definitely modified and closed while we were reading. In this
    // case, we know the read we got was probably invalid, so throw an IOException which
    // indicates to our caller that we should automatically attempt a re-read. If the file
    // hasn't been closed yet and there's another writer, we will rely on file change
    // notifications to notify us and reload the file.
    var newLastWriteTime = FileUtilities.GetFileTimeStamp(Path);
    if (!newLastWriteTime.Equals(prevLastWriteTime))
    {
        var message = string.Format(WorkspacesResources.File_was_externally_modified_colon_0, Path);
        throw new IOException(message);
    }

    return (textAndVersion);
}
/// <summary>
/// Returns the container's current text snapshot paired with this loader's fixed
/// version and file path. Completes synchronously; no file IO is involved.
/// </summary>
public override Task<TextAndVersion> LoadTextAndVersionAsync(Workspace workspace, DocumentId documentId, CancellationToken cancellationToken)
{
    var snapshot = TextAndVersion.Create(this.container.CurrentText, this.version, this.filePath);
    return Task.FromResult(snapshot);
}
// Transitions a document to the "open" state: records it in the per-project
// open-document map, associates its text buffer via a TextTracker, updates the current
// solution, and raises the DocumentChanged/DocumentOpened events. Work is done under
// the serialization lock (with a nested write lock for the map); the final
// RegisterText call happens outside it.
protected internal void OnDocumentOpened(DocumentId documentId, SourceTextContainer textContainer, bool isCurrentContext = true)
{
    CheckDocumentIsInCurrentSolution(documentId);
    CheckDocumentIsClosed(documentId);

    using (this.serializationLock.DisposableWait())
    {
        var oldSolution = this.CurrentSolution;
        var oldDocument = oldSolution.GetDocument(documentId);
        // NOTE(review): synchronous wait on the async text accessor; presumably safe
        // because we hold the serialization lock here — confirm.
        var oldText = oldDocument.GetTextAsync(CancellationToken.None).WaitAndGetResult(CancellationToken.None);

        // Record the document as open for its project; the state write lock guards
        // the projectToOpenDocumentsMap.
        using (this.stateLock.DisposableWrite())
        {
            var openDocuments = GetProjectOpenDocuments_NoLock(documentId.ProjectId);
            if (openDocuments != null)
            {
                openDocuments.Add(documentId);
            }
            else
            {
                this.projectToOpenDocumentsMap.Add(documentId.ProjectId, new HashSet<DocumentId> { documentId });
            }
        }

        // keep open document text alive by using PreserveIdentity
        var newText = textContainer.CurrentText;
        var currentSolution = oldSolution;

        if (oldText != newText)
        {
            if (oldText.ContentEquals(newText))
            {
                // if the supplied text is the same as the previous text, then add with same version
                var version = oldDocument.GetTextVersionAsync(CancellationToken.None).WaitAndGetResult(CancellationToken.None);
                var newTextAndVersion = TextAndVersion.Create(newText, version, oldDocument.FilePath);
                currentSolution = oldSolution.WithDocumentText(documentId, newTextAndVersion, PreservationMode.PreserveIdentity);
            }
            else
            {
                currentSolution = oldSolution.WithDocumentText(documentId, newText, PreservationMode.PreserveIdentity);
            }
        }

        var newSolution = this.SetCurrentSolution(currentSolution);

        // Connect a tracker so subsequent edits to the buffer flow back into the workspace.
        var tracker = new TextTracker(this, documentId, textContainer);
        this.textTrackers.Add(documentId, tracker);
        this.AddTextToDocumentIdMapping_NoLock(textContainer, documentId, isCurrentContext);
        tracker.Connect();

        var newDoc = newSolution.GetDocument(documentId);
        this.OnDocumentTextChanged(newDoc);

        this.RaiseWorkspaceChangedEventAsync(WorkspaceChangeKind.DocumentChanged, oldSolution, newSolution, documentId: documentId);

        // Fire-and-forget: the returned task is intentionally not awaited.
        var tsk = this.RaiseDocumentOpenedEventAsync(newDoc); // don't await this
    }

    // register outside of lock since it may call user code.
    this.RegisterText(textContainer);
}
// Creates the document state for a source-generated document. When the language's
// syntax tree factory supports recoverable trees, both the text and the tree are
// replaced with recoverable (memory-relinquishing) representations so their object
// identities stay consistent.
public static SourceGeneratedDocumentState Create(
    string hintName,
    SourceText generatedSourceText,
    SyntaxTree generatedSyntaxTree,
    DocumentId documentId,
    ISourceGenerator sourceGenerator,
    HostLanguageServices languageServices,
    SolutionServices solutionServices,
    CancellationToken cancellationToken)
{
    var options = generatedSyntaxTree.Options;
    var filePath = generatedSyntaxTree.FilePath;

    var textAndVersion = TextAndVersion.Create(generatedSourceText, VersionStamp.Create());
    ValueSource<TextAndVersion> textSource = new ConstantValueSource<TextAndVersion>(textAndVersion);

    var root = generatedSyntaxTree.GetRoot(cancellationToken);
    Contract.ThrowIfNull(
        languageServices.SyntaxTreeFactory,
        "We should not have a generated syntax tree for a language that doesn't support trees.");

    if (languageServices.SyntaxTreeFactory.CanCreateRecoverableTree(root))
    {
        // We will only create recoverable text if we can create a recoverable tree; if we
        // created a recoverable text but not a new tree, it would mean tree.GetText()
        // could still potentially return the non-recoverable text, but asking the
        // document directly for its text would give a recoverable text with a different
        // object identity.
        textSource = CreateRecoverableText(textAndVersion, solutionServices);
        generatedSyntaxTree = languageServices.SyntaxTreeFactory.CreateRecoverableTree(
            documentId.ProjectId,
            filePath: generatedSyntaxTree.FilePath,
            options,
            textSource,
            generatedSourceText.Encoding,
            root);
    }

    var treeAndVersion = TreeAndVersion.Create(generatedSyntaxTree, textAndVersion.Version);

    return (new SourceGeneratedDocumentState(
        languageServices,
        solutionServices,
        documentServiceProvider: null,
        new DocumentInfo.DocumentAttributes(
            documentId,
            name: hintName,
            folders: SpecializedCollections.EmptyReadOnlyList<string>(),
            options.Kind,
            filePath: filePath,
            isGenerated: true,
            designTimeOnly: false),
        options,
        sourceText: null, // don't strongly hold the text
        textSource,
        treeAndVersion,
        sourceGenerator,
        hintName));
}
/// <summary>
/// Returns the container's current text snapshot paired with this loader's fixed
/// version and file path. No IO is performed, so the cancellation token is unused.
/// </summary>
internal override TextAndVersion LoadTextAndVersionSynchronously(Workspace workspace, DocumentId documentId, CancellationToken cancellationToken)
{
    var currentText = _container.CurrentText;
    return TextAndVersion.Create(currentText, _version, _filePath);
}