/// <summary>
/// Review: this test is fragile and expensive. It generates real internet traffic and creates real objects
/// on S3 and parse.com, which (to a very small extent) costs us real money. It will be slow. Also, under
/// S3 eventual consistency rules, there is no guarantee that the data we just created will actually be
/// retrievable immediately.
/// </summary>
/// <param name="bookName"></param>
/// <param name="id"></param>
/// <param name="uploader"></param>
/// <param name="data"></param>
/// <returns>A tuple of (original book folder, downloaded book folder).</returns>
public Tuple<string, string> UploadAndDownLoadNewBook(string bookName, string id, string uploader, string data)
{
	// Create a book folder with a meta.json that includes an uploader and id, plus some other files.
	var originalBookFolder = MakeBook(bookName, id, uploader, data);
	int fileCount = Directory.GetFiles(originalBookFolder).Length;
	Login();
	var progress = new Palaso.Progress.StringBuilderProgress();
	var s3Id = _transfer.UploadBook(originalBookFolder, progress);
	var uploadMessages = progress.Text.Split(new string[] { "Uploading" }, StringSplitOptions.RemoveEmptyEntries);
	Assert.That(uploadMessages.Length, Is.EqualTo(fileCount + 2)); // one message per file, plus one for metadata, plus one for the book order
	Assert.That(progress.Text.Contains("Uploading book metadata"));
	Assert.That(progress.Text.Contains("Uploading " + Path.GetFileName(Directory.GetFiles(originalBookFolder).First())));
	_transfer.WaitUntilS3DataIsOnServer(originalBookFolder);
	var dest = _workFolderPath.CombineForPath("output");
	Directory.CreateDirectory(dest);
	_downloadedBooks.Clear();
	var newBookFolder = _transfer.DownloadBook(s3Id, dest);
	Assert.That(Directory.GetFiles(newBookFolder).Length, Is.EqualTo(fileCount + 1)); // the book order file is added during upload
	Assert.That(_downloadedBooks.Count, Is.EqualTo(1));
	Assert.That(_downloadedBooks[0].FolderPath, Is.EqualTo(newBookFolder));
	// Todo: verify that metadata was transferred to Parse.com
	return new Tuple<string, string>(originalBookFolder, newBookFolder);
}
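// The eventual-consistency caveat in the summary above is why the test calls
// _transfer.WaitUntilS3DataIsOnServer before attempting the download. As a rough
// illustration, a wait like that can be built from a generic polling helper such as
// the sketch below. This is an assumption about the shape of such a helper, not
// Bloom's actual implementation; WaitUntil, the timeout values, and the usage
// comment are all illustrative. (Requires "using System;" and "using System.Threading;".)
static class EventualConsistencySketch
{
	// Polls a condition until it holds or the timeout expires.
	public static void WaitUntil(Func<bool> condition, TimeSpan timeout, TimeSpan pollInterval)
	{
		var deadline = DateTime.UtcNow + timeout;
		while (!condition())
		{
			if (DateTime.UtcNow > deadline)
				throw new TimeoutException("Condition not met within " + timeout);
			Thread.Sleep(pollInterval);
		}
	}
}

// Illustrative usage, assuming a hypothetical CountObjectsOnS3 helper:
// EventualConsistencySketch.WaitUntil(
//     () => CountObjectsOnS3(s3Id) >= expectedFileCount,
//     timeout: TimeSpan.FromSeconds(30),
//     pollInterval: TimeSpan.FromMilliseconds(500));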
public void UploadBook_SameId_Replaces()
{
	var bookFolder = MakeBook("unittest", "myId", "me", "something");
	var jsonPath = bookFolder.CombineForPath(BookInfo.MetaDataFileName);
	var json = File.ReadAllText(jsonPath);
	var jsonStart = json.Substring(0, json.Length - 1); // strip the closing brace
	var newJson = jsonStart + ",\"bookLineage\":\"original\"}";
	File.WriteAllText(jsonPath, newJson);
	Login();
	string s3Id = _transfer.UploadBook(bookFolder, new NullProgress());
	// Modify the book: delete one file, change another, and add a third.
	File.Delete(bookFolder.CombineForPath("one.css"));
	File.WriteAllText(Path.Combine(bookFolder, "one.htm"), "something new");
	File.WriteAllText(Path.Combine(bookFolder, "two.css"), @"test");
	// Tweak the json, but don't change the ID.
	newJson = jsonStart + ",\"bookLineage\":\"other\"}";
	File.WriteAllText(jsonPath, newJson);
	_transfer.UploadBook(bookFolder, new NullProgress());
	var dest = _workFolderPath.CombineForPath("output");
	Directory.CreateDirectory(dest);
	var newBookFolder = _transfer.DownloadBook(BloomS3Client.UnitTestBucketName, s3Id, dest);
	var firstData = File.ReadAllText(newBookFolder.CombineForPath("one.htm"));
	Assert.That(firstData, Does.Contain("something new"), "We should have overwritten the changed file");
	Assert.That(File.Exists(newBookFolder.CombineForPath("two.css")), Is.True, "We should have added the new file");
	Assert.That(File.Exists(newBookFolder.CombineForPath("one.css")), Is.False, "We should have deleted the obsolete file");
	// Verify that the metadata was overwritten, not duplicated into a new record.
	var records = _parseClient.GetBookRecords("myId" + _thisTestId);
	Assert.That(records.Count, Is.EqualTo(1), "Should have overwritten parse server record, not added or deleted");
	var bookRecord = records[0];
	Assert.That(bookRecord.bookLineage.Value, Is.EqualTo("other"));
}
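// The raw string surgery above (chopping the final "}" off meta.json and appending a
// field) assumes the file ends exactly with "}" and has no trailing whitespace. Below is
// a sketch of the same bookLineage tweak done through a JSON parser, which tolerates
// formatting variations. It assumes the test project references Newtonsoft.Json
// (Json.NET); SetBookLineageSketch is an illustrative helper, not the test's actual code.
static void SetBookLineageSketch(string jsonPath, string lineage)
{
	var metaData = Newtonsoft.Json.Linq.JObject.Parse(File.ReadAllText(jsonPath));
	metaData["bookLineage"] = lineage; // adds the property, or replaces it if already present
	File.WriteAllText(jsonPath, metaData.ToString());
}

// Illustrative usage, mirroring the two writes in the test above:
// SetBookLineageSketch(jsonPath, "original"); // before the first upload
// SetBookLineageSketch(jsonPath, "other");    // before the re-upload with the same ID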