Inheritance: SynchronizationWorkItem
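
ContentUpdateWorkItem derives from SynchronizationWorkItem. The sketch below only illustrates the shape implied by the tests that follow: the constructor parameters mirror the calls in the tests, and FileName and FileETag are the members compared in the Equals implementation further down; the parameter type names (e.g. ITransactionalStorage) and the method bodies are assumptions, not the actual implementation.

	public class ContentUpdateWorkItem : SynchronizationWorkItem
	{
		public ContentUpdateWorkItem(string fileName, string sourceServerUrl,
		                             ITransactionalStorage storage, SigGenerator sigGenerator)
		{
			// store the file name, read its current ETag from storage and keep the signature generator
		}

		public Task UploadToAsync(string destinationServerUrl)
		{
			// compute signatures and transfer the content, reusing pages the destination already has
			throw new NotImplementedException();
		}
	}
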
		public void Should_reuse_pages_when_data_appended(int numberOfPages)
		{
			var file = SyncTestUtils.PreparePagesStream(numberOfPages);

			// add new pages at the end
			var sourceContent = new CombinedStream(file, SyncTestUtils.PreparePagesStream(numberOfPages));
			var destinationContent = file;

			sourceContent.Position = 0;
			source.UploadAsync("test", sourceContent).Wait();
			destinationContent.Position = 0;
			destination.UploadAsync("test", destinationContent).Wait();

			var contentUpdate = new ContentUpdateWorkItem("test", "http://localhost:12345", sourceRfs.Storage,
			                                              sourceRfs.SigGenerator);

			// force the entire file to be uploaded; we just want to check which pages will be reused
			contentUpdate.UploadToAsync(destination.ServerUrl).Wait();
			destination.Synchronization.ResolveConflictAsync("test", ConflictResolutionStrategy.RemoteVersion).Wait();
			contentUpdate.UploadToAsync(destination.ServerUrl).Wait();

			FileAndPages fileAndPages = null;
			destinationRfs.Storage.Batch(accessor => fileAndPages = accessor.GetFile("test", 0, 2*numberOfPages));

			Assert.Equal(2*numberOfPages, fileAndPages.Pages.Count);

			for (var i = 0; i < numberOfPages; i++)
			{
				// page ids still in the original order mean that the existing pages were reused
				Assert.Equal(i + 1, fileAndPages.Pages[i].Id);
			}

			sourceContent.Position = 0;
			Assert.Equal(sourceContent.GetMD5Hash(), destination.GetMetadataForAsync("test").Result["Content-MD5"]);
		}
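
		// SyncTestUtils.PreparePagesStream is the helper both tests here rely on. A minimal sketch of what
		// it might do, assuming each "page" is a fixed-size block of random bytes (the 64 KB page size is
		// an assumption, not taken from the source):
		public static MemoryStream PreparePagesStream(int numberOfPages)
		{
			const int pageSize = 64 * 1024; // assumed page size
			var random = new Random();
			var buffer = new byte[pageSize];
			var result = new MemoryStream();

			for (var i = 0; i < numberOfPages; i++)
			{
				random.NextBytes(buffer); // every page gets its own random content
				result.Write(buffer, 0, buffer.Length);
			}

			return result;
		}
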
		public void Should_reuse_second_page_if_only_first_one_changed()
		{
			var file = SyncTestUtils.PreparePagesStream(2);
			file.Position = 0;

			var sourceContent = new MemoryStream();
			file.CopyTo(sourceContent);
			sourceContent.Position = 0;
			sourceContent.Write(new byte[] {0, 0, 0, 0}, 0, 4); // change content of the 1st page

			var destinationContent = file;

			sourceContent.Position = 0;
			source.UploadAsync("test", sourceContent).Wait();
			destinationContent.Position = 0;
			destination.UploadAsync("test", destinationContent).Wait();

			var contentUpdate = new ContentUpdateWorkItem("test", "http://localhost:12345", sourceRfs.Storage,
			                                              sourceRfs.SigGenerator);

			sourceContent.Position = 0;
			// force the entire file to be uploaded; we just want to check which pages will be reused
			contentUpdate.UploadToAsync(destination.ServerUrl).Wait();
			destination.Synchronization.ResolveConflictAsync("test", ConflictResolutionStrategy.RemoteVersion).Wait();
			contentUpdate.UploadToAsync(destination.ServerUrl).Wait();

			FileAndPages fileAndPages = null;
			destinationRfs.Storage.Batch(accessor => fileAndPages = accessor.GetFile("test", 0, 256));

			Assert.Equal(2, fileAndPages.Pages.Count);
			Assert.Equal(3, fileAndPages.Pages[0].Id); // new page -> id == 3
			Assert.Equal(2, fileAndPages.Pages[1].Id); // reused page -> id still == 2

			sourceContent.Position = 0;
			Assert.Equal(sourceContent.GetMD5Hash(), destination.GetMetadataForAsync("test").Result["Content-MD5"]);
		}
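
		// Both tests finish by comparing the MD5 hash of the source stream with the Content-MD5 metadata
		// stored on the destination. A sketch of what a GetMD5Hash stream extension might look like
		// (assumed helper; it would live in a static class, and the lower-case hex format is an assumption):
		public static string GetMD5Hash(this Stream stream)
		{
			using (var md5 = MD5.Create()) // System.Security.Cryptography
			{
				var hash = md5.ComputeHash(stream); // hashes from the stream's current position to its end
				return BitConverter.ToString(hash).Replace("-", "").ToLowerInvariant();
			}
		}
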
		public bool Equals(ContentUpdateWorkItem other)
		{
			if (ReferenceEquals(null, other)) return false;
			if (ReferenceEquals(this, other)) return true;
			return Equals(other.FileName, FileName) && Equals(other.FileETag, FileETag);
		}
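
		// Equality is defined by FileName and FileETag. A typed Equals like the one above is normally
		// paired with an object.Equals override and a matching GetHashCode; the versions below are a
		// sketch of consistent companions (assumed, not taken from the source; FileETag is assumed to be
		// a value type such as Guid).
		public override bool Equals(object obj)
		{
			if (ReferenceEquals(null, obj)) return false;
			if (ReferenceEquals(this, obj)) return true;
			if (obj.GetType() != typeof (ContentUpdateWorkItem)) return false;
			return Equals((ContentUpdateWorkItem) obj);
		}

		public override int GetHashCode()
		{
			unchecked
			{
				// combine exactly the members that Equals compares
				return ((FileName != null ? FileName.GetHashCode() : 0) * 397) ^ FileETag.GetHashCode();
			}
		}
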
		public void Should_detect_that_different_work_is_being_performed()
		{
			using (var sigGenerator = new SigGenerator())
			{
				transactionalStorage.Batch(accessor => accessor.PutFile(FileName, 0, EmptyETagMetadata));

				var contentUpdateWorkItem = new ContentUpdateWorkItem(FileName, "http://localhost:12345", transactionalStorage,
				                                                      sigGenerator);

				queue.EnqueueSynchronization(Destination, contentUpdateWorkItem);
				queue.SynchronizationStarted(contentUpdateWorkItem, Destination);

				Assert.True(queue.IsDifferentWorkForTheSameFileBeingPerformed(
					new RenameWorkItem(FileName, "rename.txt", "http://localhost:12345", transactionalStorage), Destination));
			}
		}
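
		// The assertion above relies on the queue treating a rename and a content update for the same file
		// as different work. A rough sketch of that check (assumed logic; TryGetRunningItemForFile is a
		// hypothetical helper, and the real queue implementation may differ):
		public bool IsDifferentWorkForTheSameFileBeingPerformed(SynchronizationWorkItem work, string destination)
		{
			SynchronizationWorkItem runningItem;
			if (!TryGetRunningItemForFile(destination, work.FileName, out runningItem)) // hypothetical helper
				return false;

			// a RenameWorkItem for the same file is not Equal to a ContentUpdateWorkItem, so this returns true
			return !runningItem.Equals(work);
		}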