public void Setup()
{
    // Anchor all test artifacts under Documents\Personal Cloud.
    TestRoot = Path.Combine(GetFolderPath(SpecialFolder.MyDocuments), "Personal Cloud");
    Directory.CreateDirectory(TestRoot);
    TestRoot = Path.Combine(TestRoot, "Test Container");

    // A leftover container means a previous run did not clean up; refuse to
    // run against stale state rather than produce misleading results.
    if (Directory.Exists(TestRoot))
    {
        Assert.Inconclusive("Previous test session may have failed. Please ensure \"" + Path.GetDirectoryName(TestRoot) + "\" is empty before starting a new session.");
        return;
    }

    Directory.CreateDirectory(TestRoot);

#pragma warning disable CA2000 // Dispose objects before losing scope
    var physical = new PhysicalFileSystem();
#pragma warning restore CA2000 // Dispose objects before losing scope

    // Restrict the served file system to the freshly created container directory.
    var scoped = new SubFileSystem(physical, physical.ConvertPathFromInternal(TestRoot), true);
    Server = new HttpProvider(10240, scoped);
    Server.Start();
    Client = new TopFolderClient($"http://localhost:10240", new byte[32], "");
}
public void Setup()
{
    // Working directory: Documents\Personal Cloud\Test Container.
    TestRoot = Path.Combine(GetFolderPath(SpecialFolder.MyDocuments), "Personal Cloud");
    Directory.CreateDirectory(TestRoot);
    TestRoot = Path.Combine(TestRoot, "Test Container");

    // Abort (inconclusive, not failed) if a stale container from a crashed
    // session is still present.
    if (Directory.Exists(TestRoot))
    {
        Assert.Inconclusive("Previous test session may have failed. Please ensure \"" + Path.GetDirectoryName(TestRoot) + "\" is empty before starting a new session.");
        return;
    }

    Directory.CreateDirectory(TestRoot);

    // Serve the container through the virtual file system on a fixed port.
    var rootFs = new VirtualFileSystem(TestRoot);
    Server = new HttpProvider(10240, rootFs);
    Server.Start();
    Client = new TopFolderClient($"http://localhost:10240", new byte[32], "");
}
public void Setup()
{
    // "Personal Cloud" doubles as both the log directory and the parent of
    // the per-session test container.
    var logDirectory = TestRoot = Path.Combine(GetFolderPath(SpecialFolder.MyDocuments), "Personal Cloud");
    Directory.CreateDirectory(TestRoot);
    TestRoot = Path.Combine(TestRoot, "Test Container");

    // Refuse to reuse a container left behind by a failed session.
    if (Directory.Exists(TestRoot))
    {
        Assert.Inconclusive("Previous test session may have failed. Please ensure \"" + Path.GetDirectoryName(TestRoot) + "\" is empty before starting a new session.");
        return;
    }

    Directory.CreateDirectory(TestRoot);
    Directory.CreateDirectory(logDirectory);

    // Console-only logging; the minimum level is intentionally left at the
    // provider default (a Trace override was previously disabled here).
    Loggers = LoggerFactory.Create(builder => builder.AddConsole(x => { x.TimestampFormat = "G"; }));

#pragma warning disable CA2000 // Dispose objects before losing scope
    var physical = new Zio.FileSystems.PhysicalFileSystem();
#pragma warning restore CA2000 // Dispose objects before losing scope
    physical.CreateDirectory(physical.ConvertPathFromInternal(TestRoot));

    // Serve only the container subtree over HTTP.
    var scoped = new Zio.FileSystems.SubFileSystem(physical, physical.ConvertPathFromInternal(TestRoot), true);
    Server = new HttpProvider(10240, scoped);
    Server.Start();

    // The client under test is mounted under "Files" inside a container FS.
    var topClient = new TopFolderClient($"http://localhost:10240", new byte[32], "");
    var mounts = new Dictionary<string, IFileSystem>();
    mounts["Files"] = topClient;
    Client = new FileSystemContainer(mounts, Loggers.CreateLogger("FileContainerWebApiTest"));
}
/// <summary>
/// Fetches the <see cref="PersonalCloudInfo"/> that a peer node publishes for
/// cloud <paramref name="pc"/>, keeping only the app entries hosted by that
/// node. Returns <c>null</c> on any failure (network, auth, or bad payload).
/// </summary>
/// <param name="pc">The cloud whose id and master key authenticate the request.</param>
/// <param name="ninfo">The peer node to query; supplies the base URL and node guid.</param>
private async Task <PersonalCloudInfo> GetPeerPCInfo(PersonalCloud pc, NodeInfo ninfo)
{
    try
    {
        var url = new Uri(new Uri(ninfo.Url), "/api/share/cloud");
        var s = await TopFolderClient.GetCloudInfo(httpClient.Value, url, pc.Id, pc.MasterKey).ConfigureAwait(false);
        var cfg = JsonConvert.DeserializeObject <PersonalCloudInfo>(s);
        // Drop apps that belong to other nodes: the peer only speaks for itself.
        var lis = cfg.Apps.Where(x => x.NodeId != ninfo.NodeGuid).ToList();
        foreach (var item in lis)
        {
            cfg.Apps.Remove(item);
        }
        return cfg;
    }
    catch (Exception e)
    {
        // Fix: pass the exception as the first argument so its stack trace is
        // recorded. The previous call, LogError(message, e), used the
        // (string, params object[]) overload and silently dropped the exception.
        logger.LogError(e, "Exception in GetPeerPCInfo");
        return null;
    }
}
public void Setup()
{
    // Per-session container under Documents\Personal Cloud.
    TestRoot = Path.Combine(GetFolderPath(SpecialFolder.MyDocuments), "Personal Cloud");
    Directory.CreateDirectory(TestRoot);
    TestRoot = Path.Combine(TestRoot, "Test Container");

    // A surviving container indicates an unclean previous run; stop here.
    if (Directory.Exists(TestRoot))
    {
        Assert.Inconclusive("Previous test session may have failed. Please ensure \"" + Path.GetDirectoryName(TestRoot) + "\" is empty before starting a new session.");
        return;
    }

    Directory.CreateDirectory(TestRoot);
    var logsDir = Path.Combine(TestRoot, "Logs");
    Directory.CreateDirectory(logsDir);

    // Console logger only; Trace-level override remains disabled.
    Loggers = LoggerFactory.Create(builder => builder.AddConsole(x => { x.TimestampFormat = "G"; }));

    // Mount a virtual file system over the container as "Files" and serve the
    // whole container FS over HTTP; the client talks to it as a top folder.
    var mounts = new Dictionary <string, IFileSystem>();
    mounts["Files"] = new VirtualFileSystem(TestRoot);
    var rootFs = new FileSystemContainer(mounts, Loggers.CreateLogger("FileContainerWebApiTest"));
    Server = new HttpProvider(10240, rootFs);
    Server.Start();
    Client = new TopFolderClient($"http://localhost:10240", new byte[32], "");
}
/// <summary>
/// Mounts a client for <paramref name="node"/> into <c>RootFS.ClientList</c>.
/// The mount key is the node's display name; if that name is already taken
/// (user entered a duplicated name), the first free "Name (i)" (i >= 2) is used.
/// </summary>
/// <param name="node">Peer node to mount.</param>
/// <param name="cli">Optional pre-built client. When null, a client is created
/// here and disposed again if the insert loses a race; a caller-supplied client
/// is never disposed by this method.</param>
private void InsertRootFS(NodeInfoForPC node, TopFolderClient cli = null)
{
    // Fix: the original duplicated the construct/TryAdd/Dispose sequence
    // verbatim in both branches; only the key selection differed.
    string key = node.Name;
    if (RootFS.ClientList.ContainsKey(key))
    {
        // User input duplicated name — probe "Name (2)", "Name (3)", … until free.
        for (int i = 2; i < int.MaxValue; i++)
        {
            key = $"{node.Name} ({i})";
            if (!RootFS.ClientList.ContainsKey(key))
            {
                break;
            }
        }
    }

    var loccli = cli;
    if (loccli == null)
    {
#pragma warning disable CA2000 // Collection elements are disposed elsewhere.
        loccli = new TopFolderClient(node.Url, MasterKey, Id) { Name = node.Name, NodeId = node.NodeGuid, TimeStamp = node.PCTimeStamp };
#pragma warning restore CA2000
    }

    // Only dispose when we created the client here and the add lost a race.
    var ret = RootFS.ClientList.TryAdd(key, loccli);
    if ((!ret) && (cli == null))
    {
        loccli.Dispose();
    }
}
// Integration test: round-trips a 10 GiB file through HttpProvider/TopFolderClient
// backed by a PhysicalFileSystem rooted at I:\Personal Cloud Test\.
// Phase 1 streams "test.txt" in a single WriteFileAsync and verifies it via TestRead.
// Phase 2 writes "test1.txt" as `parts` sequential WritePartialFileAsync chunks of
// partsize bytes, plus one shorter tail chunk when filesize is not evenly divisible
// (note: filesize % origpart equals the tail size because partsize = filesize / origpart
// with integer division). The guard asserts partsize is a multiple of 256 because
// TestStream generates its pattern in 256-byte-aligned units.
// The empty catch blocks deliberately ignore DeleteAsync failures (file may not exist);
// the "//if(false)" markers are leftover toggles for disabling a phase during debugging.
// NOTE(review): assumes drive I:\ exists with >10 GiB free, and binds port 100 —
// confirm the environment allows both before running.
public async Task LargeFileTest() { long filesize = 10L * 1024 * 1024 * 1024; var testRoot = "I:\\Personal Cloud Test\\"; int parts = 128; var partsize = filesize / parts; if (((filesize / parts) % 256) != 0) { #pragma warning disable CA1303 // Do not pass literals as localized parameters Assert.Fail("filesize/parts must be a multiple of 256"); //otherwise you have to rewrite TestStream #pragma warning restore CA1303 // Do not pass literals as localized parameters } Directory.CreateDirectory(testRoot); #pragma warning disable CA2000 // Dispose objects before losing scope var fs = new PhysicalFileSystem(); #pragma warning restore CA2000 // Dispose objects before losing scope var subfs = new SubFileSystem(fs, fs.ConvertPathFromInternal(testRoot), true); using var server = new HttpProvider(100, subfs); server.Start(); using var client = new TopFolderClient($"http://localhost:100", new byte[32], ""); //if(false) { try { await client.DeleteAsync("test.txt").ConfigureAwait(false); } #pragma warning disable CA1031 // Do not catch general exception types catch { } using var teststrm = new TestStream(filesize); await client.WriteFileAsync("test.txt", teststrm).ConfigureAwait(false); await TestRead("test.txt", filesize, parts, client).ConfigureAwait(false); await Task.Delay(1000).ConfigureAwait(false); try { await client.DeleteAsync("test.txt").ConfigureAwait(false); } catch { } } //if (false) { try { await client.DeleteAsync("test1.txt").ConfigureAwait(false); } catch { } var origpart = parts; if ((filesize % parts) != 0) { parts++; } { using var teststrm = new TestStream(partsize); await client.WriteFileAsync("test1.txt", teststrm).ConfigureAwait(false); } for (int i = 1; i < origpart; i++) { using var teststrm = new TestStream(partsize); await client.WritePartialFileAsync("test1.txt", partsize *i, partsize, teststrm).ConfigureAwait(false); } if (origpart < parts) { partsize = filesize % origpart; using var teststrm = new TestStream(partsize); await 
client.WritePartialFileAsync("test1.txt", partsize *origpart, partsize, teststrm).ConfigureAwait(false); } await TestRead("test1.txt", filesize, parts, client).ConfigureAwait(false); await Task.Delay(1000).ConfigureAwait(false); try { await client.DeleteAsync("test1.txt").ConfigureAwait(false); } catch #pragma warning restore CA1031 // Do not catch general exception types { } } }
// Helper: verifies that `filename` on the server holds exactly `filesize` bytes of
// the repeating byte pattern bufref[i] = (byte)i over a 1 MiB window (the pattern
// TestStream produces). The content is validated three independent ways:
//   1. one full ReadFileAsync stream;
//   2. `parts` sequential ReadPartialFileAsync range reads of partsize bytes each,
//      where the final part may be shorter (filesize % origpart) when filesize does
//      not divide evenly — mirroring the writer's chunking in LargeFileTest;
//   3. a single ReadPartialFileAsync covering the whole file [0, filesize-1].
// Each pass compares every Read() against the matching slice of the reference
// buffer (the `i += read - 1` step advances by however many bytes Read returned),
// then confirms EOF: one more Read must return 0 bytes.
static private async Task TestRead(string filename, long filesize, int parts, TopFolderClient client) { var reflen = 1024 * 1024; var bufref = new byte[reflen]; var buf = new byte[reflen]; for (int i = 0; i < reflen; i++) { bufref[i] = (byte)i; } //if (false) { using var target = await client.ReadFileAsync(filename).ConfigureAwait(false); Assert.AreEqual(target.Length, filesize); for (long i = 0; i < filesize; i++) { var offset = i % reflen; var read = target.Read(buf, 0, reflen - (int)offset); if (read == 0) { Assert.Fail(); } if (!buf.AsSpan(0, read).SequenceEqual(bufref.AsSpan((int)offset, read))) { Assert.Fail(); } i += read - 1; } var lastread = target.Read(buf, 0, 1); if (lastread != 0) { Assert.Fail(); } } //if (false) { var partsize = filesize / parts; var origpart = parts; if ((filesize % parts) != 0) { parts++; } for (int j = 0; j < parts; j++) { using var target = await client.ReadPartialFileAsync(filename, j *partsize, (j + 1) *partsize - 1).ConfigureAwait(false); if (j != (parts - 1)) { Assert.AreEqual(target.Length, partsize); } else { if (origpart == parts) { Assert.AreEqual(target.Length, partsize); } else { Assert.AreEqual(target.Length, filesize % origpart); } } for (long i = 0; i < target.Length; i++) { var bufoffset = (i + j * partsize) % reflen; var read = target.Read(buf, 0, reflen - (int)bufoffset); if (read == 0) { Assert.Fail(); } if (!buf.AsSpan(0, read).SequenceEqual(bufref.AsSpan((int)bufoffset, read))) { Assert.Fail(); } i += read - 1; } var lastread = target.Read(buf, 0, 1); if (lastread != 0) { Assert.Fail(); } } } //if(false) { using var target = await client.ReadPartialFileAsync(filename, 0, filesize - 1).ConfigureAwait(false); Assert.AreEqual(target.Length, filesize); for (long i = 0; i < filesize; i++) { var offset = i % reflen; var read = target.Read(buf, 0, reflen - (int)offset); if (read == 0) { Assert.Fail(); } if (!buf.AsSpan(0, read).SequenceEqual(bufref.AsSpan((int)offset, read))) { Assert.Fail(); } i += read - 1; } var 
lastread = target.Read(buf, 0, 1); if (lastread != 0) { Assert.Fail(); } } }