/// <summary>
/// Drains the queue of pending mirrored files, uploading each one to the
/// mirror CDN. For every file: skips it if a definition already exists on the
/// server, otherwise uploads every part in parallel followed by a gzipped
/// definition manifest. Returns the number of files pulled from the queue
/// (including ones that were skipped or failed).
/// </summary>
public override async Task<int> Execute()
{
    int uploaded = 0;
    // Replaced the original `goto TOP` label loop with an idiomatic while-loop;
    // control flow is otherwise identical.
    while (true)
    {
        var toUpload = await _sql.GetNextMirroredFile();
        if (toUpload == default)
            return uploaded;
        uploaded += 1;

        try
        {
            var creds = await BunnyCdnFtpInfo.GetCreds(StorageSpace.Mirrors);
            using var queue = new WorkQueue();

            if (_archives.TryGetPath(toUpload.Hash, out var path))
            {
                _logger.LogInformation($"Uploading mirror file {toUpload.Hash} {path.Size.FileSizeToString()}");

                bool exists;
                using (var client = await GetClient(creds))
                {
                    exists = await client.FileExistsAsync($"{toUpload.Hash.ToHex()}/definition.json.gz");
                }

                if (exists)
                {
                    _logger.LogInformation($"Skipping {toUpload.Hash} it's already on the server");
                    await toUpload.Finish(_sql);
                    continue;
                }

                await _discord.Send(Channel.Spam,
                    new DiscordMessage
                    {
                        Content = $"Uploading {toUpload.Hash} - {toUpload.Created} because {toUpload.Rationale}"
                    });

                var definition = await Client.GenerateFileDefinition(queue, path, (s, percent) => { });

                using (var client = await GetClient(creds))
                {
                    await client.CreateDirectoryAsync($"{definition.Hash.ToHex()}");
                    await client.CreateDirectoryAsync($"{definition.Hash.ToHex()}/parts");
                }

                // Remote key for an individual part of the file.
                string MakePath(long idx) => $"{definition.Hash.ToHex()}/parts/{idx}";

                await definition.Parts.PMap(queue, async part =>
                {
                    _logger.LogInformation($"Uploading mirror part ({part.Index}/{definition.Parts.Length})");

                    var buffer = new byte[part.Size];
                    await using (var fs = await path.OpenShared())
                    {
                        fs.Position = part.Offset;
                        // BUGFIX: Stream.ReadAsync may return fewer bytes than
                        // requested; the original single call could upload a
                        // partially-filled (corrupt) part. Loop until the
                        // buffer is full.
                        var read = 0;
                        while (read < buffer.Length)
                        {
                            var got = await fs.ReadAsync(buffer.AsMemory(read));
                            if (got == 0)
                                throw new EndOfStreamException(
                                    $"Unexpected end of file reading part {part.Index} of {definition.Hash}");
                            read += got;
                        }
                    }

                    using var client = await GetClient(creds);
                    var name = MakePath(part.Index);
                    await client.UploadAsync(new MemoryStream(buffer), name);
                });

                using (var client = await GetClient(creds))
                {
                    _logger.LogInformation("Finishing mirror upload");

                    await using var ms = new MemoryStream();
                    // leaveOpen: true so `ms` survives the gzip stream's dispose,
                    // which is what flushes the gzip footer before we rewind.
                    await using (var gz = new GZipStream(ms, CompressionLevel.Optimal, true))
                    {
                        definition.ToJson(gz);
                    }
                    ms.Position = 0;

                    var remoteName = $"{definition.Hash.ToHex()}/definition.json.gz";
                    await client.UploadAsync(ms, remoteName);
                }

                await toUpload.Finish(_sql);
            }
            else
            {
                await toUpload.Fail(_sql, "Archive not found");
            }
        }
        catch (Exception ex)
        {
            _logger.LogInformation($"{toUpload.Created} {toUpload.Uploaded}");
            _logger.LogError(ex, "Error uploading");
            await toUpload.Fail(_sql, ex.ToString());
        }
    }
}
/// <summary>
/// Synchronizes modlists published on GitHub (listed and unlisted) into local
/// storage and the database. For each list: downloads the archive if we don't
/// already have it, verifies its hash, extracts and parses the embedded
/// "modlist" JSON entry, and ingests it. Finally enqueues the lists' files for
/// indexing. Returns the number of lists newly downloaded.
/// </summary>
public override async Task<int> Execute()
{
    int downloaded = 0;
    var lists = (await ModlistMetadata.LoadFromGithub())
        .Concat(await ModlistMetadata.LoadUnlistedFromGithub())
        .ToList();

    foreach (var list in lists)
    {
        try
        {
            ReportStarting(list.Links.MachineURL);

            // Already ingested this exact version? Nothing to do.
            if (await _sql.HaveIndexedModlist(list.Links.MachineURL, list.DownloadMetadata.Hash))
                continue;

            if (!_maintainer.HaveArchive(list.DownloadMetadata!.Hash))
            {
                _logger.Log(LogLevel.Information, $"Downloading {list.Links.MachineURL}");
                await _discord.Send(Channel.Ham,
                    new DiscordMessage
                    {
                        Content = $"Downloading {list.Links.MachineURL} - {list.DownloadMetadata.Hash}"
                    });

                // BUGFIX: the TempFile was never disposed, leaking one
                // downloaded archive on disk per list, per run.
                await using var tf = new TempFile();

                var state = DownloadDispatcher.ResolveArchive(list.Links.Download);
                if (state == null)
                {
                    // BUGFIX: message previously read "Now downloader found".
                    _logger.Log(LogLevel.Error,
                        $"No downloader found for list {list.Links.MachineURL} : {list.Links.Download}");
                    continue;
                }

                downloaded += 1;
                await state.Download(
                    new Archive(state) { Name = $"{list.Links.MachineURL}.wabbajack" },
                    tf.Path);

                var hash = await tf.Path.FileHashAsync();
                if (hash != list.DownloadMetadata.Hash)
                {
                    _logger.Log(LogLevel.Error,
                        $"Downloaded modlist {list.Links.MachineURL} {list.DownloadMetadata.Hash} didn't match metadata hash of {hash}");
                    // Record the list as failed (metadataIsBad flag presumably —
                    // matches the other IngestModList call's final argument).
                    await _sql.IngestModList(list.DownloadMetadata.Hash, list, new ModList(), true);
                    continue;
                }

                await _maintainer.Ingest(tf.Path);
            }

            // BUGFIX: the TryGetPath result was ignored; a miss would leave a
            // default path and blow up in OpenRead below. Fail gracefully instead.
            if (!_maintainer.TryGetPath(list.DownloadMetadata.Hash, out var modlistPath))
            {
                _logger.LogWarning($"Archive {list.DownloadMetadata.Hash} missing after ingest for {list.Links.MachineURL}");
                continue;
            }

            ModList modlist;
            await using (var fs = await modlistPath.OpenRead())
            using (var zip = new ZipArchive(fs, ZipArchiveMode.Read))
            await using (var entry = zip.GetEntry("modlist")?.Open())
            {
                if (entry == null)
                {
                    _logger.LogWarning($"Bad Modlist {list.Links.MachineURL}");
                    await _discord.Send(Channel.Ham,
                        new DiscordMessage
                        {
                            Content = $"Bad Modlist {list.Links.MachineURL} - {list.DownloadMetadata.Hash}"
                        });
                    continue;
                }

                try
                {
                    modlist = entry.FromJson<ModList>();
                }
                catch (JsonReaderException)
                {
                    _logger.LogWarning($"Bad Modlist {list.Links.MachineURL}");
                    await _discord.Send(Channel.Ham,
                        new DiscordMessage
                        {
                            Content = $"Bad Modlist {list.Links.MachineURL} - {list.DownloadMetadata.Hash}"
                        });
                    continue;
                }
            }

            await _sql.IngestModList(list.DownloadMetadata!.Hash, list, modlist, false);
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, $"Error downloading modlist {list.Links.MachineURL}");
            await _discord.Send(Channel.Ham,
                new DiscordMessage
                {
                    Content = $"Error downloading modlist {list.Links.MachineURL} - {list.DownloadMetadata.Hash}"
                });
        }
        finally
        {
            ReportEnding(list.Links.MachineURL);
        }
    }

    _logger.Log(LogLevel.Information, $"Done checking modlists. Downloaded {downloaded} new lists");
    if (downloaded > 0)
    {
        await _discord.Send(Channel.Ham,
            new DiscordMessage { Content = $"Downloaded {downloaded} new lists" });
    }

    var fc = await _sql.EnqueueModListFilesForIndexing();
    _logger.Log(LogLevel.Information, $"Enqueing {fc} files for downloading");
    if (fc > 0)
    {
        await _discord.Send(Channel.Ham,
            new DiscordMessage { Content = $"Enqueing {fc} files for downloading" });
    }

    return downloaded;
}
/// <summary>
/// Builds binary (OctoDiff) patches for every pending src→dest archive pair,
/// uploads each patch to the CDN, and records the result. Notifies the
/// ListValidator when any patch was processed, then optionally cleans up old
/// patches. Returns the number of pending patches processed.
/// </summary>
public override async Task<int> Execute()
{
    int count = 0;
    while (true)
    {
        var patch = await _sql.GetPendingPatch();
        if (patch == default)
            break;
        // BUGFIX: count was incremented before the empty-queue check, so the
        // method returned 1 (and notified ListValidator) even when no patches
        // were processed. Increment only once a patch is actually fetched.
        count++;

        try
        {
            _logger.LogInformation(
                $"Building patch from {patch.Src.Archive.State.PrimaryKeyString} to {patch.Dest.Archive.State.PrimaryKeyString}");
            await _discordWebHook.Send(Channel.Spam,
                new DiscordMessage
                {
                    Content =
                        $"Building patch from {patch.Src.Archive.State.PrimaryKeyString} to {patch.Dest.Archive.State.PrimaryKeyString}"
                });

            // Identical archives produce an empty/pointless patch.
            if (patch.Src.Archive.Hash == patch.Dest.Archive.Hash &&
                patch.Src.Archive.State.PrimaryKeyString == patch.Dest.Archive.State.PrimaryKeyString)
            {
                await patch.Fail(_sql, "Hashes match");
                continue;
            }

            // ~2.5 GB cap on either side of the diff.
            if (patch.Src.Archive.Size > 2_500_000_000 || patch.Dest.Archive.Size > 2_500_000_000)
            {
                await patch.Fail(_sql, "Too large to patch");
                continue;
            }

            // BUGFIX: TryGetPath results were ignored; a miss left a default
            // path that crashed in OpenShared and surfaced as a generic error.
            // Fail the patch with a clear reason instead.
            if (!_maintainer.TryGetPath(patch.Src.Archive.Hash, out var srcPath))
            {
                await patch.Fail(_sql, $"Src archive {patch.Src.Archive.Hash} not found in maintainer");
                continue;
            }
            if (!_maintainer.TryGetPath(patch.Dest.Archive.Hash, out var destPath))
            {
                await patch.Fail(_sql, $"Dest archive {patch.Dest.Archive.Hash} not found in maintainer");
                continue;
            }

            await using var sigFile = new TempFile();
            await using var patchFile = new TempFile();
            await using var srcStream = await srcPath.OpenShared();
            await using var destStream = await destPath.OpenShared();
            await using var sigStream = await sigFile.Path.Create();
            await using var patchOutput = await patchFile.Path.Create();

            OctoDiff.Create(destStream, srcStream, sigStream, patchOutput,
                new OctoDiff.ProgressReporter(TimeSpan.FromSeconds(1),
                    (s, p) => _logger.LogInformation($"Patch Builder: {p} {s}")));

            // Dispose early so the patch file is fully flushed before we read
            // its size and upload it (the `await using` re-dispose is a no-op).
            await patchOutput.DisposeAsync();

            var size = patchFile.Path.Size;
            await UploadToCDN(patchFile.Path, PatchName(patch));
            await patch.Finish(_sql, size);

            await _discordWebHook.Send(Channel.Spam,
                new DiscordMessage
                {
                    Content =
                        $"Built {size.ToFileSizeString()} patch from {patch.Src.Archive.State.PrimaryKeyString} to {patch.Dest.Archive.State.PrimaryKeyString}"
                });
        }
        catch (Exception ex)
        {
            _logger.LogError(ex, "Error while building patch");
            await patch.Fail(_sql, ex.ToString());
            await _discordWebHook.Send(Channel.Spam,
                new DiscordMessage
                {
                    Content =
                        $"Failure building patch from {patch.Src.Archive.State.PrimaryKeyString} to {patch.Dest.Archive.State.PrimaryKeyString}"
                });
        }
    }

    if (count > 0)
    {
        // Notify the List Validator that we may have more patches
        await _quickSync.Notify<ListValidator>();
    }

    if (!NoCleaning)
        await CleanupOldPatches();

    return count;
}