/// <summary>
/// Loads the stored wallpaper blob with the given GridFS id.
/// </summary>
/// <param name="id">GridFS file id of the stored blob.</param>
/// <returns>
/// A populated <see cref="WallpaperData"/> wrapped in a Maybe, or an empty
/// Maybe (null payload) when the stream could not be opened.
/// </returns>
public IMaybe <WallpaperData> GetById(Guid id)
{
    using (var stream = _fs.OpenDownloadStream(id))
    {
        if (stream != null)
        {
            var data = new byte[stream.Length];

            // Stream.Read is not guaranteed to fill the buffer in a single
            // call; loop until all bytes are consumed (the original issued
            // one Read and could silently return truncated data).
            var offset = 0;
            while (offset < data.Length)
            {
                var read = stream.Read(data, offset, data.Length - offset);
                if (read == 0)
                {
                    break; // unexpected end of stream
                }
                offset += read;
            }

            var dto = new WallpaperData
            {
                Id = id,
                Data = data,
            };

            // The owning wallpaper id is persisted in the GridFS metadata, if present.
            if (stream.FileInfo.Metadata.IsBsonDocument &&
                stream.FileInfo.Metadata.TryGetValue(WALLPAPER_ID_FIELD_NAME, out var wallpaperId) &&
                wallpaperId.IsGuid)
            {
                dto.WallpaperId = wallpaperId.AsGuid;
            }

            return new Maybe <WallpaperData>(dto);
        }
    }

    return new Maybe <WallpaperData>(null);
}
/// <summary>
/// Opens a download stream for the GridFS file with the given id.
/// The caller is responsible for disposing the returned stream.
/// </summary>
/// <param name="fileId">Hex string representation of the file's ObjectId.</param>
public GridFSDownloadStream DownloadFile(string fileId)
{
    var bucket = new GridFSBucket(_mongoDatabase);
    return bucket.OpenDownloadStream(new ObjectId(fileId));
}
/// <summary>
/// Downloads the tile data stored under the given GridFS file name.
/// </summary>
/// <param name="fileName">GridFS file name to look up.</param>
/// <returns>
/// A MemoryStream holding the file content (position left at the end of the
/// stream, as in the original implementation), or null when no file with
/// that name exists.
/// </returns>
public async Task <MemoryStream> downTileData(string fileName)
{
    GridFSFileInfo fileInfo;
    var filter = Builders <GridFSFileInfo> .Filter.Eq(x => x.Filename, fileName);
    var options = new GridFSFindOptions { Limit = 1 };
    using (var cursor = gridFSBucket.Find(filter, options))
    {
        fileInfo = cursor.ToList().FirstOrDefault();
    }

    // FirstOrDefault can return null; the original dereferenced fileInfo.Id
    // unconditionally (NullReferenceException for unknown names) and also
    // opened a GridFSDownloadStream that was never used or disposed (leak).
    if (fileInfo == null)
    {
        return null;
    }

    var destination = new MemoryStream();
    await gridFSBucket.DownloadToStreamByNameAsync(fileName, destination);
    return destination;
}
/// <summary>
/// Update dump of the file on disks.
/// </summary>
/// <param name="fileId">Id of the blob inside the bucket.</param>
/// <param name="originalBlobBucket">Bucket that holds the original blob.</param>
/// <param name="deleted">If true this is an operation of deletion of the artifact.</param>
/// <returns>False when the source blob does not exist; true otherwise.</returns>
private Boolean UpdateDumpOfFile(
    string fileId,
    GridFSBucket <String> originalBlobBucket,
    Boolean deleted)
{
    var byId = Builders <GridFSFileInfo <string> > .Filter.Eq(x => x.Id, fileId);
    var descriptor = originalBlobBucket.Find(byId).FirstOrDefault();
    if (descriptor == null)
    {
        // Source stream does not exist: nothing to dump or delete.
        return false;
    }

    if (deleted)
    {
        _store.Delete(descriptor.Filename, fileId);
        return true;
    }

    // Stream the blob from GridFS straight into the on-disk store.
    using (var blobStream = originalBlobBucket.OpenDownloadStream(fileId))
    {
        _store.Store(blobStream, descriptor.Filename, fileId);
    }

    return true;
}
/// <summary>
/// Streams a GridFS file back to the client with its stored content type
/// and file name.
/// </summary>
/// <param name="id">Hex string of the file's ObjectId.</param>
public Task <IActionResult> Get(string id)
{
    return this.InvokeAsync(() =>
    {
        var bucket = new GridFSBucket(this.GetMonDatabase().MongoDatabase);
        var objId = new ObjectId(id);
        var fileInfo = bucket.Find(new BsonDocument("_id", objId)).FirstOrDefault();

        // FirstOrDefault returns null for an unknown id; the original
        // dereferenced fileInfo without a check (NullReferenceException).
        // Answer 404 instead.
        if (fileInfo == null)
        {
            return (IActionResult)this.NotFound();
        }

        var contentType = fileInfo.Metadata["ContentType"].ToString();
        var output = bucket.OpenDownloadStream(objId);

        // FileStreamResult disposes the stream once the response is written.
        return this.File(output, contentType, fileInfo.Filename);
    });
}
/// <summary>
/// Opens a download stream for the given file and packages it together with
/// the stored file name and MD5 checksum. The caller owns the stream.
/// </summary>
/// <param name="fileId">Hex string representation of the file's ObjectId.</param>
public DFSFileInfo DownloadFile(string fileId)
{
    var download = fs.OpenDownloadStream(new ObjectId(fileId));
    var info = download.FileInfo;
    return new DFSFileInfo()
    {
        Stream = download,
        FileName = info.Filename,
        MD5 = info.MD5
    };
}
/// <summary>
/// Looks up the GridFS metadata entry for the given file id.
/// </summary>
/// <param name="gridFS">Bucket to query.</param>
/// <param name="id">File id.</param>
/// <param name="nullThrowException">When true, throw if the file does not exist.</param>
/// <returns>The file info, or null when missing and <paramref name="nullThrowException"/> is false.</returns>
/// <exception cref="MongoException">File does not exist and nullThrowException is true.</exception>
protected GridFSFileInfo GetGridFsFile(GridFSBucket gridFS, ObjectId id, bool nullThrowException = true)
{
    // Query the metadata collection directly instead of opening a download
    // stream: OpenDownloadStream throws when the file is missing (which made
    // the null check below dead code) and the returned stream was never
    // disposed (resource leak). This is what the previously commented-out
    // Find-based code intended.
    var filter = Builders<GridFSFileInfo>.Filter.Where(p => p.Id == id);
    var fileInfo = gridFS.Find(filter).FirstOrDefault();
    if (fileInfo == null && nullThrowException == true)
    {
        throw new MongoException(string.Format(FileNotExistsErrorMessage, id, gridFS.Database.DatabaseNamespace.DatabaseName));
    }
    return fileInfo;
}
/// <summary>
/// Opens a seekable download stream for the given file id.
/// </summary>
/// <param name="fileId">Hex string of the file's ObjectId.</param>
/// <returns>
/// The open stream, or null when the id is malformed or the file cannot be
/// opened (deliberate best-effort contract preserved from the original).
/// </returns>
public Stream GetStream(string fileId)
{
    // Expected failure mode (malformed id) is handled without exceptions.
    if (!ObjectId.TryParse(fileId, out var id))
    {
        return null;
    }

    try
    {
        return _gridFs.OpenDownloadStream(id, new GridFSDownloadOptions() { Seekable = true });
    }
    catch (Exception)
    {
        // Best-effort: any driver failure (missing file, connectivity) maps
        // to "no stream" for the caller, as in the original implementation.
        return null;
    }
}
/// <summary>
/// Downloads a fixed blob from the "Simple" bucket and writes it to the
/// local file new-image.jpg (overwriting any existing file).
/// </summary>
public void DownloadBucket()
{
    var bucket = new GridFSBucket(_blobDataBase, new GridFSBucketOptions() { BucketName = "Simple" });

    // Fixes from the original: the download stream is now disposed (it was
    // leaked), and Stream.CopyTo handles chunked reads — a single Read call
    // is not guaranteed to fill the whole buffer. The empty by-name options
    // object passed to an id-based download was dropped as well.
    using (var stream = bucket.OpenDownloadStream(ObjectId.Parse("58486e92c6a8bd38a41549e9")))
    using (var newFs = new FileStream("new-image.jpg", FileMode.Create))
    {
        stream.CopyTo(newFs);
    }
}
/// <summary>
/// Opens the requested file from the per-user bucket (bucket name = user id)
/// and packages the open stream with its content type and name.
/// The caller owns the returned stream.
/// </summary>
/// <param name="objectId">Hex string of the file's ObjectId.</param>
/// <param name="userId">User id; doubles as the GridFS bucket name.</param>
public FileModel GetFile(string objectId, string userId)
{
    var userBucket = new GridFSBucket(_database, new GridFSBucketOptions() { BucketName = userId });
    var download = userBucket.OpenDownloadStream(ObjectId.Parse(objectId));
    var info = download.FileInfo;

    return new FileModel()
    {
        FileStream = download,
        ContentType = info.Metadata["ContentType"].AsString,
        FileName = info.Filename,
        Id = objectId
    };
}
/// <summary>
/// Creates a read provider backed by a GridFS download stream for the given
/// file id. NOTE(review): the stream is owned by this instance after
/// construction — presumably disposed elsewhere in the class; confirm.
/// </summary>
/// <param name="bucket">Bucket the file lives in.</param>
/// <param name="id">Id of the file to open.</param>
public MongoReadContentProvider(GridFSBucket bucket, ObjectId id) { this.stream = bucket.OpenDownloadStream(id); }
/// <summary>
/// Get a non running message from queue
/// </summary>
/// <param name="query">query where top level fields do not contain operators. Lower level fields can however. eg: valid {a: {$gt: 1}, "b.c": 3}, invalid {$and: [{...}, {...}]}</param>
/// <param name="resetRunning">duration before this message is considered abandoned and will be given with another call to Get()</param>
/// <param name="wait">duration to keep polling before returning null</param>
/// <param name="poll">duration between poll attempts</param>
/// <param name="approximateWait">whether to fluctuate the wait time randomly by +-10 percent. This ensures Get() calls seperate in time when multiple Queues are used in loops started at the same time</param>
/// <returns>message or null</returns>
/// <exception cref="ArgumentNullException">query is null</exception>
public Message Get(QueryDocument query, TimeSpan resetRunning, TimeSpan wait, TimeSpan poll, bool approximateWait)
{
    if (query == null)
    {
        throw new ArgumentNullException(nameof(query));
    }

    // Reset stuck messages: anything still flagged running past its reset
    // timestamp is considered abandoned and made available again.
    collection.UpdateMany(
        new QueryDocument { { "running", true }, { "resetTimestamp", new BsonDocument("$lte", DateTime.UtcNow) } },
        new UpdateDocument("$set", new BsonDocument("running", false))
        );

    // Build the claim query: not running, payload matches, and eligible now.
    var builtQuery = new QueryDocument("running", false);

    foreach (var field in query)
    {
        builtQuery.Add("payload." + field.Name, field.Value);
    }

    builtQuery.Add("earliestGet", new BsonDocument("$lte", DateTime.UtcNow));

    var resetTimestamp = DateTime.UtcNow;

    try
    {
        resetTimestamp += resetRunning;
    }
    catch (ArgumentOutOfRangeException)
    {
        // Saturate instead of overflowing when resetRunning is extreme.
        resetTimestamp = resetRunning > TimeSpan.Zero ? DateTime.MaxValue : DateTime.MinValue;
    }

    var sort = new SortByDocument { { "priority", 1 }, { "created", 1 } };
    var update = new UpdateDocument("$set", new BsonDocument { { "running", true }, { "resetTimestamp", resetTimestamp } });
    var fields = new FieldsDocument { { "payload", 1 }, { "streams", 1 } };

    var end = DateTime.UtcNow;

    try
    {
        if (approximateWait)
        {
            //fluctuate randomly by 10 percent
            wait += TimeSpan.FromMilliseconds(wait.TotalMilliseconds * GetRandomDouble(-0.1, 0.1));
        }

        end += wait;
    }
    catch (Exception e)
    {
        if (!(e is OverflowException) && !(e is ArgumentOutOfRangeException))
        {
            // Rethrow preserving the stack trace (the original used
            // "throw e;" which resets it).
            throw;//cant cover
        }

        // Saturate the deadline on overflow.
        end = wait > TimeSpan.Zero ? DateTime.MaxValue : DateTime.MinValue;
    }

    while (true)
    {
        // Atomically claim one eligible message (highest priority, oldest first).
        var message = collection.FindOneAndUpdate(
            builtQuery,
            update,
            new FindOneAndUpdateOptions <BsonDocument, BsonDocument>()
        {
            IsUpsert = false,
            ReturnDocument = ReturnDocument.After,
            Sort = sort,
            Projection = fields
        });

        if (message != null)
        {
            // Re-open every GridFS stream attached to the message. The
            // streams are owned by the returned Handle/Message.
            var handleStreams = new List <KeyValuePair <BsonValue, Stream> >();
            var messageStreams = new Dictionary <string, Stream>();

            foreach (var streamId in message["streams"].AsBsonArray)
            {
                var fileInfo = gridfs.Find(new QueryDocument { { "_id", streamId } }).First();
                var stream = gridfs.OpenDownloadStream(streamId);
                handleStreams.Add(new KeyValuePair <BsonValue, Stream>(streamId, stream));
                messageStreams.Add(fileInfo.Filename, stream);
            }

            var handle = new Handle(message["_id"].AsObjectId, handleStreams);

            return new Message(handle, message["payload"].AsBsonDocument, messageStreams);
        }

        if (DateTime.UtcNow >= end)
        {
            return null;
        }

        try
        {
            Thread.Sleep(poll);
        }
        catch (ArgumentOutOfRangeException)
        {
            // Clamp an out-of-range poll interval and retry the sleep.
            if (poll < TimeSpan.Zero)
            {
                poll = TimeSpan.Zero;
            }
            else
            {
                poll = TimeSpan.FromMilliseconds(int.MaxValue);
            }

            Thread.Sleep(poll);
        }

        if (DateTime.UtcNow >= end)
        {
            return null;
        }
    }
}
/// <summary>
/// Generates square PNG thumbnails (one per <c>ThumbnailSizes</c> value) for the
/// memory item with the given id: loads the stored image from the "memoryfiles"
/// GridFS bucket, letterboxes it onto a black square per size, and appends the
/// results to the item's <c>Thumbnails</c> via <c>AddToSetEach</c>.
/// NOTE(review): connection string contains (masked) inline credentials — move
/// to configuration/secret storage.
/// NOTE(review): per-image failures are swallowed with Debug.WriteLine only —
/// presumably deliberate best-effort; confirm.
/// </summary>
/// <param name="memoryId">Id of the memory item to thumbnail.</param>
/// <returns>404 when the item does not exist; 200 otherwise.</returns>
public async Task <IActionResult> Generate(ObjectId memoryId) { var client = new MongoDB.Driver.MongoClient("mongodb://*****:*****@localhost/?safe=true"); var db = client.GetDatabase("memoryvault"); var coll = db.GetCollection <MemoryItemModel>("memories"); var founds = coll.Find(p => p.Id == memoryId).FirstOrDefault(); if (founds == null) { return(NotFound("File not found.")); } GridFSBucketOptions opts = new GridFSBucketOptions(); opts.BucketName = "memoryfiles"; var bucket = new GridFSBucket(db, opts); bool isChanged = false; List <ThumbnailItemModel> lstThumbs = new List <ThumbnailItemModel>(); foreach (ThumbnailSizes size in Enum.GetValues(typeof(ThumbnailSizes))) { using (var stream = bucket.OpenDownloadStream(founds.ItemId)) { var dimension = GetDimension(size); using (Image <Rgba32> bg = new Image <Rgba32>(dimension, dimension)) { try { using (Image <Rgba32> image = Image.Load(stream)) //open the file and detect the file type and decode it { int iWidth; int iHeight; int margin = 3; float ratio = (float)image.Width / (float)image.Height; if (ratio > 1) { iWidth = dimension - 2 * margin; iHeight = (int)(iWidth / ratio); } else { iHeight = dimension - 2 * margin; iWidth = (int)(iHeight * ratio); } // image is now in a file format agnostic structure in memory as a series of Rgba32 pixels image.Mutate(ctx => ctx.Resize(iWidth, iHeight)); // resize the image in place and return it for chaining bg.Mutate(ctx => ctx.Fill(Rgba32.Black)); int pX = (int)((dimension - iWidth) / 2); int pY = (int)((dimension - iHeight) / 2); bg.Mutate(ctx => ctx.DrawImage(image, PixelBlenderMode.Normal, 1, new SixLabors.Primitives.Point(pX, pY))); //var thPath = $"c:/temp/thumbs/th.{memoryId}.{DateTime.Now.ToString("MMDD_HHmmsss")}.{(ThumbnailSizes)size}.png"; //using (var fs = new System.IO.FileStream(thPath, FileMode.CreateNew)) //{ // bg.SaveAsPng(fs); // based on the file extension pick an encoder then encode and write the data to disk // await fs.FlushAsync(); // fs.Close(); //} 
// Encode the letterboxed image to PNG in memory and queue it for persistence.
using (var ms = new MemoryStream()) { bg.SaveAsPng(ms); lstThumbs.Add(new ThumbnailItemModel { CreateTime = DateTime.Now, Size = size, Data = ms.ToArray() }); isChanged = true; } } } catch (Exception exc) { Debug.WriteLine(exc.Message); } } } } if (isChanged) { await coll.UpdateOneAsync( p => p.Id == memoryId , Builders <MemoryItemModel> .Update.AddToSetEach(s => s.Thumbnails, lstThumbs) ); } return(Ok()); }
/// <summary>
/// Wires up the actor's query handlers: forwards change-source queries to the
/// change tracker and registers four query calls — QueryApps (list all apps),
/// QueryApp (single app or QueryAppNotFound), QueryBinaries (opens a GridFS
/// download stream for the requested version and starts a data transfer,
/// returning its operation id), and QueryBinaryInfo (version list for an app).
/// Failure codes are reported through the reporter; handlers then return null.
/// NOTE(review): QueryBinarys with a null Manager returns null without
/// reporting a failure — presumably intentional; confirm.
/// </summary>
/// <param name="apps">Collection of app metadata documents.</param>
/// <param name="files">GridFS bucket holding the app binaries.</param>
/// <param name="dataTransfer">Manager used to stream binaries to the requester.</param>
/// <param name="changeTracker">Actor that answers QueryChangeSource messages.</param>
public AppQueryHandler(IMongoCollection <AppData> apps, GridFSBucket files, DataTransferManager dataTransfer, IActorRef changeTracker) { Receive <QueryChangeSource>(changeTracker.Forward); MakeQueryCall <QueryApps, AppList>("QueryApps", (query, _) => new AppList(apps.AsQueryable().Select(ad => ad.ToInfo()).ToImmutableList())); MakeQueryCall <QueryApp, AppInfo>("QueryApp", (query, reporter) => { var data = apps.AsQueryable().FirstOrDefault(e => e.Name == query.AppName); if (data != null) { return(data.ToInfo()); } reporter.Compled(OperationResult.Failure(BuildErrorCodes.QueryAppNotFound)); return(null); }); MakeQueryCall <QueryBinarys, FileTransactionId>("QueryBinaries", (query, reporter) => { if (query.Manager == null) { return(null); } var data = apps.AsQueryable().FirstOrDefault(e => e.Name == query.AppName); if (data == null) { reporter.Compled(OperationResult.Failure(BuildErrorCodes.QueryAppNotFound)); return(null); } var targetVersion = query.AppVersion != -1 ? query.AppVersion : data.Last; var file = data.Versions.FirstOrDefault(f => f.Version == targetVersion); if (file == null) { reporter.Compled(OperationResult.Failure(BuildErrorCodes.QueryFileNotFound)); return(null); } var request = DataTransferRequest.FromStream(() => files.OpenDownloadStream(file.File), query.Manager, query.AppName); dataTransfer.Request(request); return(new FileTransactionId(request.OperationId)); }); MakeQueryCall <QueryBinaryInfo, BinaryList>("QueryBinaryInfo", (binarys, reporter) => { var data = apps.AsQueryable().FirstOrDefault(ad => ad.Name == binarys.AppName); if (data != null) { return(new BinaryList(data.Versions.Select(i => new AppBinary(data.Name, i.Version, i.CreationTime, i.Deleted, i.Commit, data.Repository)).ToImmutableList())); } reporter.Compled(OperationResult.Failure(BuildErrorCodes.QueryAppNotFound)); return(null); }); }
/// <summary>
/// Verifies that Queue.Send persists a message document with the expected
/// payload/running/resetTimestamp/earliestGet/priority fields, a recent
/// "created" timestamp, and two GridFS-backed streams whose single bytes
/// (111 and 222) round-trip through OpenDownloadStream.
/// NOTE(review): gridfs.UploadFromStream("one", streamOne) runs before any
/// byte is written, so that pre-uploaded file is empty — per the inline
/// comment this is deliberate, proving duplicate file names are allowed as
/// long as ids differ; confirm.
/// </summary>
public void Send() { var now = DateTime.Now; var payload = new BsonDocument { { "key1", 0 }, { "key2", true } }; using (var streamOne = new MemoryStream()) using (var streamTwo = new MemoryStream()) { gridfs.UploadFromStream("one", streamOne);//making sure same file names are ok as long as their ids are diffrent streamOne.WriteByte(111); streamTwo.WriteByte(222); streamOne.Position = 0; streamTwo.Position = 0; queue.Send(payload, now, 0.8, new Dictionary <string, Stream> { { "one", streamOne }, { "two", streamTwo } }); } var expected = new BsonDocument { //_id added below { "payload", payload }, { "running", false }, { "resetTimestamp", new BsonDateTime(DateTime.MaxValue) }, { "earliestGet", new BsonDateTime(now) }, { "priority", 0.8 }, //streams added below //created added below }; var message = collection.Find(new QueryDocument()).First(); var actualCreated = message["created"]; expected["created"] = actualCreated; actualCreated = actualCreated.ToUniversalTime(); var actualStreamIds = message["streams"].AsBsonArray; expected["streams"] = actualStreamIds; Assert.IsTrue(actualCreated <= DateTime.UtcNow); Assert.IsTrue(actualCreated > DateTime.UtcNow - TimeSpan.FromSeconds(10)); expected.InsertAt(0, new BsonElement("_id", message["_id"])); Assert.AreEqual(expected, message); var fileOne = gridfs.Find(new QueryDocument { { "_id", actualStreamIds[0] } }).First(); Assert.AreEqual("one", fileOne.Filename); using (var stream = gridfs.OpenDownloadStream(fileOne.Id)) Assert.AreEqual(111, stream.ReadByte()); var fileTwo = gridfs.Find(new QueryDocument { { "_id", actualStreamIds[1] } }).First(); Assert.AreEqual("two", fileTwo.Filename); using (var stream = gridfs.OpenDownloadStream(fileTwo.Id)) Assert.AreEqual(222, stream.ReadByte()); }
/// <summary>
/// Opens a GridFS download stream for the given object id using the
/// context's database. The caller is responsible for disposing the stream.
/// </summary>
/// <param name="objectId">Id of the file to open.</param>
public GridFSDownloadStream GetStream(ObjectId objectId)
{
    var bucket = new GridFSBucket(_context.Database);
    return bucket.OpenDownloadStream(objectId);
}