public ObjectId UploadFromStream(Stream source, string fileName, GridFSUploadOptions options = null)
{
    // Open a GridFS bucket against the configured database and store the stream,
    // returning the ObjectId assigned to the new file.
    var mongoClient = GetClient(m_connectionStr);
    var targetDb = mongoClient.GetDatabase(DatabaseName);
    var gridFsBucket = new GridFSBucket(targetDb, BucksOptions);
    return gridFsBucket.UploadFromStream(fileName, source, options);
}
public string AdicionarOuAtualizar(string filename)
{
    // Upload the file's contents to GridFS and return the new file id as a string.
    // FIX: the original opened the stream and only Close()d it after the upload,
    // leaking the OS file handle whenever UploadFromStream threw; 'using'
    // guarantees disposal on every path.
    using (Stream valor = File.Open(filename, FileMode.Open))
    {
        var fileInfo = gridFs.UploadFromStream(filename, valor);
        return fileInfo.ToString();
    }
}
/// <summary>
/// GridFS file operation — upload.
/// </summary>
/// <param name="fileName">Name to store the file under in GridFS.</param>
/// <param name="fileStream">Stream containing the file contents.</param>
/// <returns>The ObjectId assigned to the stored file.</returns>
public ObjectId UpLoad(string fileName, Stream fileStream)
{
    var bucket = new GridFSBucket(database, new GridFSBucketOptions());
    return bucket.UploadFromStream(fileName, fileStream);
}
//******************* UPLOAD ********************************
// Uploads a single file and returns the ObjectId of the stored song
// (the id of the song after it is pushed to the cloud).
private ObjectId UploadFile(string filepath)
{
    IGridFSBucket bucket = new GridFSBucket(CloudDB.database);
    // FIX: the original never disposed the file stream, leaking the handle;
    // 'using' closes it even when the upload throws.
    using (Stream src = File.OpenRead(filepath))
    {
        // Store under the bare file name, not the full path.
        return bucket.UploadFromStream(System.IO.Path.GetFileName(filepath), src);
    }
    //---MessageBox.Show("Upload Done!");
}
/// <summary>
/// Uploads a file (as a stream) into GridFS.
/// </summary>
/// <param name="bucketName">Bucket name (acts like a folder name).</param>
/// <param name="fileName">File name to store under.</param>
/// <param name="fs">File stream to upload.</param>
/// <returns>The ObjectId of the stored file.</returns>
public static ObjectId UploadFile(string bucketName, string fileName, Stream fs)
{
    var bucket = new GridFSBucket(mongoContext, new GridFSBucketOptions { BucketName = bucketName });
    return bucket.UploadFromStream(fileName, fs);
}
public void Store(Guid id, Stream imageData)
{
    // Persist the image bytes in the "images" GridFS bucket, keyed by the Guid's string form.
    var db = GetDatabase();
    var bucketOptions = new GridFSBucketOptions { BucketName = "images" };
    var imageBucket = new GridFSBucket(db, bucketOptions);
    imageBucket.UploadFromStream(id.ToString(), imageData);
}
private FileInfo AddFile(Stream fileStream, string fileName)
{
    // Upload the stream with the configured chunk size, then read back the
    // stored file's metadata by its new id.
    // FIX: removed the original's throwaway 'new FileInfo()' allocation that
    // was immediately overwritten by GetFileInfo's result.
    GridFSUploadOptions options = new GridFSUploadOptions();
    options.ChunkSizeBytes = _chunkSize;
    ObjectId id = _gridFs.UploadFromStream(fileName, fileStream, options);
    return GetFileInfo(id.ToString());
}
public String SaveImage(string fileName, Stream stream)
{
    // Store the image in the "imagen" database's default GridFS bucket and
    // return the resulting id (via the ID property).
    MongoContext mc = new MongoContext();
    var database = mc.MongoCliente.GetDatabase("imagen");
    var bucket = new GridFSBucket(database);
    // The using block disposes (and thereby closes) the caller's stream; the
    // original's explicit fs.Close() inside the block was redundant.
    using (var fs = stream)
    {
        var gridFsInfo = bucket.UploadFromStream(fileName, fs);
        _id = gridFsInfo;
    }
    return ID;
}
/// <summary>
/// Inserts a file into MongoDB GridFS.
/// </summary>
/// <param name="fileStream">Stream of the file to store.</param>
/// <param name="insertObj">JSON object carrying "file_name", "ext" and an optional "collection_name" (defaults to "fs").</param>
/// <param name="msg">Receives the status/error message.</param>
/// <returns>The stored file's ObjectId as a string, or an empty string on failure.</returns>
public static string InsertFile(FileStream fileStream, JObject insertObj, out string msg)
{
    try
    {
        #region Extract inputs
        string collection_name = "fs"; // collection (bucket) name
        string file_name = string.Empty, ext = string.Empty;
        if (insertObj.Property("file_name") != null)
        {
            file_name = insertObj["file_name"].ToString();
        }
        if (insertObj.Property("ext") != null)
        {
            ext = insertObj["ext"].ToString();
        }
        if (insertObj.Property("collection_name") != null)
        {
            collection_name = insertObj["collection_name"].ToString();
        }
        #endregion
        #region Upload
        IMongoDatabase _database = GetDataBaseInstance(out msg);
        // Initialize the GridFSBucket
        var bucket = new GridFSBucket(_database, new GridFSBucketOptions
        {
            BucketName = collection_name,             // bucket (root) name
            ChunkSizeBytes = 1024 * 256,              // chunk size: 256KB
            WriteConcern = WriteConcern.WMajority,    // write acknowledgement level: majority
            ReadPreference = ReadPreference.Secondary // prefer reading from secondaries
        });
        ObjectId fileId;
        // Stored name is "<file_name>.<ext>"; null options means driver defaults.
        fileId = bucket.UploadFromStream(filename: file_name + "." + ext, source: fileStream, options: null);
        return (fileId.ToString());
        #endregion
    }
    catch (Exception ex)
    {
        // Best-effort: surface the failure via msg/log and return empty rather than throwing.
        msg = ex.Message;
        Logger.Error(ex);
        return (string.Empty);
    }
}
public string StorageFile(Stream fileStream, string filePath)
{
    // Stores a stream under a backslash-separated path: the first segment is
    // the GridFS bucket name, the remainder is the file name inside the bucket.
    string[] stringArray = filePath.Split('\\');
    if (stringArray.Length < 2)
    {
        // FIX: the original constructed this exception but never threw it, so a
        // malformed path fell through to an out-of-range Substring below.
        throw new FileUtilsException($"文件路径不正确:{filePath}");
    }
    GridFSBucket fileSystem = new GridFSBucket(db, new GridFSBucketOptions { BucketName = stringArray[0] });
    // Everything after the first '\' is the in-bucket file name.
    string fileName = filePath.Substring(stringArray[0].Length + 1, filePath.Length - stringArray[0].Length - 1);
    fileSystem.UploadFromStream(fileName, fileStream);
    return filePath;
}
public ActionResult <string> Upload(IFormFile file)
{
    // Ensure the local uploads folder exists (kept from the original even though
    // the file itself goes to GridFS, not to disk).
    var uploads = _folder;
    if (!Directory.Exists(uploads))
    {
        Directory.CreateDirectory(uploads);
    }
    var client = new MongoClient("mongodb://localhost:27017");
    IMongoDatabase db = client.GetDatabase("TestCol");
    GridFSBucket bucket = new GridFSBucket(db);
    // FIX: dispose the request stream once the upload completes — the original
    // passed OpenReadStream() inline and never released it.
    using (var stream = file.OpenReadStream())
    {
        ObjectId id = bucket.UploadFromStream(file.FileName, stream);
        return Ok(id.ToString());
    }
}
/// <summary>
/// Uploads a file to GridFS.
/// </summary>
/// <param name="FileName">Path of the file; also used verbatim as the stored file name.</param>
/// <returns>The timestamp component of the new ObjectId.</returns>
public int FilePut(string FileName)
{
    GridFSBucket fs = new GridFSBucket(_db);
    using (FileStream stream = new FileStream(FileName, FileMode.Open))
    {
        // FIX: ObjectId is a value type, so the original's 'obj != null' check
        // (and its 'return 0' branch) could never be false — UploadFromStream
        // either returns an id or throws.
        var obj = fs.UploadFromStream(FileName, stream);
        return obj.Timestamp;
    }
}
public string AddFile(Stream fileStream, string fileName, string contentType, string userId)
{
    // Each user gets a dedicated bucket (named after the user id); the content
    // type and owner are stamped into the file's metadata.
    var metadata = new BsonDocument
    {
        { new BsonElement("ContentType", contentType) },
        { new BsonElement("UserId", userId) }
    };
    var bucket = new GridFSBucket(_database, new GridFSBucketOptions { BucketName = userId });
    var uploadOptions = new GridFSUploadOptions { Metadata = metadata };
    ObjectId storedId = bucket.UploadFromStream(fileName, fileStream, uploadOptions);
    return storedId.ToString();
}
public void UploadBucket()
{
    // Upload the hard-coded sample image into the "Simple" bucket, tagged with
    // a "creator" metadata entry; the stored name is the file name sans extension.
    var pathToFile = "credolab.jpg";
    var bucket = new GridFSBucket(_blobDataBase, new GridFSBucketOptions() { BucketName = "Simple" });
    var options = new GridFSUploadOptions()
    {
        Metadata = new BsonDocument() { { "creator", "Vitek" } }
    };
    // FIX: the original never disposed the file stream; 'using' releases the
    // OS handle even when the upload throws.
    using (var fileStream = File.Open(pathToFile, FileMode.Open))
    {
        bucket.UploadFromStream(Path.GetFileNameWithoutExtension(pathToFile), fileStream, options);
    }
}
// Uploads the file at 'archivo' into GridFS under 'fileName', attaching the
// given photo metadata; returns the new file's ObjectId.
// NOTE(review): despite the "Async" suffix this method is synchronous — the
// name is kept for caller compatibility.
public ObjectId SubirFileAsync(String archivo, String fileName, MetadataDeFotos laMetadata)
{
    var database = ConectarConBaseDeDatos();
    IGridFSBucket bucket = new GridFSBucket(database);
    var options = new GridFSUploadOptions()
    {
        Metadata = new BsonDocument
        {
            { "descripcion", laMetadata.Descripcion },
            { "fechaYHora", laMetadata.FechaYHora }
        }
    };
    // FIX: the original never disposed the file stream; 'using' releases the
    // handle even when the upload throws.
    using (Stream strem = File.Open(archivo, FileMode.Open))
    {
        return bucket.UploadFromStream(fileName, strem, options);
    }
}
// Adds an image from the data sent through the endpoint in ImageController:
// the raw bytes go into GridFS first, then a metadata document is inserted.
public Image Create(Stream Fs, string ImageDescription, string UserId, string UserName, string FileName, string ContentType)
{
    var storedId = _bucket.UploadFromStream(FileName, Fs);
    var image = new Image
    {
        CreatorUserId = UserId,
        CreatorName = UserName,
        CreatedOn = DateTime.Now,
        ImageDescription = ImageDescription,
        FileName = FileName,
        ContentType = ContentType,
        ImageId = storedId
    };
    _images.InsertOne(image);
    return image;
}
// Records the file's metadata document, derives a unique grid name from the
// new document id, then uploads the bytes under that name.
// Returns the grid name the content was stored under.
public async Task <string> Upload(string fileName, Stream source)
{
    var gf = new GridFile
    {
        Acl = new Acl(),
        FileName = fileName,
        MimeType = MimeMapping.MimeUtility.GetMimeMapping(fileName),
        LastModified = DateTimeOffset.Now,
    };
    await _files.InsertOneAsync(gf);
    // Grid name includes the id so identical file names never collide.
    gf.GridName = $"{gf.Id}-{gf.FileName}";
    await _files.ReplaceOneAsync(x => x.Id == gf.Id, gf);
    // FIX: await the async upload instead of blocking the thread with the
    // synchronous UploadFromStream inside an async method.
    await _bucket.UploadFromStreamAsync(gf.GridName, source);
    return gf.GridName;
}
private void StoreImage(HttpPostedFileBase file, Rental rental)
{
    // Pre-generate the image id, persist it on the rental document, then
    // upload the posted bytes with that id recorded in the file metadata.
    var imageId = ObjectId.GenerateNewId();
    rental.ImageId = imageId.ToString();
    Context.Rentals.ReplaceOne(x => x.Id == rental.Id, rental);
    var metadata = new BsonDocument
    {
        { "Id", imageId },
        { "ContentType", file.ContentType },
    };
    var gridFs = new GridFSBucket(Context.MongoDatabase);
    gridFs.UploadFromStream(file.FileName, file.InputStream, new GridFSUploadOptions() { Metadata = metadata });
}
// Uploads every file in the hard-coded images folder into the given bucket
// and returns the ids in the same order.
public List <ObjectId> UploadFile(GridFSBucket fs)
{
    string path = @"G:\Projekty\WebAppParser\Content\images\";
    List <string> files = Utils.FilesInFolder(path);
    List <ObjectId> objectIds = new List <ObjectId>();
    foreach (var item in files)
    {
        using (var s = File.OpenRead(path + item))
        {
            // FIX: the original wrapped this synchronous call in Task.Run and
            // then blocked on .Result — pure overhead with no concurrency
            // gained (each task was awaited before the next started).
            objectIds.Add(fs.UploadFromStream(item, s));
        }
    }
    return objectIds;
}
// Demonstrates uploading a local PDF into GridFS with author/year metadata,
// printing the resulting id to the console.
public static void UploadFileFromAStream()
{
    var database = DatabaseHelper.GetDatabaseReference("localhost", "file_store");
    IGridFSBucket bucket = new GridFSBucket(database);
    var options = new GridFSUploadOptions()
    {
        Metadata = new BsonDocument() { { "author", "Mark Twain" }, { "year", 1900 } }
    };
    // FIX: the original never disposed the file stream; 'using' closes the
    // handle even when the upload throws.
    using (Stream stream = File.Open("sample.pdf", FileMode.Open))
    {
        var id = bucket.UploadFromStream("sample1.pdf", stream, options);
        Console.WriteLine(id.ToString());
    }
}
public override void AddAttachment(string id, Stream fileStream, string contentType, string attachmentName)
{
    // Attachments live in a dedicated "attachments" bucket; the metadata links
    // each stored file back to its owning collection and document.
    var bucket = new GridFSBucket(database, new GridFSBucketOptions() { BucketName = "attachments" });
    var metadata = new BsonDocument
    {
        { "content-type", contentType },
        { "collection", CollectionName },
        { "document-id", id },
        { "attachment-name", attachmentName },
        { "attachment-id", id + "-" + attachmentName },
    };
    var uploadOptions = new GridFSUploadOptions
    {
        ChunkSizeBytes = 1048576, // 1MB
        Metadata = metadata
    };
    bucket.UploadFromStream(attachmentName, fileStream, uploadOptions);
}
public string UploadFile(string fileName, Stream stream)
{
    // Store the stream under the given name and hand back the new id as a string.
    return fs.UploadFromStream(fileName, stream).ToString();
}
// Verifies that Queue.Send persists the payload document with the expected
// fields and uploads the attached streams into GridFS in order.
public void Send()
{
    var now = DateTime.Now;
    var payload = new BsonDocument { { "key1", 0 }, { "key2", true } };
    using (var streamOne = new MemoryStream())
    using (var streamTwo = new MemoryStream())
    {
        gridfs.UploadFromStream("one", streamOne);//making sure same file names are ok as long as their ids are diffrent
        streamOne.WriteByte(111);
        streamTwo.WriteByte(222);
        // Rewind so Send reads the bytes just written.
        streamOne.Position = 0;
        streamTwo.Position = 0;
        queue.Send(payload, now, 0.8, new Dictionary <string, Stream> { { "one", streamOne }, { "two", streamTwo } });
    }
    var expected = new BsonDocument
    {
        //_id added below
        { "payload", payload },
        { "running", false },
        { "resetTimestamp", new BsonDateTime(DateTime.MaxValue) },
        { "earliestGet", new BsonDateTime(now) },
        { "priority", 0.8 },
        //streams added below
        //created added below
    };
    var message = collection.Find(new QueryDocument()).First();
    // "created" and "streams" are server-generated, so copy the actual values
    // into the expected document before the full-document comparison.
    var actualCreated = message["created"];
    expected["created"] = actualCreated;
    actualCreated = actualCreated.ToUniversalTime();
    var actualStreamIds = message["streams"].AsBsonArray;
    expected["streams"] = actualStreamIds;
    // "created" must be recent (within the last 10 seconds, not in the future).
    Assert.IsTrue(actualCreated <= DateTime.UtcNow);
    Assert.IsTrue(actualCreated > DateTime.UtcNow - TimeSpan.FromSeconds(10));
    expected.InsertAt(0, new BsonElement("_id", message["_id"]));
    Assert.AreEqual(expected, message);
    // Stream ids must map, in order, to GridFS files holding the bytes written above.
    var fileOne = gridfs.Find(new QueryDocument { { "_id", actualStreamIds[0] } }).First();
    Assert.AreEqual("one", fileOne.Filename);
    using (var stream = gridfs.OpenDownloadStream(fileOne.Id))
        Assert.AreEqual(111, stream.ReadByte());
    var fileTwo = gridfs.Find(new QueryDocument { { "_id", actualStreamIds[1] } }).First();
    Assert.AreEqual("two", fileTwo.Filename);
    using (var stream = gridfs.OpenDownloadStream(fileTwo.Id))
        Assert.AreEqual(222, stream.ReadByte());
}
public string Upload(string fileFullName, Stream fileStream)
{
    // Upload into the current database's default bucket; return the id string.
    var bucket = new GridFSBucket(this.GetDB());
    var storedId = bucket.UploadFromStream(fileFullName, fileStream);
    return storedId.ToString();
}
public ObjectId Upload(string fileName, Stream stream)
{
    // Thin wrapper over the bucket's synchronous upload.
    var storedId = gridFSBucket.UploadFromStream(fileName, stream);
    return storedId;
}
/// <summary>
/// Ack handle and send payload to queue, atomically.
/// </summary>
/// <param name="handle">handle to ack received from Get()</param>
/// <param name="payload">payload to send</param>
/// <param name="earliestGet">earliest instant that a call to Get() can return message</param>
/// <param name="priority">priority for order out of Get(). 0 is higher priority than 1</param>
/// <param name="newTimestamp">true to give the payload a new timestamp or false to use given message timestamp</param>
/// <param name="streams">streams to upload into gridfs or null to forward handle's streams</param>
/// <exception cref="ArgumentNullException">handle or payload is null</exception>
/// <exception cref="ArgumentException">priority was NaN</exception>
public void AckSend(Handle handle, BsonDocument payload, DateTime earliestGet, double priority, bool newTimestamp, IEnumerable <KeyValuePair <string, Stream> > streams)
{
    // Guard clauses per the documented exceptions.
    if (handle == null)
    {
        throw new ArgumentNullException("handle");
    }
    if (payload == null)
    {
        throw new ArgumentNullException("payload");
    }
    if (Double.IsNaN(priority))
    {
        throw new ArgumentException("priority was NaN", "priority");
    }
    // Fields overwritten on the acked message document.
    var toSet = new BsonDocument
    {
        { "payload", payload },
        { "running", false },
        { "resetTimestamp", DateTime.MaxValue },
        { "earliestGet", earliestGet },
        { "priority", priority },
    };
    if (newTimestamp)
    {
        toSet["created"] = DateTime.UtcNow;
    }
    if (streams != null)
    {
        // Upload the replacement streams into gridfs and record their ids on the message.
        var streamIds = new BsonArray();
        foreach (var stream in streams)
        {
            streamIds.Add(gridfs.UploadFromStream(stream.Key, stream.Value));
        }
        toSet["streams"] = streamIds;
    }
    //using upsert because if no documents found then the doc was removed (SHOULD ONLY HAPPEN BY SOMEONE MANUALLY) so we can just send
    collection.UpdateOne(
        new QueryDocument("_id", handle.Id),
        new UpdateDocument("$set", toSet),
        new UpdateOptions { IsUpsert = true });
    // Always release the handle's stream resources, whether or not they were forwarded.
    foreach (var existingStream in handle.Streams)
    {
        existingStream.Value.Dispose();
    }
    if (streams != null)
    {
        // New streams replaced the handle's old ones, so delete the old gridfs files.
        foreach (var existingStream in handle.Streams)
        {
            gridfs.Delete(existingStream.Key);
        }
    }
}
public AppCommandProcessor(IMongoCollection <AppData> apps, GridFSBucket files, RepositoryApi repository, DataTransferManager dataTransfer, IMongoCollection <ToDeleteRevision> toDelete, WorkDistributor <BuildRequest, BuildCompled> workDistributor, IActorRef changeTracker) { _apps = apps; CommandPhase1 <CreateAppCommand>("CreateApp", repository, (command, reporter) => { reporter.Send(DeploymentMessages.RegisterRepository); return(new RegisterRepository(command.TargetRepo) { IgnoreDuplicate = true }); }, (command, reporter, op) => new ContinueCreateApp(op, command, reporter)); CommandPhase2 <ContinueCreateApp, CreateAppCommand, AppInfo>("CreateApp2", (command, result, reporter, data) => { if (!result.Ok) { if (reporter.IsCompled) { return(null); } reporter.Compled(OperationResult.Failure(result.Error ?? BuildErrorCodes.CommandErrorRegisterRepository)); return(null); } if (data != null) { reporter.Compled(OperationResult.Failure(BuildErrorCodes.CommandDuplicateApp)); return(null); } var newData = new AppData(command.AppName, -1, DateTime.UtcNow, DateTime.MinValue, command.TargetRepo, command.ProjectName, ImmutableList <AppFileInfo> .Empty); apps.InsertOne(newData); var info = newData.ToInfo(); changeTracker.Tell(info); return(info); }); CommandPhase1 <PushVersionCommand>("PushVersion", (command, reporter) => { var data = apps.AsQueryable().FirstOrDefault(ad => ad.Name == command.AppName); if (data == null) { reporter.Compled(OperationResult.Failure(BuildErrorCodes.CommandAppNotFound)); } else { BuildRequest.SendWork(workDistributor, reporter, data, repository, BuildEnv.TempFiles.CreateFile()) .PipeTo(Self, success: c => new ContinuePushNewVersion(OperationResult.Success(c), command, reporter), failure: e => new ContinuePushNewVersion(OperationResult.Failure(e.Unwrap()?.Message ?? 
"Cancel"), command, reporter)); } }); CommandPhase2 <ContinuePushNewVersion, PushVersionCommand, AppBinary>("PushVersion2", (command, result, reporter, data) => { if (data == null) { if (!reporter.IsCompled) { reporter.Compled(OperationResult.Failure(BuildErrorCodes.CommandAppNotFound)); } return(null); } if (!result.Ok) { return(null); } using var transaction = apps.Database.Client.StartSession(new ClientSessionOptions { DefaultTransactionOptions = new TransactionOptions(writeConcern: WriteConcern.Acknowledged) }); var dataFilter = Builders <AppData> .Filter.Eq(ad => ad.Name, data.Name); var(commit, fileName) = ((string, ITempFile))result.Outcome !; using var targetStream = fileName; var newId = files.UploadFromStream(data.Name + ".zip", targetStream.Stream); var newBinary = new AppFileInfo(newId, data.Last + 1, DateTime.UtcNow, false, commit); var newBinarys = data.Versions.Add(newBinary); var definition = Builders <AppData> .Update; var updates = new List <UpdateDefinition <AppData> > { definition.Set(ad => ad.Last, newBinary.Version), definition.Set(ad => ad.Versions, newBinarys) }; var deleteUpdates = new List <ToDeleteRevision>(); if (data.Versions.Count(s => !s.Deleted) > 5) { foreach (var info in newBinarys.OrderByDescending(i => i.CreationTime).Skip(5)) { if (info.Deleted) { continue; } info.Deleted = true; deleteUpdates.Add(new ToDeleteRevision(info.File.ToString())); } } transaction.StartTransaction(); if (deleteUpdates.Count != 0) { toDelete.InsertMany(transaction, deleteUpdates); } if (!apps.UpdateOne(transaction, dataFilter, definition.Combine(updates)).IsAcknowledged) { transaction.AbortTransaction(); reporter.Compled(OperationResult.Failure(BuildErrorCodes.DatabaseError)); return(null); } transaction.CommitTransaction(); changeTracker.Tell(_apps.AsQueryable().FirstOrDefault(ad => ad.Name == command.AppName)); return(new AppBinary(command.AppName, newBinary.Version, newBinary.CreationTime, false, newBinary.Commit, data.Repository)); });
// Synchronizes a repository's cached zip in GridFS with its upstream git state:
// downloads the stored zip (if any), extracts it, runs a git pull, re-zips the
// working tree into 'repozip', uploads the new zip, and queues the old file
// for deletion. Returns true when an update was performed, false when the
// repo was already current (or unknown).
// NOTE(review): 'repozip' is reused as both download target and upload source;
// callers presumably pass a seekable, writable stream — confirm.
private bool UpdateRepository(RepositoryEntry data, Reporter reporter, TransferRepository repository, string commitInfo, Stream repozip)
{
    Log.Info("Try Update Repository");
    // Serialize all repository updates.
    UpdateLock.EnterWriteLock();
    try
    {
        var downloadCompled = false;
        var repoConfiguration = new RepositoryConfiguration(reporter, data);
        // Temporary working directory for the extracted repository.
        using var repoPath = RepoEnv.TempFiles.CreateDic();
        var data2 = _repos.AsQueryable().FirstOrDefault(r => r.RepoName == repository.RepoName);
        // Only update when the repo is known and the commit differs from the cached one.
        if (data2 != null && commitInfo != data2.LastUpdate)
        {
            if (!string.IsNullOrWhiteSpace(data.FileName))
            {
                try
                {
                    Log.Info("Downloading Repository {Name} From Server", repository.RepoName);
                    reporter.Send(RepositoryMessages.GetRepositoryFromDatabase);
                    // data.FileName holds the GridFS ObjectId of the cached zip.
                    _bucket.DownloadToStream(ObjectId.Parse(data.FileName), repozip);
                    downloadCompled = true;
                }
                catch (Exception e)
                {
                    // Best-effort: a failed download just means we skip extraction below.
                    Log.Error(e, "Error on Download Repo File {Name}", data.FileName);
                }
            }
            if (downloadCompled)
            {
                Log.Info("Unpack Repository {Name}", repository.RepoName);
                // Rewind before reading the zip we just downloaded.
                repozip.Seek(0, SeekOrigin.Begin);
                using var unpackZip = new ZipArchive(repozip);
                reporter.Send(RepositoryMessages.ExtractRepository);
                unpackZip.ExtractToDirectory(repoPath.FullPath);
            }
            Log.Info("Execute Git Pull for {Name}", repository.RepoName);
            using var updater = GitUpdater.GetOrNew(repoConfiguration);
            var result = updater.RunUpdate(repoPath.FullPath);
            var dataUpdate = Builders <RepositoryEntry> .Update.Set(e => e.LastUpdate, result.Sha);
            Log.Info("Compress Repository {Name}", repository.RepoName);
            reporter.Send(RepositoryMessages.CompressRepository);
            // Truncate before re-using the stream for the new archive.
            if (repozip.Length != 0)
            {
                repozip.SetLength(0);
            }
            // leaveOpen: true — the stream is uploaded after the archive is flushed.
            using (var archive = new ZipArchive(repozip, ZipArchiveMode.Create, true))
                archive.AddFilesFromDictionary(repoPath.FullPath);
            repozip.Seek(0, SeekOrigin.Begin);
            Log.Info("Upload and Update Repository {Name}", repository.RepoName);
            reporter.Send(RepositoryMessages.UploadRepositoryToDatabase);
            // Remember the previous file id so it can be queued for deletion.
            var current = data.FileName;
            var id = _bucket.UploadFromStream(repository.RepoName.Replace('/', '_') + ".zip", repozip);
            dataUpdate = dataUpdate.Set(e => e.FileName, id.ToString());
            if (!string.IsNullOrWhiteSpace(current))
            {
                _revisions.InsertOne(new ToDeleteRevision(current));
            }
            _repos.UpdateOne(e => e.RepoName == data.RepoName, dataUpdate);
            data.FileName = id.ToString();
            // Leave the stream rewound for the caller.
            repozip.Seek(0, SeekOrigin.Begin);
            return (true);
        }
    }
    finally
    {
        UpdateLock.ExitWriteLock();
    }
    return (false);
}