public void Put(string remotename, System.IO.Stream stream)
{
    // Uploads the stream to the remote file and verifies the transfer by
    // comparing the server-computed MD5 (returned in the ETag header)
    // against a hash computed locally while the data was transmitted.
    HttpWebRequest req = CreateRequest("/" + remotename, "");
    req.Method = "PUT";
    req.ContentType = "application/octet-stream";

    try { req.ContentLength = stream.Length; }
    catch { } // Some streams cannot report a length; fall back to chunked transfer

    //TODO: We cannot use the local MD5 calculation, because that could involve a throttled read,
    // and may invoke various events
    string fileHash = null;

    Utility.AsyncHttpRequest areq = new Utility.AsyncHttpRequest(req);
    // Hash the bytes as they pass through to the request stream, so the
    // input is only read once
    using (System.IO.Stream s = areq.GetRequestStream())
    using (MD5CalculatingStream mds = new MD5CalculatingStream(s))
    {
        Utility.Utility.CopyStream(stream, mds);
        fileHash = mds.GetFinalHashString();
    }

    string md5Hash = null;

    //We need to verify the eTag locally
    try
    {
        using (HttpWebResponse resp = (HttpWebResponse)areq.GetResponse())
            if ((int)resp.StatusCode >= 300)
                throw new WebException(Strings.CloudFiles.FileUploadError, null, WebExceptionStatus.ProtocolError, resp);
            else
                md5Hash = resp.Headers["ETag"];
    }
    catch (WebException wex)
    {
        //Catch 404 and turn it into a FolderNotFound error
        if (wex.Response is HttpWebResponse && ((HttpWebResponse)wex.Response).StatusCode == HttpStatusCode.NotFound)
            throw new FolderMissingException(wex);

        //Other error, just re-throw
        throw;
    }

    // Ordinal case-insensitive comparison; the previous ToLower()-based
    // compare was culture-sensitive and could misbehave under e.g. the
    // Turkish locale
    if (md5Hash == null || !string.Equals(md5Hash, fileHash, StringComparison.OrdinalIgnoreCase))
    {
        //Remove the broken file
        try { Delete(remotename); }
        catch { }

        throw new Exception(Strings.CloudFiles.ETagVerificationError);
    }
}
public void Put(string remotename, System.IO.Stream stream)
{
    // Uploads a file to GoogleDocs using the resumable upload protocol.
    // New files are created via a create-session request; existing files
    // are overwritten through their resumable-edit-media url. Data is
    // sent in TRANSFER_CHUNK_SIZE fragments with 308-based resume handling.
    try
    {
        Google.Documents.Document folder = GetFolder();

        //Special, since uploads can overwrite or create,
        // we must figure out if the file exists in advance.
        //Unfortunately it would be wastefull to request the list
        // for each upload request, so we rely on the cache being
        // correct
        TaggedFileEntry doc = null;
        if (m_files == null)
            doc = TryGetFile(remotename);
        else
            m_files.TryGetValue(remotename, out doc);

        try
        {
            string resumableUri;

            if (doc != null && doc.MediaUrl == null)
            {
                //Strange, we could not get the edit url, perhaps it is readonly?
                //Fallback strategy is "delete-then-upload"
                try { this.Delete(remotename); }
                catch { }

                doc = TryGetFile(remotename);

                //BUGFIX: this previously read "doc != null || doc.MediaUrl == null",
                // which threw when the re-fetch succeeded, and crashed with a
                // NullReferenceException when it returned null
                if (doc == null || doc.MediaUrl == null)
                    throw new Exception(string.Format(Strings.GoogleDocs.FileIsReadOnlyError, remotename));
            }

            //File does not exist, we upload a new one
            if (doc == null)
            {
                //First we need to get a resumeable upload url
                HttpWebRequest req = (HttpWebRequest)WebRequest.Create("https://docs.google.com/feeds/upload/create-session/default/private/full/" + System.Web.HttpUtility.UrlEncode(folder.ResourceId) + "/contents?convert=false");
                req.Method = "POST";
                req.Headers.Add("X-Upload-Content-Length", stream.Length.ToString());
                req.Headers.Add("X-Upload-Content-Type", "application/octet-stream");
                req.UserAgent = USER_AGENT;
                req.Headers.Add("GData-Version", "3.0");

                //Build the atom entry describing the file we want to create
                string labels = "";
                foreach (string s in m_labels)
                    if (s.Trim().Length > 0)
                        labels += string.Format(ATTRIBUTE_TEMPLATE, s);

                //Apply the name and content-type to the not-yet-uploaded file
                byte[] data = System.Text.Encoding.UTF8.GetBytes(string.Format(CREATE_ITEM_TEMPLATE, System.Web.HttpUtility.HtmlEncode(remotename), labels));
                req.ContentLength = data.Length;
                req.ContentType = "application/atom+xml";

                //Authenticate our request
                m_cla.ApplyAuthenticationToRequest(req);

                Utility.AsyncHttpRequest areq = new Utility.AsyncHttpRequest(req);
                using (System.IO.Stream s = areq.GetRequestStream())
                    s.Write(data, 0, data.Length);

                using (HttpWebResponse resp = (HttpWebResponse)areq.GetResponse())
                {
                    int code = (int)resp.StatusCode;
                    if (code < 200 || code >= 300) //For some reason Mono does not throw this automatically
                        throw new System.Net.WebException(resp.StatusDescription, null, System.Net.WebExceptionStatus.ProtocolError, resp);

                    resumableUri = resp.Headers["Location"];
                }
            }
            else
            {
                //First we need to get a resumeable upload url
                HttpWebRequest req = (HttpWebRequest)WebRequest.Create(doc.MediaUrl);
                req.Method = "PUT";
                req.Headers.Add("X-Upload-Content-Length", stream.Length.ToString());
                req.Headers.Add("X-Upload-Content-Type", "application/octet-stream");
                req.UserAgent = USER_AGENT;
                req.Headers.Add("If-Match", doc.ETag);
                req.Headers.Add("GData-Version", "3.0");

                //This is a blank marker request
                req.ContentLength = 0;

                //Bad... docs say "text/plain" or "text/xml", but really needs to be content type, otherwise overwrite fails
                //req.ContentType = "text/plain";
                req.ContentType = "application/octet-stream";

                //Authenticate our request
                m_cla.ApplyAuthenticationToRequest(req);

                Utility.AsyncHttpRequest areq = new Utility.AsyncHttpRequest(req);
                using (HttpWebResponse resp = (HttpWebResponse)areq.GetResponse())
                {
                    int code = (int)resp.StatusCode;
                    if (code < 200 || code >= 300) //For some reason Mono does not throw this automatically
                        throw new System.Net.WebException(resp.StatusDescription, null, System.Net.WebExceptionStatus.ProtocolError, resp);

                    resumableUri = resp.Headers["Location"];
                }
            }

            //Ensure that we have a resumeable upload url
            if (resumableUri == null)
                throw new Exception(Strings.GoogleDocs.NoResumeURLError);

            string id = null;
            byte[] buffer = new byte[8 * 1024];
            int retries = 0;
            long initialPosition;
            DateTime initialRequestTime = DateTime.Now;

            while (stream.Position != stream.Length)
            {
                initialPosition = stream.Position;
                long postbytes = Math.Min(stream.Length - initialPosition, TRANSFER_CHUNK_SIZE);

                //Post a fragment of the file as a partial request
                HttpWebRequest req = (HttpWebRequest)WebRequest.Create(resumableUri);
                req.Method = "PUT";
                req.UserAgent = USER_AGENT;
                req.ContentLength = postbytes;
                req.ContentType = "application/octet-stream";
                req.Headers.Add("Content-Range", string.Format("bytes {0}-{1}/{2}", initialPosition, initialPosition + (postbytes - 1), stream.Length.ToString()));

                //Copy the current fragment of bytes
                Utility.AsyncHttpRequest areq = new Utility.AsyncHttpRequest(req);
                using (System.IO.Stream s = areq.GetRequestStream())
                {
                    long bytesleft = postbytes;
                    long written = 0;
                    int a;
                    while (bytesleft != 0 && ((a = stream.Read(buffer, 0, (int)Math.Min(buffer.Length, bytesleft))) != 0))
                    {
                        s.Write(buffer, 0, a);
                        bytesleft -= a;
                        written += a;
                    }

                    s.Flush();

                    if (bytesleft != 0 || postbytes != written)
                        throw new System.IO.EndOfStreamException();
                }

                try
                {
                    using (HttpWebResponse resp = (HttpWebResponse)areq.GetResponse())
                    {
                        int code = (int)resp.StatusCode;
                        if (code < 200 || code >= 300) //For some reason Mono does not throw this automatically
                            throw new System.Net.WebException(resp.StatusDescription, null, System.Net.WebExceptionStatus.ProtocolError, resp);

                        //If all goes well, we should now get an atom entry describing the new element
                        System.Xml.XmlDocument xml = new XmlDocument();
                        using (System.IO.Stream s = areq.GetResponseStream())
                            xml.Load(s);

                        System.Xml.XmlNamespaceManager mgr = new XmlNamespaceManager(xml.NameTable);
                        mgr.AddNamespace("atom", "http://www.w3.org/2005/Atom");
                        mgr.AddNamespace("gd", "http://schemas.google.com/g/2005");

                        id = xml.SelectSingleNode("atom:entry/atom:id", mgr).InnerText;
                        string resourceId = xml.SelectSingleNode("atom:entry/gd:resourceId", mgr).InnerText;
                        string url = xml.SelectSingleNode("atom:entry/atom:content", mgr).Attributes["src"].Value;

                        string mediaUrl = null;
                        foreach (XmlNode n in xml.SelectNodes("atom:entry/atom:link", mgr))
                            if (n.Attributes["rel"] != null && n.Attributes["href"] != null && n.Attributes["rel"].Value.EndsWith("#resumable-edit-media"))
                            {
                                mediaUrl = n.Attributes["href"].Value;
                                break;
                            }

                        if (doc == null)
                        {
                            TaggedFileEntry tf = new TaggedFileEntry(remotename, stream.Length, initialRequestTime, initialRequestTime, resourceId, url, mediaUrl, resp.Headers["ETag"]);
                            //BUGFIX: m_files may be null when the file list was never
                            // requested; adding unguarded threw a NullReferenceException
                            // after an otherwise successful upload
                            if (m_files != null)
                                m_files.Add(remotename, tf);
                        }
                        else
                        {
                            //Since we update an existing item, we just need to update the ETag
                            doc.ETag = resp.Headers["ETag"];
                        }
                    }

                    retries = 0;
                }
                catch (WebException wex)
                {
                    bool acceptedError =
                        wex.Status == WebExceptionStatus.ProtocolError &&
                        wex.Response is HttpWebResponse &&
                        (int)((HttpWebResponse)wex.Response).StatusCode == 308;

                    //Mono does not give us the response object,
                    // so we rely on the error code being present
                    // in the string, not ideal, but I have found
                    // no other workaround :(
                    if (Duplicati.Library.Utility.Utility.IsMono)
                        acceptedError |= wex.Status == WebExceptionStatus.ProtocolError && wex.Message.Contains("308");

                    if (acceptedError && initialPosition + postbytes != stream.Length)
                    {
                        //Accept the 308 until we are complete
                        retries = 0;
                    }
                    else
                    {
                        //Retries are handled in Duplicati, but it is much more efficient here,
                        // because we only re-submit the last TRANSFER_CHUNK_SIZE bytes,
                        // instead of the entire file
                        retries++;
                        if (retries > 2)
                            throw;
                        else
                            System.Threading.Thread.Sleep(2000 * retries);

                        stream.Position = initialPosition;
                    }
                }
            }

            if (string.IsNullOrEmpty(id))
                throw new Exception(Strings.GoogleDocs.NoIDReturnedError);
        }
        catch
        {
            //Clear the cache as we have no idea what happened
            m_files = null;
            throw;
        }
    }
    catch (Google.GData.Client.CaptchaRequiredException cex)
    {
        throw new Exception(string.Format(Strings.GoogleDocs.CaptchaRequiredError, CAPTCHA_UNLOCK_URL), cex);
    }
}
public void Put(string remotename, System.IO.Stream stream)
{
    // Uploads a file: large files go through the BITS chunked upload
    // protocol, small files through a single PUT request.
    if (stream.Length > BITS_FILE_SIZE_LIMIT)
    {
        // Get extra info for BITS
        var uid = UserID;
        var fid = FolderID.Split('.')[2];

        // Create a session
        var url = string.Format("https://cid-{0}.users.storage.live.com/items/{1}/{2}?access_token={3}", uid, fid, Utility.Uri.UrlPathEncode(remotename), Library.Utility.Uri.UrlEncode(m_oauth.AccessToken));

        var req = (HttpWebRequest)WebRequest.Create(url);
        req.UserAgent = USER_AGENT;
        req.Method = "POST";
        req.ContentType = "application/json";
        req.Headers.Add("X-Http-Method-Override", "BITS_POST");
        req.Headers.Add("BITS-Packet-Type", "Create-Session");
        req.Headers.Add("BITS-Supported-Protocols", "{7df0354d-249b-430f-820d-3d2a9bef4931}");
        req.ContentLength = 0;

        var areq = new Utility.AsyncHttpRequest(req);

        string sessionid;
        using (var resp = (HttpWebResponse)areq.GetResponse())
        {
            var packtype = resp.Headers["BITS-Packet-Type"];
            //BUGFIX: the header may be missing entirely; calling
            // packtype.Equals(...) then crashed with a NullReferenceException.
            // string.Equals is null-safe; Ordinal is equivalent for "Ack".
            if (!string.Equals(packtype, "Ack", StringComparison.OrdinalIgnoreCase))
                throw new Exception(string.Format("Unable to create BITS transfer, got status: {0}", packtype));

            sessionid = resp.Headers["BITS-Session-Id"];
        }

        if (string.IsNullOrEmpty(sessionid))
            throw new Exception("BITS session-id was missing");

        // Session is now created, start uploading chunks
        var offset = 0L;
        var retries = 0;

        while (offset < stream.Length)
        {
            try
            {
                var bytesInChunk = Math.Min(BITS_CHUNK_SIZE, stream.Length - offset);

                req = (HttpWebRequest)WebRequest.Create(url);
                req.UserAgent = USER_AGENT;
                req.Method = "POST";
                req.Headers.Add("X-Http-Method-Override", "BITS_POST");
                req.Headers.Add("BITS-Packet-Type", "Fragment");
                req.Headers.Add("BITS-Session-Id", sessionid);
                req.Headers.Add("Content-Range", string.Format("bytes {0}-{1}/{2}", offset, offset + bytesInChunk - 1, stream.Length));
                req.ContentLength = bytesInChunk;

                // Rewind the source if a failed attempt advanced it
                if (stream.Position != offset)
                    stream.Position = offset;

                areq = new Utility.AsyncHttpRequest(req);
                var remaining = (int)bytesInChunk;
                using (var reqs = areq.GetRequestStream())
                {
                    int read;
                    while ((read = stream.Read(m_copybuffer, 0, Math.Min(m_copybuffer.Length, remaining))) != 0)
                    {
                        reqs.Write(m_copybuffer, 0, read);
                        remaining -= read;
                    }
                }

                using (var resp = (HttpWebResponse)areq.GetResponse())
                {
                    if (resp.StatusCode != HttpStatusCode.OK)
                        throw new WebException("Invalid partial upload response", null, WebExceptionStatus.UnknownError, resp);
                }

                offset += bytesInChunk;
                retries = 0;
            }
            catch (Exception ex)
            {
                var retry = false;

                // If we get a 5xx error, or some network issue, we retry
                if (ex is WebException && ((WebException)ex).Response is HttpWebResponse)
                {
                    var code = (int)((HttpWebResponse)((WebException)ex).Response).StatusCode;
                    retry = code >= 500 && code <= 599;
                }
                else if (ex is System.Net.Sockets.SocketException || ex is System.IO.IOException || ex.InnerException is System.Net.Sockets.SocketException || ex.InnerException is System.IO.IOException)
                {
                    retry = true;
                }

                // Retry with exponential backoff
                if (retry && retries < 5)
                {
                    System.Threading.Thread.Sleep(TimeSpan.FromSeconds(Math.Pow(2, retries)));
                    retries++;
                }
                else
                    throw;
            }
        }

        // Transfer completed, now commit the upload and close the session
        req = (HttpWebRequest)WebRequest.Create(url);
        req.UserAgent = USER_AGENT;
        req.Method = "POST";
        req.Headers.Add("X-Http-Method-Override", "BITS_POST");
        req.Headers.Add("BITS-Packet-Type", "Close-Session");
        req.Headers.Add("BITS-Session-Id", sessionid);
        req.ContentLength = 0;

        areq = new Utility.AsyncHttpRequest(req);
        using (var resp = (HttpWebResponse)areq.GetResponse())
        {
            if (resp.StatusCode != HttpStatusCode.OK)
                throw new Exception("Invalid partial upload commit response");
        }
    }
    else
    {
        var url = string.Format("{0}/{1}/files/{2}?access_token={3}", WLID_SERVER, FolderID, Utility.Uri.UrlPathEncode(remotename), Library.Utility.Uri.UrlEncode(m_oauth.AccessToken));

        var req = (HttpWebRequest)WebRequest.Create(url);
        req.UserAgent = USER_AGENT;
        req.Method = "PUT";

        try { req.ContentLength = stream.Length; }
        catch { }

        // Docs says not to set this ?
        //req.ContentType = "application/octet-stream";

        var areq = new Utility.AsyncHttpRequest(req);
        using (var reqs = areq.GetRequestStream())
            Utility.Utility.CopyStream(stream, reqs, true, m_copybuffer);

        using (var resp = (HttpWebResponse)areq.GetResponse())
        using (var rs = areq.GetResponseStream())
        using (var tr = new System.IO.StreamReader(rs))
        using (var jr = new Newtonsoft.Json.JsonTextReader(tr))
        {
            var nf = new Newtonsoft.Json.JsonSerializer().Deserialize<WLID_FolderItem>(jr);
            m_fileidCache[remotename] = nf.id;
        }
    }
}
public void Put(string remotename, System.IO.Stream input)
{
    // Uploads the stream via FTP, then (if enabled) verifies the upload
    // by listing the remote folder and comparing the reported size.
    System.Net.FtpWebRequest req = null;
    try
    {
        req = CreateRequest(remotename);
        req.Method = System.Net.WebRequestMethods.Ftp.UploadFile;
        req.UseBinary = true;

        long streamLen = -1;
        try { streamLen = input.Length; }
        catch { } // The length is optional; some streams cannot report it

        Utility.AsyncHttpRequest areq = new Utility.AsyncHttpRequest(req);
        using (System.IO.Stream rs = areq.GetRequestStream())
            Utility.Utility.CopyStream(input, rs, true);

        if (m_listVerify)
        {
            List<IFileEntry> files = List(remotename);
            StringBuilder seen = new StringBuilder();

            foreach (IFileEntry fe in files)
            {
                bool isMatch = fe.Name.Equals(remotename)
                    || fe.Name.EndsWith("/" + remotename)
                    || fe.Name.EndsWith("\\" + remotename);

                if (!isMatch)
                {
                    seen.AppendLine(fe.Name);
                    continue;
                }

                // Found the file; accept it if either size is unknown or they agree
                if (fe.Size < 0 || streamLen < 0 || fe.Size == streamLen)
                    return;

                throw new Exception(string.Format(Strings.FTPBackend.ListVerifySizeFailure, remotename, fe.Size, streamLen));
            }

            throw new Exception(string.Format(Strings.FTPBackend.ListVerifyFailure, remotename, seen.ToString()));
        }
    }
    catch (System.Net.WebException wex)
    {
        System.Net.FtpWebResponse ftpResp = wex.Response as System.Net.FtpWebResponse;
        if (req != null && ftpResp != null && ftpResp.StatusCode == System.Net.FtpStatusCode.ActionNotTakenFileUnavailable)
            throw new Interface.FolderMissingException(string.Format(Strings.FTPBackend.MissingFolderError, req.RequestUri.PathAndQuery, wex.Message), wex);
        else
            throw;
    }
}
public void Put(string remotename, System.IO.Stream stream)
{
    // Uploads the stream to the WebDAV server with a single PUT request,
    // translating 404/409 responses into FolderMissingException.
    try
    {
        System.Net.HttpWebRequest req = CreateRequest(remotename);
        req.Method = System.Net.WebRequestMethods.Http.Put;
        req.ContentType = "application/octet-stream";

        try { req.ContentLength = stream.Length; }
        catch { } // Length is best-effort; not all streams can report it

        Utility.AsyncHttpRequest areq = new Utility.AsyncHttpRequest(req);
        using (System.IO.Stream body = areq.GetRequestStream())
            Utility.Utility.CopyStream(stream, body);

        using (System.Net.HttpWebResponse resp = (System.Net.HttpWebResponse)areq.GetResponse())
        {
            int status = (int)resp.StatusCode;
            //For some reason Mono does not throw this automatically
            if (status < 200 || status >= 300)
                throw new System.Net.WebException(resp.StatusDescription, null, System.Net.WebExceptionStatus.ProtocolError, resp);
        }
    }
    catch (System.Net.WebException wex)
    {
        //Convert to better exception
        System.Net.HttpWebResponse httpResp = wex.Response as System.Net.HttpWebResponse;
        if (httpResp != null && (httpResp.StatusCode == System.Net.HttpStatusCode.Conflict || httpResp.StatusCode == System.Net.HttpStatusCode.NotFound))
            throw new Interface.FolderMissingException(string.Format(Strings.WEBDAV.MissingFolderError, m_path, wex.Message), wex);

        throw;
    }
}
private WLID_FolderItem FindFolders(bool autocreate)
{
    // Walks (and optionally creates) the folder path m_rootfolder/m_prefix,
    // returning the metadata of the final path segment.
    var segments = (m_rootfolder + '/' + m_prefix).Split(new char[] { '/' }, StringSplitOptions.RemoveEmptyEntries);

    if (segments.Length == 0)
    {
        // No path configured; return the root folder metadata directly
        var rootUrl = string.Format("{0}/{1}?access_token={2}", WLID_SERVER, ROOT_FOLDER_ID, Library.Utility.Uri.UrlEncode(m_oauth.AccessToken));
        return m_oauth.GetJSONData<WLID_FolderItem>(rootUrl);
    }

    WLID_FolderItem parent = null;
    foreach (var segment in segments)
    {
        var found = FindFolder(segment, parent == null ? null : parent.id);
        if (found != null)
        {
            parent = found;
            continue;
        }

        if (!autocreate)
            throw new FolderMissingException(Strings.OneDrive.MissingFolderError(segment));

        // Create the missing folder beneath the current parent
        var url = string.Format("{0}/{1}?access_token={2}", WLID_SERVER, parent == null ? ROOT_FOLDER_ID : parent.id, Library.Utility.Uri.UrlEncode(m_oauth.AccessToken));
        var req = (HttpWebRequest)WebRequest.Create(url);
        req.UserAgent = USER_AGENT;
        req.Method = "POST";

        var areq = new Utility.AsyncHttpRequest(req);

        using (var ms = new System.IO.MemoryStream())
        using (var sw = new System.IO.StreamWriter(ms))
        {
            new Newtonsoft.Json.JsonSerializer().Serialize(sw, new WLID_CreateFolderData()
            {
                name = segment,
                description = Strings.OneDrive.AutoCreatedFolderLabel
            });
            sw.Flush();

            ms.Position = 0;
            req.ContentLength = ms.Length;
            req.ContentType = "application/json";

            using (var reqs = areq.GetRequestStream())
                Utility.Utility.CopyStream(ms, reqs, true, m_copybuffer);
        }

        using (var resp = (HttpWebResponse)areq.GetResponse())
        using (var rs = areq.GetResponseStream())
        using (var tr = new System.IO.StreamReader(rs))
        using (var jr = new Newtonsoft.Json.JsonTextReader(tr))
        {
            var status = (int)resp.StatusCode;
            if (status < 200 || status > 299)
                throw new ProtocolViolationException(Strings.OneDrive.UnexpectedError(resp.StatusCode, resp.StatusDescription));

            parent = new Newtonsoft.Json.JsonSerializer().Deserialize<WLID_FolderItem>(jr);
        }
    }

    return parent;
}
public void Put(string remotename, System.IO.Stream stream)
{
    // Uploads a file: large files go through the BITS chunked upload
    // protocol, small files through a single PUT request.
    if (stream.Length > BITS_FILE_SIZE_LIMIT)
    {
        // Get extra info for BITS
        var uid = UserID;
        var fid = FolderID.Split('.')[2];

        // Create a session
        var url = string.Format("https://cid-{0}.users.storage.live.com/items/{1}/{2}?access_token={3}", uid, fid, Utility.Uri.UrlPathEncode(remotename), Library.Utility.Uri.UrlEncode(m_oauth.AccessToken));

        var req = (HttpWebRequest)WebRequest.Create(url);
        req.UserAgent = USER_AGENT;
        req.Method = "POST";
        req.ContentType = "application/json";
        req.Headers.Add("X-Http-Method-Override", "BITS_POST");
        req.Headers.Add("BITS-Packet-Type", "Create-Session");
        req.Headers.Add("BITS-Supported-Protocols", "{7df0354d-249b-430f-820d-3d2a9bef4931}");
        req.ContentLength = 0;

        var areq = new Utility.AsyncHttpRequest(req);

        string sessionid;
        using (var resp = (HttpWebResponse)areq.GetResponse())
        {
            var packtype = resp.Headers["BITS-Packet-Type"];
            //BUGFIX: the header may be missing entirely; calling
            // packtype.Equals(...) then crashed with a NullReferenceException.
            // string.Equals is null-safe and otherwise behaves identically here.
            if (!string.Equals(packtype, "Ack", StringComparison.OrdinalIgnoreCase))
            {
                throw new Exception(string.Format("Unable to create BITS transfer, got status: {0}", packtype));
            }

            sessionid = resp.Headers["BITS-Session-Id"];
        }

        if (string.IsNullOrEmpty(sessionid))
        {
            throw new Exception("BITS session-id was missing");
        }

        // Session is now created, start uploading chunks
        var offset = 0L;
        var retries = 0;

        while (offset < stream.Length)
        {
            try
            {
                var bytesInChunk = Math.Min(BITS_CHUNK_SIZE, stream.Length - offset);

                req = (HttpWebRequest)WebRequest.Create(url);
                req.UserAgent = USER_AGENT;
                req.Method = "POST";
                req.Headers.Add("X-Http-Method-Override", "BITS_POST");
                req.Headers.Add("BITS-Packet-Type", "Fragment");
                req.Headers.Add("BITS-Session-Id", sessionid);
                req.Headers.Add("Content-Range", string.Format("bytes {0}-{1}/{2}", offset, offset + bytesInChunk - 1, stream.Length));
                req.ContentLength = bytesInChunk;

                // Rewind the source if a failed attempt advanced it
                if (stream.Position != offset)
                {
                    stream.Position = offset;
                }

                areq = new Utility.AsyncHttpRequest(req);
                var remaining = (int)bytesInChunk;
                using (var reqs = areq.GetRequestStream())
                {
                    int read;
                    while ((read = stream.Read(m_copybuffer, 0, Math.Min(m_copybuffer.Length, remaining))) != 0)
                    {
                        reqs.Write(m_copybuffer, 0, read);
                        remaining -= read;
                    }
                }

                using (var resp = (HttpWebResponse)areq.GetResponse())
                {
                    if (resp.StatusCode != HttpStatusCode.OK)
                    {
                        throw new WebException("Invalid partial upload response", null, WebExceptionStatus.UnknownError, resp);
                    }
                }

                offset += bytesInChunk;
                retries = 0;
            }
            catch (Exception ex)
            {
                var retry = false;

                // If we get a 5xx error, or some network issue, we retry
                if (ex is WebException && ((WebException)ex).Response is HttpWebResponse)
                {
                    var code = (int)((HttpWebResponse)((WebException)ex).Response).StatusCode;
                    retry = code >= 500 && code <= 599;
                }
                else if (ex is System.Net.Sockets.SocketException || ex is System.IO.IOException || ex.InnerException is System.Net.Sockets.SocketException || ex.InnerException is System.IO.IOException)
                {
                    retry = true;
                }

                // Retry with exponential backoff
                if (retry && retries < 5)
                {
                    System.Threading.Thread.Sleep(TimeSpan.FromSeconds(Math.Pow(2, retries)));
                    retries++;
                }
                else
                {
                    throw;
                }
            }
        }

        // Transfer completed, now commit the upload and close the session
        req = (HttpWebRequest)WebRequest.Create(url);
        req.UserAgent = USER_AGENT;
        req.Method = "POST";
        req.Headers.Add("X-Http-Method-Override", "BITS_POST");
        req.Headers.Add("BITS-Packet-Type", "Close-Session");
        req.Headers.Add("BITS-Session-Id", sessionid);
        req.ContentLength = 0;

        areq = new Utility.AsyncHttpRequest(req);
        using (var resp = (HttpWebResponse)areq.GetResponse())
        {
            if (resp.StatusCode != HttpStatusCode.OK)
            {
                throw new Exception("Invalid partial upload commit response");
            }
        }
    }
    else
    {
        var url = string.Format("{0}/{1}/files/{2}?access_token={3}", WLID_SERVER, FolderID, Utility.Uri.UrlPathEncode(remotename), Library.Utility.Uri.UrlEncode(m_oauth.AccessToken));

        var req = (HttpWebRequest)WebRequest.Create(url);
        req.UserAgent = USER_AGENT;
        req.Method = "PUT";

        try { req.ContentLength = stream.Length; }
        catch { }

        // Docs says not to set this ?
        //req.ContentType = "application/octet-stream";

        var areq = new Utility.AsyncHttpRequest(req);
        using (var reqs = areq.GetRequestStream())
            Utility.Utility.CopyStream(stream, reqs, true, m_copybuffer);

        using (var resp = (HttpWebResponse)areq.GetResponse())
        using (var rs = areq.GetResponseStream())
        using (var tr = new System.IO.StreamReader(rs))
        using (var jr = new Newtonsoft.Json.JsonTextReader(tr))
        {
            var nf = new Newtonsoft.Json.JsonSerializer().Deserialize<WLID_FolderItem>(jr);
            m_fileidCache[remotename] = nf.id;
        }
    }
}
public List<IFileEntry> List()
{
    // Lists the remote folder via a WebDAV PROPFIND (Depth: 1) request
    // and parses the multistatus XML response into file entries.
    System.Net.HttpWebRequest req = CreateRequest("");
    req.Method = "PROPFIND";
    req.Headers.Add("Depth", "1");
    req.ContentType = "text/xml";
    req.ContentLength = PROPFIND_BODY.Length;

    Utility.AsyncHttpRequest areq = new Utility.AsyncHttpRequest(req);
    using (System.IO.Stream s = areq.GetRequestStream())
        s.Write(PROPFIND_BODY, 0, PROPFIND_BODY.Length);

    try
    {
        System.Xml.XmlDocument doc = new System.Xml.XmlDocument();
        using (System.Net.HttpWebResponse resp = (System.Net.HttpWebResponse)areq.GetResponse())
        {
            int code = (int)resp.StatusCode;
            if (code < 200 || code >= 300) //For some reason Mono does not throw this automatically
                throw new System.Net.WebException(resp.StatusDescription, null, System.Net.WebExceptionStatus.ProtocolError, resp);

            if (!string.IsNullOrEmpty(m_debugPropfindFile))
            {
                //BUGFIX: dispose the response stream; it was previously leaked
                using (System.IO.Stream rs = areq.GetResponseStream())
                using (System.IO.FileStream fs = new System.IO.FileStream(m_debugPropfindFile, System.IO.FileMode.Create, System.IO.FileAccess.Write, System.IO.FileShare.None))
                    Utility.Utility.CopyStream(rs, fs, false);

                doc.Load(m_debugPropfindFile);
            }
            else
            {
                //BUGFIX: dispose the response stream; it was previously leaked
                using (System.IO.Stream rs = areq.GetResponseStream())
                    doc.Load(rs);
            }
        }

        System.Xml.XmlNamespaceManager nm = new System.Xml.XmlNamespaceManager(doc.NameTable);
        nm.AddNamespace("D", "DAV:");

        List<IFileEntry> files = new List<IFileEntry>();
        m_filenamelist = new List<string>();

        foreach (System.Xml.XmlNode n in doc.SelectNodes("D:multistatus/D:response/D:href", nm))
        {
            //IIS uses %20 for spaces and %2B for +
            //Apache uses %20 for spaces and + for +
            string name = System.Web.HttpUtility.UrlDecode(n.InnerText.Replace("+", "%2B"));

            string cmp_path;

            //TODO: This list is getting ridiculous, should change to regexps
            if (name.StartsWith(m_url))
                cmp_path = m_url;
            else if (name.StartsWith(m_rawurl))
                cmp_path = m_rawurl;
            else if (name.StartsWith(m_rawurlPort))
                cmp_path = m_rawurlPort;
            else if (name.StartsWith(m_path))
                cmp_path = m_path;
            else if (name.StartsWith(m_sanitizedUrl))
                cmp_path = m_sanitizedUrl;
            else if (name.StartsWith(m_reverseProtocolUrl))
                cmp_path = m_reverseProtocolUrl;
            else
                continue;

            if (name.Length <= cmp_path.Length)
                continue;

            name = name.Substring(cmp_path.Length);

            long size = -1;
            DateTime lastAccess = new DateTime();
            DateTime lastModified = new DateTime();
            bool isCollection = false;

            System.Xml.XmlNode stat = n.ParentNode.SelectSingleNode("D:propstat/D:prop", nm);
            if (stat != null)
            {
                System.Xml.XmlNode s = stat.SelectSingleNode("D:getcontentlength", nm);
                if (s != null)
                    size = long.Parse(s.InnerText);

                s = stat.SelectSingleNode("D:getlastmodified", nm);
                if (s != null)
                    try
                    {
                        //Not important if this succeeds
                        lastAccess = lastModified = DateTime.Parse(s.InnerText, System.Globalization.CultureInfo.InvariantCulture);
                    }
                    catch { }

                s = stat.SelectSingleNode("D:iscollection", nm);
                if (s != null)
                    isCollection = s.InnerText.Trim() == "1";
                else
                    isCollection = (stat.SelectSingleNode("D:resourcetype/D:collection", nm) != null);
            }

            FileEntry fe = new FileEntry(name, size, lastAccess, lastModified);
            fe.IsFolder = isCollection;

            files.Add(fe);
            m_filenamelist.Add(name);
        }

        return files;
    }
    catch (System.Net.WebException wex)
    {
        if (wex.Response as System.Net.HttpWebResponse != null &&
            ((wex.Response as System.Net.HttpWebResponse).StatusCode == System.Net.HttpStatusCode.NotFound || (wex.Response as System.Net.HttpWebResponse).StatusCode == System.Net.HttpStatusCode.Conflict))
            throw new Interface.FolderMissingException(string.Format(Strings.WEBDAV.MissingFolderError, m_path, wex.Message), wex);

        throw;
    }
}
private WLID_FolderItem FindFolders(bool autocreate)
{
    // Resolves the configured m_rootfolder/m_prefix path one segment at a
    // time, creating missing folders when autocreate is set, and returns
    // the metadata of the last segment.
    var segments = (m_rootfolder + '/' + m_prefix).Split(new char[] { '/' }, StringSplitOptions.RemoveEmptyEntries);

    if (segments.Length == 0)
    {
        // No path configured; return the root folder metadata directly
        var rootUrl = string.Format("{0}/{1}?access_token={2}", WLID_SERVER, ROOT_FOLDER_ID, Library.Utility.Uri.UrlEncode(m_oauth.AccessToken));
        return (m_oauth.GetJSONData <WLID_FolderItem>(rootUrl, x => x.UserAgent = USER_AGENT));
    }

    WLID_FolderItem parent = null;
    foreach (var segment in segments)
    {
        var found = FindFolder(segment, parent == null ? null : parent.id);
        if (found != null)
        {
            parent = found;
            continue;
        }

        if (!autocreate)
        {
            throw new FolderMissingException(Strings.OneDrive.MissingFolderError(segment));
        }

        // Create the missing folder beneath the current parent
        var url = string.Format("{0}/{1}?access_token={2}", WLID_SERVER, parent == null ? ROOT_FOLDER_ID : parent.id, Library.Utility.Uri.UrlEncode(m_oauth.AccessToken));
        var req = (HttpWebRequest)WebRequest.Create(url);
        req.UserAgent = USER_AGENT;
        req.Method = "POST";

        var areq = new Utility.AsyncHttpRequest(req);

        using (var ms = new System.IO.MemoryStream())
        using (var sw = new System.IO.StreamWriter(ms))
        {
            new Newtonsoft.Json.JsonSerializer().Serialize(sw, new WLID_CreateFolderData()
            {
                name = segment,
                description = Strings.OneDrive.AutoCreatedFolderLabel
            });
            sw.Flush();

            ms.Position = 0;
            req.ContentLength = ms.Length;
            req.ContentType = "application/json";

            using (var reqs = areq.GetRequestStream())
                Utility.Utility.CopyStream(ms, reqs, true, m_copybuffer);
        }

        using (var resp = (HttpWebResponse)areq.GetResponse())
        using (var rs = areq.GetResponseStream())
        using (var tr = new System.IO.StreamReader(rs))
        using (var jr = new Newtonsoft.Json.JsonTextReader(tr))
        {
            var status = (int)resp.StatusCode;
            if (status < 200 || status > 299)
            {
                throw new ProtocolViolationException(Strings.OneDrive.UnexpectedError(resp.StatusCode, resp.StatusDescription));
            }

            parent = new Newtonsoft.Json.JsonSerializer().Deserialize <WLID_FolderItem>(jr);
        }
    }

    return (parent);
}
private IEnumerable <IFileEntry> ListWithouExceptionCatch()
{
    // Issues a WebDAV PROPFIND (Depth: 1) and converts the multistatus
    // response into file entries; exceptions propagate to the caller.
    var req = CreateRequest("");
    req.Method = "PROPFIND";
    req.Headers.Add("Depth", "1");
    req.ContentType = "text/xml";
    req.ContentLength = PROPFIND_BODY.Length;

    var areq = new Utility.AsyncHttpRequest(req);
    using (System.IO.Stream body = areq.GetRequestStream())
        body.Write(PROPFIND_BODY, 0, PROPFIND_BODY.Length);

    var doc = new System.Xml.XmlDocument();
    using (var resp = (System.Net.HttpWebResponse)areq.GetResponse())
    {
        int status = (int)resp.StatusCode;
        if (status < 200 || status >= 300) //For some reason Mono does not throw this automatically
        {
            throw new System.Net.WebException(resp.StatusDescription, null, System.Net.WebExceptionStatus.ProtocolError, resp);
        }

        if (!string.IsNullOrEmpty(m_debugPropfindFile))
        {
            // Spool the raw response to the debug file, then parse from disk
            using (var rs = areq.GetResponseStream())
            using (var fs = new System.IO.FileStream(m_debugPropfindFile, System.IO.FileMode.Create, System.IO.FileAccess.Write, System.IO.FileShare.None))
                Utility.Utility.CopyStream(rs, fs, false, m_copybuffer);

            doc.Load(m_debugPropfindFile);
        }
        else
        {
            using (var rs = areq.GetResponseStream())
                doc.Load(rs);
        }
    }

    var nsmgr = new System.Xml.XmlNamespaceManager(doc.NameTable);
    nsmgr.AddNamespace("D", "DAV:");

    var entries = new List <IFileEntry>();
    m_filenamelist = new List <string>();

    foreach (System.Xml.XmlNode hrefNode in doc.SelectNodes("D:multistatus/D:response/D:href", nsmgr))
    {
        //IIS uses %20 for spaces and %2B for +
        //Apache uses %20 for spaces and + for +
        string name = Library.Utility.Uri.UrlDecode(hrefNode.InnerText.Replace("+", "%2B"));

        //TODO: This list is getting ridiculous, should change to regexps
        // Candidates are tried in the original priority order
        string matchedPrefix = null;
        foreach (var candidate in new string[] { m_url, m_rawurl, m_rawurlPort, m_path, "/" + m_path, m_sanitizedUrl, m_reverseProtocolUrl })
        {
            if (name.StartsWith(candidate, StringComparison.Ordinal))
            {
                matchedPrefix = candidate;
                break;
            }
        }

        if (matchedPrefix == null)
        {
            continue;
        }

        if (name.Length <= matchedPrefix.Length)
        {
            continue;
        }

        name = name.Substring(matchedPrefix.Length);

        long size = -1;
        DateTime lastAccess = new DateTime();
        DateTime lastModified = new DateTime();
        bool isCollection = false;

        System.Xml.XmlNode prop = hrefNode.ParentNode.SelectSingleNode("D:propstat/D:prop", nsmgr);
        if (prop != null)
        {
            System.Xml.XmlNode node = prop.SelectSingleNode("D:getcontentlength", nsmgr);
            if (node != null)
            {
                size = long.Parse(node.InnerText);
            }

            node = prop.SelectSingleNode("D:getlastmodified", nsmgr);
            if (node != null)
            {
                try
                {
                    //Not important if this succeeds
                    lastAccess = lastModified = DateTime.Parse(node.InnerText, System.Globalization.CultureInfo.InvariantCulture);
                }
                catch { }
            }

            node = prop.SelectSingleNode("D:iscollection", nsmgr);
            if (node != null)
            {
                isCollection = node.InnerText.Trim() == "1";
            }
            else
            {
                isCollection = (prop.SelectSingleNode("D:resourcetype/D:collection", nsmgr) != null);
            }
        }

        FileEntry entry = new FileEntry(name, size, lastAccess, lastModified);
        entry.IsFolder = isCollection;

        entries.Add(entry);
        m_filenamelist.Add(name);
    }

    return (entries);
}
/// <summary>
/// Uploads <paramref name="stream"/> to the remote file <paramref name="remotename"/>,
/// using the Google Docs resumable upload protocol: first a session request obtains a
/// resumable upload URL (POST for a new file, PUT with If-Match for an overwrite),
/// then the content is sent in TRANSFER_CHUNK_SIZE fragments. Intermediate fragments
/// are acknowledged with HTTP 308 (Resume Incomplete); the final fragment returns an
/// atom entry describing the uploaded document, which is used to update the file cache.
/// </summary>
/// <param name="remotename">The name of the remote file to create or overwrite.</param>
/// <param name="stream">The source data; must be seekable, as Position/Length are used
/// to drive chunking and retries.</param>
public void Put(string remotename, System.IO.Stream stream)
{
    try
    {
        Google.Documents.Document folder = GetFolder();

        //Special, since uploads can overwrite or create,
        // we must figure out if the file exists in advance.
        //Unfortunately it would be wastefull to request the list
        // for each upload request, so we rely on the cache being
        // correct
        TaggedFileEntry doc = null;
        if (m_files == null)
        {
            doc = TryGetFile(remotename);
        }
        else
        {
            m_files.TryGetValue(remotename, out doc);
        }

        try
        {
            string resumableUri;

            if (doc != null)
            {
                if (doc.MediaUrl == null)
                {
                    //Strange, we could not get the edit url, perhaps it is readonly?
                    //Fallback strategy is "delete-then-upload"
                    try { this.Delete(remotename); }
                    catch { }

                    doc = TryGetFile(remotename);

                    // BUGFIX: was "doc != null || doc.MediaUrl == null", which threw
                    // whenever the file still existed (even with a usable MediaUrl) and
                    // dereferenced null when the delete succeeded (doc == null).
                    // Only fail if the file survived the delete AND is still not editable;
                    // doc == null falls through to the fresh-upload path below.
                    if (doc != null && doc.MediaUrl == null)
                    {
                        throw new Exception(string.Format(Strings.GoogleDocs.FileIsReadOnlyError, remotename));
                    }
                }
            }

            //File does not exist, we upload a new one
            if (doc == null)
            {
                //First we need to get a resumeable upload url
                HttpWebRequest req = (HttpWebRequest)WebRequest.Create("https://docs.google.com/feeds/upload/create-session/default/private/full/" + System.Web.HttpUtility.UrlEncode(folder.ResourceId) + "/contents?convert=false");
                req.Method = "POST";
                req.Headers.Add("X-Upload-Content-Length", stream.Length.ToString());
                req.Headers.Add("X-Upload-Content-Type", "application/octet-stream");
                req.UserAgent = USER_AGENT;
                req.Headers.Add("GData-Version", "3.0");

                //Build the atom entry describing the file we want to create
                string labels = "";
                foreach (string s in m_labels)
                {
                    if (s.Trim().Length > 0)
                    {
                        labels += string.Format(ATTRIBUTE_TEMPLATE, s);
                    }
                }

                //Apply the name and content-type to the not-yet-uploaded file
                byte[] data = System.Text.Encoding.UTF8.GetBytes(string.Format(CREATE_ITEM_TEMPLATE, System.Web.HttpUtility.HtmlEncode(remotename), labels));
                req.ContentLength = data.Length;
                req.ContentType = "application/atom+xml";

                //Authenticate our request
                m_cla.ApplyAuthenticationToRequest(req);

                Utility.AsyncHttpRequest areq = new Utility.AsyncHttpRequest(req);
                using (System.IO.Stream s = areq.GetRequestStream())
                    s.Write(data, 0, data.Length);

                using (HttpWebResponse resp = (HttpWebResponse)areq.GetResponse())
                {
                    int code = (int)resp.StatusCode;
                    if (code < 200 || code >= 300) //For some reason Mono does not throw this automatically
                    {
                        throw new System.Net.WebException(resp.StatusDescription, null, System.Net.WebExceptionStatus.ProtocolError, resp);
                    }

                    resumableUri = resp.Headers["Location"];
                }
            }
            else
            {
                //First we need to get a resumeable upload url
                HttpWebRequest req = (HttpWebRequest)WebRequest.Create(doc.MediaUrl);
                req.Method = "PUT";
                req.Headers.Add("X-Upload-Content-Length", stream.Length.ToString());
                req.Headers.Add("X-Upload-Content-Type", "application/octet-stream");
                req.UserAgent = USER_AGENT;
                // If-Match ensures we only overwrite the revision we know about
                req.Headers.Add("If-Match", doc.ETag);
                req.Headers.Add("GData-Version", "3.0");

                //This is a blank marker request
                req.ContentLength = 0;

                //Bad... docs say "text/plain" or "text/xml", but really needs to be content type, otherwise overwrite fails
                //req.ContentType = "text/plain";
                req.ContentType = "application/octet-stream";

                //Authenticate our request
                m_cla.ApplyAuthenticationToRequest(req);

                Utility.AsyncHttpRequest areq = new Utility.AsyncHttpRequest(req);
                using (HttpWebResponse resp = (HttpWebResponse)areq.GetResponse())
                {
                    int code = (int)resp.StatusCode;
                    if (code < 200 || code >= 300) //For some reason Mono does not throw this automatically
                    {
                        throw new System.Net.WebException(resp.StatusDescription, null, System.Net.WebExceptionStatus.ProtocolError, resp);
                    }

                    resumableUri = resp.Headers["Location"];
                }
            }

            //Ensure that we have a resumeable upload url
            if (resumableUri == null)
            {
                throw new Exception(Strings.GoogleDocs.NoResumeURLError);
            }

            string id = null;
            byte[] buffer = new byte[8 * 1024];
            int retries = 0;
            long initialPosition;
            DateTime initialRequestTime = DateTime.Now;

            // Upload the file in chunks; each chunk is retried individually so a
            // failure only re-transmits the last TRANSFER_CHUNK_SIZE bytes
            while (stream.Position != stream.Length)
            {
                initialPosition = stream.Position;
                long postbytes = Math.Min(stream.Length - initialPosition, TRANSFER_CHUNK_SIZE);

                //Post a fragment of the file as a partial request
                HttpWebRequest req = (HttpWebRequest)WebRequest.Create(resumableUri);
                req.Method = "PUT";
                req.UserAgent = USER_AGENT;
                req.ContentLength = postbytes;
                req.ContentType = "application/octet-stream";
                req.Headers.Add("Content-Range", string.Format("bytes {0}-{1}/{2}", initialPosition, initialPosition + (postbytes - 1), stream.Length.ToString()));

                //Copy the current fragment of bytes
                Utility.AsyncHttpRequest areq = new Utility.AsyncHttpRequest(req);
                using (System.IO.Stream s = areq.GetRequestStream())
                {
                    long bytesleft = postbytes;
                    long written = 0;
                    int a;
                    while (bytesleft != 0 && ((a = stream.Read(buffer, 0, (int)Math.Min(buffer.Length, bytesleft))) != 0))
                    {
                        s.Write(buffer, 0, a);
                        bytesleft -= a;
                        written += a;
                    }

                    s.Flush();

                    // The source stream ended before delivering the promised bytes
                    if (bytesleft != 0 || postbytes != written)
                    {
                        throw new System.IO.EndOfStreamException();
                    }
                }

                try
                {
                    using (HttpWebResponse resp = (HttpWebResponse)areq.GetResponse())
                    {
                        int code = (int)resp.StatusCode;
                        if (code < 200 || code >= 300) //For some reason Mono does not throw this automatically
                        {
                            throw new System.Net.WebException(resp.StatusDescription, null, System.Net.WebExceptionStatus.ProtocolError, resp);
                        }

                        //If all goes well, we should now get an atom entry describing the new element
                        System.Xml.XmlDocument xml = new XmlDocument();
                        using (System.IO.Stream s = areq.GetResponseStream())
                            xml.Load(s);

                        System.Xml.XmlNamespaceManager mgr = new XmlNamespaceManager(xml.NameTable);
                        mgr.AddNamespace("atom", "http://www.w3.org/2005/Atom");
                        mgr.AddNamespace("gd", "http://schemas.google.com/g/2005");

                        id = xml.SelectSingleNode("atom:entry/atom:id", mgr).InnerText;
                        string resourceId = xml.SelectSingleNode("atom:entry/gd:resourceId", mgr).InnerText;
                        string url = xml.SelectSingleNode("atom:entry/atom:content", mgr).Attributes["src"].Value;

                        // The resumable-edit link is what later overwrites must target
                        string mediaUrl = null;
                        foreach (XmlNode n in xml.SelectNodes("atom:entry/atom:link", mgr))
                        {
                            if (n.Attributes["rel"] != null && n.Attributes["href"] != null && n.Attributes["rel"].Value.EndsWith("#resumable-edit-media"))
                            {
                                mediaUrl = n.Attributes["href"].Value;
                                break;
                            }
                        }

                        if (doc == null)
                        {
                            // NOTE(review): m_files may still be null here if the cache was
                            // never populated; presumably TryGetFile initializes it — TODO confirm
                            TaggedFileEntry tf = new TaggedFileEntry(remotename, stream.Length, initialRequestTime, initialRequestTime, resourceId, url, mediaUrl, resp.Headers["ETag"]);
                            m_files.Add(remotename, tf);
                        }
                        else
                        {
                            //Since we update an existing item, we just need to update the ETag
                            doc.ETag = resp.Headers["ETag"];
                        }
                    }

                    retries = 0;
                }
                catch (WebException wex)
                {
                    // HTTP 308 (Resume Incomplete) is the expected acknowledgement for
                    // every fragment except the last one
                    bool acceptedError =
                        wex.Status == WebExceptionStatus.ProtocolError &&
                        wex.Response is HttpWebResponse &&
                        (int)((HttpWebResponse)wex.Response).StatusCode == 308;

                    //Mono does not give us the response object,
                    // so we rely on the error code being present
                    // in the string, not ideal, but I have found
                    // no other workaround :(
                    if (Duplicati.Library.Utility.Utility.IsMono)
                    {
                        acceptedError |= wex.Status == WebExceptionStatus.ProtocolError && wex.Message.Contains("308");
                    }

                    //Accept the 308 until we are complete
                    if (acceptedError && initialPosition + postbytes != stream.Length)
                    {
                        retries = 0;
                    }
                    else
                    {
                        //Retries are handled in Duplicati, but it is much more efficient here,
                        // because we only re-submit the last TRANSFER_CHUNK_SIZE bytes,
                        // instead of the entire file
                        retries++;
                        if (retries > 2)
                        {
                            throw;
                        }
                        else
                        {
                            System.Threading.Thread.Sleep(2000 * retries);
                        }

                        // Rewind so the failed fragment is sent again
                        stream.Position = initialPosition;
                    }
                }
            }

            if (string.IsNullOrEmpty(id))
            {
                throw new Exception(Strings.GoogleDocs.NoIDReturnedError);
            }
        }
        catch
        {
            //Clear the cache as we have no idea what happened
            m_files = null;
            throw;
        }
    }
    catch (Google.GData.Client.CaptchaRequiredException cex)
    {
        throw new Exception(string.Format(Strings.GoogleDocs.CaptchaRequiredError, CAPTCHA_UNLOCK_URL), cex);
    }
}
/// <summary>
/// Uploads <paramref name="stream"/> to the remote file <paramref name="remotename"/>.
/// The MD5 hash of the data is computed while it is being transmitted and compared
/// against the ETag the server returns; on mismatch the (broken) remote file is
/// deleted and an exception is thrown.
/// </summary>
/// <param name="remotename">The name of the remote file to create or overwrite.</param>
/// <param name="stream">The source data; read once from its current position.</param>
public void Put(string remotename, System.IO.Stream stream)
{
    HttpWebRequest req = CreateRequest("/" + remotename, "");
    req.Method = "PUT";
    req.ContentType = "application/octet-stream";

    // Announce the length up front when the stream supports it; some streams
    // throw on Length, in which case the request is sent without it
    try { req.ContentLength = stream.Length; }
    catch { }

    // We deliberately do NOT pre-calculate the MD5 and send it as an ETag header
    // for server-side verification: that would require reading the stream twice,
    // which could involve a throttled read and may invoke various events.
    // Instead the hash is computed on the fly during the upload and verified
    // against the ETag reported back by the server.
    string fileHash = null;

    Utility.AsyncHttpRequest areq = new Utility.AsyncHttpRequest(req);
    using (System.IO.Stream s = areq.GetRequestStream())
    using (var mds = new Utility.MD5CalculatingStream(s))
    {
        Utility.Utility.CopyStream(stream, mds, true, m_copybuffer);
        fileHash = mds.GetFinalHashString();
    }

    string md5Hash = null;

    //We need to verify the eTag locally
    try
    {
        using (HttpWebResponse resp = (HttpWebResponse)areq.GetResponse())
        {
            if ((int)resp.StatusCode >= 300)
            {
                throw new WebException(Strings.CloudFiles.FileUploadError, null, WebExceptionStatus.ProtocolError, resp);
            }
            else
            {
                md5Hash = resp.Headers["ETag"];
            }
        }
    }
    catch (WebException wex)
    {
        //Catch 404 and turn it into a FolderNotFound error
        if (wex.Response is HttpWebResponse && ((HttpWebResponse)wex.Response).StatusCode == HttpStatusCode.NotFound)
        {
            throw new FolderMissingException(wex);
        }

        //Other error, just re-throw
        throw;
    }

    // Hex digests are plain ASCII, so an ordinal case-insensitive comparison is the
    // correct, culture-safe way to compare them (avoids ToLower() which is
    // culture-sensitive and allocates two strings)
    if (md5Hash == null || !string.Equals(md5Hash, fileHash, StringComparison.OrdinalIgnoreCase))
    {
        //Remove the broken file
        try { Delete(remotename); }
        catch { }

        throw new Exception(Strings.CloudFiles.ETagVerificationError);
    }
}
/// <summary>
/// Uploads <paramref name="stream"/> to the remote file <paramref name="remotename"/>
/// on Jottacloud. The MD5 hash is computed locally (either by seeking over the
/// unwrapped base stream, or by spooling to a temporary file when seeking is not
/// possible) and sent in the JMd5 header so the server can verify the content.
/// The response XML is inspected to confirm the upload produced a COMPLETED revision.
/// </summary>
/// <param name="remotename">The name of the remote file to create or overwrite.</param>
/// <param name="stream">The source data; may be a throttled/wrapped stream.</param>
public void Put(string remotename, System.IO.Stream stream)
{
    // Some challenges with uploading to Jottacloud:
    // - Jottacloud supports use of a custom header where we can tell the server the MD5 hash of the file
    //   we are uploading, and then it will verify the content of our request against it. But the HTTP
    //   status code we get back indicates success even if there is a mismatch, so we must dig into the
    //   XML response to see if we were able to correctly upload the new content or not. Another issue
    //   is that if the stream is not seek-able we have a challenge pre-calculating MD5 hash on it before
    //   writing it out on the HTTP request stream. And even if the stream is seek-able it may be throttled.
    //   One way to avoid using the throttled stream for calculating the MD5 is to try to get the
    //   underlying stream from the "m_basestream" field, with fall-back to a temporary file.
    // - We can instead chose to upload the data without setting the MD5 hash header. The server will
    //   calculate the MD5 on its side and return it in the response back to use. We can then compare it
    //   with the MD5 hash of the stream (using a MD5CalculatingStream), and if there is a mismatch we can
    //   request the server to remove the file again and throw an exception. But there is a requirement that
    //   we specify the file size in a custom header. And if the stream is not seek-able we are not able
    //   to use stream.Length, so we are back at square one.
    Duplicati.Library.Utility.TempFile tmpFile = null;
    // BUGFIX: the read stream opened over the temp file was never disposed, so the
    // tmpFile.Dispose() in the finally block could fail to delete the still-open
    // file (the error being silently swallowed), leaking the temporary file.
    // We now keep a reference to the stream we own and close it first.
    System.IO.Stream tmpStream = null;

    // Unwrap throttling/override wrappers to reach the raw base stream, so the MD5
    // pre-calculation is not subject to throttling
    var baseStream = stream;
    while (baseStream is Duplicati.Library.Utility.OverrideableStream)
    {
        baseStream = typeof(Duplicati.Library.Utility.OverrideableStream).GetField("m_basestream", System.Reflection.BindingFlags.DeclaredOnly | System.Reflection.BindingFlags.Instance | System.Reflection.BindingFlags.NonPublic).GetValue(baseStream) as System.IO.Stream;
    }

    if (baseStream == null)
    {
        throw new Exception(string.Format("Unable to unwrap stream from: {0}", stream.GetType()));
    }

    string md5Hash;
    if (baseStream.CanSeek)
    {
        // Hash in place, then rewind so the upload below re-reads the same data
        var originalPosition = baseStream.Position;
        using (var md5 = System.Security.Cryptography.MD5.Create())
            md5Hash = Library.Utility.Utility.ByteArrayAsHexString(md5.ComputeHash(baseStream));
        baseStream.Position = originalPosition;
    }
    else
    {
        // No seeking possible, use a temp file
        tmpFile = new Duplicati.Library.Utility.TempFile();
        using (var os = System.IO.File.OpenWrite(tmpFile))
        using (var md5 = new Utility.MD5CalculatingStream(baseStream))
        {
            Library.Utility.Utility.CopyStream(md5, os, true, m_copybuffer);
            md5Hash = md5.GetFinalHashString();
        }

        // From here on we upload from the temp file; we own this stream
        stream = tmpStream = System.IO.File.OpenRead(tmpFile);
    }

    try
    {
        // Create request, with query parameter, and a few custom headers.
        // NB: If we wanted to we could send the same POST request as below but without the file contents
        // and with "cphash=[md5Hash]" as the only query parameter. Then we will get an HTTP 200 (OK) response
        // if an identical file already exists, and we can skip uploading the new file. We will get
        // HTTP 404 (Not Found) if file does not exists or it exists with a different hash, in which
        // case we must send a new request to upload the new content.
        var fileSize = stream.Length;
        var req = CreateRequest(System.Net.WebRequestMethods.Http.Post, remotename, "umode=nomultipart", true);
        req.Headers.Add("JMd5", md5Hash); // Not required, but it will make the server verify the content and mark the file as corrupt if there is a mismatch.
        req.Headers.Add("JSize", fileSize.ToString()); // Required; the server marks the file as incomplete if the uploaded size does not match this value.
        // File time stamp headers: Since we are working with a stream here we do not know the local file's timestamps,
        // and then we can just omit the JCreated and JModified and let the server automatically set the current time.
        //req.Headers.Add("JCreated", timeCreated);
        //req.Headers.Add("JModified", timeModified);
        req.ContentType = "application/octet-stream";
        req.ContentLength = fileSize;

        // Write post data request
        var areq = new Utility.AsyncHttpRequest(req);
        using (var rs = areq.GetRequestStream())
            Utility.Utility.CopyStream(stream, rs, true, m_copybuffer);

        // Send request, and check response
        using (var resp = (System.Net.HttpWebResponse)areq.GetResponse())
        {
            if (resp.StatusCode != System.Net.HttpStatusCode.Created)
            {
                throw new System.Net.WebException(Strings.Jottacloud.FileUploadError, null, System.Net.WebExceptionStatus.ProtocolError, resp);
            }

            // Request seems to be successful, but we must verify the response XML content to be sure that the file
            // was correctly uploaded: The server will verify the JSize header and mark the file as incomplete if
            // there was mismatch, and it will verify the JMd5 header and mark the file as corrupt if there was a hash
            // mismatch. The returned XML contains a file element, and if upload was error free it contains a single
            // child element "currentRevision", which has a "state" child element with the string "COMPLETED".
            // If there was a problem we should have a "latestRevision" child element, and this will have state with
            // value "INCOMPLETE" or "CORRUPT". If the file was new or had no previous complete versions the latestRevision
            // will be the only child, but if not there may also be a "currentRevision" representing the previous
            // complete version - and then we need to detect the case where our upload failed but there was an existing
            // complete version!
            using (var rs = areq.GetResponseStream())
            {
                var doc = new System.Xml.XmlDocument();
                try
                {
                    doc.Load(rs);
                }
                catch (System.Xml.XmlException)
                {
                    throw new System.Net.WebException(Strings.Jottacloud.FileUploadError, System.Net.WebExceptionStatus.ProtocolError);
                }

                bool uploadCompletedSuccessfully = false;
                var xFile = doc["file"];
                if (xFile != null)
                {
                    var xRevState = xFile.SelectSingleNode("latestRevision");
                    if (xRevState == null)
                    {
                        xRevState = xFile.SelectSingleNode("currentRevision/state");
                        if (xRevState != null)
                        {
                            uploadCompletedSuccessfully = xRevState.InnerText == "COMPLETED"; // Success: There is no "latestRevision", only a "currentRevision" (and it specifies the file is complete, but I think it always will).
                        }
                    }
                }

                if (!uploadCompletedSuccessfully) // Report error (and we just let the incomplete/corrupt file revision stay on the server..)
                {
                    throw new System.Net.WebException(Strings.Jottacloud.FileUploadError, System.Net.WebExceptionStatus.ProtocolError);
                }
            }
        }
    }
    finally
    {
        // Close our own stream over the temp file BEFORE disposing the temp file,
        // otherwise the delete can fail on platforms that forbid removing open files
        try
        {
            if (tmpStream != null)
            {
                tmpStream.Dispose();
            }
        }
        catch { }

        try
        {
            if (tmpFile != null)
            {
                tmpFile.Dispose();
            }
        }
        catch { }
    }
}