private static void Main()
{
    using (var client = new HttpClient { BaseAddress = new Uri(ConfigurationManager.AppSettings["VSOnlineBaseUrl"]) })
    {
        var history = new Historian(
            new WorkItemRepository(new TfsConnection(ConfigurationManager.AppSettings["username"],
                ConfigurationManager.AppSettings["password"], client)));

        var burnup = history.GetBurnUpDataSince(new DateTime(2014, 7, 7, 23, 59, 59), @"BPS.Scrum\Dev -SEP Project");

        // "yyyyMMdd" (capital MM) is the month; lowercase "mm" would render minutes.
        string dateSuffix = DateTime.Now.ToString("yyyyMMdd");

        // Dispose the CsvWriter as well so buffered output is flushed to the file.
        using (var writer = new StreamWriter(@".\completed" + dateSuffix + ".xls"))
        using (var csvWriter = new CsvWriter(writer, new CsvConfiguration { Delimiter = "\t" }))
        {
            csvWriter.WriteRecords(burnup.First(s => s.Title == "Completed").Data);
        }

        using (var writer = new StreamWriter(@".\requested" + dateSuffix + ".xls"))
        using (var csvWriter = new CsvWriter(writer, new CsvConfiguration { Delimiter = "\t" }))
        {
            csvWriter.WriteRecords(burnup.First(s => s.Title == "Requested").Data);
        }
    }
}
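A note on the format string above: in .NET custom date formats, lowercase "mm" means minutes, so the original "yyyymmmmdd" would not produce a date-stamped file name. A quick illustration of the distinction:

// Values shown assume 2014-07-07 09:05
var when = new DateTime(2014, 7, 7, 9, 5, 0);
Console.WriteLine(when.ToString("yyyyMMdd"));   // "20140707" - month, as intended
Console.WriteLine(when.ToString("yyyy-mm-dd")); // "2014-05-07" - minutes, almost certainly a bug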
    public TrendingDataSet getTrendsforChannelIDDate(string ChannelID, string targetDate)
    {
        // An earlier implementation filled a DataSet from the
        // dbo.selectTrendingDataByChannelIDDate2 stored procedure;
        // it has been superseded by the openHistorian-based code below.
        string historianServer;
        string historianInstance;
        IEnumerable<int> channelIDs = new List<int>() { Convert.ToInt32(ChannelID) };
        DateTime startDate = Convert.ToDateTime(targetDate);
        DateTime endDate = startDate.AddDays(1);
        TrendingDataSet trendingDataSet = new TrendingDataSet();
        DateTime epoch = new DateTime(1970, 1, 1);

        using (AdoDataConnection connection = new AdoDataConnection(connectionstring, typeof(SqlConnection), typeof(SqlDataAdapter)))
        {
            historianServer = connection.ExecuteScalar<string>("SELECT Value FROM Setting WHERE Name = 'Historian.Server'") ?? "127.0.0.1";
            historianInstance = connection.ExecuteScalar<string>("SELECT Value FROM Setting WHERE Name = 'Historian.Instance'") ?? "XDA";

            using (Historian historian = new Historian(historianServer, historianInstance))
            {
                foreach (openHistorian.XDALink.TrendingDataPoint point in historian.Read(channelIDs, startDate, endDate))
                {
                    double time = point.Timestamp.Subtract(epoch).TotalMilliseconds;

                    // Find the datum for this timestamp, creating it the first time the timestamp is seen
                    TrendingDataDatum datum = trendingDataSet.ChannelData.Find(x => x.Time == time);

                    if (datum == null)
                    {
                        datum = new TrendingDataDatum() { Time = time };
                        trendingDataSet.ChannelData.Add(datum);
                    }

                    if (point.SeriesID.ToString() == "Average")
                        datum.Average = point.Value;
                    else if (point.SeriesID.ToString() == "Minimum")
                        datum.Minimum = point.Value;
                    else if (point.SeriesID.ToString() == "Maximum")
                        datum.Maximum = point.Value;
                }
            }
            IEnumerable<DataRow> table = connection.RetrieveData("SELECT {0} AS thedatefrom, " +
                                                        "        DATEADD(DAY, 1, {0}) AS thedateto, " +
                                                        "        CASE WHEN AlarmRangeLimit.PerUnit <> 0 AND Channel.PerUnitValue IS NOT NULL THEN AlarmRangeLimit.High * PerUnitValue ELSE AlarmRangeLimit.High END AS alarmlimithigh," +
                                                        "        CASE WHEN AlarmRangeLimit.PerUnit <> 0 AND Channel.PerUnitValue IS NOT NULL THEN AlarmRangeLimit.Low * PerUnitValue ELSE AlarmRangeLimit.Low END AS alarmlimitlow " +
                                                        " FROM   AlarmRangeLimit JOIN " +
                                                        "        Channel ON AlarmRangeLimit.ChannelID = Channel.ID " +
                                                        "WHERE   AlarmRangeLimit.AlarmTypeID = (SELECT ID FROM AlarmType where Name = 'Alarm') AND " +
                                                        "        AlarmRangeLimit.ChannelID = {1}", startDate, Convert.ToInt32(ChannelID)).Select();

            foreach (DataRow row in table)
            {
                trendingDataSet.AlarmLimits.Add(new TrendingAlarmLimit() { High = row.Field<double?>("alarmlimithigh"), Low = row.Field<double?>("alarmlimitlow"), TimeEnd = row.Field<DateTime>("thedateto").Subtract(epoch).TotalMilliseconds, TimeStart = row.Field<DateTime>("thedatefrom").Subtract(epoch).TotalMilliseconds });
            }

            table = connection.RetrieveData(" DECLARE @dayOfWeek INT = DATEPART(DW, {0}) - 1 " +
                                                        " DECLARE @hourOfWeek INT = @dayOfWeek * 24 " +
                                                        " ; WITH HourlyIndex AS" +
                                                        " ( " +
                                                        "   SELECT @hourOfWeek AS HourOfWeek " +
                                                        "   UNION ALL " +
                                                        "   SELECT HourOfWeek + 1 " +
                                                        "   FROM HourlyIndex" +
                                                        "   WHERE (HourOfWeek + 1) < @hourOfWeek + 24" +
                                                        " ) " +
                                                        " SELECT " +
                                                        "        DATEADD(HOUR, HourlyIndex.HourOfWeek - @hourOfWeek, {0}) AS thedatefrom, " +
                                                        "        DATEADD(HOUR, HourlyIndex.HourOfWeek - @hourOfWeek + 1, {0}) AS thedateto, " +
                                                        "        HourOfWeekLimit.High AS offlimithigh, " +
                                                        "        HourOfWeekLimit.Low AS offlimitlow " +
                                                        " FROM " +
                                                        "        HourlyIndex LEFT OUTER JOIN " +
                                                        "        HourOfWeekLimit ON HourOfWeekLimit.HourOfWeek = HourlyIndex.HourOfWeek " +
                                                        " WHERE " +
                                                        "        HourOfWeekLimit.ChannelID IS NULL OR " +
                                                        "        HourOfWeekLimit.ChannelID = {1} ", startDate, Convert.ToInt32(ChannelID)).Select();

            foreach (DataRow row in table)
            {
                trendingDataSet.OffNormalLimits.Add(new TrendingAlarmLimit() { High = row.Field<double?>("offlimithigh"), Low = row.Field<double?>("offlimitlow"), TimeEnd = row.Field<DateTime>("thedateto").Subtract(epoch).TotalMilliseconds, TimeStart = row.Field<DateTime>("thedatefrom").Subtract(epoch).TotalMilliseconds });
            }

        }

        return trendingDataSet;
    }
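The epoch-to-milliseconds conversion above is repeated for every trend point and limit row. A minimal helper, matching the Unix-epoch convention this method already uses, could factor it out (the helper name is an assumption, not part of the original code):

// Converts a timestamp to Unix-epoch milliseconds, as expected by the
// Time fields of TrendingDataSet above.
private static double ToEpochMilliseconds(DateTime timestamp)
{
    return timestamp.Subtract(new DateTime(1970, 1, 1)).TotalMilliseconds;
}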
    private List<List<TrendingDataLocation>> GetFramesFromHistorian(ContourQuery contourQuery)
    {
        DataTable idTable;
        string historianServer;
        string historianInstance;

        using (AdoDataConnection connection = new AdoDataConnection(connectionstring, typeof(SqlConnection), typeof(SqlDataAdapter)))
        {
            string query =
                "SELECT " +
                "    Channel.ID AS ChannelID, " +
                "    Meter.ID AS MeterID, " +
                "    Meter.Name AS MeterName, " +
                "    MeterLocation.Latitude, " +
                "    MeterLocation.Longitude, " +
                "    Channel.PerUnitValue " +
                "FROM " +
                "    Meter JOIN " +
                "    MeterLocation ON Meter.MeterLocationID = MeterLocation.ID LEFT OUTER JOIN " +
                "    Channel ON " +
                "        Channel.MeterID = Meter.ID AND " +
                "        Channel.ID IN (SELECT ChannelID FROM ContourChannel WHERE ContourColorScaleName = {1}) " +
                "WHERE Meter.ID IN (SELECT * FROM authMeters({0}))";

            idTable = connection.RetrieveData(query, contourQuery.UserName, contourQuery.ColorScaleName);
            historianServer = connection.ExecuteScalar<string>("SELECT Value FROM Setting WHERE Name = 'Historian.Server'") ?? "127.0.0.1";
            historianInstance = connection.ExecuteScalar<string>("SELECT Value FROM Setting WHERE Name = 'Historian.Instance'") ?? "XDA";
        }

        if (!string.IsNullOrEmpty(contourQuery.Meters))
        {
            const int byteSize = 8;

            // Meter selections are stored as a base-64 string without padding, using '-' instead of '+' and '_' instead of '/'
            string padding = "A==".Remove(3 - (contourQuery.Meters.Length + 3) % 4);
            string base64 = contourQuery.Meters.Replace('-', '+').Replace('_', '/') + padding;
            byte[] meterSelections = Convert.FromBase64String(base64);

            // The resulting byte array is a simple set of bitflags ordered by meter ID and packed into the most significant bits.
            // In order to properly interpret the bytes, we must first group and order the data by meter ID to determine the location
            // of each meter's bitflag. Then we can filter out the unwanted data from the original table of IDs
            idTable.Select()
                .Select((Row, Index) => new { Row, Index })
                .GroupBy(obj => obj.Row.ConvertField<int>("MeterID"))
                .OrderBy(grouping => grouping.Key)
                .Where((grouping, index) => (meterSelections[index / byteSize] & (0x80 >> (index % byteSize))) == 0)
                .SelectMany(grouping => grouping)
                .OrderByDescending(obj => obj.Index)
                .ToList()
                .ForEach(obj => idTable.Rows.RemoveAt(obj.Index));
        }

        List<DataRow> meterRows = idTable
            .Select()
            .DistinctBy(row => row.ConvertField<int>("MeterID"))
            .ToList();

        DateTime startDate = contourQuery.GetStartDate();
        DateTime endDate = contourQuery.GetEndDate();
        int stepSize = contourQuery.StepSize;

        // The frames to be included are those whose timestamps fall
        // within the range which is specified by startDate and
        // endDate. We start by aligning startDate and endDate with
        // the nearest frame timestamps which fall within that range
        int startTimeOffset = (int)Math.Ceiling((startDate - startDate.Date).TotalMinutes / stepSize);
        startDate = startDate.Date.AddMinutes(startTimeOffset * stepSize);

        int endTimeOffset = (int)Math.Floor((endDate - endDate.Date).TotalMinutes / stepSize);
        endDate = endDate.Date.AddMinutes(endTimeOffset * stepSize);

        // Since each frame includes data from all timestamps between
        // the previous frame's timestamp and its own timestamp, we
        // must include one additional frame of data before startDate
        startDate = startDate.AddMinutes(-stepSize);

        int frameCount = (int)((endDate - startDate).TotalMinutes / stepSize);

        List<Dictionary<int, TrendingDataLocation>> frames = Enumerable.Repeat(meterRows, frameCount)
            .Select(rows => rows.Select(row => new TrendingDataLocation()
            {
                id = row.ConvertField<int>("MeterID"),
                name = row.ConvertField<string>("MeterName"),
                Latitude = row.ConvertField<double>("Latitude"),
                Longitude = row.ConvertField<double>("Longitude")
            }))
            .Select(locations => locations.ToDictionary(location => location.id))
            .ToList();

        Dictionary<int, double?> nominalLookup = idTable
            .Select("ChannelID IS NOT NULL")
            .ToDictionary(row => row.ConvertField<int>("ChannelID"), row => row.ConvertField<double?>("PerUnitValue"));

        Dictionary<int, List<TrendingDataLocation>> lookup = idTable
            .Select("ChannelID IS NOT NULL")
            .Select(row =>
            {
                int meterID = row.ConvertField<int>("MeterID");

                return new
                {
                    ChannelID = row.ConvertField<int>("ChannelID"),
                    Frames = frames.Select(locationLookup => locationLookup[meterID]).ToList()
                };
            })
            .ToDictionary(obj => obj.ChannelID, obj => obj.Frames);

        using (Historian historian = new Historian(historianServer, historianInstance))
        {
            foreach (TrendingDataPoint point in historian.Read(lookup.Keys, startDate, endDate))
            {
                List<TrendingDataLocation> locations = lookup[point.ChannelID];

                // Use ceiling to sort data into the next nearest frame.
                // Subtract 1 because startDate was shifted to include one additional frame of data
                int frameIndex = (int)Math.Ceiling((point.Timestamp - startDate).TotalMinutes / stepSize) - 1;

                if (frameIndex < 0 || frameIndex >= locations.Count)
                    continue;

                TrendingDataLocation frame = locations[frameIndex];

                double nominal = nominalLookup[point.ChannelID] ?? 1.0D;
                double value = point.Value / nominal;

                switch (point.SeriesID)
                {
                    case SeriesID.Minimum:
                        frame.Minimum = Math.Min(value, frame.Minimum ?? value);
                        break;

                    case SeriesID.Maximum:
                        frame.Maximum = Math.Max(value, frame.Maximum ?? value);
                        break;

                    case SeriesID.Average:
                        frame.Aggregate(value);
                        frame.Average = frame.GetAverage();
                        break;
                }
            }
        }

        return frames
            .Select(frame => frame.Values.ToList())
            .ToList();
    }
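The meter-selection block in the middle of GetFramesFromHistorian implies a matching encoder on the producing side. Purely as a hedged sketch of that counterpart, following the format the comments describe (one bitflag per meter, ordered by meter ID, packed into the most significant bits, URL-safe base-64 with padding stripped); the method name and input shape are assumptions:

private static string EncodeMeterSelections(bool[] selectedByMeterIdOrder)
{
    byte[] flags = new byte[(selectedByMeterIdOrder.Length + 7) / 8];

    for (int i = 0; i < selectedByMeterIdOrder.Length; i++)
    {
        if (selectedByMeterIdOrder[i])
            flags[i / 8] |= (byte)(0x80 >> (i % 8)); // most significant bit first
    }

    // URL-safe base-64: '+' -> '-', '/' -> '_', trailing '=' padding removed
    return Convert.ToBase64String(flags)
        .TrimEnd('=')
        .Replace('+', '-')
        .Replace('/', '_');
}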
        void SaveHistorian(Historian historian, bool isNew)
        {
            bool continueSave = true;

            if (!isNew && (historian.TypeName != "HistorianAdapters.LocalOutputAdapter" || !historian.IsLocal))
            {
                SystemMessages sm = new SystemMessages(new Message() { UserMessage = "You are changing your historian type.", SystemMessage = "You are changing your historian type from an in-process local historian to another historian provider. Please note that once the changes are applied, any customizations you may have made to the in-process local historian in the openPDC configuration file will be lost." + Environment.NewLine + "Do you want to continue?", UserMessageType = MessageType.Confirmation }, ButtonType.YesNo);
                sm.Closed += (popupWindow, eargs) => continueSave = (bool)sm.DialogResult;
                sm.ShowPopup();
            }

            if (continueSave)
                m_client.SaveHistorianAsync(historian, isNew);
        }
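One caveat: this only works if ShowPopup blocks until the dialog closes; if it returns immediately, the `if (continueSave)` check runs before the user has answered and the save always proceeds. A defensive sketch (assuming the same SystemMessages API) moves the save into the Closed handler instead:

sm.Closed += (popupWindow, eargs) =>
{
    // Only save once the user has actually confirmed
    if ((bool)sm.DialogResult)
        m_client.SaveHistorianAsync(historian, isNew);
};
sm.ShowPopup();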
Example #5
        public void AddNewHistorian(Historian historian)
        {
            DataContext.Table<Historian>().AddNewRecord(historian);
        }
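For context, a minimal caller sketch; TypeName and IsLocal are taken from the SaveHistorian example above, and any other fields Historian may require are omitted:

var historian = new Historian
{
    TypeName = "HistorianAdapters.LocalOutputAdapter", // in-process local historian
    IsLocal = true
};

AddNewHistorian(historian); // persists via DataContext.Table<Historian>().AddNewRecord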
        private void StrategyAsGetCurrent(string fileName)
        {
            Storage.Batch(accessor =>
            {
                var conflict = accessor.GetConfigurationValue<ConflictItem>(RavenFileNameHelper.ConflictConfigNameForFile(fileName));

                var localMetadata = accessor.GetFile(fileName, 0, 0).Metadata;
                var localHistory  = Historian.DeserializeHistory(localMetadata);

                // incorporate remote version history into local
                foreach (var remoteHistoryItem in conflict.RemoteHistory.Where(remoteHistoryItem => !localHistory.Contains(remoteHistoryItem)))
                {
                    localHistory.Add(remoteHistoryItem);
                }

                localMetadata[SynchronizationConstants.RavenSynchronizationHistory] = Historian.SerializeHistory(localHistory);

                accessor.UpdateFileMetadata(fileName, localMetadata);

                ConflictArtifactManager.Delete(fileName, accessor);
                Publisher.Publish(new ConflictResolvedNotification {
                    FileName = fileName
                });
            });
        }
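The merge loop above is a set union of the two version histories. With LINQ, the same step could be written as below, assuming the history items compare by value, which the Contains call already relies on:

var mergedHistory = localHistory.Union(conflict.RemoteHistory).ToList();
localMetadata[SynchronizationConstants.RavenSynchronizationHistory] = Historian.SerializeHistory(mergedHistory);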
        public async Task<HttpResponseMessage> MultipartProceed()
        {
            if (!Request.Content.IsMimeMultipartContent())
            {
                throw new HttpResponseException(HttpStatusCode.UnsupportedMediaType);
            }

            var fileName     = Request.Headers.GetValues(SyncingMultipartConstants.FileName).FirstOrDefault();
            var tempFileName = RavenFileNameHelper.DownloadingFileName(fileName);

            var sourceServerInfo = InnerHeaders.Value<ServerInfo>(SyncingMultipartConstants.SourceServerInfo);
            var sourceFileETag   = Guid.Parse(InnerHeaders.GetValues("ETag").First().Trim('\"'));

            var report = new SynchronizationReport(fileName, sourceFileETag, SynchronizationType.ContentUpdate);

            Log.Debug("Starting to process multipart synchronization request of a file '{0}' with ETag {1} from {2}", fileName, sourceFileETag, sourceServerInfo);

            StorageStream localFile          = null;
            var           isNewFile          = false;
            var           isConflictResolved = false;

            try
            {
                Storage.Batch(accessor =>
                {
                    AssertFileIsNotBeingSynced(fileName, accessor);
                    FileLockManager.LockByCreatingSyncConfiguration(fileName, sourceServerInfo, accessor);
                });

                PublishSynchronizationNotification(fileName, sourceServerInfo, report.Type, SynchronizationAction.Start);

                Storage.Batch(accessor => StartupProceed(fileName, accessor));

                RavenJObject sourceMetadata = GetFilteredMetadataFromHeaders(InnerHeaders); // InnerHeaders.FilterHeadersToObject();

                var localMetadata = GetLocalMetadata(fileName);

                if (localMetadata != null)
                {
                    AssertConflictDetection(fileName, localMetadata, sourceMetadata, sourceServerInfo, out isConflictResolved);
                    localFile = StorageStream.Reading(Storage, fileName);
                }
                else
                {
                    isNewFile = true;
                }

                Historian.UpdateLastModified(sourceMetadata);

                var synchronizingFile = SynchronizingFileStream.CreatingOrOpeningAndWritting(Storage, Search, StorageOperationsTask,
                                                                                             tempFileName, sourceMetadata);

                var provider = new MultipartSyncStreamProvider(synchronizingFile, localFile);

                Log.Debug("Starting to process multipart content of a file '{0}'", fileName);

                await Request.Content.ReadAsMultipartAsync(provider);

                Log.Debug("Multipart content of a file '{0}' was processed", fileName);

                report.BytesCopied     = provider.BytesCopied;
                report.BytesTransfered = provider.BytesTransfered;
                report.NeedListLength  = provider.NumberOfFileParts;

                synchronizingFile.PreventUploadComplete = false;
                synchronizingFile.Dispose();
                sourceMetadata["Content-MD5"] = synchronizingFile.FileHash;

                Storage.Batch(accessor => accessor.UpdateFileMetadata(tempFileName, sourceMetadata));

                Storage.Batch(accessor =>
                {
                    StorageOperationsTask.IndicateFileToDelete(fileName);
                    accessor.RenameFile(tempFileName, fileName);

                    Search.Delete(tempFileName);
                    Search.Index(fileName, sourceMetadata);
                });

                if (isNewFile)
                {
                    Log.Debug("Temporary downloading file '{0}' was renamed to '{1}'. Indexes was updated.", tempFileName, fileName);
                }
                else
                {
                    Log.Debug("Old file '{0}' was deleted. Indexes was updated.", fileName);
                }

                if (isConflictResolved)
                {
                    ConflictArtifactManager.Delete(fileName);
                    Publisher.Publish(new ConflictResolvedNotification {
                        FileName = fileName
                    });
                }
            }
            catch (Exception ex)
            {
                report.Exception = ex;
            }
            finally
            {
                if (localFile != null)
                {
                    localFile.Dispose();
                }
            }

            if (report.Exception == null)
            {
                Log.Debug(
                    "File '{0}' was synchronized successfully from {1}. {2} bytes were transfered and {3} bytes copied. Need list length was {4}",
                    fileName, sourceServerInfo, report.BytesTransfered, report.BytesCopied, report.NeedListLength);
            }
            else
            {
                Log.WarnException(
                    string.Format("Error has occurred during synchronization of a file '{0}' from {1}", fileName, sourceServerInfo),
                    report.Exception);
            }

            FinishSynchronization(fileName, report, sourceServerInfo, sourceFileETag);

            PublishFileNotification(fileName, isNewFile ? FileChangeAction.Add : FileChangeAction.Update);
            PublishSynchronizationNotification(fileName, sourceServerInfo, report.Type, SynchronizationAction.Finish);

            return this.GetMessageWithObject(report, HttpStatusCode.OK);
        }
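MultipartProceed, Delete, and UpdateMetadata all parse the source file ETag with the same expression. A small helper could factor it out; the method name is an assumption, and InnerHeaders is assumed to expose the standard HttpHeaders API:

// Parses the quoted ETag header value (e.g. "\"1f0e...\"") into a Guid.
private static Guid ParseSourceFileETag(System.Net.Http.Headers.HttpHeaders headers)
{
    return Guid.Parse(headers.GetValues("ETag").First().Trim('"'));
}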
        public HttpResponseMessage Delete(string fileName)
        {
            var sourceServerInfo = InnerHeaders.Value<ServerInfo>(SyncingMultipartConstants.SourceServerInfo);
            var sourceFileETag   = Guid.Parse(InnerHeaders.GetValues("ETag").First().Trim('\"'));

            Log.Debug("Starting to delete a file '{0}' with ETag {1} from {2} because of synchronization", fileName, sourceFileETag, sourceServerInfo);

            var report = new SynchronizationReport(fileName, sourceFileETag, SynchronizationType.Delete);

            try
            {
                Storage.Batch(accessor =>
                {
                    AssertFileIsNotBeingSynced(fileName, accessor);
                    FileLockManager.LockByCreatingSyncConfiguration(fileName, sourceServerInfo, accessor);
                });

                PublishSynchronizationNotification(fileName, sourceServerInfo, report.Type, SynchronizationAction.Start);

                Storage.Batch(accessor => StartupProceed(fileName, accessor));

                var localMetadata = GetLocalMetadata(fileName);

                if (localMetadata != null)
                {
                    // REVIEW: Use InnerHeaders for consistency?
                    var sourceMetadata = GetFilteredMetadataFromHeaders(Request.Headers); // Request.Headers.FilterHeadersToObject();

                    bool isConflictResolved;

                    AssertConflictDetection(fileName, localMetadata, sourceMetadata, sourceServerInfo, out isConflictResolved);

                    Storage.Batch(accessor =>
                    {
                        StorageOperationsTask.IndicateFileToDelete(fileName);

                        var tombstoneMetadata = new RavenJObject
                        {
                            {
                                SynchronizationConstants.RavenSynchronizationHistory,
                                localMetadata[SynchronizationConstants.RavenSynchronizationHistory]
                            },
                            {
                                SynchronizationConstants.RavenSynchronizationVersion,
                                localMetadata[SynchronizationConstants.RavenSynchronizationVersion]
                            },
                            {
                                SynchronizationConstants.RavenSynchronizationSource,
                                localMetadata[SynchronizationConstants.RavenSynchronizationSource]
                            }
                        }.WithDeleteMarker();

                        Historian.UpdateLastModified(tombstoneMetadata);
                        accessor.PutFile(fileName, 0, tombstoneMetadata, true);
                    });

                    PublishFileNotification(fileName, FileChangeAction.Delete);
                }
            }
            catch (Exception ex)
            {
                report.Exception = ex;

                Log.WarnException(string.Format("Error was occurred during deletion synchronization of file '{0}' from {1}", fileName, sourceServerInfo), ex);
            }
            finally
            {
                FinishSynchronization(fileName, report, sourceServerInfo, sourceFileETag);

                PublishSynchronizationNotification(fileName, sourceServerInfo, report.Type, SynchronizationAction.Finish);
            }

            if (report.Exception == null)
            {
                Log.Debug("File '{0}' was deleted during synchronization from {1}", fileName, sourceServerInfo);
            }

            return this.GetMessageWithObject(report, HttpStatusCode.OK);
        }
        public HttpResponseMessage UpdateMetadata(string fileName)
        {
            var sourceServerInfo = InnerHeaders.Value<ServerInfo>(SyncingMultipartConstants.SourceServerInfo);
            // REVIEW: (Oren) It works, but it seems to me it is not a scalable solution.
            var sourceFileETag = Guid.Parse(InnerHeaders.GetValues("ETag").First().Trim('\"'));

            Log.Debug("Starting to update a metadata of file '{0}' with ETag {1} from {2} because of synchronization", fileName,
                      sourceFileETag, sourceServerInfo);

            var report = new SynchronizationReport(fileName, sourceFileETag, SynchronizationType.MetadataUpdate);

            try
            {
                Storage.Batch(accessor =>
                {
                    AssertFileIsNotBeingSynced(fileName, accessor);
                    FileLockManager.LockByCreatingSyncConfiguration(fileName, sourceServerInfo, accessor);
                });

                PublishSynchronizationNotification(fileName, sourceServerInfo, report.Type, SynchronizationAction.Start);

                Storage.Batch(accessor => StartupProceed(fileName, accessor));

                var localMetadata  = GetLocalMetadata(fileName);
                var sourceMetadata = GetFilteredMetadataFromHeaders(InnerHeaders);

                bool isConflictResolved;

                AssertConflictDetection(fileName, localMetadata, sourceMetadata, sourceServerInfo, out isConflictResolved);

                Historian.UpdateLastModified(sourceMetadata);

                Storage.Batch(accessor => accessor.UpdateFileMetadata(fileName, sourceMetadata));

                Search.Index(fileName, sourceMetadata);

                if (isConflictResolved)
                {
                    ConflictArtifactManager.Delete(fileName);
                    Publisher.Publish(new ConflictResolvedNotification {
                        FileName = fileName
                    });
                }

                PublishFileNotification(fileName, FileChangeAction.Update);
            }
            catch (Exception ex)
            {
                report.Exception = ex;

                Log.WarnException(
                    string.Format("Error was occured during metadata synchronization of file '{0}' from {1}", fileName,
                                  sourceServerInfo), ex);
            }
            finally
            {
                FinishSynchronization(fileName, report, sourceServerInfo, sourceFileETag);

                PublishSynchronizationNotification(fileName, sourceServerInfo, report.Type, SynchronizationAction.Finish);
            }

            if (report.Exception == null)
            {
                Log.Debug("Metadata of file '{0}' was synchronized successfully from {1}", fileName, sourceServerInfo);
            }

            return this.GetMessageWithObject(report, HttpStatusCode.OK);
        }
Example #11
        public async Task<HttpResponseMessage> Put(string name, string uploadId = null, bool preserveTimestamps = false)
        {
            try
            {
                FileSystem.MetricsCounters.FilesPerSecond.Mark();

                name = FileHeader.Canonize(name);

                var headers = this.GetFilteredMetadataFromHeaders(ReadInnerHeaders);
                if (preserveTimestamps)
                {
                    if (!headers.ContainsKey(Constants.RavenCreationDate))
                    {
                        if (headers.ContainsKey(Constants.CreationDate))
                        {
                            headers[Constants.RavenCreationDate] = headers[Constants.CreationDate];
                        }
                        else
                        {
                            throw new InvalidOperationException("Preserve Timestamps requires that the client includes the Raven-Creation-Date header.");
                        }
                    }

                    var lastModified = GetHeader(Constants.RavenLastModified);
                    if (lastModified != null)
                    {
                        DateTimeOffset when;
                        if (!DateTimeOffset.TryParse(lastModified, out when))
                        {
                            when = DateTimeOffset.UtcNow;
                        }

                        Historian.UpdateLastModified(headers, when);
                    }
                    else
                    {
                        Historian.UpdateLastModified(headers);
                    }
                }
                else
                {
                    headers[Constants.RavenCreationDate] = DateTimeOffset.UtcNow;

                    Historian.UpdateLastModified(headers);
                }

                // TODO: To keep current filesystems working. We should remove when adding a new migration.
                headers[Constants.CreationDate] = headers[Constants.RavenCreationDate].Value<DateTimeOffset>().ToString("yyyy-MM-ddTHH:mm:ss.fffffffZ", CultureInfo.InvariantCulture);

                Historian.Update(name, headers);

                SynchronizationTask.Cancel(name);

                long? size = -1;
                ConcurrencyAwareExecutor.Execute(() => Storage.Batch(accessor =>
                {
                    AssertPutOperationNotVetoed(name, headers);
                    AssertFileIsNotBeingSynced(name, accessor, true);

                    var contentLength = Request.Content.Headers.ContentLength;
                    var sizeHeader    = GetHeader("RavenFS-size");

                    long sizeForParse;
                    if (contentLength == 0 || long.TryParse(sizeHeader, out sizeForParse) == false)
                    {
                        size = contentLength;
                        if (Request.Headers.TransferEncodingChunked ?? false)
                        {
                            size = null;
                        }
                    }
                    else
                    {
                        size = sizeForParse;
                    }

                    FileSystem.PutTriggers.Apply(trigger => trigger.OnPut(name, headers));

                    using (FileSystem.DisableAllTriggersForCurrentThread())
                    {
                        StorageOperationsTask.IndicateFileToDelete(name);
                    }

                    accessor.PutFile(name, size, headers);

                    FileSystem.PutTriggers.Apply(trigger => trigger.AfterPut(name, size, headers));

                    Search.Index(name, headers);
                }));

                log.Debug("Inserted a new file '{0}' with ETag {1}", name, headers.Value <Guid>(Constants.MetadataEtagField));

                using (var contentStream = await Request.Content.ReadAsStreamAsync())
                    using (var readFileToDatabase = new ReadFileToDatabase(BufferPool, Storage, FileSystem.PutTriggers, contentStream, name, headers))
                    {
                        await readFileToDatabase.Execute();

                        if (readFileToDatabase.TotalSizeRead != size)
                        {
                            StorageOperationsTask.IndicateFileToDelete(name);
                            throw new HttpResponseException(HttpStatusCode.BadRequest);
                        }

                        if (!preserveTimestamps)
                        {
                            Historian.UpdateLastModified(headers); // update with the final file size.
                        }
                        log.Debug("File '{0}' was uploaded. Starting to update file metadata and indexes", name);

                        headers["Content-MD5"] = readFileToDatabase.FileHash;

                        Storage.Batch(accessor => accessor.UpdateFileMetadata(name, headers));

                        int totalSizeRead = readFileToDatabase.TotalSizeRead;
                        headers["Content-Length"] = totalSizeRead.ToString(CultureInfo.InvariantCulture);

                        Search.Index(name, headers);
                        Publisher.Publish(new FileChangeNotification {
                            Action = FileChangeAction.Add, File = FilePathTools.Cannoicalise(name)
                        });

                        log.Debug("Updates of '{0}' metadata and indexes were finished. New file ETag is {1}", name, headers.Value <Guid>(Constants.MetadataEtagField));

                        StartSynchronizeDestinationsInBackground();
                    }
            }
            catch (Exception ex)
            {
                if (uploadId != null)
                {
                    Guid uploadIdentifier;
                    if (Guid.TryParse(uploadId, out uploadIdentifier))
                    {
                        Publisher.Publish(new CancellationNotification {
                            UploadId = uploadIdentifier, File = name
                        });
                    }
                }

                log.WarnException(string.Format("Failed to upload a file '{0}'", name), ex);

                var concurrencyException = ex as ConcurrencyException;
                if (concurrencyException != null)
                {
                    throw ConcurrencyResponseException(concurrencyException);
                }

                throw;
            }

            return GetEmptyMessage(HttpStatusCode.Created);
        }
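Finally, a hedged client-side sketch of uploading through this handler, given a file name and its bytes. The RavenFS-size header matches what the handler reads above; the route and base address are assumptions for illustration only:

using (var client = new HttpClient { BaseAddress = new Uri("http://localhost:8080/") })
using (var content = new ByteArrayContent(fileBytes))
{
    content.Headers.ContentLength = fileBytes.Length;

    // The handler falls back to Content-Length when this header is absent or unparsable
    client.DefaultRequestHeaders.Add("RavenFS-size", fileBytes.Length.ToString());

    HttpResponseMessage response = await client.PutAsync("fs/files/" + Uri.EscapeDataString(name), content);
    response.EnsureSuccessStatusCode(); // expect 201 Created
}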