// Extracts the gated detector signal for one detector from a block,
// either integrating over the TOF gate or taking its mean, as directed
// by the extraction spec.
public static GatedDetectorData ExtractFromBlock(Block b, GatedDetectorExtractSpec gate)
{
    // Pick the extraction routine according to the spec.
    GatedDetectorExtractFunction extract = gate.Integrate
        ? new GatedDetectorExtractFunction(b.GetTOFIntegralArray)
        : new GatedDetectorExtractFunction(b.GetTOFMeanArray);

    double[] rawData = extract(gate.Index, gate.GateLow, gate.GateHigh);

    // Background subtraction is currently disabled. The disabled version
    // fitted the average TOF, scaled the fitted background by the gate
    // width, and subtracted it from every point:
    //if (gate.BackgroundSubtract)
    //{
    //TOFFitResults results = (new TOFFitter()).FitTOF(b.GetAverageTOF(gate.Index));
    //double bg = results.Background * (gate.GateHigh - gate.GateLow);
    //double[] bgSubData = new double[rawData.Length];
    //for (int i = 0; i < rawData.Length; i++) bgSubData[i] = rawData[i] - bg;
    //gd.PointValues.AddRange(bgSubData);
    //gd.SubtractedBackground = bg;
    //}
    //else
    //{

    GatedDetectorData extracted = new GatedDetectorData();
    extracted.PointValues.AddRange(rawData);
    //}
    extracted.Gate = gate;
    return extracted;
}
// A "normalising" division: each point of d1 is divided point-by-point by
// d2, then rescaled by the mean of d2 so that the mean of d1 is left
// unchanged. Admittedly an abuse of operator overloading.
public static GatedDetectorData operator /(GatedDetectorData d1, GatedDetectorData d2)
{
    GatedDetectorData quotient = new GatedDetectorData();
    quotient.Gate = d1.Gate;
    quotient.SubtractedBackground = d1.SubtractedBackground;

    // Mean of the divisor's points.
    double divisorMean = 0;
    for (int k = 0; k < d2.PointValues.Count; k++)
    {
        divisorMean += d2.PointValues[k];
    }
    divisorMean /= d2.PointValues.Count;

    // Point-by-point ratio, rescaled so the overall mean is preserved.
    for (int k = 0; k < d1.PointValues.Count; k++)
    {
        quotient.PointValues.Add(divisorMean * d1.PointValues[k] / d2.PointValues[k]);
    }

    return quotient;
}
// Adds the gated TOF values of d1 to those of d2, point by point.
// Gate and background metadata are taken from d1.
public static GatedDetectorData operator +(GatedDetectorData d1, GatedDetectorData d2)
{
    GatedDetectorData sum = new GatedDetectorData();
    sum.Gate = d1.Gate;
    sum.SubtractedBackground = d1.SubtractedBackground;

    int count = d1.PointValues.Count;
    for (int k = 0; k < count; k++)
    {
        sum.PointValues.Add(d1.PointValues[k] + d2.PointValues[k]);
    }
    return sum;
}
// A "normalising" division: d1 is divided through point by point by d2,
// then multiplied back up by the mean of d2, so the mean of d1 stays the
// same. Probably an abuse of operator overloading.
public static GatedDetectorData operator /(GatedDetectorData d1, GatedDetectorData d2)
{
    GatedDetectorData result = new GatedDetectorData();
    result.Gate = d1.Gate;
    result.SubtractedBackground = d1.SubtractedBackground;

    // Accumulate the mean of the divisor.
    double meanOfDivisor = 0;
    int divisorCount = d2.PointValues.Count;
    for (int n = 0; n < divisorCount; n++)
    {
        meanOfDivisor += d2.PointValues[n];
    }
    meanOfDivisor /= divisorCount;

    // Rescaled pointwise quotient.
    for (int n = 0; n < d1.PointValues.Count; n++)
    {
        double ratio = d1.PointValues[n] / d2.PointValues[n];
        result.PointValues.Add(meanOfDivisor * ratio);
    }
    return result;
}
// Builds a GatedDetectorData from one detector channel of a block.
// The extraction spec decides whether the TOF gate is integrated or averaged.
public static GatedDetectorData ExtractFromBlock(Block b, GatedDetectorExtractSpec gate)
{
    GatedDetectorExtractFunction extractor;
    if (gate.Integrate)
    {
        extractor = new GatedDetectorExtractFunction(b.GetTOFIntegralArray);
    }
    else
    {
        extractor = new GatedDetectorExtractFunction(b.GetTOFMeanArray);
    }

    double[] points = extractor(gate.Index, gate.GateLow, gate.GateHigh);

    // Background subtraction is switched off at present; the disabled code
    // fitted the average TOF and subtracted Background * gate width:
    //if (gate.BackgroundSubtract)
    //{
    //TOFFitResults results = (new TOFFitter()).FitTOF(b.GetAverageTOF(gate.Index));
    //double bg = results.Background * (gate.GateHigh - gate.GateLow);
    //double[] bgSubData = new double[rawData.Length];
    //for (int i = 0; i < rawData.Length; i++) bgSubData[i] = rawData[i] - bg;
    //gd.PointValues.AddRange(bgSubData);
    //gd.SubtractedBackground = bg;
    //}
    //else
    //{

    GatedDetectorData data = new GatedDetectorData();
    data.PointValues.AddRange(points);
    //}
    data.Gate = gate;
    return data;
}
// Builds the Fourier transform of a gated detector trace, reduced by
// averaging groups of 'average' adjacent bins.
// NOTE: the real FFT path is disabled below; this currently returns an
// all-zero FT of the reduced length ("TEMP" stub).
public static DetectorFT MakeFT(GatedDetectorData data, int average)
{
    // Disabled implementation: magnitude of the real FFT, bin-averaged,
    // divided by sqrt(length) to agree with Mathematica's convention.
    //double[] dataList = new double[data.PointValues.Count];
    //data.PointValues.CopyTo(dataList);
    //ComplexDouble[] fft = Transforms.RealFft(dataList);
    //int reducedFourierLength = dataList.Length / average;
    //double[] reducedFourier = new double[reducedFourierLength];
    //double rootLength = Math.Sqrt(dataList.Length); // makes it agree with Mathematica
    //for (int i = 0; i < reducedFourierLength; i++)
    //{
    //    double ptVal = 0.0;
    //    for (int j = 0; j < average; j++) ptVal += fft[(i * average) + j].Magnitude;
    //    ptVal /= average;
    //    reducedFourier[i] = ptVal / rootLength;
    //}
    //return new DetectorFT(reducedFourier);

    // TEMP: return a blank FT of the reduced length.
    int reducedLength = data.PointValues.Count / average;
    return new DetectorFT(new double[reducedLength]);
}
// Computes the (reduced) Fourier transform of the detector data.
// The FFT machinery is commented out for now — the method is a "TEMP"
// stub that hands back a zero-filled FT of the reduced length.
public static DetectorFT MakeFT(GatedDetectorData data, int average)
{
    // Disabled: real FFT magnitudes, averaged in groups of 'average' bins
    // and scaled by 1/sqrt(N) to match Mathematica.
    //double[] dataList = new double[data.PointValues.Count];
    //data.PointValues.CopyTo(dataList);
    //ComplexDouble[] fft = Transforms.RealFft(dataList);
    //int reducedFourierLength = dataList.Length / average;
    //double[] reducedFourier = new double[reducedFourierLength];
    //double rootLength = Math.Sqrt(dataList.Length); // makes it agree with Mathematica
    //for (int i = 0; i < reducedFourierLength; i++)
    //{
    //    double ptVal = 0.0;
    //    for (int j = 0; j < average; j++) ptVal += fft[(i * average) + j].Magnitude;
    //    ptVal /= average;
    //    reducedFourier[i] = ptVal / rootLength;
    //}
    //return new DetectorFT(reducedFourier);

    // TEMP: blank FT.
    int binCount = data.PointValues.Count / average;
    double[] blank = new double[binCount];
    return new DetectorFT(blank);
}
// This function gates the detector data first, and then demodulates the channels.
// This means that it can give inaccurate results for non-linear combinations
// of channels that vary appreciably over the TOF. There's another, slower, function
// DemodulateBlockNL that takes care of this.
public DemodulatedBlock DemodulateBlock(Block b, DemodulationConfig config)
{
    // *** copy across the metadata ***
    DemodulatedBlock db = new DemodulatedBlock();
    db.TimeStamp = b.TimeStamp;
    db.Config = b.Config;
    db.DemodulationConfig = config;

    // *** extract the gated detector data using the given config ***
    // Only detectors that appear in both the block and the config are kept;
    // 'ind' tracks each kept detector's position in gatedDetectorData.
    List<GatedDetectorData> gatedDetectorData = new List<GatedDetectorData>();
    int ind = 0;
    foreach (string d in b.detectors)
    {
        GatedDetectorExtractSpec gdes;
        config.GatedDetectorExtractSpecs.TryGetValue(d, out gdes);
        if (gdes != null)
        {
            gatedDetectorData.Add(GatedDetectorData.ExtractFromBlock(b, gdes));
            db.DetectorIndices.Add(gdes.Name, ind);
            ind++;
            // Calibration is read off the first point's TOF for this detector index.
            db.DetectorCalibrations.Add(gdes.Name,
                ((TOF)((EDMPoint)b.Points[0]).Shot.TOFs[gdes.Index]).Calibration);
        }
    }
    // Disabled alternative: iterate the config's specs directly rather than
    // the block's detector list.
    //foreach (KeyValuePair<string, GatedDetectorExtractSpec> spec in config.GatedDetectorExtractSpecs)
    //{
    //    GatedDetectorExtractSpec gate = spec.Value;
    //    gatedDetectorData.Add(GatedDetectorData.ExtractFromBlock(b, gate));
    //    db.DetectorIndices.Add(gate.Name, ind);
    //    ind++;
    //    db.DetectorCalibrations.Add(gate.Name,
    //        ((TOF)((EDMPoint)b.Points[0]).Shot.TOFs[gate.Index]).Calibration);
    //}

    // ** normalise the top detector **
    // Uses the overloaded '/' (mean-preserving pointwise division) and appends
    // the result as a synthetic "topNormed" detector.
    // NOTE(review): assumes "top" and "norm" detectors are always present — confirm.
    gatedDetectorData.Add(
        gatedDetectorData[db.DetectorIndices["top"]] / gatedDetectorData[db.DetectorIndices["norm"]]);
    db.DetectorIndices.Add("topNormed", db.DetectorIndices.Count);

    // *** extract the point detector data ***
    List<PointDetectorData> pointDetectorData = new List<PointDetectorData>();
    foreach (string channel in config.PointDetectorChannels)
    {
        pointDetectorData.Add(PointDetectorData.ExtractFromBlock(b, channel));
        // for the moment all single point detector channels are set to have a
        // calibration of 1.0 .
        db.DetectorCalibrations.Add(channel, 1.0);
    }

    // *** build the list of detector data ***
    // Gated detectors first, then point detectors; indices for point channels
    // are offset by the number of gated detectors.
    List<DetectorData> detectorData = new List<DetectorData>();
    for (int i = 0; i < gatedDetectorData.Count; i++)
    {
        detectorData.Add(gatedDetectorData[i]);
    }
    for (int i = 0; i < config.PointDetectorChannels.Count; i++)
    {
        detectorData.Add(pointDetectorData[i]);
        db.DetectorIndices.Add(config.PointDetectorChannels[i], i + gatedDetectorData.Count);
    }

    // calculate the norm FFT
    db.NormFourier = DetectorFT.MakeFT(gatedDetectorData[db.DetectorIndices["norm"]], kFourierAverage);

    // *** demodulate channels ***
    // ** build the list of modulations **
    // Analog, digital and timing modulations are pooled into a single ordered
    // list; a modulation's position in this list defines its bit in the
    // switch-state words below.
    List<string> modNames = new List<string>();
    List<Waveform> modWaveforms = new List<Waveform>();
    foreach (AnalogModulation mod in b.Config.AnalogModulations)
    {
        modNames.Add(mod.Name);
        modWaveforms.Add(mod.Waveform);
    }
    foreach (DigitalModulation mod in b.Config.DigitalModulations)
    {
        modNames.Add(mod.Name);
        modWaveforms.Add(mod.Waveform);
    }
    foreach (TimingModulation mod in b.Config.TimingModulations)
    {
        modNames.Add(mod.Name);
        modWaveforms.Add(mod.Waveform);
    }

    // ** work out the switch state for each point **
    // NOTE(review): modWaveforms[0] throws if the block has no modulations at
    // all — presumably that never happens; confirm against callers.
    int blockLength = modWaveforms[0].Length;
    List<bool[]> wfBits = new List<bool[]>();
    foreach (Waveform wf in modWaveforms)
    {
        wfBits.Add(wf.Bits);
    }
    // Each point's switch state packs one bit per modulation (bit j set when
    // waveform j is "on" at that point).
    List<uint> switchStates = new List<uint>(blockLength);
    for (int i = 0; i < blockLength; i++)
    {
        uint switchState = 0;
        for (int j = 0; j < wfBits.Count; j++)
        {
            if (wfBits[j][i])
            {
                switchState += (uint)Math.Pow(2, j);
            }
        }
        switchStates.Add(switchState);
    }

    // pre-calculate the state signs for each analysis channel
    // the first index selects the analysis channel, the second the switchState
    int numStates = (int)Math.Pow(2, modWaveforms.Count);
    int[,] stateSigns = new int[numStates, numStates];
    for (uint i = 0; i < numStates; i++)
    {
        for (uint j = 0; j < numStates; j++)
        {
            // Note the argument order: stateSign(switchState, channel).
            stateSigns[i, j] = stateSign(j, i);
        }
    }

    // ** the following needs to be done for each detector **
    for (int detector = 0; detector < detectorData.Count; detector++)
    {
        DetectorChannelValues dcv = new DetectorChannelValues();
        // One mask bit per modulation, in pooled-list order.
        for (int i = 0; i < modNames.Count; i++)
        {
            dcv.SwitchMasks.Add(modNames[i], (uint)(1 << i));
        }

        // * divide the data up into bins according to switch state *
        List<List<double>> statePoints = new List<List<double>>(numStates);
        for (int i = 0; i < numStates; i++)
        {
            statePoints.Add(new List<double>(blockLength / numStates));
        }
        for (int i = 0; i < blockLength; i++)
        {
            statePoints[(int)switchStates[i]].Add(detectorData[detector].PointValues[i]);
        }

        // * calculate the channel values *
        // Each channel value at a sub-index is the signed average over all
        // states of the corresponding binned points.
        // NOTE(review): assumes every switch state occurs exactly
        // blockLength/numStates times (balanced modulation waveforms) — an
        // unbalanced block would index statePoints out of range; confirm.
        int subLength = blockLength / numStates;
        double[,] channelValues = new double[numStates, subLength];
        for (int channel = 0; channel < numStates; channel++)
        {
            for (int subIndex = 0; subIndex < subLength; subIndex++)
            {
                double chanVal = 0;
                for (int i = 0; i < numStates; i++)
                {
                    chanVal += stateSigns[channel, i] * statePoints[i][subIndex];
                }
                chanVal /= (double)numStates;
                channelValues[channel, subIndex] = chanVal;
            }
        }

        //* calculate the channel means *
        double[] channelMeans = new double[numStates];
        for (int channel = 0; channel < numStates; channel++)
        {
            double total = 0;
            for (int i = 0; i < subLength; i++)
            {
                total += channelValues[channel, i];
            }
            // blockLength / numStates == subLength here.
            total /= blockLength / numStates;
            channelMeans[channel] = total;
        }
        dcv.Values = channelMeans;

        //* calculate the channel errors *
        // Standard error of the mean: sqrt(sum((x - mean)^2) / (n * (n - 1))).
        // NOTE(review): when subLength == 1 the divisor is 0, giving Infinity/NaN
        // errors rather than an exception — confirm that is acceptable upstream.
        double[] channelErrors = new double[numStates];
        for (int channel = 0; channel < numStates; channel++)
        {
            double total = 0;
            for (int i = 0; i < subLength; i++)
            {
                total += Math.Pow(channelValues[channel, i] - channelMeans[channel], 2);
            }
            total /= subLength * (subLength - 1);
            total = Math.Sqrt(total);
            channelErrors[channel] = total;
        }
        dcv.Errors = channelErrors;

        db.ChannelValues.Add(dcv);
    }
    return(db);
}