/// <summary>
/// Given the symbolic output, report per-column statistics of the underlying
/// coefficient matrix. A column whose entries are all zero means that the
/// corresponding variable does not participate in the Jacobian.
/// </summary>
/// <param name="output">Symbolic output terms of the network.</param>
public static void ReportSparsity(LPSTerm[] output)
{
    var matrix = LPSTerm.UnderlyingMatrix(output);
    // One entry per column: (column index, maximum coefficient in that column).
    // NOTE(review): Maximum() ignores large-magnitude negative entries; if true
    // zero-column detection is intended, AbsoluteMaximum() may be more
    // appropriate — confirm before changing, as it alters the printed values.
    List<Tuple<int, double>> stats = new List<Tuple<int, double>>();
    for (int i = 0; i < matrix.ColumnCount; i++)
    {
        stats.Add(new Tuple<int, double>(i, matrix.Column(i).Maximum()));
    }
    // Sort ascending by column maximum so (near-)zero columns print first.
    stats.Sort((t1, t2) => t1.Item2.CompareTo(t2.Item2));
    foreach (var s in stats)
    {
        Console.WriteLine(s.Item2);
    }
}
/// <summary>
/// Generate symbolic inputs and a term for the epsilon of the objective.
/// Resets the global variable factory to exactly inputDimension + 1 fresh
/// variables: one per input component, plus one trailing epsilon variable.
/// </summary>
/// <param name="inputDimension">Number of (cropped) network inputs.</param>
/// <returns>A pair of (symbolic input terms, epsilon term).</returns>
public static Tuple<LPSTerm[], LPSTerm> GenSymbolicInputs(int inputDimension)
{
    LPSTerm.ResetVariableFactory(inputDimension + 1);
    var all = LPSTerm.FreshVariables(inputDimension + 1);
    // The last fresh variable is reserved for epsilon.
    LPSTerm epsilon = all[inputDimension];
    LPSTerm[] inputs = new LPSTerm[inputDimension];
    Array.Copy(all, inputs, inputDimension);
    return new Tuple<LPSTerm[], LPSTerm>(inputs, epsilon);
}
/// <summary>
/// Add a single constraint to the underlying solver as a new row, setting
/// only the non-zero coefficients and the row bounds implied by the
/// inequality type.
/// </summary>
/// <param name="ct">The constraint (term, intercept and inequality kind).</param>
public void AddConstraint(LPSConstraint ct)
{
    int ctid;
    solver_.AddRow("constraint" + ct_cnt, out ctid);
    Vector<double> coefficients = ct.Term.GetCoefficients();
    int totalvars = LPSTerm.TotalVarCount();
    for (int j = 0; j < totalvars; j++)
    {
        // Due to the way MSF works, if we are adding a 0 coefficient
        // this amounts to actually removing it. However, the coefficient
        // is not there to start with, hence let's not add it, at all!
        if (coefficients[j] != 0)
        {
            solver_.SetCoefficient(ctid, vars_[j], coefficients[j]);
        }
    }
    // The row encodes: term + intercept <op> 0, i.e. term <op> -intercept.
    // Strict inequalities (LT/GT) are treated as their non-strict
    // counterparts (the StrictInequalityLambda adjustment is disabled).
    switch (ct.Inequality)
    {
        case InequalityType.LT:
        case InequalityType.LE:
            solver_.SetUpperBound(ctid, -ct.Term.Intercept);
            break;
        case InequalityType.GT:
        case InequalityType.GE:
            solver_.SetLowerBound(ctid, -ct.Term.Intercept);
            break;
        case InequalityType.EQ:
            // Equality is expressed as identical lower and upper bounds.
            solver_.SetBounds(ctid, -ct.Term.Intercept, -ct.Term.Intercept);
            break;
        default:
            break;
    }
    ct_cnt++;
}
/// <summary>
/// Create an LP solver instance: one solver variable per symbolic variable.
/// Input (image) variables get a tighter box around the original pixel
/// values; the remaining variables (e.g. epsilon) get the global bounds.
/// </summary>
/// <param name="input_dimension">Dimension of the (cropped) input.</param>
/// <param name="total_constraint_count">Expected constraint count (informational).</param>
/// <param name="origin">Just the image, not the epsilon.</param>
/// <param name="originbound">Half-width of the bounding rectangle around the origin.</param>
public LPSolver(
    int input_dimension,
    int total_constraint_count,
    double[] origin,
    double originbound)
{
    solver_ = new GurobiSolver();
    input_dimension_ = input_dimension;
    int varCount = LPSTerm.TotalVarCount();
    Console.WriteLine("Number of variables: " + varCount);
    vars_ = new int[varCount];
    for (int i = 0; i < varCount; i++)
    {
        int vid;
        solver_.AddVariable("x" + i, out vid);
        solver_.SetIntegrality(vid, RobustnessOptions.Integrality);
        if (i < origin.Length)
        {
            // Image variable: clamp the box around the original pixel to the
            // globally allowed [MinValue, MaxValue] range.
            double lo = Math.Max(Utils.RobustnessOptions.MinValue, origin[i] - originbound);
            double hi = Math.Min(Utils.RobustnessOptions.MaxValue, origin[i] + originbound);
            if (lo > hi)
            {
                // Clamping produced an empty interval — the configured
                // Min/Max values must be inconsistent. Fall back to the raw,
                // unclamped box rather than handing the solver bad bounds.
                lo = origin[i] - originbound;
                hi = origin[i] + originbound;
            }
            solver_.SetBounds(vid, lo, hi);
        }
        else
        {
            // Non-image variables (e.g. epsilon): global bounds only.
            solver_.SetBounds(vid, Utils.RobustnessOptions.MinValue, Utils.RobustnessOptions.MaxValue);
        }
        vars_[i] = vid;
    }
}
/// <summary>
/// Add all constraints (and, optionally, an objective row) to the solver.
/// </summary>
/// <param name="constraints">The constraints to add, one solver row each.</param>
/// <param name="objective">Optional objective (term plus Min/Max direction).</param>
public void AddConstraints(LPSConstraints constraints, Nullable<LPSObjective> objective)
{
    Console.WriteLine("LP constraints: " + constraints.Count);
    int varCount = LPSTerm.TotalVarCount();
    foreach (LPSConstraint ct in constraints)
    {
        AddConstraint(ct);
    }
    Console.WriteLine();
    if (objective.HasValue)
    {
        int objid;
        solver_.AddRow("Objective", out objid);
        // Unlike constraint rows, the objective row sets every coefficient,
        // including zeros.
        for (int j = 0; j < varCount; j++)
        {
            solver_.SetCoefficient(objid, vars_[j], objective.Value.term.GetCoefficient(j));
        }
        // AddGoal's final argument is "minimize": false => maximize.
        switch (objective.Value.type)
        {
            case LPSObjectiveType.Max:
                solver_.AddGoal(objid, 10, false);
                objective_id = objid;
                break;
            case LPSObjectiveType.Min:
                solver_.AddGoal(objid, 10, true);
                objective_id = objid;
                break;
        }
    }
}
/// <summary>
/// Synthesize a counterexample from an existing labelled image: build the
/// symbolic constraints that force the network towards the second-best label,
/// solve the resulting LP (with an optional CEGAR refinement loop over the
/// deferred constraints), and validate the synthesized image by re-labelling it.
/// </summary>
/// <param name="nn">The model.</param>
/// <param name="inputs">Symbolic inputs (cropped).</param>
/// <param name="epsilon">Epsilon variable for the objective.</param>
/// <param name="imageLab">The image and labeling information from the network (uncropped).</param>
/// <param name="instr">Instrumentation for the symbolic evaluation.</param>
/// <param name="realLabel">The label of the image from the training set (ground truth).</param>
/// <param name="rowSize">Original (uncropped) row size.</param>
/// <param name="colSize">Original (uncropped) col size.</param>
/// <param name="isColor">Whether the image is color (for display only).</param>
/// <returns>NULL if we were not able to synthesize a counterexample, otherwise some information about it.</returns>
public static Nullable<LabelWithConfidence> SynthesizeCounterexample
    ( NeuralNet nn
    , LPSTerm[] inputs              // Symbolic inputs (cropped)
    , LPSTerm epsilon               // Epsilon variable
    , LabelWithConfidence imageLab  // Original image classification info (uncropped)
    , NNInstrumentation instr
    , int realLabel                 // Ground truth for this image (from training set)
    , int rowSize                   // Original (uncropped) row size
    , int colSize                   // Original (uncropped) col size
    , bool isColor)
{
    // Attack direction: flip the classification from the network's current
    // label to its second-best label.
    int origLabel = imageLab.actualLabel;
    int targetLabel = imageLab.secBestLabel;
    int input_dimension_pre_crop = nn.InputDimensionPreCrop;
    int input_dimension_post_crop = nn.InputDimensionPostCrop;
    double[] orig_image = imageLab.datum;
    double[] orig_image_crop = nn.CropMaybe(DenseVector.OfArray(orig_image)).ToArray();
    // Only attack images the network currently classifies correctly.
    if (realLabel != origLabel)
    {
        Console.WriteLine("This image is misclassifed already! Skipping.");
        return null;
    }
    if (RobustnessOptions.IgnoreLowConfidence && imageLab.softMaxValue < RobustnessOptions.LowConfidenceThreshold)
    {
        Console.WriteLine("This image is misclassifed with low confidence! Skipping.");
        return null;
    }
    // Fast path:
    // DiffInfo diff_info;
    /* *********************
     * DV: Commenting out the fast path for now (but we are still keeping the Dictionary, for debugging)
     * *********************
    if (diffDict.TryGetValue(new Tuple<int,int>(origLabel,targetLabel),out diff_info))
    {
        Console.WriteLine("Got a hit in the difference cache!");
        Vector<double> diff_counterexample = diff_info.diff;
        Vector<double> cand = DenseVector.OfArray(orig_image) + diff_counterexample;
        Console.WriteLine("oooooooooooooooo Checking with the fast path!");
        double[] cand_arr_crop = nn.CropMaybe(cand).ToArray();
        if (RobustnessOptions.QuantizationSafety)
        {
            Utils.UArray.InPlaceRoundDoubleArray(cand_arr_crop);
        }
        LabelWithConfidence candLab = Utils.ULabel.LabelWithConfidence(nn, cand_arr_crop,false); // Already cropped, don't crop!
        if (candLab.actualLabel != origLabel)
        {
            Console.WriteLine("=> Real counterexample (from fast path)!");
            diff_info.number++;
            return candLab;
        }
        Console.WriteLine("xxxx Fast path failed, continuing with symbolic interpreter ...");
        // otherwise continue with the slow path ...
    }
    ***********************/
    var state = new LPSState(instr, orig_image_crop);
    // We re-enter NOMODELLOOP at most once: the guard below returns null on
    // the second arrival.
    int nomodelcount = 0;
    double[] newImageUnrounded;

NOMODELLOOP:
    if (nomodelcount++ > 0) return null;

    state.ClearConstraints();
    // Symbolically evaluate the network to get output terms over the inputs.
    LPSTerm[] output = nn.EvaluateNNSymbolicPostCrop(state, inputs);
    // Just some tracing ...
    // ReportSparsity(output);
    LPSConstraints currentCts = state.CurrentCts;
    LPSConstraints deferredCts = state.DeferredCts;
    // Conjoin the label formula
    currentCts.And(NNetFormulas.LabelFormula(output, targetLabel, RobustnessOptions.LabelConfidenceDiff));
    // If we are just looking for bounds, then the variables themselves will contain "origin" bounds
    if (RobustnessOptions.DoOptimization)
    {
        NNETObjectives.AddEpsilonBounds(currentCts, inputs, epsilon, orig_image_crop);
    }
    // Ensure that at least *one* entry is different by at least 1.0
    if (RobustnessOptions.QuantizationSafety)
    {
        NNETObjectives.AddQuantizationSafety(currentCts, inputs, orig_image_crop);
    }
    // Create objective
    Nullable<LPSObjective> objective = null;
    if (RobustnessOptions.DoOptimization)
    {
        switch (RobustnessOptions.ObjectiveKind)
        {
            case LPSObjectiveKind.MinLinf:
                objective = NNETObjectives.MinLInf(currentCts, inputs, epsilon, orig_image_crop);
                break;
            case LPSObjectiveKind.MaxConf:
                objective = NNETObjectives.MaxConf(output, origLabel, targetLabel);
                break;
            default:
                break;
        }
    }
    // Without CEGAR, everything deferred is solved eagerly up-front.
    if (!RobustnessOptions.CEGAR)
    {
        currentCts.And(deferredCts);
        deferredCts = new LPSConstraints();
    }
    // CEGAR loop header
    LabelWithConfidence newLab;
    Console.WriteLine("Current constraints: {0}, deferred: {1}", currentCts.Count, deferredCts.Count);
    LPSolver lps = new LPSolver(
        input_dimension_post_crop,
        currentCts.Count + deferredCts.Count,
        orig_image_crop,
        RobustnessOptions.Epsilon);
    lps.AddConstraints(currentCts, objective);
    int cegar_iterations = 0;
    while (true)
    {
        if (cegar_iterations++ > RobustnessOptions.CEGARGiveUpIterations)
        {
            Console.WriteLine("xxxxxxxxxxxxxxxx Giving up CEGAR, could not find model!");
            goto NOMODELLOOP;
        }
        var newImage = lps.SolveLowLevelLP();
        currentCts = new LPSConstraints();
        if (newImage == null)
        {
            Console.WriteLine("xxxxxxxxxxxxxxxx No model!");
            goto NOMODELLOOP;
        }
        Console.WriteLine("oooooooooooooooo Found model!");
        // Keep an unrounded copy: the final diff computation below uses it.
        newImageUnrounded = new double[newImage.Length];
        Array.Copy(newImage, newImageUnrounded, newImage.Length);
        if (RobustnessOptions.QuantizationSafety)
        {
            Utils.UArray.InPlaceRoundDoubleArray(newImage);
        }
        int samcount = Utils.UArray.ComputeRoundIdenticals(orig_image_crop, newImage);
        Console.WriteLine("Synthesized image has {0} identical inputs (after rounding) to original (cropped)", samcount);
        // Now, try to label the new example
        newLab = Utils.ULabel.LabelWithConfidence(nn, newImage, false); // Already cropped, don't crop!
        if (newLab.actualLabel != targetLabel)
        {
            if (newLab.actualLabel == realLabel)
            {
                // Here the synthesized image is not really a counterexample.
                // This could be due to either (a) quantization errors or (b) CEGAR
                // underapproximation. But the only thing we can try and do here is
                // add mor constraints and try to resolve.
                if (RobustnessOptions.CEGAR)
                    Console.WriteLine("Not really a counterexample, going round CEGAR loop.");
                int added = 0;
                // new_image_plus_eps = newImage : 0.0
                // so that the length matches the coefficients of each constraint ...
                double[] newimage_plus_eps = new double[newImage.Length + 1];
                Array.Copy(newImageUnrounded, newimage_plus_eps, newImage.Length);
                newimage_plus_eps[newImage.Length] = 0.0;
                Vector<double> newImageVec_eps = DenseVector.OfArray(newimage_plus_eps);
                // NOTE(review): a single enumerator is shared across all
                // Parallel.For iterations; MoveNext/Current happen under
                // lockObj so each iteration gets a distinct constraint, but
                // iteration index i is unrelated to the constraint picked.
                // The "added > 699" early-exit reads `added` without the lock
                // (benign race: it only caps how many constraints are added).
                var denumerator = deferredCts.GetEnumerator();
                Parallel.For(0, deferredCts.Count, i =>
                {
                    LPSConstraint curr_deferred;
                    if (added > 699) return;
                    lock (lockObj)
                    {
                        denumerator.MoveNext();
                        curr_deferred = (LPSConstraint)denumerator.Current;
                        if (curr_deferred.Added == true) return;
                    }
                    // Satisfiability check runs outside the lock (parallel part).
                    bool sat = Satisfiable(curr_deferred, newImageVec_eps);
                    lock (lockObj)
                    {
                        if (!sat)
                        {
                            // Violated deferred constraint: add it to the LP
                            // and mark it so later rounds skip it.
                            lps.AddConstraint(curr_deferred);
                            // currentCts.And(curr_deferred.Term, curr_deferred.Inequality);
                            curr_deferred.Added = true;
                            added++;
                        }
                    }
                });
                Console.WriteLine();
                Console.WriteLine("Added {0} constraints for CEGAR", added);
                if (added == 0)
                {
                    Console.WriteLine("=> CEGAR cannot improve things.");
                    goto NOMODELLOOP;
                    // return null;
                }
                // lps.AddConstraints(currentCts, null);
                continue;
            }
            else
            {
                // Misclassified, just not as the targeted second-best label.
                Console.WriteLine("=> Real counterexample! (Although with different label than expected)");
                break;
            }
        }
        else
        {
            Console.WriteLine("=> Real counterexample! (New image has second-best label");
            break;
        }
    }
    if (RobustnessOptions.DisplaySynthesizedImagesAndPause)
    {
        Utils.UDraw.DisplayImageAndPause(Utils.UArray.ToIntArray(imageLab.datum), rowSize, colSize, isColor);
        Utils.UDraw.DisplayImageAndPause(Utils.UArray.ToIntArray(newLab.datum), rowSize, colSize, isColor);
    }
    /* NB: Uncrop the image in newLab */
    newLab.datum = nn.UnCropMaybe(DenseVector.OfArray(orig_image), DenseVector.OfArray(newLab.datum)).ToArray();
    // Record the (uncropped, unrounded) perturbation in the difference cache,
    // keyed by (original label, new label) — kept for debugging / fast path.
    double[] tmp = nn.UnCropMaybe(DenseVector.OfArray(orig_image), DenseVector.OfArray(newImageUnrounded)).ToArray();
    Vector<double> diff_val = DenseVector.OfArray(tmp) - DenseVector.OfArray(orig_image);
    var key = new Tuple<int, int>(origLabel, newLab.actualLabel);
    DiffInfo dinfo;
    if (diffDict.TryGetValue(key, out dinfo))
    {
        dinfo.number++;
    }
    else
    {
        dinfo = new DiffInfo();
        dinfo.diff = diff_val;
        dinfo.number = 1;
        diffDict.Add(new Tuple<int, int>(origLabel, newLab.actualLabel), dinfo);
    }
    return newLab;
}