import groovy.json.JsonBuilder
import groovy.json.JsonSlurper
import abs.*

//import java.nio.file.Paths
//import abs.callgraph.BFS
//import abs.callgraph.BenchmarkFinderImpl
//import abs.callgraph.InterfaceImplementerAll
//import abs.callgraph.StaticWalker
//import abs.callgraph.TypeSolverFactory
//TODO: switch dependency as soon as version 0.5.3 is out. This is a hacky solution that requires manually changing the grape/ivy repository.
// @Grab(group='com.github.javaparser', module='java-symbol-solver-core', version='0.5.2-cl')

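// Appends one semicolon-separated line per measurement to the log file,
// in the format: run;method;benchmark;value.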
def writeToLog(def logfile, def run, def name, def result) {
    result.each { benchmark, data ->
        data.each { item ->
            logfile.append("$run;$name;$benchmark;$item\n")
        }
    }
}

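// Re-reads a log written by writeToLog and rebuilds the nested result
// structure: run -> method -> benchmark -> list of measured values.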
def defrostResultsFromLog(def filename) {
    def results = [:]
    new File(filename).splitEachLine(";") { fields ->
        def run = fields[0] as int
        def method = fields[1]
        def benchmark = fields[2]
        def val = fields[3] as double
        if (!results.containsKey(run))
            results[run] = [:]
        if (!results[run].containsKey(method))
            results[run][method] = [:]
        if (!results[run][method].containsKey(benchmark))
            results[run][method][benchmark] = []
        results[run][method][benchmark] << val
    }
    return results
}

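// Performs one complete experiment run: first a baseline execution of all
// benchmarks, then one execution per configured method with an artificial
// performance regression injected into that method.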
def do_run(def runnr) {

    BenchmarkRunner runner =
        (config.build_system && config.build_system == 'gradle') ?
            new GradleJMHBenchmarkRunner(config.project, config.benchmarks, config.benchmark_jar, config.custom_benchmark_config, config.gradle_target) :
            new MvnJMHBenchmarkRunner(config.project, config.benchmarks, config.benchmark_jar, config.custom_benchmark_config)

    if (config.benchmarks_to_execute) {
        def parsed = parseBenchmarksToExecute(config.benchmarks_to_execute)
        runner.setBenchmarksToExecute(parsed)
    }

    // config sanity check: inject and immediately revert each configured
    // regression once, so a broken config fails fast before any benchmarking
    config.files.each { file ->
        file.methods.each { method ->
            RegressionInducer changer = new PerfRegressionInducer("${config.project}/${file.test_file}",
                method.name, method.params, config.degree_of_violation as double)
            changer.doUpdate()
            changer.resetChanges()
        }
    }
    println "##### Config seems ok #####"

    new File("codedumps/$runnr").mkdir()

    println "##### Baseline Run $runnr #####"
    // baseline run: no regression injected
    def baselineResult = runner.run(new EmptyRegressionInducer(), "")
    writeToLog(logfile, runnr, "Baseline", baselineResult)
    println "##### Baseline Run $runnr Finished #####"

    // test runs: one per configured method, each with an injected regression
    def results = [:]
    config.files.each { file ->
        if (file == null || file.test_file == null) {
            println "##### Empty file, not executing"
            return
        }
        println "##### Started Running $runnr for ${file.test_file} #####"
        def testfile = file.test_file
        // turn the path into a dot-separated dump directory name; the regex is
        // anchored so only the trailing ".java" extension is stripped
        def dumpFileName = testfile.replaceAll("/", ".").replaceAll('\\.java$', '')
        new File("codedumps/$runnr/$dumpFileName").mkdir()
        file.methods.each { method ->
            println "##### Test run $runnr for ${method.name} #####"
            RegressionInducer changer = new PerfRegressionInducer("${config.project}/$testfile",
                method.name, method.params, config.degree_of_violation as double)
            def testResult = runner.run(changer, "codedumps/$runnr/$dumpFileName/${method.name}")
            def fullname = "${testfile}.${method.name}(${method.params})"
            results[fullname] = testResult
            writeToLog(logfile, runnr, fullname, testResult)
        }
    }
    println "##### Finished $runnr #####"
}

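// Maps the raw filter entries from the config onto BenchmarkToExecute
// instances (a JMH name pattern plus its parameter settings).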
def parseBenchmarksToExecute(def listOfConfigs) {
    listOfConfigs.collect { config ->
        new BenchmarkToExecute(pattern: config.pattern, params: config.params)
    }
}

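// Tests whether the regression injected for `method` is detectable in `run`:
// each benchmark's measurements are compared against the pooled baseline,
// the significance level is Bonferroni-corrected for the number of tests,
// and only benchmarks that also exceed the minimum effect size are kept.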
def detectableSlowdown(def allresults, def method, def run) {

    def runresults = allresults[run]
    def baselineresults = collectBaselineResults(allresults)
    def testresults = runresults.subMap([method])

    def tester = new TTester()
    def pVals = tester.testForChanges(baselineresults, testresults[method])
    // Bonferroni correction: divide alpha by the number of benchmarks tested
    def correctedAlpha = (config.confidence as double) / pVals.size()
    def activeTests = pVals.findAll { _, entry ->
        entry["p"] < correctedAlpha && entry["dm"] > (config.min_effect_size as double)
    }
    println "For $method in run $run, ${activeTests.size()} benchmarks showed a difference (of ${pVals.size()})"
    if (activeTests.size() > 0) {
        println "Indicating benchmarks:"
        activeTests.each { m, r -> println " $m" }
    }
    return activeTests
}

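// Pools the baseline measurements of all runs into a single map:
// benchmark -> flattened list of every baseline value across runs.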
def collectBaselineResults(def all) {
    def maps = all.collect { _, run ->
        run['Baseline']
    }
    def collectedMaps = [:]
    maps[0].each { key, val ->
        collectedMaps[key] = maps.collect { it[key] }.flatten()
    }
    return collectedMaps
}

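// Builds the project (Gradle or Maven) so the static call-graph analysis
// has compiled artifacts to inspect; does nothing unless scg.compile is set.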
def buildProject(config) {
    if (!config.scg.compile) {
        return
    }
    def procStr = ""
    if (config.build_system && config.build_system == "gradle") {
        procStr = "./gradlew ${config.gradle_target}"
    } else {
        // assume maven
        procStr = "mvn clean install -DskipTests"
    }
    def proc = procStr.execute(null, new File(config.project))
    // echo the build output until the process closes its stdout
    proc.in.eachLine { line -> println line }
    proc.out.close()
    proc.waitFor()
}

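// Parses the command line: -c <config> is required, and at least one of
// -d (dynamic approach) or -s (static call-graph approach) must be given.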
def parseArgs(args) {
    def cli = new CliBuilder(usage: 'copper')
    cli.d('run dynamic ptc')
    cli.s('run static callgraph ptc')
    cli.c('config file', required: true, args: 1)
    def options = cli.parse(args)
    if (options == null) {
        return null
    }

    if (!options.getProperty('d') && !options.getProperty('s')) {
        println("error: Missing required option: either d or s")
        cli.usage()
        return null
    }
    return options
}

def options = parseArgs(this.args)
if (options == null) {
    return
}

def configPath = options.getProperty('c')
println("# load config file: $configPath")
def slurper = new JsonSlurper()
config = slurper.parse(new File(configPath))

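// Illustrative sketch of the expected config file, reconstructed from the
// fields this script reads; all values and paths below are made-up examples,
// not taken from a real project:
/*
{
  "project": "path/to/project",
  "build_system": "gradle",
  "gradle_target": "jmhJar",
  "benchmarks": "src/jmh/java",
  "benchmark_jar": "build/libs/benchmarks.jar",
  "custom_benchmark_config": "",
  "benchmarks_to_execute": [ { "pattern": ".*MyBenchmark.*", "params": "" } ],
  "files": [
    { "test_file": "src/main/java/pkg/Foo.java",
      "methods": [ { "name": "bar", "params": "int" } ] }
  ],
  "degree_of_violation": "0.5",
  "confidence": "0.05",
  "min_effect_size": "0.4",
  "log": "results.csv",
  "repeats": 3,
  "scg": { "compile": true, "jars": "build/libs", "filePattern": ".*",
           "lib": "libs", "qualifiedPathPrefix": "", "out": "scg.json" }
}
*/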
// dynamic approach
if (options.getProperty('d')) {
    // intentionally no 'def': these go into the script binding so the
    // helper methods above (do_run, detectableSlowdown) can see them
    logfile = new File((String) config.log)
    logfile.write("")
    repeats = config.repeats as int

    println "##### Creating directory for code dumps #####"
    if (new File("codedumps").exists()) {
        new File("codedumps").deleteDir()
    }
    new File("codedumps").mkdir()

    repeats.times { run ->
        do_run(run)
    }

    allResults = defrostResultsFromLog((String) config.log)
    runs = allResults.keySet()
    methods = []
    allResults.each { run, results ->
        results.each { method, _ ->
            methods << method
        }
    }
    methods = methods.unique() - "Baseline"

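    // A method counts as covered only if at least one benchmark flags its
    // regression in every run; PTC is the fraction of methods covered.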
    slowdownDetections = methods.count { method ->
        def slowdownsDetected = runs.collect { run ->
            detectableSlowdown(allResults, method, run)
        }

        def benchmarks = allResults[0]["Baseline"].collect { it.key }
        def allDetected = benchmarks.any { benchmark ->
            slowdownsDetected.every { run ->
                run.keySet().contains(benchmark)
            }
        }
        return allDetected
    }
    ptc = slowdownDetections / (double) methods.size()
    println "Dynamic coverage value was ${ptc}"
}


// // static approach
// if (options.getProperty('s')) {
//     // compile project
//     buildProject(config)
//     // get jars
//     def filePattern = config.scg.filePattern
//     if (filePattern == null || filePattern == "") {
//         filePattern = ".*"
//     }
//
//     def jars = Project.jars(config.project, config.scg.jars, filePattern)
//     def typeSolver = TypeSolverFactory.get(jars)
//
//     def bf = new BenchmarkFinderImpl(typeSolver)
//     def benchmarkMethods = bf.all(Paths.get(config.project, config.benchmarks).toString())
//
//     // run call graph walker
//     def scg = new StaticWalker(config.project, config.scg.lib, config.scg.qualifiedPathPrefix, config.files, benchmarkMethods)
//     jars.each { jar ->
//         def path = jar.getAbsolutePath()
//         println("add jar '${path}'")
//         scg.addJar(path)
//     }
//     scg.addInterfaceToClassEdges(new InterfaceImplementerAll(config.project, typeSolver))
//     def finder = new BFS()
//     def rms = scg.reachableMethods(benchmarkMethods, finder)
//
//     def found = new HashSet()
//     rms.forEach({ _, fs ->
//         fs.each { f ->
//             found.add(f)
//         }
//     })
//     // calculate static performance test coverage
//     def sum = 0
//     config.files.each { f ->
//         sum += f.methods.size()
//     }
//     def ptc = 0
//     if (sum != 0) {
//         ptc = found.size() * 1.0 / sum
//     }
//     println("Core Method Finder: ${finder.noExceptions} successful searches; ${finder.exceptions} exceptions")
//     println("Static call graph coverage value was ${ptc}")
//
//     // print results
//     String outPath = config.scg.out
//     if (outPath != null && outPath != "") {
//         def outValues = new HashMap()
//         outValues.put("coverage", ptc)
//         outValues.put("methods", rms)
//         def out = new File(outPath)
//         out.write(new JsonBuilder(outValues).toPrettyString())
//     }
// }