From 996bd42ed7ec9d879fa6c16d01da37b877ad619c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Patrick=20L=C3=BChne?=
Date: Thu, 30 Nov 2017 13:44:11 +0100
Subject: [PATCH] Intermediate commit with evaluation tool

---
 evaluate.py | 301 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 301 insertions(+)
 create mode 100755 evaluate.py

diff --git a/evaluate.py b/evaluate.py
new file mode 100755
index 000000000..ff4c3aa0a
--- /dev/null
+++ b/evaluate.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python3
+
+import math
+import os
+import re
+import subprocess
+import sys
+import time
+import yaml
+
+import pprint
+
+gray = (186, 189, 182)
+
+def executeCommand(command, stdin = None, cwd = None):
+    with subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE, stdin = (subprocess.PIPE if stdin != None else None), cwd = cwd) as process:
+        stdout, stderr = process.communicate(input = (stdin.encode("utf-8") if stdin != None else None))
+        exitCode = process.returncode
+
+    return stdout.decode("utf-8"), stderr.decode("utf-8"), exitCode
+
+def git(command, cwd, enforce = False):
+    stdout, stderr, exitCode = executeCommand(["git"] + command, cwd = cwd)
+
+    if exitCode != 0:
+        print(stderr, file = sys.stderr)
+
+        if enforce:
+            raise RuntimeError("git error")
+
+def initRepo(config):
+    dataDir = config["storage"]["local"]
+
+    # clone repo if not existing
+    if not os.path.isdir(config["storage"]["local"]):
+        git(["clone", config["storage"]["remote"], dataDir], None, enforce = True)
+
+    # fetch origin
+    git(["fetch"], cwd = dataDir)
+
+    # pull all branches
+    for key, branch in config["storage"]["branches"].items():
+        git(["checkout", branch], cwd = dataDir, enforce = True)
+        git(["pull"], cwd = dataDir)
+
+def readBenchmarkConfig(config):
+    initRepo(config)
+
+    dataDir = config["storage"]["local"]
+
+    # checkout config branch
+    git(["checkout", config["storage"]["branches"]["config"]], cwd = dataDir, enforce = True)
+
+    # read instance list
+    instancesFile = os.path.join(config["storage"]["local"], "instances.yml")
+
+    with open(instancesFile, "r") as stream:
+        instances = yaml.load(stream, Loader=yaml.CLoader)
+
+    # read configurations to test
+    configurationsFile = os.path.join(config["storage"]["local"], "configurations.yml")
+
+    with open(configurationsFile, "r") as stream:
+        configurations = yaml.load(stream, Loader=yaml.CLoader)
+
+    # flatten lists of options
+    for configuration in configurations["configurations"]:
+        configuration["options"] = [item for sublist in configuration["options"] for item in sublist]
+
+    return {"configurations": configurations, "instances": instances}
+
+def outputFilenames(configuration, instance, config):
+    instanceID = instance["ipc"] + "_" + instance["domain"] + "_" + str(instance["instance"])
+    outputFile = os.path.join(configuration["id"], instanceID + ".out")
+    errorFile = os.path.join(configuration["id"], instanceID + ".err")
+    environmentFile = os.path.join(configuration["id"], instanceID + ".env")
+
+    return {"outputFile": outputFile, "errorFile": errorFile, "environmentFile": environmentFile}
+
+def jobKey(configuration, instance):
+    return (configuration["id"], instance["ipc"], instance["domain"], instance["instance"])
+
+def instanceKey(instance):
+    return (instance["ipc"], instance["domain"], instance["instance"])
+
+def addResult(results, configuration, instance, result):
+    if not configuration["id"] in results:
+        results[configuration["id"]] = {}
+
+    results[configuration["id"]][instanceKey(instance)] = result
+
+def result(results, configuration, instance):
+    return results[configuration["id"]][instanceKey(instance)]
+
+def mix(color1, color2, t):
+    return (color1[0] * (1 - t) + color2[0] * t, color1[1] * (1 - t) + color2[1] * t, color1[2] * (1 - t) + color2[2] * t)
+
+# NOTE: "colors" (a list of RGB gradient stops, analogous to "gray" above) is used below
+# but not yet defined in this intermediate commit
+def resultColor(result, config):
+    if result <= 0:
+        return colors[0]
+    elif result >= config["limits"]["time"]:
+        return colors[-1]
+
+    normalizedResult = (result / config["limits"]["time"]) ** 0.2
+    normalizedResult *= (len(colors) - 1)
+
+    c0 = min(math.floor(normalizedResult), len(colors) - 1)
+    t = normalizedResult - c0
+
+    if t <= 0:
+        return colors[c0]
+    elif t >= 1:
+        return colors[c0 + 1]
+
+    return mix(colors[c0], colors[c0 + 1], t)
+
+def collectResults(config):
+    benchmarkConfig = readBenchmarkConfig(config)
+
+    dataDir = config["storage"]["local"]
+
+    # checkout results branch
+    git(["checkout", config["storage"]["branches"]["results"]], cwd = dataDir, enforce = True)
+
+    configurations = benchmarkConfig["configurations"]["configurations"]
+    instances = benchmarkConfig["instances"]
+
+    results = {}
+
+    for instanceSetName, instanceSet in instances.items():
+        for instance in instanceSet:
+            for configuration in configurations:
+                filenames = outputFilenames(configuration, instance, config)
+                outputFile = os.path.join(config["storage"]["local"], filenames["outputFile"])
+                errorFile = os.path.join(config["storage"]["local"], filenames["errorFile"])
+                environmentFile = os.path.join(config["storage"]["local"], filenames["environmentFile"])
+
+                if not os.path.exists(outputFile) or not os.path.exists(errorFile) or not os.path.exists(environmentFile):
+                    addResult(results, configuration, instance, None)
+                    continue
+
+                with open(errorFile, "r") as errorOutput:
+                    errors = errorOutput.read()
+
+                finishedRE = re.compile("^FINISHED CPU", re.M)
+                # TODO: pattern still missing in this intermediate commit; "runtime.group(1)"
+                # below expects a capture group containing the runtime in milliseconds
+                runtimeRE = re.compile("", re.M)
+                timeoutRE = re.compile("^TIMEOUT CPU", re.M)
+                memoutRE = re.compile("^MEM CPU", re.M)
+                exitCodeRE = re.compile(r"^# exit code: (\d+)$", re.M)
+
+                finished = finishedRE.search(errors)
+                runtime = runtimeRE.search(errors)
+                timeout = timeoutRE.search(errors)
+                memout = memoutRE.search(errors)
+                exitCode = exitCodeRE.search(errors)
+
+                if exitCode and int(exitCode.group(1)) != 0:
+                    text = "error"
+                    color = None
+                elif finished:
+                    value = float(runtime.group(1)) / 1000
+                    text = str(value)
+                    color = (value / config["limits"]["time"]) ** 0.2
+                elif timeout:
+                    text = "> " + str(config["limits"]["time"])
+                    color = 1.0
+                elif memout:
+                    text = "> " + str(config["limits"]["memory"] / 1000000) + " GB"
+                    color = 1.0
+                else:
+                    # fallback so that "text" and "color" are always defined
+                    text = "unknown"
+                    color = None
+
+                result = {"text": text, "color": color}
+
+                addResult(results, configuration, instance, result)
+
+    return configurations, instances, results
+
+def aggregateResults(configurations, instanceSetID, instanceSet, instances, results):
+    aggregatedResults = {}
+
+    for instance in instanceSet:
+        ipcDomain = (instance["ipc"], instance["domain"])
+
+        if not ipcDomain in aggregatedResults:
+            aggregatedResults[ipcDomain] = {}
+
+        for configuration in configurations:
+            if not instanceSetID in configuration["instanceSets"]:
+                continue
+
+            if not configuration["id"] in aggregatedResults[ipcDomain]:
+                aggregatedResults[ipcDomain][configuration["id"]] = {"total": 0, "average": None, "results": []}
+
+            r = result(results, configuration, instance)
+
+            # instances without a numeric runtime are counted with a fixed penalty value
+            # (presumably the time limit)
+            value = 900.0
+
+            try:
+                value = float(r["text"])
+                aggregatedResults[ipcDomain][configuration["id"]]["total"] += 1
+            except:
+                pass
+
+            aggregatedResults[ipcDomain][configuration["id"]]["results"].append(value)
+
+    for ipcDomain, results in aggregatedResults.items():
+        # fixed: iterate over the per-domain results, not over aggregatedResults again
+        for configurationKey, configurationResults in results.items():
+            configurationResults["average"] = sum(configurationResults["results"]) / len(configurationResults["results"])
+
+    return aggregatedResults
+
+def requiresInstance(configuration, instance, instances):
+    for requiredInstanceSet in configuration["instanceSets"]:
+        if not requiredInstanceSet in instances:
+            raise RuntimeError("undefined instance set “" + requiredInstanceSet + "”")
+
+        if instance in instances[requiredInstanceSet]:
+            return True
+
+    return False
+
+def renderResultsTable(configurations, instanceSetID, instanceSet, instances, results):
+    # NOTE: the HTML markup emitted below was lost when this patch was extracted;
+    # the tags used here are a plain reconstruction (the original presumably also
+    # colored the result cells, e.g. via resultColor())
+    print("<h2>" + instanceSetID + "</h2>")
+    print("<table>")
+
+    # header row: IPC, domain, instance, one column per configuration
+    print("<tr><td>IPC</td><td>domain</td><td>instance</td>")
+
+    for configuration in configurations:
+        if not instanceSetID in configuration["instanceSets"]:
+            continue
+
+        print("<td>" + configuration["id"] + "</td>")
+
+    print("</tr>")
+
+    # one row per instance
+    for instance in instanceSet:
+        print("<tr><td>" + instance["ipc"] + "</td><td>" + instance["domain"] + "</td><td>" + str(instance["instance"]) + "</td>")
+
+        for configuration in configurations:
+            if not instanceSetID in configuration["instanceSets"]:
+                continue
+
+            r = result(results, configuration, instance)
+
+            if r and r["text"] != "error":
+                print("<td>")
+                print(r["text"])
+            elif r and r["text"] == "error":
+                print("<td>")
+                print(r["text"])
+            else:
+                # no result files for this job yet
+                print("<td>")
+
+            print("</td>")
+
+        print("</tr>")
+
+    print("</table>")
") + +def renderAggregatedResultsTable(type, configurations, instanceSetID, instanceSet, instances, results): + aggregatedResults = aggregateResults(configurations, instanceSetID, instanceSet, instances, results) + + if not aggregatedResults: + print("") + return + + print("

" + instanceSetID + " (" + type + ")

") + + for configurationKey, configurationResults in aggregatedResults.items(): + print("") + + print("") + + for ipcDomain, results in aggregatedResults.items(): + print("") + + for configurationKey, configurationResults in aggregatedResults.items(): + + + r = results[type] + + print("") + + print("") + + print("
IPCdomain
" + configurations[configurationKey]["id"] + "
" + ipcDomain[0] + "" + ipcDomain[1] + "" + str(r) + "
") + +def main(): + with open("config.yml", "r") as stream: + config = yaml.load(stream, Loader=yaml.CLoader) + + configurations, instances, results = collectResults(config) + + print("TPLP benchmark results

TPLP Benchmark Results

last updated at " + time.strftime("%Y-%m-%d %H:%M:%S %z") + "
") + + for instanceSetID, instanceSet in instances.items(): + renderResultsTable(configurations, instanceSetID, instanceSet, instances, results) + renderAggregatedResultsTable("total", configurations, instanceSetID, instanceSet, instances, results) + renderAggregatedResultsTable("average", configurations, instanceSetID, instanceSet, instances, results) + + print("
") + +main()