From 852e38ee883ea57ec0556d371ec030036502ed28 Mon Sep 17 00:00:00 2001
From: Rico Bergmann <rico.bergmann1@tu-dresden.de>
Date: Thu, 20 Jun 2019 16:32:16 +0200
Subject: [PATCH] Only ignore 'stronger' models in subsequent runs

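A timeout used to add the tool to failed_tools, after which the tool
was skipped for *every* model in the remaining runs. Track the
available models per tool instead: when a tool times out on a model,
only that model and all later ("stronger") entries in conf.Models are
excluded from subsequent runs, while the smaller models continue to be
benchmarked. This assumes conf.Models is ordered from weakest to
strongest. The run already in progress is not affected; the exclusion
only takes effect from the next run onwards.

A rough sketch of the truncation, using hypothetical model names that
do not appear in run.py:

    models = ['tiny', 'small', 'medium', 'large']  # stand-in for conf.Models
    failed_model_idx = models.index('medium')      # -> 2
    available = models[:failed_model_idx]          # ['tiny', 'small']
    ignored = models[failed_model_idx:]            # ['medium', 'large']
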
---
 scripts/run.py | 55 +++++++++++++++++++++++++++++++++----------------------
 1 file changed, 33 insertions(+), 22 deletions(-)

diff --git a/scripts/run.py b/scripts/run.py
index c0db52e..2424b7e 100755
--- a/scripts/run.py
+++ b/scripts/run.py
@@ -52,36 +52,47 @@ def benchmark(conf):
         shutil.copy(header, result_file)
         # os.remove(result_file)
     os.environ['Runs'] = str(conf.Runs)
-    failed_tools = set()
+
+    # by default we test every model for every tool
+    available_models = { tool : conf.Models for tool in conf.Tools }
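+    # every tool starts with a reference to the same full conf.Models list;
+    # a timeout below replaces it with a truncated per-tool copy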
+
     for r in range(0, conf.Runs):
         os.environ['RunIndex'] = str(r)
         print("## Run {}".format(r))
+
         for tool in conf.Tools:
             config = ConfigParser.ConfigParser()
             config.read(os.path.join(BASE_DIRECTORY, "solutions", tool, "solution.ini"))
             set_working_directory("solutions", tool)
             os.environ['Tool'] = tool
-            for model in conf.Models:
-                if not tool in failed_tools:
-                    full_model_path = os.path.abspath(os.path.join(BASE_DIRECTORY, "models", model))
-                    os.environ['Model'] = model
-                    os.environ['ModelPath'] = full_model_path
-                    print("Running benchmark: tool = " + tool + ", model = " + full_model_path)
-                    try:
-                        output = subprocess.check_output(config.get('run', 'cmd'), shell=True, timeout=conf.Timeout)
-                        with open(result_file, "ab") as file:
-                            file.write(output)
-                    except CalledProcessError as e:
-                        print("Program exited with error")
-                        print('stdout:')
-                        print(e.output)
-                        print('stderr:')
-                        print(e.stderr)
-                    except subprocess.TimeoutExpired as e:
-                        print("Program reached the timeout set ({0} seconds). The command we executed was '{1}'".format(e.timeout, e.cmd))
-                        failed_tools.add(tool)
-                else:
-                    print("Skipping tool {0} for model {1} because it already failed earlier".format(tool, model))
+
+            print('# Available models for {tool} are {models}'.format(tool=tool, models=available_models[tool]))
+
+            for model in available_models[tool]:
+                full_model_path = os.path.abspath(os.path.join(BASE_DIRECTORY, "models", model))
+                os.environ['Model'] = model
+                os.environ['ModelPath'] = full_model_path
+                print("Running benchmark: tool = " + tool + ", model = " + full_model_path)
+
+                try:
+                    output = subprocess.check_output(config.get('run', 'cmd'), shell=True, timeout=conf.Timeout)
+                    with open(result_file, "ab") as file:
+                        file.write(output)
+                except CalledProcessError as e:
+                    print("Program exited with error")
+                    print('stdout:')
+                    print(e.output)
+                    print('stderr:')
+                    print(e.stderr)
+                except subprocess.TimeoutExpired as e:
+                    print("Program reached the timeout set ({0} seconds). The command we executed was '{1}'".format(e.timeout, e.cmd))
+
+                    # the tool timed out on this model: drop it and every "stronger" model (all entries from it onwards in conf.Models) from later runs
+                    failed_model_idx = conf.Models.index(model)
+                    available_models[tool] = conf.Models[ : failed_model_idx]
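+                    # the for loop above still iterates the old list, so the current run finishes as scheduled; the cut takes effect in the next run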
+                    print('# Ignoring models {models} for tool {tool} in the future'.format(models=conf.Models[failed_model_idx : ], tool=tool))
 
 
 def clean_dir(*path):
-- 
GitLab