Skip to content
Snippets Groups Projects
Commit 0324de5f authored by René Schöne's avatar René Schöne
Browse files

Finalizing measurements.

- fab task to download from remote
- attribute metric figures plus doc in ipynb
- updated some docs
parent e63eed60
Branches
Tags
No related merge requests found
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
- clone [the racr repo](https://github.com/christoff-buerger/racr.git) - clone [the racr repo](https://github.com/christoff-buerger/racr.git)
- install racr - install racr
- let scheme IDE know about racr - let scheme IDE know about racr
- racket: Create a symlink in `$RACKET_INSTALL_DIR/collects` pointing to `$RACR_REPO/racr/racket-bin/racr`
# Links # Links
......
- X = {normal, flushed, noncached}
- (X1,X2) = {(normal, flushed), (normal, noncached), (flushed, noncached)}
# Meaningful percentages to calculate # Meaningful percentages to calculate
1) `total.X.computed / total.X.called` with X = {normal, flushed, noncached} 1) {X}baseline: `total.X.computed / total.X.called`
- the baseline of the method X - the baseline of the method X
2) `total.normal.computed / total.Y.called` with Y = {flushed, noncached} 2) ratio{X1}To{X2}: `total.{X1}.computed / total.{X2}.called`
- the efficiency of the incremental approach in comparison to the method Y, - the efficiency of the incremental approach in comparison to the method X2,
i.e. the ratio between actual work done normally compared to possible work done with method Y i.e. the ratio between actual work done in X1 compared to possible work done with method X2
3) `(total.Y.computed / total.Y.called) - (total.normal.computed / total.Y.called)` with Y = {flushed, noncached} 3) speedup{X1}To{X2}: `(total.{X2}.computed / total.{X2}.called) - (total.{X1}.computed / total.{X2}.called)`
- = `baseline(Y) - ratio(normal, Y)` - = `baseline({X2}) - ratio({X1}, {X2})`
- the "speed-up" of the incremental approach in comparison to the method Y - the "speed-up" of the incremental approach (normal or flushed) in comparison to the method X2
# GLPK-ILP-Generator
org.haec.optimizer.ilp.GLPK_ILPGenerator
## Generation of ILP
- Times: `/opt/luna/eclipse/logs/durations.csv`, times in milli-sec
## Solving of ILP
- Using GLPK
- Times: `/home/rschoene/git/mquat/project_systems/gen_sys/*/sol.csv`, times in nano-sec
- ILP: `/home/rschoene/git/mquat/project_systems/gen_sys/*/gip.glpk`
- Solution: `/opt/luna/eclipse/logs/csvs/<timestamp>.csv`
# Toy-Generator
org.coolsoftware.requests.resource.request.ui.actions.ILPGenerator2
## Generation of ILP
- Times: `/home/rschoene/git/mquat/project_systems/gen_sys/*/gen.csv`, times in milli-sec
## Solving of ILP
- Using LP_solve
- Times: `/home/rschoene/git/mquat/project_systems/gen_sys/*/sol.csv`, times in milli-sec
- Solution: `/home/rschoene/git/mquat/project_systems/gen_sys/*/ilp-result-max-3.txt`
# LP-Generator
org.haec.optimizer.ilp.ILPGenerator
## Generation of ILP
- Times: `/home/rschoene/git/mquat/project_systems/gen_sys/*/gen.csv`, times in milli-sec
## Solving of ILP
- Using LP_solve
- Times: `/home/rschoene/git/mquat/project_systems/gen_sys/*/sol.csv`, times in milli-sec
# Schemas
- `durations.csv`: problemName, timestamp¹, dSetup, dBinVar, dUsage, dProperty, dBuffer, dTotal
- `*/gen.csv`: solver, timestamp², architectural constraints, resource negotiation, nfp negotiation, objective function
- `*/sol.csv`: solver, timestamp², rows, cols, duration
¹: "DD.MM.YY HH:MM"
²: "YYYY-MM-DD'T'HH:MM:SS.NNN" (Python datetime.datetime.isoformat())
...@@ -4,7 +4,8 @@ import ilp_measurement as measure ...@@ -4,7 +4,8 @@ import ilp_measurement as measure
import ilp_check as check import ilp_check as check
import install import install
import sockets import sockets
from fabric.api import local, task import os
from fabric.api import local, task, get, hosts, run
@task @task
def call_racket(f, *args): def call_racket(f, *args):
...@@ -30,3 +31,11 @@ def cloc(not_match, *args, **kwargs): ...@@ -30,3 +31,11 @@ def cloc(not_match, *args, **kwargs):
not_match = 'sockets|java|tricks|example-ast|larceny|Makefile|ls' + ('|'+not_match if not_match else '') not_match = 'sockets|java|tricks|example-ast|larceny|Makefile|ls' + ('|'+not_match if not_match else '')
local('cloc . --exclude-dir=doc,gen,profiling,test,racket-bin,larceny-bin --not-match-f="{0}" {1} {2}'.format(not_match, ' '.join(args), local('cloc . --exclude-dir=doc,gen,profiling,test,racket-bin,larceny-bin --not-match-f="{0}" {1} {2}'.format(not_match, ' '.join(args),
' '.join("{!s}={!r}".format(k,v) for (k,v) in kwargs.iteritems()))) ' '.join("{!s}={!r}".format(k,v) for (k,v) in kwargs.iteritems())))
@task
@hosts('rschoene@141.76.65.177')
def dl(name, rdir = '~/git/racr-mquat/'):
#run('uname -a')
target = os.path.join(rdir, name)
print target
get(target, local_path = name)
Source diff could not be displayed: it is too large. Options to address this: view the blob.
...@@ -200,17 +200,17 @@ def merge_att_measurements(): ...@@ -200,17 +200,17 @@ def merge_att_measurements():
with open('profiling/att-percentages.csv', 'w') as fd: with open('profiling/att-percentages.csv', 'w') as fd:
w = csv.writer(fd) w = csv.writer(fd)
w.writerow(['dir', 'normalBaseline', 'flushedBaseline', 'noncachedBaseline', w.writerow(['dir', 'normalBaseline', 'flushedBaseline', 'noncachedBaseline',
'ratioToFlushed', 'ratioToNoncached', 'ratioNormalToFlushed', 'ratioNormalToNoncached', 'ratioFlushedToNoncached',
'speedupToFlushed', 'speedupToNoncached']) 'speedupNormalToFlushed', 'speedupNormalToNoncached', 'speedupFlushedToNoncached'])
baseline = lambda att: att.computed * 1.0 / att.called if att.called > 0 else 0 baseline = lambda att: att.computed * 1.0 / att.called if att.called > 0 else 0
ratio = lambda normal, y: normal.computed * 1.0 / y.called if y.called > 0 else 0 ratio = lambda x, y: x.computed * 1.0 / y.called if y.called > 0 else 0
for dir_name, total in totals.iteritems(): for dir_name, total in totals.iteritems():
normal, flushed, noncached = total.normal, total.flushed, total.noncached normal, flushed, noncached = total.normal, total.flushed, total.noncached
w.writerow([dir_name, baseline(normal), baseline(flushed), baseline(noncached), w.writerow([dir_name, baseline(normal), baseline(flushed), baseline(noncached),
ratio(normal, flushed), ratio(normal, noncached), ratio(normal, flushed), ratio(normal, noncached), ratio(flushed, noncached),
baseline(flushed) - ratio(normal, flushed), baseline(flushed) - ratio(normal, flushed),
baseline(noncached) - ratio(normal, noncached)]) baseline(noncached) - ratio(normal, noncached),
baseline(noncached) - ratio(flushed, noncached)])
@task(name = 'conflate-results') @task(name = 'conflate-results')
def conflate_results(pathname = '*', skip_gen = False, skip_sol = False, impls = 'larceny:plt-r6rs'): def conflate_results(pathname = '*', skip_gen = False, skip_sol = False, impls = 'larceny:plt-r6rs'):
...@@ -245,7 +245,6 @@ def conflate_results(pathname = '*', skip_gen = False, skip_sol = False, impls = ...@@ -245,7 +245,6 @@ def conflate_results(pathname = '*', skip_gen = False, skip_sol = False, impls =
impl = tokens[0].split('/')[-1] impl = tokens[0].split('/')[-1]
gen_time = '.'.join(tokens[1:3]) gen_time = '.'.join(tokens[1:3])
row = [mod.isoformat(), impl, dirname(d), f.split('.')[0], gen_time] row = [mod.isoformat(), impl, dirname(d), f.split('.')[0], gen_time]
#print row
writer.writerow(row) writer.writerow(row)
os.rename(f, os.path.join(gen_old_dir, os.path.basename(f))) os.rename(f, os.path.join(gen_old_dir, os.path.basename(f)))
...@@ -256,13 +255,9 @@ def conflate_results(pathname = '*', skip_gen = False, skip_sol = False, impls = ...@@ -256,13 +255,9 @@ def conflate_results(pathname = '*', skip_gen = False, skip_sol = False, impls =
with open(f) as fd: with open(f) as fd:
contents = fd.readlines() contents = fd.readlines()
count = next(int(line[line.rindex(',')+1:-1]) for line in contents if line.startswith('to-ilp ,')) count = next(int(line[line.rindex(',')+1:-1]) for line in contents if line.startswith('to-ilp ,'))
print f, count, max_count
if count > max_count: if count > max_count:
contents_hightest = contents contents_hightest = contents
max_count = count max_count = count
# sys.stdout.write('new highest {}\r'.format(f))
# sys.stdout.flush()
# sys.stdout.write('\n')
if len(att_measures) > 0: if len(att_measures) > 0:
with open(att_results, 'w') as fd: with open(att_results, 'w') as fd:
w = csv.writer(fd) w = csv.writer(fd)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment