Commit 68efa726 authored by Andreas Schmidt

Merge remote-tracking branch 'origin/rtn-2018-dump' into develop

parents b87b0b79 3cc35d81
@@ -29,7 +29,7 @@ def duration_to_string(d):
     return str(d['Start']) + "->" + str(d['Stop'])

 def analyse(df1, df2, config, export=False):
-    for d in latency.get_durations(df1, config):
+    for d in latency.get_durations_2([df1, df2], config):
         data1 = df1[d['Stop']] - df1[d['Start']]
         data2 = df2[d['Stop']] - df2[d['Start']]
         if samples_are_different(remove_outliers(data1), remove_outliers(data2)) and samples_are_different(data1, data2):
......
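The gate in the hunk above only reports a region when the two runs differ both with and without outliers. The helpers samples_are_different and remove_outliers are not part of this diff; the sketch below is only a guess at their semantics (a Welch's t-test plus an IQR filter, with invented thresholds), not the project's actual implementation.

# Illustration only: plausible semantics for the helpers used above
# (the real implementations live elsewhere in the repository).
import numpy as np
from scipy import stats

def remove_outliers(data, k=1.5):
    # Drop samples outside the k*IQR whiskers (assumed outlier rule).
    q1, q3 = np.percentile(data, [25, 75])
    iqr = q3 - q1
    return data[(data >= q1 - k * iqr) & (data <= q3 + k * iqr)]

def samples_are_different(a, b, alpha=0.05):
    # Welch's t-test: do the two latency samples have different means?
    _, p_value = stats.ttest_ind(a, b, equal_var=False)
    return p_value < alpha

The full listing that follows appears to be the new xlap.analyse.e2e module referenced by the import added in the main script further down.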
import math
import numpy as np
from .util import cdf, extract_durations
from scipy import stats
import matplotlib.pyplot as plt
import xlap.analyse.latency as latency

plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = False
plt.rcParams['font.family'] = 'sans-serif'
#plt.rcParams['font.sans-serif'] = 'Latin Modern Sans'
#plt.rcParams['font.monospace'] = 'FreeMono'


# Taken from: http://composition.al/blog/2015/11/29/a-better-way-to-add-labels-to-bar-charts-with-matplotlib/
def autolabel(rects, ax, labels):
    # Get y-axis height to calculate label position from.
    (x_left, x_right) = ax.get_xlim()
    x_width = x_right - x_left
    for i, rect in enumerate(rects):
        width = rect.get_width()
        color = "black"
        align = "left"
        # Fraction of axis height taken up by this rectangle
        p_width = (width / x_width)
        # If we can fit the label above the column, do that;
        # otherwise, put it inside the column.
        if p_width > 0.50: # arbitrary; 95% looked good to me.
            label_position = width - (x_width * 0.01)
            color = "white"
            align = "right"
        else:
            label_position = width + (x_width * 0.00)
        mono = {'family': 'monospace'}
        ax.text(label_position, rect.get_y(), labels[i], ha=align, va='bottom', rotation=0, color=color, fontdict=mono)


def _plot_regions(dataset):
    """
    plot regions, sorted by latency criticality
    """
    relevant = sorted([x for x in dataset if x['Slowdown'] > 0 and x['Slowdown'] <= 5], key=lambda x: x['Slowdown'])
    x = np.arange(len(relevant))
    correlations = list(map(lambda x: x['Slowdown'], relevant))
    ticks = list(map(lambda x: "<%s,%s>" % (x['Start'][:-2], x['End'][:-2]), relevant))
    fig, ax = plt.subplots()
    #rects = ax.barh(x, correlations, align="center", tick_label="", color='#3babaf')
    rects = ax.barh(x, correlations, align="center", tick_label="")
    autolabel(rects, ax, ticks)
    plt.xlabel('Normalized slowdown')
    plt.tight_layout()
    plt.savefig("normalized-slowdown.pdf")
    plt.close()
def duration_to_string(d):
    return str(d['Start']) + "->" + str(d['Stop'])


def analyse(df1, df2, config, export=False):
    data1 = df1['EndToEnd_D']
    data2 = df2['EndToEnd_D']
    print("#1 e2e: " + str(np.mean(data1)) + " +- " + str(np.std(data1)))
    print("#2 e2e: " + str(np.mean(data2)) + " +- " + str(np.std(data2)))
    normalize = np.mean(data1) / np.mean(data2)
    dataset = []
    for d in latency.get_durations_2([df1, df2], config):
        local1 = df1[d['Stop']] - df1[d['Start']]
        local2 = df2[d['Stop']] - df2[d['Start']]
        # too short -> ignore
        if np.mean(local1) == 0 or np.mean(local2) == 0:
            continue
        slowdown = np.mean(local1) / np.mean(local2)
        #print(duration_to_string(d)+" "+str(lf) + " / "+str(frac)+ " = " + str(lf/frac))
        #print("\"<"+d['Start']+","+d['Stop']+">\" "+str(slowdown/normalize)+" "+str(slowdown)+" "+str(normalize))
        dataset += [{ 'Start': d['Start'], 'End': d['Stop'], 'Slowdown': slowdown }]
    _plot_regions(dataset)
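To make the slowdown numbers concrete, here is a small worked example that mirrors the arithmetic in analyse() above; all values are invented.

e2e_1, e2e_2 = 10.0, 8.0        # mean EndToEnd_D of run #1 and run #2 (made up)
region_1, region_2 = 2.0, 1.25  # mean duration of one region in each run (made up)

normalize = e2e_1 / e2e_2       # overall slowdown of run #1 vs. run #2 -> 1.25
slowdown = region_1 / region_2  # this region's slowdown -> 1.6

# A region whose slowdown exceeds the overall ratio (slowdown / normalize > 1)
# slowed down more than the application as a whole; the commented-out print in
# analyse() reports exactly this ratio.
print(slowdown, normalize, slowdown / normalize)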
@@ -5,9 +5,15 @@ import numpy as np
 import sys
 import graphviz


 class LatencyAnalysis():
     def __init__(self, cfg=None, hdb=None):
+        plt.rcParams['ps.useafm'] = True
+        plt.rcParams['pdf.use14corefonts'] = True
+        plt.rcParams['text.usetex'] = False
+        plt.rcParams['font.family'] = 'sans-serif'
+        #plt.rcParams['font.sans-serif'] = 'Latin Modern Sans'
+        #plt.rcParams['font.monospace'] = 'FreeMono'
         self.cfg = cfg
         correlations = []
@@ -152,13 +158,15 @@ def autolabel(rects, ax, labels):
         # If we can fit the label above the column, do that;
         # otherwise, put it inside the column.
         if p_width > 0.50: # arbitrary; 95% looked good to me.
-            label_position = width - (x_width) + 0.7
+            #label_position = width - (x_width) + 0.7
+            label_position = width - (x_width * 0.01)
             color = "white"
             align = "right"
         else:
-            label_position = width + (x_width * 0.01)
+            label_position = width + (x_width * 0.002)
-        ax.text(label_position, rect.get_y(), labels[i], ha=align, va='bottom', rotation=0, color=color)
+        mono = {'family': 'monospace'}
+        ax.text(label_position, rect.get_y(), labels[i], ha=align, va='bottom', rotation=0, color=color, fontdict=mono)


 def _plot_critical_regions(hdb):
@@ -169,11 +177,12 @@ def _plot_critical_regions(hdb):
     x = np.arange(len(relevant))
     correlations = list(map(lambda x: x['Correlation'], relevant))
-    ticks = list(map(lambda x: "%s-%s" % (x['Start'][:-2], x['End'][:-2]), relevant))
+    ticks = list(map(lambda x: "<%s,%s>" % (x['Start'][:-2], x['End'][:-2]), relevant))
     fig, ax = plt.subplots()
     rects = ax.barh(x, correlations, align="center", tick_label="")
     autolabel(rects, ax, ticks)
     plt.xlabel('Latency criticality')
+    plt.tight_layout()
     plt.savefig("latency-criticality.pdf")
     plt.close()
@@ -204,6 +213,10 @@ def get_durations(df, config):
             hdb += [{'Start': str(event1), 'Stop': str(event2), 'Source': 'cfa'}]
     return hdb


+def get_durations_2(dfs, config):
+    df = dfs[0] + dfs[1]
+    return get_durations(df, config)
+
+
 def analyse(df, config):
     hb = []
......
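Assuming the frames returned by evaluate() are pandas DataFrames, the new get_durations_2 above folds two runs into one frame via element-wise addition before extracting durations. A toy illustration of just that addition step (column names are invented; what get_durations does with the result is not shown):

import pandas as pd

# Two runs with the same timestamp columns (names are made up).
run1 = pd.DataFrame({"SendStart_T": [1.0, 2.0], "SendEnd_T": [1.5, 2.6]})
run2 = pd.DataFrame({"SendStart_T": [1.1, 2.1], "SendEnd_T": [1.4, 2.8]})

combined = run1 + run2   # element-wise sum, aligned on index and column names
print(combined)
# Columns present in only one of the frames would turn into NaN here,
# since pandas aligns on column labels before adding.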
@@ -4,11 +4,13 @@ from xlap.parse import evaluate, parse_config
 import xlap.analyse.jitter as jitter
 import xlap.analyse.latency as latency
 import xlap.analyse.diff as difference
+import xlap.analyse.e2e as e2e

 tasks = {
     "jitter": None,
     "latency": None,
     "difference": None,
+    "e2e": None,
     "capture": None
 }
@@ -43,18 +45,29 @@ def main():
                 f.write("\n")
         else:
             print(output)
+    elif command == "e2e":
+        khz1 = 2000000
+        khz2 = 3000000
+        path="../publications/rtn-18/eval/20180419_energy/"
+        df_data1 = evaluate(path+"sender-"+str(khz1)+".csv", path+"receiver-"+str(khz1)+".csv", config=config, kind=0)
+        df_data2 = evaluate(path+"sender-"+str(khz2)+".csv", path+"receiver-"+str(khz2)+".csv", config=config, kind=0)
+        e2e.analyse(df_data1, df_data2, config)
     elif command == "latency":
         df_data = evaluate(data_files["sender"], data_files["receiver"], config=config, kind=0)
         a = latency.analyse(df_data, config)
         print(a.corr.sort_values(ascending=False))
     elif command == "difference":
-        df_data1 = evaluate("../prrt/out/s.csv", "../prrt/out/r.csv", config=config, kind=0)
+        path="../publications/rtn-18/eval/20180420_"
+        khz = 3000000
+        df_data1 = evaluate(path+"base1/sender-"+str(khz)+".csv", path+"base1/receiver-"+str(khz)+".csv", config=config, kind=0)
         # sanity check:
         #df_data2 = evaluate("../prrt/out/s.csv", "../prrt/out/r.csv", config=config, kind=0)
         # same setup, different measurement run:
         #df_data2 = evaluate("../prrt/out/s+same.csv", "../prrt/out/r+same.csv", config=config, kind=0)
         # different setup:
-        df_data2 = evaluate("../prrt/out/s+send.csv", "../prrt/out/r+send.csv", config=config, kind=0)
+        #df_data2 = evaluate("../prrt/out/s+send.csv", "../prrt/out/r+send.csv", config=config, kind=0)
+        df_data2 = evaluate(path+"base2/sender-"+str(khz)+".csv", path+"base2/receiver-"+str(khz)+".csv", config=config, kind=0)
+        #df_data2 = evaluate(path+"changed/sender-"+str(khz)+".csv", path+"changed/receiver-"+str(khz)+".csv", config=config, kind=0)
         difference.analyse(df_data1, df_data2, config)
     else:
         df_data = evaluate(data_files["sender"], data_files["receiver"], config=config, kind=0)
......
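Finally, a minimal sketch of how the pieces added in this merge could be driven outside of main(); the CSV paths and the parse_config call are assumptions, while evaluate() and e2e.analyse() are used exactly as in the diff above.

from xlap.parse import evaluate, parse_config
import xlap.analyse.e2e as e2e

# Hypothetical inputs: one sender/receiver CSV pair per measurement run.
config = parse_config("config.json")  # assumed signature
run_1 = evaluate("sender-a.csv", "receiver-a.csv", config=config, kind=0)
run_2 = evaluate("sender-b.csv", "receiver-b.csv", config=config, kind=0)

# Prints the per-run end-to-end statistics and writes normalized-slowdown.pdf.
e2e.analyse(run_1, run_2, config)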