Commit 96a298a8 authored by Andreas Schmidt

Add evaluation code and data for NSDI'18.

parent 7817e094
data_files:
  sender: "sender.csv"
  receiver: "receiver.csv"
threads:
  - app_send
  - trans_send
  - trans_recv
  - app_recv
cycle_reference:
  app_send:
    Start: PrrtSendStart
    Stop: PrrtSendEnd
  trans_send:
    Start: PrrtTransmitStart
    Stop: LinkTransmitEnd
  trans_recv:
    Start: LinkReceive
    Stop: PrrtReturnPackage
  app_recv:
    Start: PrrtReceivePackage
    Stop: PrrtDeliver
time_reference:
  sender:
    Start: PrrtSendStart
    Stop: LinkTransmitEnd
  receiver:
    Start: LinkReceive
    Stop: PrrtDeliver
stamps:
  PrrtSendStart:
    Source: sender
    Thread: app_send
    Type: time
  PrrtSubmitPackage:
    Source: sender
    Thread: app_send
    Type: cycle
  PrrtSendEnd:
    Source: sender
    Thread: app_send
    Type: time
  PrrtTransmitStart:
    Source: sender
    Thread: trans_send
    Type: time
  PrrtTransmitEnd:
    Source: sender
    Thread: trans_send
    Type: cycle
  PrrtEncodeStart:
    Source: sender
    Thread: trans_send
    Type: cycle
  PrrtEncodeEnd:
    Source: sender
    Thread: trans_send
    Type: cycle
  LinkTransmitStart:
    Source: sender
    Thread: trans_send
    Type: cycle
  ChannelTransmit:
    Source: sender
    Thread: trans_send
    Type: time
  LinkTransmitEnd:
    Source: sender
    Thread: trans_send
    Type: time
  ChannelReceive:
    Source: receiver
    Thread: trans_recv
    Type: time
  LinkReceive:
    Source: receiver
    Thread: trans_recv
    Type: time
  SendFeedbackStart:
    Source: receiver
    Thread: trans_recv
    Type: cycle
  SendFeedbackEnd:
    Source: receiver
    Thread: trans_recv
    Type: cycle
  DecodeStart:
    Source: receiver
    Thread: trans_recv
    Type: cycle
  DecodeEnd:
    Source: receiver
    Thread: trans_recv
    Type: cycle
  HandlePacketStart:
    Source: receiver
    Thread: trans_recv
    Type: cycle
  HandlePacketEnd:
    Source: receiver
    Thread: trans_recv
    Type: cycle
  PrrtReturnPackage:
    Source: receiver
    Thread: trans_recv
    Type: time
  PrrtDeliver:
    Source: receiver
    Thread: app_recv
    Type: time
  CopyOutputStart:
    Source: receiver
    Thread: app_recv
    Type: cycle
  CopyOutputEnd:
    Source: receiver
    Thread: app_recv
    Type: cycle
  PrrtReceivePackage:
    Source: receiver
    Thread: app_recv
    Type: time
durations:
  Send:
    Start: PrrtSendStart
    Stop: PrrtSendEnd
    Source: sender
  PrrtTransmit:
    Start: PrrtTransmitStart
    Stop: PrrtTransmitEnd
    Source: sender
  LinkTransmit:
    Start: LinkTransmitStart
    Stop: LinkTransmitEnd
    Source: sender
  Submit:
    Start: PrrtSendStart
    Stop: PrrtSubmitPackage
    Source: sender
  Enqueue:
    Start: PrrtSubmitPackage
    Stop: PrrtSendEnd
    Source: sender
  SenderIPC:
    Start: PrrtSubmitPackage
    Stop: PrrtTransmitStart
    Source: sender
  SenderEnqueued:
    Start: PrrtSendEnd
    Stop: LinkTransmitStart
    Source: sender
  ReceiverIPC:
    Start: PrrtReturnPackage
    Stop: PrrtReceivePackage
    Source: receiver
  HandlePacket:
    Start: HandlePacketStart
    Stop: HandlePacketEnd
    Source: receiver
  Feedback:
    Start: SendFeedbackStart
    Stop: SendFeedbackEnd
    Source: receiver
  Decoding:
    Start: DecodeStart
    Stop: DecodeEnd
    Source: receiver
packet_types:
  Data: 0
  Redundancy: 1
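For orientation, the sketch below shows how a configuration like this can be turned into duration columns: for every entry under durations, the Stop stamp's time column minus the Start stamp's time column yields one <Name>_D series. This is only a minimal sketch, assuming the dumped CSVs carry one <Stamp>_T column per stamp and that derived durations use a _D suffix (both inferred from the analysis code later in this commit); compute_durations and config.yaml are hypothetical names.

import pandas as pd
import yaml

def compute_durations(df, config, source):
    # Derive one <Name>_D column per configured duration of the given source.
    for name, spec in config["durations"].items():
        if spec.get("Source") != source:
            continue
        start_col, stop_col = spec["Start"] + "_T", spec["Stop"] + "_T"
        if start_col in df.columns and stop_col in df.columns:
            df[name + "_D"] = df[stop_col] - df[start_col]
    return df

with open("config.yaml") as f:  # hypothetical file name
    cfg = yaml.safe_load(f)
sender_df = compute_durations(pd.read_csv(cfg["data_files"]["sender"]), cfg, "sender")
print(sender_df.filter(like="_D").describe())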
......@@ -24,7 +24,8 @@ static inline unsigned long long timestampByTime(struct timespec *ts)
void XlapTimestampTableDump(FILE *out, XlapTimestampPacketKind kind, XlapTimestampTable *table)
{
# define OUT(id) fprintf(out, ",%llu,%llu", timestampByTime(&table->rows[row].time[ts_##id].actual.t), (unsigned long long) table->rows[row].time[ts_##id].actual.c);
for (unsigned int row = 0; row < TS_ROWS; row++) {
// Start at 1 to remove the scratch row 0 from output.
for (unsigned int row = 1; row < TS_ROWS; row++) {
fprintf(out, "%u,%u", row, (unsigned) kind);
PP_foreach(PP_join_space, OUT, TIMESTAMP_ID_LIST)
fprintf(out, "\n");
......
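For reference, a minimal sketch of loading such a dump with pandas, as the analysis scripts in this commit do. The <Stamp>_T / <Stamp>_C column naming is an assumption inferred from the Python code further down; the actual header is written by XlapTimestampTableDumpHeader, which is not shown here.

import pandas as pd

sender = pd.read_csv("sender.csv")                        # file name as in the evaluation config
t_cols = [c for c in sender.columns if c.endswith("_T")]  # wall-clock timestamps
c_cols = [c for c in sender.columns if c.endswith("_C")]  # cycle-counter stamps
print(sender[t_cols].describe())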
......@@ -32,6 +32,8 @@ typedef union XlapTimestamp {
PrrtTransmitEnd, \
LinkTransmitStart, \
LinkTransmitEnd, \
ChannelTransmit, \
ChannelReceive, \
LinkReceive, \
DecodeStart, \
DecodeEnd, \
......@@ -82,23 +84,65 @@ typedef struct XlapTimestampTableRow {
* Table that stores timestamp table rows
*/
#ifdef XLAP
typedef struct XlapTimestampTable {
    XlapTimestampTableRow rows[TS_ROWS];
} XlapTimestampTable;
#else /* XLAP */
typedef char XlapTimestampTable;
#endif
/*
* Dummy data structure to store a single timestamp table row.
*/
#ifdef XLAP
typedef struct XlapTimestampPlaceholder {
    _Atomic(XlapTimestampTable *) tstable[1];
    XlapTimestampTableRow rows[1];
} XlapTimestampPlaceholder;
#else /* XLAP */
typedef char XlapTimestampPlaceholder;
#endif
/*
* update the clock value of a timestamp by setting an explicit value
*
* This macro will cause a SIGSEGV if the application has not installed a
* timestamp table on the socket.
*/
#ifdef XLAP
# define XlapTimeStampValue(sck, kind, seqno, id, value) do { \
atomic_load_explicit(&(sck)->tstable[kind], memory_order_acquire)->rows[(seqno) % TS_ROWS].time[ts_##id].actual.t = value; \
} while (0)
#else /* XLAP */
# define XlapTimeStampValue(sck, kind, seqno, id, value) do { \
(void) (sck); \
(void) (kind); \
(void) (seqno); \
(void) (ts_##id); \
(void) (value); \
} while (0)
#endif
/*
* update the clock value of a cyclestamp by setting an explicit value
*
* This macro will cause a SIGSEGV if the application has not installed a
* timestamp table on the socket.
*/
#ifdef XLAP
# define XlapCycleStampValue(sck, kind, seqno, id, value) do { \
atomic_load_explicit(&(sck)->tstable[kind], memory_order_acquire)->rows[(seqno) % TS_ROWS].time[ts_##id].actual.c = value; \
} while (0)
#else /* XLAP */
# define XlapCycleStampValue(sck, kind, seqno, id, value) do { \
(void) (sck); \
(void) (kind); \
(void) (seqno); \
(void) (ts_##id); \
(void) (value); \
} while (0)
#endif
/*
......@@ -163,7 +207,7 @@ typedef struct XlapTimestampTableRow {
* print a timestamp dump header
*/
#ifdef XLAP
extern void XlapTimestampTableDumpHeader(FILE *);
#else
# define XlapTimestampTableDumpHeader(f) do { \
(void) (f); \
......@@ -174,7 +218,7 @@ typedef struct XlapTimestampTableRow {
* dump a timestamp table
*/
#ifdef XLAP
extern void XlapTimestampTableDump(FILE *, XlapTimestampPacketKind, XlapTimestampTable *);
#else
# define XlapTimestampTableDump(f, k, t) do { \
(void) (f); \
......
data_files:
sender: "rtn2017/results/on/2017_03_28_09_33_00_Sender.csv"
receiver: "rtn2017/results/on/2017_03_28_09_33_00_Receiver.csv"
sender: "nsdi2018/results/2018_02_14_12_57_01_sender.csv"
receiver: "nsdi2018/results/2018_02_14_12_57_01_receiver.csv"
threads:
- app_send
- trans_send
- trans_recv
- app_recv
cycle_reference:
sender:
app_send:
Start: PrrtSendStart
Stop: PrrtSendEnd
trans_send:
Start: PrrtTransmitStart
Stop: LinkTransmitEnd
receiver:
trans_recv:
Start: LinkReceive
Stop: PrrtReturnPackage
app_recv:
Start: PrrtReceivePackage
Stop: PrrtDeliver
time_reference:
......@@ -18,70 +29,98 @@ time_reference:
stamps:
PrrtSendStart:
Source: sender
Type: time
PrrtSendEnd:
Source: sender
Thread: app_send
Type: time
PrrtSubmitPackage:
Source: sender
Thread: app_send
Type: cycle
PrrtEncodeStart:
Source: sender
Type: cycle
PrrtEncodeEnd:
PrrtSendEnd:
Source: sender
Type: cycle
Thread: app_send
Type: time
PrrtTransmitStart:
Source: sender
Thread: trans_send
Type: time
PrrtTransmitEnd:
Source: sender
Thread: trans_send
Type: cycle
PrrtTransmitStart:
PrrtEncodeStart:
Source: sender
Thread: trans_send
Type: cycle
PrrtTransmitEnd:
PrrtEncodeEnd:
Source: sender
Thread: trans_send
Type: cycle
LinkTransmitStart:
Source: sender
Thread: trans_send
Type: cycle
ChannelTransmit:
Source: sender
Thread: trans_send
Type: time
LinkTransmitEnd:
Source: sender
Thread: trans_send
Type: time
LinkReceive:
ChannelReceive:
Source: receiver
Thread: trans_recv
Type: time
PrrtDeliver:
LinkReceive:
Source: receiver
Thread: trans_recv
Type: time
SendFeedbackStart:
Source: receiver
Thread: trans_recv
Type: cycle
SendFeedbackEnd:
Source: receiver
Thread: trans_recv
Type: cycle
DecodeStart:
Source: receiver
Thread: trans_recv
Type: cycle
DecodeEnd:
Source: receiver
Thread: trans_recv
Type: cycle
HandlePacketStart:
Source: receiver
Thread: trans_recv
Type: cycle
HandlePacketEnd:
Source: receiver
Thread: trans_recv
Type: cycle
PrrtReturnPackage:
Source: receiver
Thread: trans_recv
Type: time
PrrtDeliver:
Source: receiver
Thread: app_recv
Type: time
CopyOutputStart:
Source: receiver
Thread: app_recv
Type: cycle
CopyOutputEnd:
Source: receiver
Type: cycle
PrrtReturnPackage:
Source: receiver
Thread: app_recv
Type: cycle
PrrtReceivePackage:
Source: receiver
Thread: app_recv
Type: time
durations:
Send:
......@@ -129,6 +168,7 @@ durations:
Start: DecodeStart
Stop: DecodeEnd
Source: receiver
packet_types:
Data: 0
Redundancy: 1
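Compared to the earlier configuration, cycle_reference is now nested per source (sender, receiver) and then per thread. A minimal sketch of reading that nesting, assuming the file is loaded with PyYAML; the file name nsdi2018.yml is hypothetical.

import yaml

with open("nsdi2018.yml") as f:  # hypothetical file name
    cfg = yaml.safe_load(f)

def cycle_window(cfg, source, thread):
    # (Start, Stop) stamps that bound the cycle-counter reference interval
    # for one thread of one source.
    ref = cfg["cycle_reference"][source][thread]
    return ref["Start"], ref["Stop"]

print(cycle_window(cfg, "receiver", "app_recv"))  # ('PrrtReceivePackage', 'PrrtDeliver')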
......@@ -28,12 +28,13 @@ def jitter_causes(df, durations, export=False, file_name=None):
print("Outliers:", len(outliers), ";", "Threshold[us]:", threshold)
def trace_jitter(data_frame, export=False, file_name=None):
def trace_jitter(data_frame, threshold=None, export=False, file_name=None):
"""
Displays (and saves) a stacked boxplot of durations.
"""
thresh = get_outlier_threshold(data_frame["EndToEnd_D"].describe())
df_no_outliers = data_frame[data_frame["EndToEnd_D"] <= thresh]
if threshold is None:
threshold = get_outlier_threshold(data_frame["EndToEnd_D"].describe())
df_no_outliers = data_frame[data_frame["EndToEnd_D"] <= threshold]
box(df_no_outliers, export, file_name)
print("{} / {} are no outliers.".format(len(df_no_outliers), len(data_frame)))
fig = plt.gcf()
......
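The new threshold parameter makes the outlier cut-off overridable while keeping the previous behaviour when it is omitted. A hedged usage sketch, assuming df is a data frame that already carries an EndToEnd_D column in microseconds (as the Threshold[us] output above suggests):

# Default: derive the cut-off from the EndToEnd_D distribution, as before.
trace_jitter(df)

# Comparable plots across runs: pin the cut-off explicitly, e.g. 500 us.
trace_jitter(df, threshold=500, export=True, file_name="jitter.pdf")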
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr
import numpy as np
import sys
import graphviz
class LatencyAnalysis:
    def __init__(self, cfg=None, hdb=None):
        self.cfg = cfg
        correlations = []
        labels = []
        for x in hdb:
            correlations += [x["Correlation"]]
            labels += ["{} -> {}".format(x["Start"], x["End"])]
        corr = pd.Series(correlations, index=labels)
        self.corr = corr
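LatencyAnalysis merely repackages the per-edge correlations of a happens-directly-before list into a labelled pandas Series. A small sketch with invented values; the Start/End/Correlation keys follow the loop above:

hdb = [
    {"Start": "PrrtSendStart_T", "End": "PrrtSubmitPackage_T", "Correlation": 0.12},
    {"Start": "LinkReceive_T", "End": "PrrtReturnPackage_T", "Correlation": 0.57},
]
analysis = LatencyAnalysis(cfg=None, hdb=hdb)
print(analysis.corr.sort_values(ascending=False))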
def _get_thread_for_event(config, e):
    name = str(e)[:-2]
    try:
        return config["stamps"][name]["Thread"]
    except KeyError:
        print("Cannot find {}".format(name), file=sys.stderr)
        return None
def _happens_before(df, a, b, config):
    """
    Check if a happens-before b in the trace.
    """
    # Check whether a and b occur in the same thread. If so, a and b cannot be
    # concurrent.
    ta = _get_thread_for_event(config, a)
    tb = _get_thread_for_event(config, b)
    cname_a = str(a)[:-2] + "_C"
    cname_b = str(b)[:-2] + "_C"
    if ta == tb and ta is not None and tb is not None:
        tg = df[a] > df[b]
        if tg.any():
            return False
        # Where the timestamps tie, fall back to the cyclestamps, but only
        # trust them if b's cyclestamp is actually set (nonzero).
        df2 = df[df[a] == df[b]]
        return not ((df2[cname_a] > df2[cname_b]) & (df2[cname_b] != 0)).any()
    # Since a and b occur in different threads, we cannot compare cyclestamps.
    # If in doubt, a and b are concurrent.
    return not (df[a] >= df[b]).any()
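To make the ordering rule concrete, a toy sketch: two stamps recorded in the same thread, where the cyclestamps break the tie whenever the coarser timestamps are equal. The thread name and all values are invented:

import pandas as pd

toy_cfg = {"stamps": {"A": {"Thread": "app_send"}, "B": {"Thread": "app_send"}}}
toy_df = pd.DataFrame({
    "A_T": [10, 20], "A_C": [100, 205],
    "B_T": [10, 25], "B_C": [110, 300],
})
# A_T never exceeds B_T, and where the timestamps tie, A_C < B_C (and B_C != 0),
# so A happens-before B in this trace.
print(_happens_before(toy_df, "A_T", "B_T", toy_cfg))  # True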
def _fast_happens_before(df, a, b, hb):
    """
    Check if a happens-before b, using a pre-computed relation.
    """
    return any(r['Start'] == str(a) and r['End'] == str(b) for r in hb)
def _happens_directly_before(df, a, b, hb):
    """
    Check if a happens-directly-before b in the trace.
    """
    if not _fast_happens_before(df, a, b, hb):
        return False
    for event in df:
        if str(event) == str(a) or str(event) == str(b):
            continue
        if _fast_happens_before(df, a, event, hb) and _fast_happens_before(df, event, b, hb):
            return False
    return True
def _locally_happens_directly_before(df, a, b, hb, config):
    r"""
    Check if a happens-directly-before b in the trace, but be a little more
    tolerant regarding intra-thread durations. Consider the following scenario:

        Thread A: A1 -> A2 -> A3
        Thread B: B1 -> B2

    This setup can result in the following happens-directly-before graph:

        A1  B1
          \ /
           A2
          / \
        A3  B2

    In this case, B1 does not happen-directly-before B2 because, by chance, A2
    happens in between. If we ignore this anomaly, we only analyse <B1,A2> and
    <A2,B2>. Both ranges probably have low latency criticality, since their
    durations are random (they depend on thread interleavings). However, we
    really want to analyse <B1,B2>, because that pair represents a meaningful
    range in thread B.

    This function therefore computes a modified happens-directly-before graph
    with relaxed restrictions for intra-thread ranges:

        A1  B1
          \ /|
           A2|
          / \|
        A3  B2
    """
    if not _fast_happens_before(df, a, b, hb):
        return False
    ta = _get_thread_for_event(config, a)
    tb = _get_thread_for_event(config, b)
    if ta == tb and ta is not None and tb is not None:
        # Within a thread, only events of that same thread may break the
        # happens-directly-before relation.
        for c in df:
            if _get_thread_for_event(config, c) != ta:
                continue
            if _fast_happens_before(df, a, c, hb) and _fast_happens_before(df, c, b, hb):
                return False
        return True
    else:
        return _happens_directly_before(df, a, b, hb)
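The difference between the strict and the relaxed relation can be reproduced with the docstring scenario. A self-contained toy sketch (threads A/B and all values are invented; hb is built the way the helpers above expect it, as {'Start', 'End'} dicts over the *_T columns):

import pandas as pd

# One trace row in which A2 interleaves between B1 and B2.
toy_df = pd.DataFrame({
    "A1_T": [1], "A1_C": [10],
    "A2_T": [3], "A2_C": [30],
    "A3_T": [5], "A3_C": [50],
    "B1_T": [2], "B1_C": [0],
    "B2_T": [4], "B2_C": [0],
})
toy_cfg = {"stamps": {
    "A1": {"Thread": "A"}, "A2": {"Thread": "A"}, "A3": {"Thread": "A"},
    "B1": {"Thread": "B"}, "B2": {"Thread": "B"},
}}
t_cols = [c for c in toy_df.columns if c.endswith("_T")]
hb = [{"Start": a, "End": b}
      for a in t_cols for b in t_cols
      if a != b and _happens_before(toy_df, a, b, toy_cfg)]

print(_happens_directly_before(toy_df, "B1_T", "B2_T", hb))                   # False: A2 slips in between
print(_locally_happens_directly_before(toy_df, "B1_T", "B2_T", hb, toy_cfg))  # True: intra-thread edge kept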
def _plot_controlflow_graph(df, hdb):
    """
    Generate the control flow graph using dot.
    """
    t_columns = [x for x in df.columns if x.endswith("_T")]
    graph = graphviz.Digraph(filename="graph", format="pdf")
    for event1 in df[t_columns]:
        graph.node(str(event1)[:-2])
    for edge in hdb:
        graph.edge(edge["Start"][:-2], edge["End"][:-2])
    graph.render()  # saves to graph.pdf in the local folder
    return graph
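A minimal usage sketch for the plotting helper, with a hand-made trace and edge list (rendering additionally needs the Graphviz dot binary installed on the system):

import pandas as pd

demo_df = pd.DataFrame({"A_T": [1], "B_T": [2]})
demo_hdb = [{"Start": "A_T", "End": "B_T", "Correlation": 0.0}]
_plot_controlflow_graph(demo_df, demo_hdb)  # writes graph / graph.pdf into the working directory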
# Adapted from: http://composition.al/blog/2015/11/29/a-better-way-to-add-labels-to-bar-charts-with-matplotlib/
def autolabel(rects, ax, labels):
    # Get the x-axis width to calculate label positions from.
    (x_left, x_right) = ax.get_xlim()
    x_width = x_right - x_left
    for i, rect in enumerate(rects):
        width = rect.get_width()
        color = "black"
        align = "left"
        # Fraction of the axis width taken up by this bar.
        p_width = (width / x_width)
        # If the label fits inside the bar, put it there (right-aligned, in white);
        # otherwise, place it just to the right of the bar.
        if p_width > 0.50:  # arbitrary; 50% looked good here
            label_position = width - (x_width) + 0.7
            color = "white"
            align = "right"
        else:
            label_position = width + (x_width * 0.01)
        ax.text(label_position, rect.get_y(), labels[i], ha=align, va='bottom', rotation=0, color=color)
def _plot_critical_regions(hdb):
    """
    Plot regions, sorted by latency criticality.
    """
    # Sorting ascending by correlation puts the most critical range at the top
    # of the horizontal bar chart.
    relevant = sorted([x for x in hdb if x['Correlation'] > 0], key=lambda x: -x['Correlation'], reverse=True)
    x = np.arange(len(relevant))
    correlations = list(map(lambda x: x['Correlation'], relevant))
    ticks = list(map(lambda x: "%s-%s" % (x['Start'][:-2], x['End'][:-2]), relevant))
    fig, ax = plt.subplots()
    rects = ax.barh(x, correlations, align="center", tick_label="")
    autolabel(rects, ax, ticks)
    plt.tight_layout()
    plt.savefig("latency-criticality.pdf")
    plt.close()
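_plot_critical_regions consumes the same edge dictionaries, now using their Correlation values, and writes latency-criticality.pdf. A short sketch with invented numbers:

_plot_critical_regions([
    {"Start": "PrrtSendStart_T", "End": "PrrtSubmitPackage_T", "Correlation": 0.12},
    {"Start": "LinkReceive_T", "End": "PrrtReturnPackage_T", "Correlation": 0.57},
])  # one bar per range, most critical at the top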
def analyse(df, config):
    hb = []
    events = [column for