#!/usr/bin/env python

import testlog_parser, sys, os, glob, re
import xml.parsers.expat  # imported explicitly so the ExpatError handler below resolves
from table_formatter import *
from optparse import OptionParser

numeric_re = re.compile(r"(\d+)")
cvtype_re = re.compile(r"(8U|8S|16U|16S|32S|32F|64F)C(\d{1,3})")
cvtypes = { '8U': 0, '8S': 1, '16U': 2, '16S': 3, '32S': 4, '32F': 5, '64F': 6 }
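
# Natural-order sort key for test names: numeric substrings compare as
# integers, and OpenCV type tokens such as "32FC3" are first replaced by a
# single numeric rank (depth index + 8 * (channels - 1)), so types sort by
# channel count first and by depth within equal channel counts.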
convert = lambda text: int(text) if text.isdigit() else text
keyselector = lambda a: cvtype_re.sub(lambda match: " " + str(cvtypes.get(match.group(1), 7) + (int(match.group(2))-1) * 8) + " ", a)
alphanum_keyselector = lambda key: [ convert(c) for c in numeric_re.split(keyselector(key)) ]
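
# Build the column header for one test set: the alias given via --columns if
# available, otherwise (or below it, in the long form) the log file name with
# underscores turned into line breaks.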
def getSetName(tset, idx, columns, short = True):
    if columns and len(columns) > idx:
        prefix = columns[idx]
    else:
        prefix = None
    if short and prefix:
        return prefix
    name = tset[0].replace(".xml","").replace("_", "\n")
    if prefix:
        return prefix + "\n" + ("-"*int(len(max(prefix.split("\n"), key=len))*1.5)) + "\n" + name
    return name

if __name__ == "__main__":
    if len(sys.argv) < 2:
        print >> sys.stderr, "Usage:\n", os.path.basename(sys.argv[0]), "<log_name1>.xml [<log_name2>.xml ...]"
        exit(0)
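    # Example invocation (script and log names are illustrative):
    #   summary.py core_run1.xml core_run2.xml -m gmean -u ms -o html > report.html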

    parser = OptionParser()
    parser.add_option("-o", "--output", dest="format", help="output format ('txt', 'html', 'moinwiki' or 'auto' - default)", metavar="FMT", default="auto")
    parser.add_option("-m", "--metric", dest="metric", help="output metric", metavar="NAME", default="gmean")
    parser.add_option("-u", "--units", dest="units", help="units for output values (s, ms (default), mks, ns or ticks)", metavar="UNITS", default="ms")
    parser.add_option("-f", "--filter", dest="filter", help="regex to filter tests", metavar="REGEX", default=None)
    parser.add_option("", "--module", dest="module", default=None, metavar="NAME", help="module prefix for test names")
    parser.add_option("", "--columns", dest="columns", default=None, metavar="NAMES", help="comma-separated list of column aliases")
    parser.add_option("", "--no-relatives", action="store_false", dest="calc_relatives", default=True, help="do not output relative values")
    parser.add_option("", "--with-cycles-reduction", action="store_true", dest="calc_cr", default=False, help="output cycle reduction percentages")
    parser.add_option("", "--with-score", action="store_true", dest="calc_score", default=False, help="output automatic classification of speedups")
    parser.add_option("", "--progress", action="store_true", dest="progress_mode", default=False, help="compare each test set against the previous one instead of the first")
    parser.add_option("", "--show-all", action="store_true", dest="showall", default=False, help="also include empty and \"notrun\" lines")
    parser.add_option("", "--match", dest="match", default=None, metavar="REGEX", help="regex applied to test names before merging")
    parser.add_option("", "--match-replace", dest="match_replace", default="", metavar="STR", help="replacement string for --match substitutions")
    parser.add_option("", "--regressions-only", dest="regressionsOnly", default=None, metavar="X-FACTOR", help="show only tests with relative performance below X-FACTOR")
    (options, args) = parser.parse_args()

    options.generateHtml = detectHtmlOutputType(options.format)
    if options.metric not in metrix_table:
        options.metric = "gmean"
    if options.metric.endswith("%") or options.metric.endswith("$"):
        options.calc_relatives = False
        options.calc_cr = False
    if options.columns:
        options.columns = [s.strip().replace("\\n", "\n") for s in options.columns.split(",")]

    # expand wildcards and filter duplicates
    files = []
    seen = set()
    for arg in args:
        if ("*" in arg) or ("?" in arg):
            flist = [os.path.abspath(f) for f in glob.glob(arg)]
            flist = sorted(flist, key= lambda text: str(text).replace("M", "_"))
            # set.add() returns None, so "not seen.add(x)" records x as a side
            # effect while keeping only the first occurrence
            files.extend([x for x in flist if x not in seen and not seen.add(x)])
        else:
            fname = os.path.abspath(arg)
            if fname not in seen and not seen.add(fname):
                files.append(fname)

    # read all passed files
    test_sets = []
    for arg in files:
        try:
            tests = testlog_parser.parseLogFile(arg)
            if options.filter:
                expr = re.compile(options.filter)
                tests = [t for t in tests if expr.search(str(t))]
            if options.match:
                tests = [t for t in tests if t.get("status") != "notrun"]
            if tests:
                test_sets.append((os.path.basename(arg), tests))
        except IOError as err:
            sys.stderr.write("IOError reading \"" + arg + "\" - " + str(err) + os.linesep)
        except xml.parsers.expat.ExpatError as err:
            sys.stderr.write("ExpatError reading \"" + arg + "\" - " + str(err) + os.linesep)

    if not test_sets:
        sys.stderr.write("Error: no test data found" + os.linesep)
        quit()
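
    # Merge the logs: test_cases maps each (possibly rewritten) test name to a
    # list with one slot per input log, so a test missing from a log stays
    # visible as None.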
    # find matches
    setsCount = len(test_sets)
    test_cases = {}

    name_extractor = lambda name: str(name)
    if options.match:
        reg = re.compile(options.match)
        name_extractor = lambda name: reg.sub(options.match_replace, str(name))

    for i in range(setsCount):
        for case in test_sets[i][1]:
            name = name_extractor(case)
            if options.module:
                name = options.module + "::" + name
            if name not in test_cases:
                test_cases[name] = [None] * setsCount
            test_cases[name][i] = case
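
    # metrix_table (from table_formatter) maps a metric name to a
    # (title, getter) pair; the "%"- and "$"-suffixed entries produce the
    # relative x-factor and cycles-reduction values, and "score" classifies
    # the speedup.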
    # build table
    getter = metrix_table[options.metric][1]
    getter_score = metrix_table["score"][1] if options.calc_score else None
    getter_p = metrix_table[options.metric + "%"][1] if options.calc_relatives else None
    getter_cr = metrix_table[options.metric + "$"][1] if options.calc_cr else None
    tbl = table(metrix_table[options.metric][0])

    # header
    tbl.newColumn("name", "Name of Test", align = "left", cssclass = "col_name")
    i = 0
    for tset in test_sets:
        tbl.newColumn(str(i), getSetName(tset, i, options.columns, False), align = "center")
        i += 1
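
    # Every comparison column is measured against the first test set (or, in
    # --progress mode, against the previous set), so the extra columns start
    # at index 1.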
    metric_sets = test_sets[1:]

    if options.calc_cr:
        i = 1
        for tset in metric_sets:
            reference = getSetName(test_sets[0], 0, options.columns) if not options.progress_mode else 'previous'
            tbl.newColumn(str(i) + "$", getSetName(tset, i, options.columns) + "\nvs\n" + reference + "\n(cycles reduction)", align = "center", cssclass = "col_cr")
            i += 1

    if options.calc_relatives:
        i = 1
        for tset in metric_sets:
            reference = getSetName(test_sets[0], 0, options.columns) if not options.progress_mode else 'previous'
            tbl.newColumn(str(i) + "%", getSetName(tset, i, options.columns) + "\nvs\n" + reference + "\n(x-factor)", align = "center", cssclass = "col_rel")
            i += 1

    if options.calc_score:
        i = 1
        for tset in metric_sets:
            reference = getSetName(test_sets[0], 0, options.columns) if not options.progress_mode else 'previous'
            tbl.newColumn(str(i) + "S", getSetName(tset, i, options.columns) + "\nvs\n" + reference + "\n(score)", align = "center", cssclass = "col_name")
            i += 1
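
    # Rows are created lazily: unless --show-all is given, a row that ends up
    # with no runnable data is reused for the next test name and trimmed at
    # the end instead of being emitted empty.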
    # rows
    prevGroupName = None
    needNewRow = True
    lastRow = None
    for name in sorted(test_cases.iterkeys(), key=alphanum_keyselector):
        cases = test_cases[name]
        if needNewRow:
            lastRow = tbl.newRow()
            if not options.showall:
                needNewRow = False

        tbl.newCell("name", name)

        groupName = next(c for c in cases if c).shortName()
        if groupName != prevGroupName:
            prop = lastRow.props.get("cssclass", "")
            if "firstingroup" not in prop:
                lastRow.props["cssclass"] = prop + " firstingroup"
            prevGroupName = groupName

        for i in range(setsCount):
            case = cases[i]
            if case is None:
                # the test is absent from this log - fill its cells with dashes
                tbl.newCell(str(i), "-")
                if options.calc_relatives and i > 0:
                    tbl.newCell(str(i) + "%", "-")
                if options.calc_cr and i > 0:
                    tbl.newCell(str(i) + "$", "-")
                if options.calc_score and i > 0:
                    tbl.newCell(str(i) + "S", "-")
            else:
                status = case.get("status")
                if status != "run":
                    tbl.newCell(str(i), status, color = "red")
                    if status != "notrun":
                        needNewRow = True
                    if options.calc_relatives and i > 0:
                        tbl.newCell(str(i) + "%", "-", color = "red")
                    if options.calc_cr and i > 0:
                        tbl.newCell(str(i) + "$", "-", color = "red")
                    if options.calc_score and i > 0:
                        tbl.newCell(str(i) + "S", "-", color = "red")
                else:
                    val = getter(case, cases[0], options.units)

                    # evaluate a comparison getter against the reference run:
                    # the first set or, in progress mode, the nearest earlier
                    # set that actually ran
                    def getter_fn(fn):
                        if fn and i > 0 and val:
                            for j in reversed(range(i)) if options.progress_mode else [0]:
                                r = cases[j]
                                if r is not None and r.get("status") == 'run':
                                    return fn(case, r, options.units)
                        return None

                    valp = getter_fn(getter_p) if options.calc_relatives or options.progress_mode else None
                    valcr = getter_fn(getter_cr) if options.calc_cr else None
                    val_score = getter_fn(getter_score) if options.calc_score else None

                    # highlight speedups (> 1.05x) in green, slowdowns (< 0.95x) in red
                    if not valp or i == 0:
                        color = None
                    elif valp > 1.05:
                        color = "green"
                    elif valp < 0.95:
                        color = "red"
                    else:
                        color = None

                    if val:
                        needNewRow = True
                    tbl.newCell(str(i), formatValue(val, options.metric, options.units), val, color = color)
                    if options.calc_relatives and i > 0:
                        tbl.newCell(str(i) + "%", formatValue(valp, "%"), valp, color = color, bold = color)
                    if options.calc_cr and i > 0:
                        tbl.newCell(str(i) + "$", formatValue(valcr, "$"), valcr, color = color, bold = color)
                    if options.calc_score and i > 0:
                        tbl.newCell(str(i) + "S", formatValue(val_score, "S"), val_score, color = color, bold = color)

    if not needNewRow:
        tbl.trimLastRow()
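
    # With --regressions-only, drop every row whose comparison cells (the last
    # len(metric_sets) cells) all stay at or above the given x-factor.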
    if options.regressionsOnly:
        for r in reversed(range(len(tbl.rows))):
            delete = True
            i = 1
            for _ in metric_sets:
                val = tbl.rows[r].cells[len(tbl.rows[r].cells) - i].value
                if val is not None and val < float(options.regressionsOnly):
                    delete = False
                i += 1
            if delete:
                tbl.rows.pop(r)

    # output table
    if options.generateHtml:
        if options.format == "moinwiki":
            tbl.htmlPrintTable(sys.stdout, True)
        else:
            htmlPrintHeader(sys.stdout, "Summary report for %s tests from %s test logs" % (len(test_cases), setsCount))
            tbl.htmlPrintTable(sys.stdout)
            htmlPrintFooter(sys.stdout)
    else:
        tbl.consolePrintTable(sys.stdout)

    if options.regressionsOnly:
        sys.exit(len(tbl.rows))