# Copyright 2017 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Classes that draw conclusions out of a comparison and represent them."""

from collections import Counter

FORMAT_RED = '\033[01;31m{0}\033[00m'
FORMAT_GREEN = '\033[01;32m{0}\033[00m'
FORMAT_MAGENTA = '\033[01;35m{0}\033[00m'
FORMAT_CYAN = '\033[01;36m{0}\033[00m'
FORMAT_NORMAL = '{0}'

RATING_FAILURE = 'failure'
RATING_REGRESSION = 'regression'
RATING_IMPROVEMENT = 'improvement'
RATING_NO_CHANGE = 'no_change'
RATING_SMALL_CHANGE = 'small_change'

RATINGS = [
RATING_FAILURE,
RATING_REGRESSION,
RATING_IMPROVEMENT,
RATING_NO_CHANGE,
RATING_SMALL_CHANGE
]

RATING_TO_COLOR = {
RATING_FAILURE: FORMAT_MAGENTA,
RATING_REGRESSION: FORMAT_RED,
RATING_IMPROVEMENT: FORMAT_CYAN,
RATING_NO_CHANGE: FORMAT_GREEN,
RATING_SMALL_CHANGE: FORMAT_NORMAL,
}


class ComparisonConclusions(object):
"""All conclusions drawn from a comparison.

  This is initialized empty and then processes a pair of results for each
  test case, determining the rating for that case, which can be:
  "failure" if either or both runs for the case failed.
  "regression" if there is a significant increase in time for the test case.
  "improvement" if there is a significant decrease in time for the test case.
  "no_change" if the time for the test case did not change at all.
  "small_change" if the time for the test case changed but stayed within the
    significance threshold.
"""

  def __init__(self, threshold_significant):
"""Initializes an empty ComparisonConclusions.
Args:
threshold_significant: Float with the tolerance beyond which changes in
measurements are considered significant.
        The change is treated as multiplicative rather than additive; that
        is, a threshold_significant of 1.0 will flag test cases that became
        over 100% slower (> 200% of the previous time measured) or over 100%
        faster (< 50% of the previous time measured).
threshold_significant 0.02 -> 98.04% to 102% is not significant
threshold_significant 0.1 -> 90.9% to 110% is not significant
threshold_significant 0.25 -> 80% to 125% is not significant
threshold_significant 1 -> 50% to 200% is not significant
threshold_significant 4 -> 20% to 500% is not significant
"""
self.threshold_significant = threshold_significant
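    # The negative threshold is the multiplicative inverse of the positive
    # one, so slowdowns and speedups by the same factor are flagged
    # symmetrically: with threshold_significant = 1.0, after/before must
    # exceed 2.0 (ratio > 1.0) or fall below 0.5 (ratio < -0.5) to count as
    # significant.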
self.threshold_significant_negative = (1 / (1 + threshold_significant)) - 1
self.params = {'threshold': threshold_significant}
self.summary = ComparisonSummary()
self.case_results = {}

  def ProcessCase(self, case_name, before, after):
    """Feeds one test case's results to the ComparisonConclusions.

Args:
case_name: String identifying the case.
before: Measurement for the "before" version of the code.
after: Measurement for the "after" version of the code.
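
    For example (illustrative values, assuming threshold_significant=0.02):
      conclusions.ProcessCase('test1.pdf', before=100, after=120)
      # ratio = 120 / 100 - 1 = 0.2 > 0.02, so the case is rated "regression".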
"""
    # Switch 0 to None to simplify the JSON output. A measurement of zero is
    # considered a failed run, so it will be serialized as "null".
if not before:
before = None
if not after:
after = None
if not before or not after:
ratio = None
rating = RATING_FAILURE
else:
ratio = (float(after) / before) - 1.0
if ratio > self.threshold_significant:
rating = RATING_REGRESSION
elif ratio < self.threshold_significant_negative:
rating = RATING_IMPROVEMENT
elif ratio == 0:
rating = RATING_NO_CHANGE
else:
rating = RATING_SMALL_CHANGE
case_result = CaseResult(case_name, before, after, ratio, rating)
self.summary.ProcessCaseResult(case_result)
self.case_results[case_name] = case_result

  def GetSummary(self):
"""Gets the ComparisonSummary with consolidated totals."""
return self.summary

  def GetCaseResults(self):
"""Gets a dict mapping each test case identifier to its CaseResult."""
return self.case_results

  def GetOutputDict(self):
"""Returns a conclusions dict with all the conclusions drawn.
Returns:
A serializable dict with the format illustrated below:
{
"params": {
"threshold": 0.02
},
"summary": {
"total": 123,
"failure": 1,
"regression": 2,
"improvement": 1,
"no_change": 100,
"small_change": 19
},
"comparison_by_case": {
"testing/resources/new_test.pdf": {
"before": None,
"after": 1000,
"ratio": None,
"rating": "failure"
},
"testing/resources/test1.pdf": {
"before": 100,
"after": 120,
"ratio": 0.2,
"rating": "regression"
},
"testing/resources/test2.pdf": {
"before": 100,
"after": 2000,
"ratio": 19.0,
"rating": "regression"
},
"testing/resources/test3.pdf": {
"before": 1000,
"after": 1005,
"ratio": 0.005,
"rating": "small_change"
},
"testing/resources/test4.pdf": {
"before": 1000,
"after": 1000,
"ratio": 0.0,
"rating": "no_change"
},
"testing/resources/test5.pdf": {
"before": 1000,
"after": 600,
"ratio": -0.4,
"rating": "improvement"
}
}
}
"""
output_dict = {}
output_dict['params'] = {'threshold': self.threshold_significant}
output_dict['summary'] = self.summary.GetOutputDict()
output_dict['comparison_by_case'] = {
cr.case_name: cr.GetOutputDict()
for cr in self.GetCaseResults().values()
}
return output_dict


class ComparisonSummary(object):
"""Totals computed for a comparison."""

  def __init__(self):
self.rating_counter = Counter()

  def ProcessCaseResult(self, case_result):
    """Accounts for a single test case result in the totals."""
self.rating_counter[case_result.rating] += 1

  def GetTotal(self):
"""Gets the number of test cases processed."""
return sum(self.rating_counter.values())

  def GetCount(self, rating):
"""Gets the number of test cases processed with a given rating."""
return self.rating_counter[rating]

  def GetOutputDict(self):
"""Returns a dict that can be serialized with all the totals."""
result = {'total': self.GetTotal()}
for rating in RATINGS:
result[rating] = self.GetCount(rating)
return result


class CaseResult(object):
"""The conclusion for the comparison of a single test case."""

  def __init__(self, case_name, before, after, ratio, rating):
    """Initializes a CaseResult.

Args:
case_name: String identifying the case.
before: Measurement for the "before" version of the code.
after: Measurement for the "after" version of the code.
ratio: Difference between |after| and |before| as a fraction of |before|.
rating: Rating for this test case.
"""
self.case_name = case_name
self.before = before
self.after = after
self.ratio = ratio
self.rating = rating

  def GetOutputDict(self):
"""Returns a dict with the test case's conclusions."""
return {'before': self.before,
'after': self.after,
'ratio': self.ratio,
'rating': self.rating}


def PrintConclusionsDictHumanReadable(conclusions_dict, colored, key=None):
"""Prints a conclusions dict in a human-readable way.
Args:
conclusions_dict: Dict to print.
colored: Whether to color the output to highlight significant changes.
    key: Optional string with the CaseResult output dict key ('before',
      'after', 'ratio' or 'rating') by which to sort the cases. Cases are
      sorted by name when this is None.
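
  For example, key='ratio' sorts the cases from the largest improvement (most
  negative ratio) to the largest regression; failed cases, whose ratio is
  None, are listed last.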
"""
# Print header
  print('=' * 80)
  print('{0:>11s} {1:>15s} {2}'.format('% Change', 'Time after', 'Test case'))
  print('-' * 80)
color = FORMAT_NORMAL
# Print cases
  if key is not None:
    # Sort by the requested key, putting failed cases (value None) last.
    case_pairs = sorted(
        conclusions_dict['comparison_by_case'].items(),
        key=lambda kv: (kv[1][key] is None, kv[1][key]))
  else:
    case_pairs = sorted(conclusions_dict['comparison_by_case'].items())
for case_name, case_dict in case_pairs:
    if case_dict['rating'] == RATING_FAILURE:
      # Honor the |colored| flag for failure lines as well.
      fail_color = (
          RATING_TO_COLOR[RATING_FAILURE] if colored else FORMAT_NORMAL)
      print('%s to measure time for %s' % (
          fail_color.format('Failed'), case_name))
      continue
if colored:
color = RATING_TO_COLOR[case_dict['rating']]
    print('{0} {1:15,d} {2}'.format(
        color.format('{:+11.4%}'.format(case_dict['ratio'])),
        case_dict['after'],
        case_name))
# Print totals
totals = conclusions_dict['summary']
  print('=' * 80)
  print('Test cases run: %d' % totals['total'])
  if colored:
    color = FORMAT_MAGENTA if totals[RATING_FAILURE] else FORMAT_GREEN
  print('Failed to measure: %s' % color.format(totals[RATING_FAILURE]))
  if colored:
    color = FORMAT_RED if totals[RATING_REGRESSION] else FORMAT_GREEN
  print('Regressions: %s' % color.format(totals[RATING_REGRESSION]))
  if colored:
    color = FORMAT_CYAN if totals[RATING_IMPROVEMENT] else FORMAT_GREEN
  print('Improvements: %s' % color.format(totals[RATING_IMPROVEMENT]))
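

if __name__ == '__main__':
  # Minimal illustrative demo, not part of the original tool: the file names
  # and timings below are hypothetical and only show the expected output.
  _demo = ComparisonConclusions(threshold_significant=0.02)
  _demo.ProcessCase('testing/resources/test1.pdf', before=100, after=120)
  _demo.ProcessCase('testing/resources/test4.pdf', before=1000, after=1000)
  _demo.ProcessCase('testing/resources/new_test.pdf', before=0, after=1000)
  PrintConclusionsDictHumanReadable(_demo.GetOutputDict(), colored=False)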