########################################################
#
# Copyright (c) 2003-2008 by University of Queensland
# Earth Systems Science Computational Center (ESSCC)
# http://www.uq.edu.au/esscc
#
# Primary Business: Queensland, Australia
# Licensed under the Open Software License version 3.0
# http://www.opensource.org/licenses/osl-3.0.php
#
########################################################

__copyright__="""Copyright (c) 2003-2008 by University of Queensland
Earth Systems Science Computational Center (ESSCC)
http://www.uq.edu.au/esscc
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Open Software License version 3.0
http://www.opensource.org/licenses/osl-3.0.php"""
__url__="http://www.uq.edu.au/esscc/escript-finley"

# $Id:$

"""
A simple framework to run benchmarks under OpenMP and to summarize the
results in tables, for instance in HTML.

@var __author__: name of author
@var __license__: licence agreement
@var __copyright__: copyrights
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"

import os,socket,time,sys,traceback
from esys.escript import setNumberOfThreads

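# Typical use (a sketch only; MyProblem and MyFilter are hypothetical
# subclasses of BenchmarkProblem and BenchmarkFilter; a runnable demo sits
# under __main__ at the end of this file):
#
#   bm=Benchmark("my benchmark")
#   bm.addProblem(MyProblem())
#   bm.addOptions(Options())
#   suite=BenchmarkSuite("my suite")
#   suite.addBenchmark(bm)
#   suite.run(scale=4)
#   print suite.getHTML(filter=MyFilter())
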
class BenchmarkSuite(object):
    """
    Framework to run a set of L{Benchmark}s and to create a table of
    statistics.

    @cvar MAX_LEVEL: maximum number of levels in headers for output
    """
    MAX_LEVEL=5
    def __init__(self,name=None):
        """
        Sets up a suite of benchmarks.

        @param name: name of the benchmark suite. If no name is given the
                     class name is used.
        @type name: C{str}
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        Returns the name of the benchmark suite.

        @return: the name
        @rtype: C{str}
        """
        return self.__name

    def addBenchmark(self,benchmark):
        """
        Adds a new L{Benchmark} to the suite.

        @param benchmark: the benchmark to add
        @type benchmark: L{Benchmark}
        """
        self.__benchmarks.append(benchmark)

    def __len__(self):
        """
        Returns the number of benchmarks in the suite.

        @return: number of benchmarks
        @rtype: C{int}
        """
        return len(self.__benchmarks)

    def __getitem__(self,i):
        """
        Returns the i-th benchmark in the suite through C{self[i]}.

        @param i: index of the requested benchmark
        @type i: C{int}
        @return: i-th benchmark
        @rtype: L{Benchmark}
        """
        return self.__benchmarks[i]

    def run(self,scale=1):
        """
        Runs all benchmarks.

        @param scale: defines the number of (OpenMP) threads to be used. If
                      C{scale} is a scalar, all benchmarks are run with
                      C{scale} threads. If C{scale} is a C{list}, the p-th
                      problem in each of the benchmarks in the suite is run
                      with C{scale[p]} threads. If C{scale[p]}<1 the p-th
                      problem is omitted.
        @type scale: C{int} or C{list} of C{int}s
        """
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale=scale)

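    # For instance (a sketch of the C{scale} semantics described above):
    # suite.run(scale=[4,1,0]) runs problem 0 of every benchmark with 4
    # threads, problem 1 with a single thread, and skips problem 2.
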
    def getHTML(self,filter,level=1):
        """
        Returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
            out+="</p>\n"
        if level==1:
            # os.getlogin() can fail when there is no controlling terminal
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out


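# Writing a suite report to disk might look like this (a sketch;
# "report.html" and MyFilter are arbitrary examples):
#
#   open("report.html","w").write(suite.getHTML(filter=MyFilter()))
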
class Benchmark(object):
    """
    Runs a set of similar L{BenchmarkProblem}s with a set of L{Options}.
    """
    def __init__(self,name=None,description=None):
        """
        Sets up a benchmark.

        @param name: name of the benchmark. If no name is given the class
                     name is used.
        @type name: C{str}
        @param description: description of the benchmark
        @type description: C{str} or C{None}
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        Returns the name of the benchmark.

        @return: the name
        @rtype: C{str}
        """
        return self.__name

    def addProblem(self,problem):
        """
        Adds a problem to the benchmark.

        @param problem: the problem to be added
        @type problem: L{BenchmarkProblem}
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        Adds options to the benchmark.

        @param options: the options to be added to the benchmark. If
                        C{options} is C{None} the options are left unchanged.
        @type options: L{Options}
        """
        if options is not None: self.__options.append(options)

    def run(self,scale=1):
        """
        Runs all problems with all options.

        @param scale: defines the number of (OpenMP) threads to be used. If
                      C{scale} is a scalar, all problems are run with
                      C{scale} threads. If C{scale} is a C{list}, the p-th
                      problem is run with C{scale[p]} threads. If
                      C{scale[p]}<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s
        """
        if isinstance(scale,list):
            c_max=min(len(scale),len(self.__problems))
        else:
            c_max=len(self.__problems)
        self.__scale=scale
        self.__results=[]
        for c in range(c_max):
            r=self.__problems[c]
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            if s>0:
                t0=time.time()
                print "%s with %s threads started."%(r.__class__,s)
                for p in self.__options:
                    setNumberOfThreads(s)
                    try:
                        row.append(r.run(p))
                    except Exception:
                        # record the failure and continue with the
                        # remaining options
                        traceback.print_exc(file=sys.stdout)
                        row.append(None)
                t0=time.time()-t0
                print "%s with %s threads finished (walltime=%s sec)."%(r.__class__,s,t0)
            self.__results.append(row)

    def getHTML(self,filter,level=1):
        """
        Returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            # the table has one row per problem; each options column is
            # split into one sub-column per name returned by the filter
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            for o in self.__options:
                if len(rn)<2:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                    colspan=1
                else:
                    colspan=len(rn)
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(colspan,str(o))
                h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list):
                    out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[r]
                for col in self.__results[r]:
                    if col is None:
                        out+="<TD ALIGN=\"center\" COLSPAN=%s>failed.</TD>"%colspan
                    else:
                        for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
            out+="</TABLE>"
        if level==1:
            # os.getlogin() can fail when there is no controlling terminal
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out

class BenchmarkProblem(object):
    """
    Represents a benchmark problem that can be run and returns a list of
    characteristics such as timing, MFlops, error, etc.
    """
    def __init__(self,name=None):
        """
        Sets up a benchmark problem.

        @param name: name of the problem. If no name is given the class
                     name is used.
        @type name: C{str}
        """
        super(BenchmarkProblem,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        Returns the name of the benchmark problem.

        @return: the name
        @rtype: C{str}
        """
        return self.__name

    def run(self,options=None):
        """
        Runs the problem and returns a list of run characteristics.

        @param options: the options that are used for the run. Note that the
                        number of OpenMP threads is controlled by the
                        L{Benchmark} the problem is run in.
        @type options: L{Options}
        @return: run characteristics
        @rtype: any type that can be read by the L{BenchmarkFilter} applied
                to it
        @note: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError
        return []
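
    # A minimal sketch of a concrete problem (hypothetical; compare the
    # test classes under __main__ below):
    #
    #   class MyProblem(BenchmarkProblem):
    #       def run(self,options=None):
    #           t0=time.time()
    #           # ... do the actual work here ...
    #           return [time.time()-t0]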

class BenchmarkFilter(object):
    """
    Object to filter the characteristics returned by Benchmark runs.
    """
    def __init__(self):
        """
        Sets up a filter.
        """
        pass

    def getResultNames(self):
        """
        Returns the names of the results produced when C{run()} is called.

        @return: the list of names to be used when the results of the
                 C{run()} call are printed
        @rtype: C{list} of C{str}
        @note: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError
        return []

    def __call__(self,result):
        """
        Filters out results returned as characteristics of a problem run.

        @param result: values to be filtered
        @type result: any type that is produced by the L{BenchmarkProblem}
                      it is applied to
        @return: a list of strings selected from result
        @rtype: C{list} of C{str}
        @note: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError
        return []
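
    # A sketch of a simple filter that reports only the first
    # characteristic, assuming problems return [walltime] (hypothetical):
    #
    #   class TimeFilter(BenchmarkFilter):
    #       def getResultNames(self):
    #           return ["time [sec]"]
    #       def __call__(self,result):
    #           return [str(result[0])]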


class Options(object):
    """
    Defines a set of options to be used to run a L{BenchmarkProblem}.
    """
    def __init__(self,name=None):
        """
        Sets up the options.

        @param name: name of the options object. If no name is given the
                     class name is used.
        @type name: C{str}
        """
        super(Options,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        Returns the name of this options object.

        @return: the name
        @rtype: C{str}
        """
        return self.__name

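# Options subclasses typically carry whatever attributes a problem's run()
# method wants to read; a hypothetical sketch:
#
#   class SolverOptions(Options):
#       def __init__(self,tolerance=1.e-8):
#           super(SolverOptions,self).__init__(name="tol=%s"%tolerance)
#           self.tolerance=tolerance
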
if __name__=="__main__":
    # self-test and usage demo

    class OptionsTest1(Options):
        pass
    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            import time
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            import time
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]
        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    bm=Benchmark("Example")
    bm.addProblem(BenchmarkProblemTest1())
    bm.addProblem(BenchmarkProblemTest2())
    bm.addOptions(OptionsTest1())
    bm.addOptions(OptionsTest2())

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    # all problems, single thread
    bms.run()
    print bms.getHTML(filter=SimpleFilter())

    # all problems, 4 threads each
    bms.run(scale=4)
    print bms.getHTML(filter=SimpleFilter())

    # problem 0 with 1 thread, problem 1 with 2 threads
    bms.run(scale=[1,2])
    print bms.getHTML(filter=SimpleFilter())