# $Id:$

"""
A simple framework for running benchmarks under OpenMP and for summarizing
the results in tables, for instance in HTML.

@var __author__: name of author
@var __license__: license agreement
@var __copyright__: copyrights
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"
__copyright__=""" Copyright (c) 2006 by ACcESS MNRF
                  http://www.access.edu.au
                  Primary Business: Queensland, Australia"""
__license__="""Licensed under the Open Software License version 3.0
               http://www.opensource.org/licenses/osl-3.0.php"""
__url__="http://www.iservo.edu.au/esys/escript"
__version__="$Revision$"
__date__="$Date$"

import os,socket,time,sys,traceback
from esys.escript import setNumberOfThreads

class BenchmarkSuite(object):
    """
    framework to run a set of L{Benchmark}s with the objective of creating a
    table of statistics.

    @cvar MAX_LEVEL: maximum number of levels in headers for output
    """
    MAX_LEVEL=5
    def __init__(self,name=None):
        """
        sets up a suite of benchmarks

        @param name: name of the benchmark suite. If no name is given the
                     class name is used.
        @type name: C{str}
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the benchmark suite

        @return: name
        @rtype: C{str}
        """
        return self.__name
    def addBenchmark(self,benchmark):
        """
        adds a L{Benchmark} to the suite

        @param benchmark: the benchmark to be added to the suite
        @type benchmark: L{Benchmark}
        """
        self.__benchmarks.append(benchmark)
    def __len__(self):
        """
        returns the number of benchmarks in the suite

        @return: number of benchmarks
        @rtype: C{int}
        """
        return len(self.__benchmarks)
    def __getitem__(self,i):
        """
        returns the i-th benchmark in the suite through self[i]

        @param i: index of the requested benchmark
        @type i: C{int}
        @return: i-th benchmark
        @rtype: L{Benchmark}
        """
        return self.__benchmarks[i]
    def run(self,scale=1):
        """
        runs all benchmarks

        @param scale: defines the number of (OpenMP) threads to be used. If
                      scale is a scalar, all benchmarks are run with scale
                      number of threads. If scale is a C{list}, the p-th
                      problem of each benchmark in the suite is run with
                      scale[p] threads. If scale[p]<1 the p-th problem is
                      omitted.
        @type scale: C{int} or C{list} of C{int}s
        """
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale=scale)
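    # An illustrative (hypothetical) use of the scale argument, assuming each
    # benchmark in the suite defines three problems:
    #
    #    suite.run(scale=4)       # all problems run with 4 threads
    #    suite.run(scale=[1,2,0]) # problem 0 uses 1 thread, problem 1 uses
    #                             # 2 threads, problem 2 is skipped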
    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
            out+="<p>\n"
        if level==1:
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out


class Benchmark(object):
    """
    runs a set of similar L{BenchmarkProblem}s with a set of L{Options}
    """
    def __init__(self,name=None,description=None):
        """
        sets up a benchmark

        @param name: name of the benchmark. If no name is given the class
                     name is used.
        @type name: C{str}
        @param description: description of the benchmark
        @type description: C{str} or C{None}
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        returns the name of the benchmark

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addProblem(self,problem):
        """
        adds a problem to the benchmark

        @param problem: the problem to be added to the benchmark
        @type problem: L{BenchmarkProblem}
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        adds options to the benchmark

        @param options: the options to be added to the benchmark. If options
                        is C{None} the call is ignored.
        @type options: L{Options}
        """
        if options is not None: self.__options.append(options)

    def run(self,scale=1):
        """
        runs all problems with all options.

        @param scale: defines the number of (OpenMP) threads to be used. If
                      scale is a scalar, all problems are run with scale
                      number of threads. If scale is a C{list}, the p-th
                      problem of the benchmark is run with scale[p] threads.
                      If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s
        """
        if isinstance(scale,list):
            c_max=min(len(scale),len(self.__problems))
        else:
            c_max=len(self.__problems)
        self.__scale=scale
        # self.__results[p][o] holds the characteristics returned by problem
        # p run with the o-th options (None if that run failed).
        self.__results=[]
        for c in range(c_max):
            r=self.__problems[c]
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            if s>0:
                t0=time.time()
                print "%s with %s threads started."%(r.__class__,s)
                for p in self.__options:
                    setNumberOfThreads(s)
                    try:
                        row.append(r.run(p))
                    except:
                        traceback.print_exc(file=sys.stdout)
                        row.append(None)
                t0=time.time()-t0
                print "%s with %s threads finished (walltime=%s sec)."%(r.__class__,s,t0)
            self.__results.append(row)
    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            colspan=1
            for o in self.__options:
                if len(rn)<2:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                    colspan=1
                else:
                    colspan=len(rn)
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(colspan,str(o))
                    h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list):
                    out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[r]
                for col in self.__results[r]:
                    if col is None:
                        out+="<TD ALIGN=\"center\" COLSPAN=%s>failed.</TD>"%colspan
                    else:
                        for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
            out+="</TABLE>"
        if level==1:
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out

class BenchmarkProblem(object):
    """
    something that can be run and that returns a list of characteristics
    such as timing, Mflops, error, etc.
    """
    def __init__(self,name=None):
        """
        sets up a benchmark problem

        @param name: name of the problem. If no name is given the class name
                     is used.
        @type name: C{str}
        """
        super(BenchmarkProblem,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the problem

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def run(self,options=None):
        """
        runs the problem and returns a list of run characteristics

        @param options: the options that are used for the run. Note that the
                        number of OpenMP threads is controlled by the
                        L{Benchmark} the problem is run in.
        @type options: L{Options}
        @return: run characteristics
        @rtype: any type that can be read by the L{BenchmarkFilter} applied
                to it
        @note: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError
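    # A minimal sketch of an override (hypothetical subclass), assuming the
    # characteristics of interest are a walltime and a status string:
    #
    #    def run(self,options=None):
    #        t0=time.time()
    #        # ... set up and solve the actual problem using options ...
    #        return time.time()-t0,"ok"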

class BenchmarkFilter(object):
    """
    object to filter the characteristics returned by L{Benchmark} runs.
    """
    def __init__(self):
        """
        sets up a filter
        """
        pass

    def getResultNames(self):
        """
        returns the names of the results produced when run() is called.

        @return: the list of names to be used when the results of the run()
                 call are printed
        @rtype: C{list} of C{str}
        @note: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError

    def __call__(self,result):
        """
        filters the values returned as characteristics of a problem run

        @param result: values to be filtered
        @type result: any type that is produced by the L{BenchmarkProblem}
                      it is applied to
        @return: a list of strings selected from result
        @rtype: C{list} of C{str}
        @note: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError
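    # A minimal sketch of a subclass (hypothetical), assuming the problem's
    # run() returns a (walltime,status) pair as in the sketch above. Note
    # that getResultNames() and __call__() should return lists of the same
    # length so the table columns line up:
    #
    #    class TimingFilter(BenchmarkFilter):
    #        def getResultNames(self):
    #            return ["walltime [sec]","status"]
    #        def __call__(self,result):
    #            return ["%.2f"%result[0],str(result[1])]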


class Options(object):
    """
    defines a set of options to be used to run a L{BenchmarkProblem}
    """
    def __init__(self,name=None):
        """
        sets up the options

        @param name: name of the options. If no name is given the class name
                     is used.
        @type name: C{str}
        """
        super(Options,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the options

        @return: name
        @rtype: C{str}
        """
        return self.__name
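    # A hypothetical subclass might simply carry attributes that a problem's
    # run() method reads, e.g.:
    #
    #    class SolverOptions(Options):
    #        def __init__(self,tolerance=1.e-8):
    #            super(SolverOptions,self).__init__(name="tol=%s"%tolerance)
    #            self.tolerance=tolerance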

if __name__=="__main__":

    class OptionsTest1(Options):
        pass
    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]
        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    bm=Benchmark("Example")
    bm.addProblem(BenchmarkProblemTest1())
    bm.addProblem(BenchmarkProblemTest2())
    bm.addOptions(OptionsTest1())
    bm.addOptions(OptionsTest2())

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    bms.run()
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=4)
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=[1,2])
    print bms.getHTML(filter=SimpleFilter())