# $Id:$

#
# COPYRIGHT ACcESS 2004 - All Rights Reserved
#
# This software is the property of ACcESS. No part of this code
# may be copied in any form or by any means without the expressed written
# consent of ACcESS. Copying, use or modification of this software
# by any unauthorised person is illegal unless that
# person has a software license agreement with ACcESS.
#

"""
A simple framework to run benchmarks under OpenMP and to summarize the results in tables, for instance in HTML.

@var __author__: name of author
@var __licence__: licence agreement
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"
__licence__="contact: esys@access.uq.edu.au"
__url__="http://www.iservo.edu.au/esys/escript"
__version__="$Revision:$"
__date__="$Date:$"

import os,socket,time,sys,traceback
from esys.escript import setNumberOfThreads

class BenchmarkSuite(object):
    """
    framework to run a set of L{Benchmark}s with the objective of creating a table of statistics.

    @var MAX_LEVEL: maximum number of levels in headers for output
    """
    MAX_LEVEL=5
    def __init__(self,name=None):
        """
        sets up a suite of benchmarks

        @param name: name of the benchmark suite. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
    def __str__(self):
        """
        returns the name of the benchmark suite

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addBenchmark(self,benchmark):
        """
        adds a L{Benchmark} to the suite

        @param benchmark: the new L{Benchmark} to be added
        @type benchmark: L{Benchmark}
        """
        self.__benchmarks.append(benchmark)

    def __len__(self):
        """
        returns the number of benchmarks in the suite

        @return: number of benchmarks
        @rtype: C{int}
        """
        return len(self.__benchmarks)

    def __getitem__(self,i):
        """
        returns the i-th benchmark in the suite via self[i]

        @param i: index of the requested benchmark
        @type i: C{int}
        @return: i-th benchmark
        @rtype: L{Benchmark}
        """
        return self.__benchmarks[i]
    def run(self,scale=1):
        """
        runs all benchmarks

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar all benchmarks
                      are run with scale number of threads. If scale is a C{list}, the p-th problem in each of the benchmarks
                      in the suite is run with scale[p] threads. If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale=scale)
    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
            out+="<p>\n"
        if level==1:
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out
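
# Example (sketch): writing a suite's HTML report to a file. The names
# "mysuite", "mybenchmark", "myfilter" and "report.html" are illustrative
# placeholders, not part of this module.
#
#   mysuite=BenchmarkSuite("my tests")
#   mysuite.addBenchmark(mybenchmark)
#   mysuite.run(scale=4)
#   f=open("report.html","w")
#   f.write(mysuite.getHTML(filter=myfilter))
#   f.close()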


class Benchmark(object):
    """
    runs a set of similar L{BenchmarkProblem}s with a set of L{Options}
    """
    def __init__(self,name=None,description=None):
        """
        sets up a benchmark

        @param name: name of the benchmark. If no name is given the class name is used.
        @type name: C{str}
        @param description: description of the benchmark.
        @type description: C{str} or C{None}
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        returns the name of the benchmark

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addProblem(self,problem):
        """
        adds a problem to the benchmark

        @param problem: the new problem to be added
        @type problem: L{BenchmarkProblem}
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        adds options to the benchmark

        @param options: the new options to be added. If options==None they are ignored
        @type options: L{Options}
        """
        if options!=None: self.__options.append(options)

    def run(self,scale=1):
        """
        runs all problems with all options.

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar all benchmarks
                      are run with scale number of threads. If scale is a C{list}, the p-th problem in each of the benchmarks
                      in the suite is run with scale[p] threads. If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        if isinstance(scale,list):
            c_max=min(len(scale),len(self.__problems))
        else:
            c_max=len(self.__problems)
        self.__scale=scale
        self.__results=[]
        for c in range(c_max):
            r=self.__problems[c]
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            if s>0:
                t0=time.time()
                print "%s with %s threads started."%(r.__class__,s)
                for p in self.__options:
                    setNumberOfThreads(s)
                    try:
                        row.append(r.run(p))
                    except:
                        traceback.print_exc(file=sys.stdout)
                        row.append(None)
                t0=time.time()-t0
                print "%s with %s threads finished (walltime=%s sec)."%(r.__class__,s,t0)
            self.__results.append(row)
    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            for o in self.__options:
                if len(rn)<2:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                    colspan=1
                else:
                    colspan=len(rn)
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(colspan,str(o))
                h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            c=0
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list):
                    out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c]
                for col in self.__results[r]:
                    if col==None:
                        out+="<TD ALIGN=\"center\" COLSPAN=%s>failed.</TD>"%colspan
                    else:
                        for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
                c+=1
            out+="</TABLE>"
        if level==1:
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out


class BenchmarkProblem(object):
    """
    something that can be run and returns a list of characteristics such as timing, Mflops, error, etc.
    """
    def __init__(self,name=None):
        """
        sets up a benchmark problem

        @param name: name of the problem. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkProblem,self).__init__()
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the problem

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def run(self,options=None):
        """
        runs the problem and returns a list of run characteristics

        @param options: the options that are used for the run. Note that the number of OpenMP threads is controlled
                        by the L{Benchmark} the problem is run in.
        @type options: L{Options}
        @return: run characteristics
        @rtype: any type that can be read by the L{BenchmarkFilter} applied to it.
        @remark: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError
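
# Example (sketch): a problem subclass that times a workload and returns
# (walltime,error) characteristics. "MyProblem" and "solve" are illustrative
# placeholders for a concrete problem and its hypothetical workload.
#
#   class MyProblem(BenchmarkProblem):
#       def run(self,options=None):
#           t0=time.time()
#           error=solve(options)   # hypothetical workload
#           return time.time()-t0,error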


class BenchmarkFilter(object):
    """
    object to filter the characteristics returned by L{Benchmark} runs.
    """
    def __init__(self):
        """
        sets up a filter
        """
        pass

    def getResultNames(self):
        """
        returns the names of the results produced when run() is called.

        @return: the list of names to be used when the results of the run() call are printed
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError

    def __call__(self,result):
        """
        filters the values returned as characteristics of a problem run

        @param result: values to be filtered
        @type result: any type that is produced by the L{BenchmarkProblem} it is applied to
        @return: a list of strings selected from result
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError
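
# Example (sketch): a filter that rounds a walltime characteristic to two
# decimal places, assuming the problem returns (walltime,error) pairs as in
# the sketch above. "TimingFilter" is an illustrative name.
#
#   class TimingFilter(BenchmarkFilter):
#       def getResultNames(self):
#           return ["walltime (sec)","error"]
#       def __call__(self,result):
#           return ["%.2f"%result[0],str(result[1])]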


class Options(object):
    """
    defines a set of options to be used to run a L{BenchmarkProblem}
    """
    def __init__(self,name=None):
        """
        sets up the options

        @param name: name of the options. If no name is given the class name is used.
        @type name: C{str}
        """
        super(Options,self).__init__()
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the options

        @return: name
        @rtype: C{str}
        """
        return self.__name
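
# Example (sketch): an options subclass carrying a solver setting. The class
# name "SolverOptions" and the attribute "tolerance" are illustrative only.
#
#   class SolverOptions(Options):
#       def __init__(self,tolerance=1.e-8):
#           super(SolverOptions,self).__init__(name="tol=%s"%tolerance)
#           self.tolerance=tolerance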

if __name__=="__main__":

    class OptionsTest1(Options):
        pass
    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]
        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    bm=Benchmark("Example")
    bm.addProblem(BenchmarkProblemTest1())
    bm.addProblem(BenchmarkProblemTest2())
    bm.addOptions(OptionsTest1())
    bm.addOptions(OptionsTest2())

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    bms.run()
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=4)
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=[1,2])
    print bms.getHTML(filter=SimpleFilter())