1 |
filter# $Id:$ |
2 |
|
3 |
# |
4 |
# COPYRIGHT ACcESS 2004 - All Rights Reserved |
5 |
# |
6 |
# This software is the property of ACcESS. No part of this code |
7 |
# may be copied in any form or by any means without the expressed written |
8 |
# consent of ACcESS. Copying, use or modification of this software |
9 |
# by any unauthorised person is illegal unless that |
10 |
# person has a software license agreement with ACcESS. |
11 |
# |
12 |
|
13 |
""" |
14 |
A simple framework to run benchmarks under OPENMP and to summarize the results in tables for instance in HTML |
15 |
|
16 |
@var __author__: name of author |
17 |
@var __licence__: licence agreement |
18 |
var __url__: url entry point on documentation |
19 |
@var __version__: version |
20 |
@var __date__: date of the version |
21 |
""" |
22 |
|
23 |
__author__="Lutz Gross, l.gross@uq.edu.au" |
24 |
__licence__="contact: esys@access.uq.edu.au" |
25 |
__url__="http://www.iservo.edu.au/esys/escript" |
26 |
__version__="$Revision:$" |
27 |
__date__="$Date:$" |
28 |
|
29 |
import os,socket,time,sys |
30 |
from esys.escript import setNumberOfThreads |
31 |
|
32 |
class BenchmarkSuite(object): |
33 |
""" |
34 |
framework to run a bunch of L{Benchmark}s with the object to create a table of statistics. |
35 |
@var MAX_LEVEL: maximum number of level in headers for output |
36 |
""" |
37 |
MAX_LEVEL=5 |
38 |
def __init__(self,name=None): |
39 |
""" |
40 |
sets up a suite of benchmarks |
41 |
|
42 |
@param name: name of the benchmark suite. If no name is given the class name is used. |
43 |
@type name: C{str} |
44 |
""" |
45 |
super(BenchmarkSuite,self).__init__() |
46 |
self.__benchmarks=[] |
47 |
self.__scale=1 |
48 |
if name==None: |
49 |
self.__name=self.__class__.__name__ |
50 |
else: |
51 |
self.__name=name |
52 |
|
53 |
def __str__(self): |
54 |
""" |
55 |
returns the name of the benchmark suite |
56 |
|
57 |
@return: name |
58 |
@rtype: C{str} |
59 |
""" |
60 |
return self.__name |
61 |
def addBenchmark(self,benchmark): |
62 |
""" |
63 |
adds a L{Benchmark} to the suite |
64 |
|
65 |
@param benchmark: adds a new L{Benchmark} to the suite |
66 |
@type benchmark: L{Benchmark} |
67 |
""" |
68 |
self.__benchmarks.append(benchmark) |
69 |
def __len__(self): |
70 |
""" |
71 |
returns the number of benchmarks in the suite |
72 |
|
73 |
@return: number of benchmarks |
74 |
@rtype: C{int} |
75 |
""" |
76 |
return len(self.__benchmarks) |
77 |
def __getitem__(self,i): |
78 |
""" |
79 |
returns the i-th benchmark in the suite through self[i] |
80 |
|
81 |
@param i: index of the requested benchmark |
82 |
@type i: C{int} |
83 |
@return: i-th benchmark |
84 |
@rtype: L{Benchmark} |
85 |
|
86 |
""" |
87 |
return self.__benchmarks[i] |
88 |
def run(self,scale=1): |
89 |
""" |
90 |
runs all benchmarks |
91 |
|
92 |
@param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar all benchmarks |
93 |
are run with scale number of threads. If scale is a C{list}, the p-th problem in each of the benchmarks |
94 |
in the suite is run with scale[p] threads. If scale[p]<1 teh p-th problem is omitted. |
95 |
@type scale: C{int} or C{list} of C{int}s. |
96 |
""" |
97 |
self.__scale=scale |
98 |
for i in range(len(self)): self[i].run(scale=scale) |
99 |
def getHTML(self,filter,level=1): |
100 |
""" |
101 |
returns the results of the last benchmark run in HTML format. |
102 |
|
103 |
@param filter: filter to be applied to the results |
104 |
@type filter: L{BenchmarkFilter} |
105 |
@param level: level used in header <H?> tags |
106 |
@type level: C{int} |
107 |
@return: HTML document |
108 |
@rtype: C{str} |
109 |
""" |
110 |
out="" |
111 |
if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self) |
112 |
out+="<H%s>%s</H%s>\n"%(level,str(self),level) |
113 |
if level==1: |
114 |
m="" |
115 |
if isinstance(self.__scale,int): |
116 |
if self.__scale>1: |
117 |
m=" (%s threads)"%self.__scale |
118 |
out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m) |
119 |
for i in range(len(self)): |
120 |
out+="<p>\n" |
121 |
out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL)) |
122 |
out+="<p>\n" |
123 |
if level==1: |
124 |
try: |
125 |
name=os.getlogin() |
126 |
out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z')) |
127 |
except OSError: |
128 |
out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z')) |
129 |
|
130 |
out+="</BODY></HTML>\n" |
131 |
return out |
132 |
|
133 |
|
134 |
class Benchmark(object): |
135 |
""" |
136 |
runs a bunch of similar L{BenchmarkProblem}s with a bunch of L{Options} |
137 |
""" |
138 |
def __init__(self,name=None,description=None): |
139 |
""" |
140 |
sets up a benchmark |
141 |
|
142 |
@param name: name of the benchmark. If no name is given the class name is used. |
143 |
@type name: C{str} |
144 |
@param description: description of the benchmark. |
145 |
@type description: C{str} or C{None} |
146 |
""" |
147 |
super(Benchmark,self).__init__() |
148 |
self.__options=[] |
149 |
self.__problems=[] |
150 |
self.__results=[] |
151 |
self.__scale=1 |
152 |
if name==None: |
153 |
self.__name=self.__class__.__name__ |
154 |
else: |
155 |
self.__name=name |
156 |
self.__description=description |
157 |
|
158 |
def __str__(self): |
159 |
""" |
160 |
returns the name of the benchmark suite |
161 |
|
162 |
@return: name |
163 |
@rtype: C{str} |
164 |
""" |
165 |
return self.__name |
166 |
|
167 |
def addProblem(self,problem): |
168 |
""" |
169 |
adds a problem to the benchmark |
170 |
|
171 |
@param problem: adds a new problem to the bechmark |
172 |
@type problem: L{BenchmarkProblem} |
173 |
""" |
174 |
self.__problems.append(problem) |
175 |
|
176 |
def addOptions(self,Options): |
177 |
""" |
178 |
adds a options to the benchmark |
179 |
|
180 |
@param options: adds a new option to the bechmark |
181 |
@type problem: L{Options} |
182 |
""" |
183 |
self.__options.append(Options) |
184 |
|
185 |
def run(self,scale=1): |
186 |
""" |
187 |
runs all problems with all options. |
188 |
|
189 |
|
190 |
@param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar all benchmarks |
191 |
are run with scale number of threads. If scale is a C{list}, the p-th problem in each of the benchmarks |
192 |
in the suite is run with scale[p] threads. If scale[p]<1 teh p-th problem is omitted. |
193 |
@type scale: C{int} or C{list} of C{int}s. |
194 |
""" |
195 |
if isinstance(scale,list): |
196 |
c_max=min(len(scale),len(self.__problems)) |
197 |
else: |
198 |
c_max=len(self.__problems) |
199 |
self.__filter=filter |
200 |
self.__scale=scale |
201 |
self.__results=[] |
202 |
for c in range(c_max): |
203 |
r=self.__problems[c] |
204 |
if isinstance(scale,list): |
205 |
s=scale[c] |
206 |
else: |
207 |
s=scale |
208 |
row=[] |
209 |
if s>0: |
210 |
t0=time.time() |
211 |
print "%s with %s threads started."%(r.__class__,s) |
212 |
for p in self.__options: |
213 |
setNumberOfThreads(s) |
214 |
row.append(r.run(p)) |
215 |
t0=time.time()-t0 |
216 |
print "%s with %s threads finished (walltime =%s sec)."%(r.__class__,s,t0) |
217 |
self.__results.append(row) |
218 |
def getHTML(self,filter,level=1): |
219 |
""" |
220 |
returns the results of the last benchmark run in HTML format. |
221 |
|
222 |
@param filter: filter to be applied to the results |
223 |
@type filter: L{BenchmarkFilter} |
224 |
@param level: level used in header <H?> tags |
225 |
@type level: C{int} |
226 |
@return: HTML document |
227 |
@rtype: C{str} |
228 |
""" |
229 |
out="" |
230 |
if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self) |
231 |
out+="<H%s>%s</H%s>\n"%(level,str(self),level) |
232 |
if level==1: |
233 |
m="" |
234 |
if isinstance(self.__scale,int): |
235 |
if self.__scale>1: |
236 |
m=" (%s threads)"%self.__scale |
237 |
out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m) |
238 |
if self.__description: out+="<p>%s</p>\n"%str(self.__description) |
239 |
if len(self.__problems)>0: |
240 |
out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n" |
241 |
h1_seg="" |
242 |
rn=filter.getResultNames() |
243 |
if len(rn)==0: |
244 |
h1_seg+="<TD></TD>" |
245 |
else: |
246 |
for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n |
247 |
h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>" |
248 |
h1="<TR>" |
249 |
if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>" |
250 |
for o in self.__options: |
251 |
if len(rn)==0: |
252 |
h0+="<TH ALIGN=\"center\">%s</TH>"%str(o) |
253 |
elif len(rn)==1: |
254 |
h0+="<TH ALIGN=\"center\">%s</TH>"%str(o) |
255 |
empty_h1=False |
256 |
else: |
257 |
h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(len(rn),str(o)) |
258 |
h1+=h1_seg |
259 |
out+=h0+"</TR>\n"+h1+"</TR>\n" |
260 |
c=0 |
261 |
for r in range(len(self.__results)): |
262 |
out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r]) |
263 |
if isinstance(self.__scale,list): out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c] |
264 |
for col in self.__results[r]: |
265 |
for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e |
266 |
out+="</TR>\n" |
267 |
c+=1 |
268 |
out+="</TABLE>" |
269 |
if level==1: |
270 |
out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z')) |
271 |
out+="</BODY></HTML>\n" |
272 |
return out |
273 |
|
274 |
class BenchmarkProblem(object): |
275 |
""" |
276 |
something that can be run and returns a list of characteristics such as timing, Mflops, error, etc. |
277 |
""" |
278 |
def __init__(self,name=None): |
279 |
""" |
280 |
sets up a benchmark problem |
281 |
|
282 |
@param name: name of the problem. If no name is given the class name is used. |
283 |
@type name: C{str} |
284 |
""" |
285 |
super(BenchmarkProblem,self).__init__() |
286 |
if name==None: |
287 |
self.__name=self.__class__.__name__ |
288 |
else: |
289 |
self.__name=name |
290 |
|
291 |
|
292 |
def __str__(self): |
293 |
""" |
294 |
returns the name of the benchmark suite |
295 |
|
296 |
@return: name |
297 |
@rtype: C{str} |
298 |
""" |
299 |
return self.__name |
300 |
|
301 |
def run(self,options=None): |
302 |
""" |
303 |
runs the problem and returns a list of run characteristics |
304 |
|
305 |
|
306 |
@param options: the options that are used for the run. Note that the number of OpenMP threads is controlled |
307 |
by the L{Benchmark} the problem is run in. |
308 |
@type options: L{Options} |
309 |
@return: run characteristics |
310 |
@rtype: any type that can be read by the L{BenchmarkFilter} applied to it. |
311 |
@remark: this function has to overwritten by a particular problem |
312 |
""" |
313 |
raise NotImplementedError |
314 |
return [] |
315 |
|
316 |
class BenchmarkFilter(object): |
317 |
""" |
318 |
object to filter the characteristcs returned by Bechmark runs. |
319 |
|
320 |
""" |
321 |
def __init__(self): |
322 |
""" |
323 |
sets up a filter |
324 |
""" |
325 |
pass |
326 |
|
327 |
|
328 |
def getResultNames(self): |
329 |
""" |
330 |
return the names of the results produced when run() is called. |
331 |
|
332 |
@return: names the list of the names to be used when the results of the run() call are printed |
333 |
@rtype: C{list} of C{str} |
334 |
@remark: this function has to overwritten by a particular problem |
335 |
""" |
336 |
raise NotImplementedError |
337 |
return [] |
338 |
|
339 |
def __call__(self,result): |
340 |
""" |
341 |
filters out values results returned as characteristcs of a problem run |
342 |
|
343 |
@param result: values to be filtered |
344 |
@type result: any type that is produced by the L{BenchmarkProblem} it is applied to |
345 |
@return: a list of strings selected from result |
346 |
@rtype: C{list} of C{str} |
347 |
@remark: this function has to overwritten by a particular problem |
348 |
""" |
349 |
raise NotImplementedError |
350 |
return [] |
351 |
|
352 |
|
353 |
class Options(object): |
354 |
""" |
355 |
defines a set of options to be used to run a L{BenchmarkProblem} |
356 |
""" |
357 |
def __init__(self,name=None): |
358 |
""" |
359 |
sets up the options |
360 |
|
361 |
@param name: name of the option. If no name is given the class name is used. |
362 |
@type name: C{str} |
363 |
""" |
364 |
super(Options,self).__init__() |
365 |
if name==None: |
366 |
self.__name=self.__class__.__name__ |
367 |
else: |
368 |
self.__name=name |
369 |
def __str__(self): |
370 |
""" |
371 |
returns the name of the benchmark suite |
372 |
|
373 |
@return: name |
374 |
@rtype: C{str} |
375 |
""" |
376 |
return self.__name |
377 |
|
378 |
if __name__=="__main__": |
379 |
|
380 |
class OptionsTest1(Options): |
381 |
pass |
382 |
class OptionsTest2(Options): |
383 |
pass |
384 |
|
385 |
class BenchmarkProblemTest1(BenchmarkProblem): |
386 |
def __init__(self): |
387 |
super(BenchmarkProblemTest1,self).__init__(name="TEST1") |
388 |
def run(self,options=None): |
389 |
import time |
390 |
return time.time(),"A" |
391 |
|
392 |
class BenchmarkProblemTest2(BenchmarkProblem): |
393 |
def __init__(self): |
394 |
super(BenchmarkProblemTest2,self).__init__(name="TEST2") |
395 |
def run(self,options=None): |
396 |
import time |
397 |
return -time.time(),"B" |
398 |
|
399 |
class SimpleFilter(BenchmarkFilter): |
400 |
def getResultNames(self): |
401 |
return ["r0","r1"] |
402 |
def __call__(self,result): |
403 |
return [str(result[0]),str(result[1])] |
404 |
|
405 |
bm=Benchmark("Example") |
406 |
bm.addProblem(BenchmarkProblemTest1()) |
407 |
bm.addProblem(BenchmarkProblemTest2()) |
408 |
bm.addOptions(OptionsTest1()) |
409 |
bm.addOptions(OptionsTest2()) |
410 |
|
411 |
bms=BenchmarkSuite("A Test") |
412 |
bms.addBenchmark(bm) |
413 |
|
414 |
bms.run() |
415 |
print bms.getHTML(filter=SimpleFilter()) |
416 |
|
417 |
bms.run(scale=4) |
418 |
print bms.getHTML(filter=SimpleFilter()) |
419 |
|
420 |
bms.run(scale=[1,2]) |
421 |
print bms.getHTML(filter=SimpleFilter()) |