# $Id:$

#
# COPYRIGHT ACcESS 2004 - All Rights Reserved
#
# This software is the property of ACcESS. No part of this code
# may be copied in any form or by any means without the expressed written
# consent of ACcESS. Copying, use or modification of this software
# by any unauthorised person is illegal unless that
# person has a software license agreement with ACcESS.
#

"""
A simple framework to run benchmarks under OpenMP and to summarize the results in tables, for instance in HTML.

@var __author__: name of author
@var __licence__: licence agreement
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"
__licence__="contact: esys@access.uq.edu.au"
__url__="http://www.iservo.edu.au/esys/escript"
__version__="$Revision:$"
__date__="$Date:$"

import os,socket,time,sys

class BenchmarkSuite(object):
    """
    framework to run a set of L{Benchmark}s with the aim of creating a table of statistics.

    @var MAX_LEVEL: maximum level used in section headers of the output
    """
    MAX_LEVEL=5
    def __init__(self,name=None):
        """
        sets up a suite of benchmarks

        @param name: name of the benchmark suite. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the benchmark suite

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addBenchmark(self,benchmark):
        """
        adds a L{Benchmark} to the suite

        @param benchmark: the new L{Benchmark} to be added to the suite
        @type benchmark: L{Benchmark}
        """
        self.__benchmarks.append(benchmark)

    def __len__(self):
        """
        returns the number of benchmarks in the suite

        @return: number of benchmarks
        @rtype: C{int}
        """
        return len(self.__benchmarks)

    def __getitem__(self,i):
        """
        returns the i-th benchmark of the suite via self[i]

        @param i: index of the requested benchmark
        @type i: C{int}
        @return: i-th benchmark
        @rtype: L{Benchmark}
        """
        return self.__benchmarks[i]

    def run(self,scale=1):
        """
        runs all benchmarks

        @param scale: defines the number of (OpenMP) threads to be used. If C{scale} is a scalar, all benchmarks
                      are run with C{scale} threads. If C{scale} is a C{list}, the p-th problem of each benchmark
                      in the suite is run with scale[p] threads.
        @type scale: C{int} or C{list} of C{int}s
        """
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale=scale)

    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
            out+="<p>\n"
        if level==1:
            out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out
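
# Typical use of the framework (a sketch only; compare the self-test at the
# bottom of this file - benchmark, problem, options and filter stand for
# user-defined instances):
#
#   bm=Benchmark("my benchmark")
#   bm.addProblem(problem)
#   bm.addOptions(options)
#   suite=BenchmarkSuite("my suite")
#   suite.addBenchmark(bm)
#   suite.run(scale=4)
#   open("results.html","w").write(suite.getHTML(filter=filter))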


class Benchmark(object):
    """
    runs a set of similar L{BenchmarkProblem}s with a set of L{Options}
    """
    def __init__(self,name=None,description=None):
        """
        sets up a benchmark

        @param name: name of the benchmark. If no name is given the class name is used.
        @type name: C{str}
        @param description: description of the benchmark
        @type description: C{str} or C{None}
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        returns the name of the benchmark

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addProblem(self,problem):
        """
        adds a problem to the benchmark

        @param problem: the new problem to be added to the benchmark
        @type problem: L{BenchmarkProblem}
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        adds a set of options to the benchmark

        @param options: the new set of options to be added to the benchmark
        @type options: L{Options}
        """
        self.__options.append(options)

    def run(self,scale=1):
        """
        runs all problems with all options.

        @param scale: defines the number of (OpenMP) threads to be used. If C{scale} is a scalar, all problems
                      are run with C{scale} threads. If C{scale} is a C{list}, the p-th problem of the benchmark
                      is run with scale[p] threads.
        @type scale: C{int} or C{list} of C{int}s
        """
        if isinstance(scale,list):
            c_max=min(len(scale),len(self.__problems))
        else:
            c_max=len(self.__problems)
        self.__scale=scale
        self.__results=[]
        for c in range(c_max):
            r=self.__problems[c]
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            for p in self.__options:
                # request s OpenMP threads for this run; note that OMP_NUM_THREADS
                # is typically read when the OpenMP runtime starts up
                os.environ['OMP_NUM_THREADS']=str(s)
                row.append(r.run(p))
            self.__results.append(row)

    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            # header: one column block per set of options, prefixed by the case
            # (and, for a list of scales, the thread count) column
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            for o in self.__options:
                if len(rn)<=1:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                else:
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(len(rn),str(o))
                    h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            # body: one row per problem, each result passed through the filter
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list): out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[r]
                for col in self.__results[r]:
                    for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
            out+="</TABLE>"
        if level==1:
            out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out
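
# The table written by Benchmark.getHTML has one column block per set of
# options (one sub-column per result name reported by the filter) and one
# row per problem, roughly:
#
#   Case  | options 1     | options 2
#         | r0   | r1     | r0   | r1
#   TEST1 | ...  | ...    | ...  | ...
#   TEST2 | ...  | ...    | ...  | ...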


class BenchmarkProblem(object):
    """
    something that can be run and that returns a list of run characteristics such as timing, Mflops, error, etc.
    """
    def __init__(self,name=None):
        """
        sets up a benchmark problem

        @param name: name of the problem. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkProblem,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the problem

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def run(self,options=None):
        """
        runs the problem and returns a list of run characteristics

        @param options: the options to be used for the run. Note that the number of OpenMP threads is controlled
                        by the L{Benchmark} the problem is run in.
        @type options: L{Options}
        @return: run characteristics
        @rtype: any type that can be read by the L{BenchmarkFilter} applied to it
        @remark: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError
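
# A concrete problem only needs to overwrite run(). A minimal sketch (the
# solver call is a placeholder; any return value the matching BenchmarkFilter
# understands will do):
#
#   class MySolverProblem(BenchmarkProblem):
#       def run(self,options=None):
#           t0=time.time()
#           # ... set up and solve the actual problem using options ...
#           return time.time()-t0,"ok"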


class BenchmarkFilter(object):
    """
    object to filter the characteristics returned by L{Benchmark} runs.
    """
    def __init__(self):
        """
        sets up a filter
        """
        pass

    def getResultNames(self):
        """
        returns the names of the results produced when run() is called.

        @return: the names to be used when the results of the run() call are printed
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError

    def __call__(self,result):
        """
        filters the values returned as run characteristics of a problem run

        @param result: values to be filtered
        @type result: any type that is produced by the L{BenchmarkProblem} it is applied to
        @return: a list of strings selected from result
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError


class Options(object):
    """
    defines a set of options to be used to run a L{BenchmarkProblem}
    """
    def __init__(self,name=None):
        """
        sets up the set of options

        @param name: name of the set of options. If no name is given the class name is used.
        @type name: C{str}
        """
        super(Options,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the set of options

        @return: name
        @rtype: C{str}
        """
        return self.__name
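
# Self-test and usage example: two trivial problems are run with two (empty)
# sets of options and the results are printed as HTML tables.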

if __name__=="__main__":

    class OptionsTest1(Options):
        pass
    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]
        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    bm=Benchmark("Example")
    bm.addProblem(BenchmarkProblemTest1())
    bm.addProblem(BenchmarkProblemTest2())
    bm.addOptions(OptionsTest1())
    bm.addOptions(OptionsTest2())

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    bms.run()
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=4)
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=[1,2])
    print bms.getHTML(filter=SimpleFilter())