/trunk/escript/py_src/benchmark.py

Revision 1809 - Thu Sep 25 06:43:44 2008 UTC by ksteube
Log message: Copyright updated in all python files


########################################################
#
# Copyright (c) 2003-2008 by University of Queensland
# Earth Systems Science Computational Center (ESSCC)
# http://www.uq.edu.au/esscc
#
# Primary Business: Queensland, Australia
# Licensed under the Open Software License version 3.0
# http://www.opensource.org/licenses/osl-3.0.php
#
########################################################

__copyright__="""Copyright (c) 2003-2008 by University of Queensland
Earth Systems Science Computational Center (ESSCC)
http://www.uq.edu.au/esscc
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Open Software License version 3.0
http://www.opensource.org/licenses/osl-3.0.php"""
__url__="http://www.uq.edu.au/esscc/escript-finley"

# $Id:$

24 """
25 A simple framework to run benchmarks under OPENMP and to summarize the results in tables for instance in HTML
26
27 @var __author__: name of author
28 @var __license__: licence agreement
29 @var __copyright__: copyrights
30 @var __url__: url entry point on documentation
31 @var __version__: version
32 @var __date__: date of the version
33 """
34
35 __author__="Lutz Gross, l.gross@uq.edu.au"
36
37 import os,socket,time,sys,traceback
38 from esys.escript import setNumberOfThreads
39
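# Typical workflow (a minimal sketch; MyProblem, MyOptions and MyFilter are
# hypothetical subclasses of BenchmarkProblem, Options and BenchmarkFilter;
# see the self-test at the bottom of this file for a complete, runnable
# example):
#
#    bm=Benchmark("my benchmark")
#    bm.addProblem(MyProblem())
#    bm.addOptions(MyOptions())
#    suite=BenchmarkSuite("my suite")
#    suite.addBenchmark(bm)
#    suite.run(scale=4)                       # run with 4 OpenMP threads
#    print suite.getHTML(filter=MyFilter())
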
class BenchmarkSuite(object):
    """
    framework to run a set of L{Benchmark}s with the objective of creating a table of statistics.

    @cvar MAX_LEVEL: maximum number of levels in headers for the output
    """
    MAX_LEVEL=5
    def __init__(self,name=None):
        """
        sets up a suite of benchmarks

        @param name: name of the benchmark suite. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the benchmark suite

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addBenchmark(self,benchmark):
        """
        adds a L{Benchmark} to the suite

        @param benchmark: the new L{Benchmark} to be added
        @type benchmark: L{Benchmark}
        """
        self.__benchmarks.append(benchmark)

    def __len__(self):
        """
        returns the number of benchmarks in the suite

        @return: number of benchmarks
        @rtype: C{int}
        """
        return len(self.__benchmarks)

    def __getitem__(self,i):
        """
        returns the i-th benchmark in the suite via self[i]

        @param i: index of the requested benchmark
        @type i: C{int}
        @return: i-th benchmark
        @rtype: L{Benchmark}
        """
        return self.__benchmarks[i]
    def run(self,scale=1):
        """
        runs all benchmarks

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar all benchmarks
        are run with scale number of threads. If scale is a C{list}, the p-th problem in each of the benchmarks
        in the suite is run with scale[p] threads. If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale=scale)
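
    # Example for run() above (a sketch): suite.run(scale=[4,4,8]) runs the
    # first two problems of each benchmark with 4 threads and the third with
    # 8 threads; problems beyond the length of the list are skipped.
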
    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            # on the top level, report the host name and, for a fixed thread
            # count, the number of threads used:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
            out+="<p>\n"
        if level==1:
            # os.getlogin() fails when there is no controlling terminal,
            # in which case only the time stamp is printed:
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out

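# A rendering sketch (the file name "results.html" is an arbitrary choice
# for illustration): write the suite results to an HTML file after a run.
#
#    f=open("results.html","w")
#    f.write(suite.getHTML(filter=MyFilter()))
#    f.close()
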
class Benchmark(object):
    """
    runs a set of similar L{BenchmarkProblem}s with a set of L{Options}
    """
    def __init__(self,name=None,description=None):
        """
        sets up a benchmark

        @param name: name of the benchmark. If no name is given the class name is used.
        @type name: C{str}
        @param description: description of the benchmark.
        @type description: C{str} or C{None}
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        returns the name of the benchmark

        @return: name
        @rtype: C{str}
        """
        return self.__name
    def addProblem(self,problem):
        """
        adds a problem to the benchmark

        @param problem: the new problem to be added to the benchmark
        @type problem: L{BenchmarkProblem}
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        adds options to the benchmark

        @param options: the new options to be added to the benchmark. If options is C{None} it is ignored.
        @type options: L{Options}
        """
        if options is not None: self.__options.append(options)
    def run(self,scale=1):
        """
        runs all problems with all options.

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar all problems
        are run with scale number of threads. If scale is a C{list}, the p-th problem in the benchmark
        is run with scale[p] threads. If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        # if scale is a list, only as many problems are run as there are
        # entries in scale:
        if isinstance(scale,list):
            c_max=min(len(scale),len(self.__problems))
        else:
            c_max=len(self.__problems)
        self.__scale=scale
        self.__results=[]
        for c in range(c_max):
            r=self.__problems[c]
            # determine the number of threads for the c-th problem:
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            if s>0:
                t0=time.time()
                print "%s with %s threads started."%(r.__class__,s)
                for p in self.__options:
                    setNumberOfThreads(s)
                    # a failing run is recorded as None and does not abort
                    # the remaining runs:
                    try:
                        row.append(r.run(p))
                    except:
                        traceback.print_exc(file=sys.stdout)
                        row.append(None)
                t0=time.time()-t0
                print "%s with %s threads finished (walltime =%s sec)."%(r.__class__,s,t0)
            self.__results.append(row)
    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            # the table header has two rows: h0 lists the options, h1 lists
            # the result names returned by the filter under each option:
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            for o in self.__options:
                if len(rn)<2:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                    colspan=1
                else:
                    colspan=len(rn)
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(colspan,str(o))
                h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            # one table row per problem, one cell per filtered result value:
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list):
                    out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[r]
                for col in self.__results[r]:
                    if col is None:
                        out+="<TD ALIGN=\"center\" COLSPAN=%s>failed.</TD>"%colspan
                    else:
                        for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
            out+="</TABLE>"
        if level==1:
            # os.getlogin() fails when there is no controlling terminal,
            # in which case only the time stamp is printed:
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out

class BenchmarkProblem(object):
    """
    something that can be run and that returns a list of characteristics such as timing, Mflops, error, etc.
    """
    def __init__(self,name=None):
        """
        sets up a benchmark problem

        @param name: name of the problem. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkProblem,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the problem

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def run(self,options=None):
        """
        runs the problem and returns a list of run characteristics

        @param options: the options that are used for the run. Note that the number of OpenMP threads is controlled
        by the L{Benchmark} the problem is run in.
        @type options: L{Options}
        @return: run characteristics
        @rtype: any type that can be read by the L{BenchmarkFilter} applied to it.
        @note: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError

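# A minimal subclass sketch (LaplaceProblem is a hypothetical name; compare
# BenchmarkProblemTest1 in the self-test at the bottom of this file):
#
#    class LaplaceProblem(BenchmarkProblem):
#        def run(self,options=None):
#            t0=time.time()
#            # ... set up and solve the problem, honouring options ...
#            return (time.time()-t0,)
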
class BenchmarkFilter(object):
    """
    object to filter the characteristics returned by L{Benchmark} runs.
    """
    def __init__(self):
        """
        sets up a filter
        """
        pass

    def getResultNames(self):
        """
        returns the names of the results produced when run() is called.

        @return: the list of names to be used when the results of the run() call are printed
        @rtype: C{list} of C{str}
        @note: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError

    def __call__(self,result):
        """
        filters out values from the results returned as characteristics of a problem run

        @param result: values to be filtered
        @type result: any type that is produced by the L{BenchmarkProblem} it is applied to
        @return: a list of strings selected from result
        @rtype: C{list} of C{str}
        @note: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError

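# A minimal filter sketch (assumes the problem's run() returns a
# (walltime,error) pair; compare SimpleFilter in the self-test below):
#
#    class TimingFilter(BenchmarkFilter):
#        def getResultNames(self):
#            return ["walltime [sec]","error"]
#        def __call__(self,result):
#            return ["%.2f"%result[0],str(result[1])]
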

class Options(object):
    """
    defines a set of options to be used to run a L{BenchmarkProblem}
    """
    def __init__(self,name=None):
        """
        sets up the options

        @param name: name of the options set. If no name is given the class name is used.
        @type name: C{str}
        """
        super(Options,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the options set

        @return: name
        @rtype: C{str}
        """
        return self.__name

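# An Options subclass typically just carries attributes that the problem's
# run() method inspects; a hypothetical sketch:
#
#    class SolverOptions(Options):
#        def __init__(self,tolerance=1.e-8):
#            super(SolverOptions,self).__init__(name="tol=%s"%tolerance)
#            self.tolerance=tolerance
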
if __name__=="__main__":

    class OptionsTest1(Options):
        pass
    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            import time
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            import time
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]
        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    bm=Benchmark("Example")
    bm.addProblem(BenchmarkProblemTest1())
    bm.addProblem(BenchmarkProblemTest2())
    bm.addOptions(OptionsTest1())
    bm.addOptions(OptionsTest2())

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    bms.run()
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=4)
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=[1,2])
    print bms.getHTML(filter=SimpleFilter())
