Contents of /trunk/escript/py_src/benchmark.py

Revision 2158
Mon Dec 15 07:17:47 2008 UTC by caltinay
File MIME type: text/x-python
File size: 14378 byte(s)
Assorted spelling, grammar, whitespace and copy/paste error fixes (Part 1).
This commit should be a no-op.


########################################################
#
# Copyright (c) 2003-2008 by University of Queensland
# Earth Systems Science Computational Center (ESSCC)
# http://www.uq.edu.au/esscc
#
# Primary Business: Queensland, Australia
# Licensed under the Open Software License version 3.0
# http://www.opensource.org/licenses/osl-3.0.php
#
########################################################

__copyright__="""Copyright (c) 2003-2008 by University of Queensland
Earth Systems Science Computational Center (ESSCC)
http://www.uq.edu.au/esscc
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Open Software License version 3.0
http://www.opensource.org/licenses/osl-3.0.php"""
__url__="http://www.uq.edu.au/esscc/escript-finley"

# $Id:$

24 """
25 A simple framework to run benchmarks under OpenMP and to summarize the results
26 in tables for instance in HTML
27
28 @var __author__: name of author
29 @var __license__: licence agreement
30 @var __copyright__: copyrights
31 @var __url__: url entry point on documentation
32 @var __version__: version
33 @var __date__: date of the version
34 """
35
36 __author__="Lutz Gross, l.gross@uq.edu.au"
37
38 import os,socket,time,sys,traceback
39 from esys.escript import setNumberOfThreads
40
class BenchmarkSuite(object):
    """
    Framework to run a set of L{Benchmark}s and to create a table of
    statistics.

    @cvar MAX_LEVEL: maximum number of levels in headers for output
    """
    MAX_LEVEL=5
    def __init__(self,name=None):
        """
        sets up a suite of benchmarks

        @param name: name of the benchmark suite. If no name is given the
                     class name is used.
        @type name: C{str}
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the benchmark suite

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addBenchmark(self,benchmark):
        """
        adds a L{Benchmark} to the suite

        @param benchmark: the new L{Benchmark} to be added to the suite
        @type benchmark: L{Benchmark}
        """
        self.__benchmarks.append(benchmark)

    def __len__(self):
        """
        returns the number of benchmarks in the suite

        @return: number of benchmarks
        @rtype: C{int}
        """
        return len(self.__benchmarks)

    def __getitem__(self,i):
        """
        returns the i-th benchmark in the suite through self[i]

        @param i: index of the requested benchmark
        @type i: C{int}
        @return: i-th benchmark
        @rtype: L{Benchmark}
        """
        return self.__benchmarks[i]

    def run(self,scale=1):
        """
        runs all benchmarks

        @param scale: defines the number of (OpenMP) threads to be used. If
                      scale is a scalar all benchmarks are run with scale
                      number of threads. If scale is a C{list}, the p-th
                      problem in each of the benchmarks in the suite is run
                      with scale[p] threads. If scale[p]<1 the p-th problem
                      is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale=scale)
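        # Illustrative example (hypothetical suite object): with three
        # problems in each benchmark,
        #     suite.run(scale=[1,2,4])
        # runs the first problem with 1 thread, the second with 2 and the
        # third with 4, while suite.run(scale=[0,2,4]) omits the first.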

    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
            out+="<p>\n"
        if level==1:
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out

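# A minimal usage sketch, assuming hypothetical subclasses MyProblem,
# MyOptions and MyFilter (the __main__ block at the end of this file has a
# runnable version):
#
#     bm=Benchmark("example")
#     bm.addProblem(MyProblem())
#     bm.addOptions(MyOptions())
#     suite=BenchmarkSuite("suite")
#     suite.addBenchmark(bm)
#     suite.run(scale=4)
#     open("results.html","w").write(suite.getHTML(filter=MyFilter()))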

class Benchmark(object):
    """
    runs a set of similar L{BenchmarkProblem}s with a set of L{Options}
    """
    def __init__(self,name=None,description=None):
        """
        sets up a benchmark

        @param name: name of the benchmark. If no name is given the class
                     name is used.
        @type name: C{str}
        @param description: description of the benchmark.
        @type description: C{str} or C{None}
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        returns the name of the benchmark

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addProblem(self,problem):
        """
        adds a problem to the benchmark

        @param problem: the new problem to be added to the benchmark
        @type problem: L{BenchmarkProblem}
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        adds options to the benchmark

        @param options: the options to be added to the benchmark. If
                        options is C{None} they are ignored
        @type options: L{Options}
        """
        if options is not None: self.__options.append(options)

    def run(self,scale=1):
        """
        runs all problems with all options.

        @param scale: defines the number of (OpenMP) threads to be used. If
                      scale is a scalar all problems are run with scale
                      number of threads. If scale is a C{list}, the p-th
                      problem is run with scale[p] threads. If scale[p]<1
                      the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        if isinstance(scale,list):
            c_max=min(len(scale),len(self.__problems))
        else:
            c_max=len(self.__problems)
        self.__scale=scale
        self.__results=[]
        for c in range(c_max):
            r=self.__problems[c]
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            if s>0:
                t0=time.time()
                print "%s with %s threads started."%(r.__class__,s)
                for p in self.__options:
                    setNumberOfThreads(s)
                    try:
                        row.append(r.run(p))
                    except:
                        traceback.print_exc(file=sys.stdout)
                        row.append(None)
                t0=time.time()-t0
                print "%s with %s threads finished (walltime=%s sec)."%(r.__class__,s,t0)
            self.__results.append(row)
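        # Note: a problem/option combination that raises an exception is
        # recorded as None and rendered as "failed." by getHTML() below.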

    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            for o in self.__options:
                if len(rn)<2:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                    colspan=1
                else:
                    colspan=len(rn)
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(colspan,str(o))
                h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            c=0
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list):
                    out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c]
                for col in self.__results[r]:
                    if col is None:
                        out+="<TD ALIGN=\"center\" COLSPAN=%s>failed.</TD>"%colspan
                    else:
                        for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
                c+=1
            out+="</TABLE>"
        if level==1:
            try:
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out

class BenchmarkProblem(object):
    """
    a benchmark problem that can be run and which returns a list of
    characteristics such as timing, MFlops, error, etc.
    """
    def __init__(self,name=None):
        """
        sets up a benchmark problem

        @param name: name of the problem. If no name is given the class
                     name is used.
        @type name: C{str}
        """
        super(BenchmarkProblem,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the problem

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def run(self,options=None):
        """
        runs the problem and returns a list of run characteristics

        @param options: the options that are used for the run. Note that the
                        number of OpenMP threads is controlled by the
                        L{Benchmark} the problem is run in.
        @type options: L{Options}
        @return: run characteristics
        @rtype: any type that can be read by the L{BenchmarkFilter} applied
                to it
        @note: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError

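# A minimal sketch of a concrete problem (the class name and the workload
# are illustrative only): returns the walltime and a checksum from which a
# L{BenchmarkFilter} can select what to report.
class ExampleSumProblem(BenchmarkProblem):
    def run(self,options=None):
        t0=time.time()
        s=0
        for i in range(100000): s+=i*i     # stand-in workload
        return time.time()-t0,s
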
class BenchmarkFilter(object):
    """
    object to filter the characteristics returned by Benchmark runs.
    """
    def __init__(self):
        """
        sets up a filter
        """
        pass

    def getResultNames(self):
        """
        returns the names of the results produced when run() is called.

        @return: the list of names to be used when the results of the run()
                 call are printed
        @rtype: C{list} of C{str}
        @note: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError

    def __call__(self,result):
        """
        filters out results returned as characteristics of a problem run

        @param result: values to be filtered
        @type result: any type that is produced by the L{BenchmarkProblem}
                      it is applied to
        @return: a list of strings selected from result
        @rtype: C{list} of C{str}
        @note: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError

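# A matching sketch of a filter for ExampleSumProblem above (illustrative
# only): getResultNames() labels the table columns and __call__() turns one
# run result into the corresponding list of strings.
class ExampleSumFilter(BenchmarkFilter):
    def getResultNames(self):
        return ["time [sec]","checksum"]
    def __call__(self,result):
        return ["%.4f"%result[0],str(result[1])]
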

class Options(object):
    """
    defines a set of options to be used to run a L{BenchmarkProblem}
    """
    def __init__(self,name=None):
        """
        sets up the options

        @param name: name of the options set. If no name is given the class
                     name is used.
        @type name: C{str}
        """
        super(Options,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the options set

        @return: name
        @rtype: C{str}
        """
        return self.__name

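# A sketch of options that carry actual settings (the attribute names are
# made up for illustration): subclasses of Options typically just hold the
# values a problem's run() method reads off.
class ExampleSolverOptions(Options):
    def __init__(self):
        super(ExampleSolverOptions,self).__init__(name="PCG")
        self.tolerance=1.e-8
        self.iter_max=500
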
if __name__=="__main__":

    class OptionsTest1(Options):
        pass
    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]
        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    bm=Benchmark("Example")
    bm.addProblem(BenchmarkProblemTest1())
    bm.addProblem(BenchmarkProblemTest2())
    bm.addOptions(OptionsTest1())
    bm.addOptions(OptionsTest2())

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    bms.run()
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=4)
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=[1,2])
    print bms.getHTML(filter=SimpleFilter())

