# $Id:$

"""
A simple framework to run benchmarks under OpenMP and to summarize the results
in tables, for instance in HTML.

@var __author__: name of author
@var __license__: licence agreement
@var __copyright__: copyrights
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"
__copyright__="""Copyright (c) 2006 by ACcESS MNRF
                 http://www.access.edu.au
                 Primary Business: Queensland, Australia"""
__license__="""Licensed under the Open Software License version 3.0
               http://www.opensource.org/licenses/osl-3.0.php"""
__url__="http://www.iservo.edu.au/esys/escript"
__version__="$Revision$"
__date__="$Date$"

import os,socket,time,sys,traceback
from esys.escript import setNumberOfThreads

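# setNumberOfThreads sets the number of OpenMP threads used by escript;
# Benchmark.run() calls it before each problem is run.
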
class BenchmarkSuite(object):
    """
    framework to run a set of L{Benchmark}s with the objective of creating a table of statistics.

    @var MAX_LEVEL: maximum number of levels in headers for output
    """
    MAX_LEVEL=5
    def __init__(self,name=None):
        """
        sets up a suite of benchmarks

        @param name: name of the benchmark suite. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the benchmark suite

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addBenchmark(self,benchmark):
        """
        adds a L{Benchmark} to the suite

        @param benchmark: new L{Benchmark} to be added to the suite
        @type benchmark: L{Benchmark}
        """
        self.__benchmarks.append(benchmark)

    def __len__(self):
        """
        returns the number of benchmarks in the suite

        @return: number of benchmarks
        @rtype: C{int}
        """
        return len(self.__benchmarks)

    def __getitem__(self,i):
        """
        returns the i-th benchmark in the suite via self[i]

        @param i: index of the requested benchmark
        @type i: C{int}
        @return: i-th benchmark
        @rtype: L{Benchmark}
        """
        return self.__benchmarks[i]
    def run(self,scale=1):
        """
        runs all benchmarks

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar,
                      all benchmarks are run with scale number of threads. If scale is a C{list},
                      the p-th problem in each of the benchmarks in the suite is run with scale[p]
                      threads. If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
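        # remember the scale so that getHTML() can report the number of threads used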
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale=scale)

    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
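        # only a level 1 call produces a complete HTML document with <HTML>
        # skeleton, platform line and footer; deeper levels only contribute
        # a section headed by an <H?> tag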
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
            out+="<p>\n"
        if level==1:
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out


class Benchmark(object):
    """
    runs a set of similar L{BenchmarkProblem}s with a set of L{Options}
    """
    def __init__(self,name=None,description=None):
        """
        sets up a benchmark

        @param name: name of the benchmark. If no name is given the class name is used.
        @type name: C{str}
        @param description: description of the benchmark.
        @type description: C{str} or C{None}
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        returns the name of the benchmark

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addProblem(self,problem):
        """
        adds a problem to the benchmark

        @param problem: new problem to be added to the benchmark
        @type problem: L{BenchmarkProblem}
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        adds options to the benchmark

        @param options: new options to be added to the benchmark. If options is None it is ignored.
        @type options: L{Options}
        """
        if options is not None: self.__options.append(options)

    def run(self,scale=1):
        """
        runs all problems with all options.

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar,
                      all benchmarks are run with scale number of threads. If scale is a C{list},
                      the p-th problem in each of the benchmarks in the suite is run with scale[p]
                      threads. If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
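        # if scale is a list only the first min(len(scale),len(problems))
        # problems are run; problem c is run with scale[c] threads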
        if isinstance(scale,list):
            c_max=min(len(scale),len(self.__problems))
        else:
            c_max=len(self.__problems)
        self.__scale=scale
        self.__results=[]
        for c in range(c_max):
            r=self.__problems[c]
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
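            # run the problem once for each set of options; a failed run is
            # recorded as None so that getHTML() can mark it as "failed."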
            if s>0:
                t0=time.time()
                print "%s with %s threads started."%(r.__class__,s)
                for p in self.__options:
                    setNumberOfThreads(s)
                    try:
                        row.append(r.run(p))
                    except:
                        traceback.print_exc(file=sys.stdout)
                        row.append(None)
                t0=time.time()-t0
                print "%s with %s threads finished (walltime = %s sec)."%(r.__class__,s,t0)
            self.__results.append(row)

    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
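        # the result table header spans two rows: h0 holds the option names
        # (one column group per option), h1 the result names delivered by the
        # filter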
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            for o in self.__options:
                if len(rn)<2:
                    colspan=1
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                else:
                    colspan=len(rn)
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(colspan,str(o))
                h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            c=0
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list):
                    out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c]
                for col in self.__results[r]:
                    if col is None:
                        out+="<TD ALIGN=\"center\" COLSPAN=%s>failed.</TD>"%colspan
                    else:
                        for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
                c+=1
            out+="</TABLE>"
        if level==1:
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out


class BenchmarkProblem(object):
    """
    something that can be run and that returns a list of characteristics such as timing, Mflops, error, etc.
    """
    def __init__(self,name=None):
        """
        sets up a benchmark problem

        @param name: name of the problem. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkProblem,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the problem

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def run(self,options=None):
        """
        runs the problem and returns a list of run characteristics

        @param options: the options that are used for the run. Note that the number of OpenMP
                        threads is controlled by the L{Benchmark} the problem is run in.
        @type options: L{Options}
        @return: run characteristics
        @rtype: any type that can be read by the L{BenchmarkFilter} applied to it.
        @remark: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError

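# A minimal sketch of a concrete problem (illustration only; LaplaceTest is a
# hypothetical name). A real problem would run an escript computation and
# return its characteristics, here just the walltime:
#
#    class LaplaceTest(BenchmarkProblem):
#        def run(self,options=None):
#            t0=time.time()
#            # ... set up and solve the problem, possibly using options ...
#            return [time.time()-t0]
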
class BenchmarkFilter(object):
    """
    object to filter the characteristics returned by L{Benchmark} runs.
    """
    def __init__(self):
        """
        sets up a filter
        """
        pass

    def getResultNames(self):
        """
        returns the names of the results produced when run() is called.

        @return: list of names to be used when the results of the run() call are printed
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError

    def __call__(self,result):
        """
        filters the values returned as characteristics of a problem run

        @param result: values to be filtered
        @type result: any type that is produced by the L{BenchmarkProblem} it is applied to
        @return: a list of strings selected from result
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError


class Options(object):
    """
    defines a set of options to be used to run a L{BenchmarkProblem}
    """
    def __init__(self,name=None):
        """
        sets up the options

        @param name: name of the options. If no name is given the class name is used.
        @type name: C{str}
        """
        super(Options,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the options

        @return: name
        @rtype: C{str}
        """
        return self.__name

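# self-test: builds a small example suite with two dummy problems and two
# option sets and prints the HTML report for a scalar scale, for 4 threads,
# and for a per-problem scale list: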
if __name__=="__main__":

    class OptionsTest1(Options):
        pass
    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]
        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    bm=Benchmark("Example")
    bm.addProblem(BenchmarkProblemTest1())
    bm.addProblem(BenchmarkProblemTest2())
    bm.addOptions(OptionsTest1())
    bm.addOptions(OptionsTest2())

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    bms.run()
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=4)
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=[1,2])
    print bms.getHTML(filter=SimpleFilter())