trunk/escript/py_src/benchmark.py

# $Id:$

"""
A simple framework to run benchmarks under OpenMP and to summarize the results in tables, for instance in HTML.

@var __author__: name of author
@var __license__: license agreement
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"
__copyright__="""Copyright (c) 2006 by ACcESS MNRF
                 http://www.access.edu.au
                 Primary Business: Queensland, Australia"""
__license__="""Licensed under the Open Software License version 3.0
               http://www.opensource.org/licenses/osl-3.0.php"""
__url__="http://www.iservo.edu.au/esys/escript"
__version__="$Revision:$"
__date__="$Date:$"

import os,socket,time,sys,traceback
from esys.escript import setNumberOfThreads

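# Typical workflow (compare the self-test under __main__ at the end of this
# module): create a Benchmark, add BenchmarkProblem and Options instances,
# collect benchmarks in a BenchmarkSuite, then run() and render the results
# via getHTML() with a BenchmarkFilter, e.g. (illustrative names only):
#
#     suite=BenchmarkSuite("MySuite")
#     suite.addBenchmark(bm)
#     suite.run(scale=4)
#     open("results.html","w").write(suite.getHTML(filter=MyFilter()))
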
class BenchmarkSuite(object):
    """
    framework to run a set of L{Benchmark}s with the objective of creating a table of statistics.

    @var MAX_LEVEL: maximum number of levels in headers for output
    """
    MAX_LEVEL=5
    def __init__(self,name=None):
        """
        sets up a suite of benchmarks

        @param name: name of the benchmark suite. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the benchmark suite

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addBenchmark(self,benchmark):
        """
        adds a L{Benchmark} to the suite

        @param benchmark: the benchmark to be added to the suite
        @type benchmark: L{Benchmark}
        """
        self.__benchmarks.append(benchmark)

    def __len__(self):
        """
        returns the number of benchmarks in the suite

        @return: number of benchmarks
        @rtype: C{int}
        """
        return len(self.__benchmarks)

    def __getitem__(self,i):
        """
        returns the i-th benchmark in the suite via self[i]

        @param i: index of the requested benchmark
        @type i: C{int}
        @return: i-th benchmark
        @rtype: L{Benchmark}
        """
        return self.__benchmarks[i]

    def run(self,scale=1):
        """
        runs all benchmarks

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar, all benchmarks
                      are run with scale threads. If scale is a C{list}, the p-th problem in each of the benchmarks
                      in the suite is run with scale[p] threads. If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
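        # Illustrative example (hypothetical values): suite.run(scale=[1,2,0,4])
        # runs the first, second and fourth problem of each benchmark with 1, 2
        # and 4 threads respectively and skips the third problem, since scale[2]<1.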
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale=scale)

    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
            out+="<p>\n"
        if level==1:
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out


class Benchmark(object):
    """
    runs a set of similar L{BenchmarkProblem}s with a set of L{Options}
    """
    def __init__(self,name=None,description=None):
        """
        sets up a benchmark

        @param name: name of the benchmark. If no name is given the class name is used.
        @type name: C{str}
        @param description: description of the benchmark.
        @type description: C{str} or C{None}
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        returns the name of the benchmark

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addProblem(self,problem):
        """
        adds a problem to the benchmark

        @param problem: the new problem to be added to the benchmark
        @type problem: L{BenchmarkProblem}
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        adds options to the benchmark

        @param options: the new options to be added to the benchmark. If options==None they are ignored
        @type options: L{Options}
        """
        if options!=None: self.__options.append(options)

    def run(self,scale=1):
        """
        runs all problems with all options.

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar, all problems
                      are run with scale threads. If scale is a C{list}, the p-th problem
                      is run with scale[p] threads. If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        if isinstance(scale,list):
            c_max=min(len(scale),len(self.__problems))
        else:
            c_max=len(self.__problems)
        self.__scale=scale
        self.__results=[]
        for c in range(c_max):
            r=self.__problems[c]
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            if s>0:
                t0=time.time()
                print "%s with %s threads started."%(r.__class__,s)
                for p in self.__options:
                    setNumberOfThreads(s)
                    try:
                        row.append(r.run(p))
                    except:
                        # a failed run is recorded as None so the remaining options still run
                        traceback.print_exc(file=sys.stdout)
                        row.append(None)
                t0=time.time()-t0
                print "%s with %s threads finished (walltime =%s sec)."%(r.__class__,s,t0)
            self.__results.append(row)

    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
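        # The table uses two header rows: h0 holds a "Case" column (plus a
        # "Threads" column when scale is a list) and one header per options
        # set; h1 repeats the filter's result names underneath each options
        # set, with COLSPAN used when a filter returns more than one result.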
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            for o in self.__options:
                if len(rn)==0:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                    colspan=1
                elif len(rn)==1:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                    colspan=1
                else:
                    colspan=len(rn)
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(colspan,str(o))
                h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            c=0
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list):
                    out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c]
                for col in self.__results[r]:
                    if col==None:
                        # a failed run (recorded as None) fills the option's columns
                        out+="<TD ALIGN=\"center\" COLSPAN=%s>failed.</TD>"%colspan
                    else:
                        for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
                c+=1
            out+="</TABLE>"
        if level==1:
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out

class BenchmarkProblem(object):
    """
    something that can be run and returns a list of characteristics such as timing, Mflops, error, etc.
    """
    def __init__(self,name=None):
        """
        sets up a benchmark problem

        @param name: name of the problem. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkProblem,self).__init__()
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the problem

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def run(self,options=None):
        """
        runs the problem and returns a list of run characteristics

        @param options: the options that are used for the run. Note that the number of OpenMP threads is controlled
                        by the L{Benchmark} the problem is run in.
        @type options: L{Options}
        @return: run characteristics
        @rtype: any type that can be read by the L{BenchmarkFilter} applied to it.
        @remark: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError

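# A minimal sketch of a concrete problem (illustrative only; compare
# BenchmarkProblemTest1 under __main__ below): measure the walltime of some
# workload and return it as the run characteristic.
#
#     class MyProblem(BenchmarkProblem):
#         def run(self,options=None):
#             t0=time.time()
#             # ... set up and run the workload, honouring the given options ...
#             return time.time()-t0
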
class BenchmarkFilter(object):
    """
    object to filter the characteristics returned by L{Benchmark} runs.
    """
    def __init__(self):
        """
        sets up a filter
        """
        pass

    def getResultNames(self):
        """
        returns the names of the results produced when run() is called.

        @return: the list of names to be used when the results of the run() call are printed
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError

    def __call__(self,result):
        """
        filters the values returned as characteristics of a problem run

        @param result: values to be filtered
        @type result: any type that is produced by the L{BenchmarkProblem} it is applied to
        @return: a list of strings selected from result
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError

class Options(object):
    """
    defines a set of options to be used to run a L{BenchmarkProblem}
    """
    def __init__(self,name=None):
        """
        sets up the options

        @param name: name of the options. If no name is given the class name is used.
        @type name: C{str}
        """
        super(Options,self).__init__()
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the options

        @return: name
        @rtype: C{str}
        """
        return self.__name

if __name__=="__main__":

    class OptionsTest1(Options):
        pass
    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            import time
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            import time
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]
        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    bm=Benchmark("Example")
    bm.addProblem(BenchmarkProblemTest1())
    bm.addProblem(BenchmarkProblemTest2())
    bm.addOptions(OptionsTest1())
    bm.addOptions(OptionsTest2())

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    bms.run()
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=4)
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=[1,2])
    print bms.getHTML(filter=SimpleFilter())
