/[escript]/trunk/escript/py_src/benchmark.py
Revision 451 - Mon Jan 23 23:45:43 2006 UTC by gross
File MIME type: text/x-python
File size: 13884 byte(s)
Log message: a failing run is now caught.

# $Id:$

#
# COPYRIGHT ACcESS 2004 - All Rights Reserved
#
# This software is the property of ACcESS. No part of this code
# may be copied in any form or by any means without the expressed written
# consent of ACcESS. Copying, use or modification of this software
# by any unauthorised person is illegal unless that
# person has a software license agreement with ACcESS.
#

"""
A simple framework to run benchmarks under OpenMP and to summarize the results in tables, for instance in HTML.

@var __author__: name of author
@var __licence__: licence agreement
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"
__licence__="contact: esys@access.uq.edu.au"
__url__="http://www.iservo.edu.au/esys/escript"
__version__="$Revision:$"
__date__="$Date:$"

import os,socket,time,sys
from esys.escript import setNumberOfThreads

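# Typical use (a sketch only; the __main__ block at the end of this file is a
# complete, runnable example): subclass BenchmarkProblem (and, if needed,
# Options and BenchmarkFilter), collect problems and options in a Benchmark,
# add the benchmarks to a BenchmarkSuite, call run() and render the collected
# statistics with getHTML().
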
class BenchmarkSuite(object):
    """
    framework to run a set of L{Benchmark}s with the objective of creating a table of statistics.
    @var MAX_LEVEL: maximum number of levels in headers for output
    """
    MAX_LEVEL=5
    def __init__(self,name=None):
        """
        sets up a suite of benchmarks

        @param name: name of the benchmark suite. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the benchmark suite

        @return: name
        @rtype: C{str}
        """
        return self.__name
    def addBenchmark(self,benchmark):
        """
        adds a L{Benchmark} to the suite

        @param benchmark: adds a new L{Benchmark} to the suite
        @type benchmark: L{Benchmark}
        """
        self.__benchmarks.append(benchmark)
    def __len__(self):
        """
        returns the number of benchmarks in the suite

        @return: number of benchmarks
        @rtype: C{int}
        """
        return len(self.__benchmarks)
    def __getitem__(self,i):
        """
        returns the i-th benchmark in the suite through self[i]

        @param i: index of the requested benchmark
        @type i: C{int}
        @return: i-th benchmark
        @rtype: L{Benchmark}
        """
        return self.__benchmarks[i]
    def run(self,scale=1):
        """
        runs all benchmarks

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar all benchmarks
                      are run with scale number of threads. If scale is a C{list}, the p-th problem in each of the benchmarks
                      in the suite is run with scale[p] threads. If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale=scale)
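    # Example (sketch): for a suite "bms", bms.run(scale=[1,2,4]) runs the
    # first problem of every benchmark with 1 thread, the second with 2 and the
    # third with 4; further problems, and problems whose entry is < 1, are
    # omitted. "bms" is a hypothetical BenchmarkSuite instance.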
    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
            out+="<p>\n"
        if level==1:
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out


class Benchmark(object):
    """
    runs a set of similar L{BenchmarkProblem}s with a set of L{Options}
    """
    def __init__(self,name=None,description=None):
        """
        sets up a benchmark

        @param name: name of the benchmark. If no name is given the class name is used.
        @type name: C{str}
        @param description: description of the benchmark.
        @type description: C{str} or C{None}
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        returns the name of the benchmark

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addProblem(self,problem):
        """
        adds a problem to the benchmark

        @param problem: adds a new problem to the benchmark
        @type problem: L{BenchmarkProblem}
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        adds a set of options to the benchmark

        @param options: adds a new set of options to the benchmark
        @type options: L{Options}
        """
        self.__options.append(options)

    def run(self,scale=1):
        """
        runs all problems with all options.

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar all benchmarks
                      are run with scale number of threads. If scale is a C{list}, the p-th problem in each of the benchmarks
                      in the suite is run with scale[p] threads. If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        if isinstance(scale,list):
            c_max=min(len(scale),len(self.__problems))
        else:
            c_max=len(self.__problems)
        self.__scale=scale
        self.__results=[]
        for c in range(c_max):
            r=self.__problems[c]
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            if s>0:
                t0=time.time()
                print "%s with %s threads started."%(r.__class__,s)
                for p in self.__options:
                    setNumberOfThreads(s)
                    try:
                        row.append(r.run(p))
                    except:
                        row.append(None)
                t0=time.time()-t0
                print "%s with %s threads finished (walltime =%s sec)."%(r.__class__,s,t0)
            self.__results.append(row)
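    # The results of the last run() are kept as one row per problem; each row
    # holds one entry per set of options, with None marking a run that raised
    # an exception. getHTML() below renders such an entry as "failed.".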
    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            for o in self.__options:
                if len(rn)==0:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                    colspan=1
                elif len(rn)==1:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                    colspan=1
                    empty_h1=False
                else:
                    colspan=len(rn)
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(colspan,str(o))
                h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            c=0
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list):
                    out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c]
                for col in self.__results[r]:
                    if col==None:
                        out+="<TD ALIGN=\"center\" COLSPAN=%s>failed.</TD>"%colspan
                    else:
                        for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
                c+=1
            out+="</TABLE>"
        if level==1:
            out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out

class BenchmarkProblem(object):
    """
    something that can be run and returns a list of characteristics such as timing, Mflops, error, etc.
    """
    def __init__(self,name=None):
        """
        sets up a benchmark problem

        @param name: name of the problem. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkProblem,self).__init__()
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the benchmark problem

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def run(self,options=None):
        """
        runs the problem and returns a list of run characteristics

        @param options: the options that are used for the run. Note that the number of OpenMP threads is controlled
                        by the L{Benchmark} the problem is run in.
        @type options: L{Options}
        @return: run characteristics
        @rtype: any type that can be read by the L{BenchmarkFilter} applied to it.
        @remark: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError
        return []
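    # A minimal sketch of a concrete problem (hypothetical, for illustration;
    # BenchmarkProblemTest1/2 in the __main__ block below are runnable examples):
    #
    #   class MyProblem(BenchmarkProblem):
    #       def run(self,options=None):
    #           t0=time.time()
    #           # ... assemble and solve the actual problem, honouring "options" ...
    #           return time.time()-t0,"ok"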

class BenchmarkFilter(object):
    """
    object to filter the characteristics returned by L{Benchmark} runs.
    """
    def __init__(self):
        """
        sets up a filter
        """
        pass

    def getResultNames(self):
        """
        return the names of the results produced when run() is called.

        @return: the list of names to be used when the results of the run() call are printed
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError
        return []

    def __call__(self,result):
        """
        filters the values returned as characteristics of a problem run

        @param result: values to be filtered
        @type result: any type that is produced by the L{BenchmarkProblem} it is applied to
        @return: a list of strings selected from result
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError
        return []
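    # A concrete filter only has to turn whatever the problem's run() returned
    # into a list of strings, one entry per name reported by getResultNames();
    # SimpleFilter in the __main__ block below is a minimal working example.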


class Options(object):
    """
    defines a set of options to be used to run a L{BenchmarkProblem}
    """
    def __init__(self,name=None):
        """
        sets up the options

        @param name: name of the option. If no name is given the class name is used.
        @type name: C{str}
        """
        super(Options,self).__init__()
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
    def __str__(self):
        """
        returns the name of the options set

        @return: name
        @rtype: C{str}
        """
        return self.__name

if __name__=="__main__":

    class OptionsTest1(Options):
        pass
    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            import time
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            import time
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]
        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    bm=Benchmark("Example")
    bm.addProblem(BenchmarkProblemTest1())
    bm.addProblem(BenchmarkProblemTest2())
    bm.addOptions(OptionsTest1())
    bm.addOptions(OptionsTest2())

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    bms.run()
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=4)
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=[1,2])
    print bms.getHTML(filter=SimpleFilter())
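    # The report can also be written to a file; the file name below is an
    # arbitrary example, not prescribed by the module.
    report_file=open("benchmark_report.html","w")
    report_file.write(bms.getHTML(filter=SimpleFilter()))
    report_file.close()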
