trunk/escript/py_src/benchmark.py
# $Id:$
#
# COPYRIGHT ACcESS 2004 - All Rights Reserved
#
# This software is the property of ACcESS. No part of this code
# may be copied in any form or by any means without the expressed written
# consent of ACcESS. Copying, use or modification of this software
# by any unauthorised person is illegal unless that
# person has a software license agreement with ACcESS.
#

"""
A simple framework to run benchmarks under OpenMP and to summarize the results
in tables, for instance in HTML.

@var __author__: name of author
@var __licence__: licence agreement
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"
__licence__="contact: esys@access.uq.edu.au"
__url__="http://www.iservo.edu.au/esys/escript"
__version__="$Revision:$"
__date__="$Date:$"
import os,socket,time,sys
from esys.escript import setNumberOfThreads

class BenchmarkSuite(object):
    """
    framework to run a set of L{Benchmark}s with the objective of creating a table of statistics.

    @var MAX_LEVEL: maximum number of levels in headers for output
    """
    MAX_LEVEL=5
    def __init__(self,name=None):
        """
        sets up a suite of benchmarks

        @param name: name of the benchmark suite. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the benchmark suite

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addBenchmark(self,benchmark):
        """
        adds a L{Benchmark} to the suite

        @param benchmark: the new L{Benchmark} to be added to the suite
        @type benchmark: L{Benchmark}
        """
        self.__benchmarks.append(benchmark)

    def __len__(self):
        """
        returns the number of benchmarks in the suite

        @return: number of benchmarks
        @rtype: C{int}
        """
        return len(self.__benchmarks)

    def __getitem__(self,i):
        """
        returns the i-th benchmark in the suite through self[i]

        @param i: index of the requested benchmark
        @type i: C{int}
        @return: i-th benchmark
        @rtype: L{Benchmark}
        """
        return self.__benchmarks[i]

    def run(self,scale=1):
        """
        runs all benchmarks

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar,
        all benchmarks are run with scale threads. If scale is a C{list}, the p-th problem in
        each of the benchmarks in the suite is run with scale[p] threads. If scale[p]<1 the
        p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale=scale)

    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
            out+="<p>\n"
        if level==1:
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out

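# Usage sketch (an illustration added here, not part of the original module):
# a small helper that writes a suite's report to a file. "suite" is a
# BenchmarkSuite, "filter" an instance of a BenchmarkFilter subclass, and the
# file name is arbitrary.
def writeHTMLReport(suite,filter,filename="report.html"):
    f=open(filename,"w")
    f.write(suite.getHTML(filter=filter))
    f.close()
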
class Benchmark(object):
    """
    runs a set of similar L{BenchmarkProblem}s with a set of L{Options}
    """
    def __init__(self,name=None,description=None):
        """
        sets up a benchmark

        @param name: name of the benchmark. If no name is given the class name is used.
        @type name: C{str}
        @param description: description of the benchmark.
        @type description: C{str} or C{None}
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        returns the name of the benchmark

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addProblem(self,problem):
        """
        adds a problem to the benchmark

        @param problem: the new problem to be added to the benchmark
        @type problem: L{BenchmarkProblem}
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        adds a set of options to the benchmark

        @param options: the new options to be added to the benchmark
        @type options: L{Options}
        """
        self.__options.append(options)

    def run(self,scale=1):
        """
        runs all problems with all options.

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar,
        all problems are run with scale threads. If scale is a C{list}, the p-th problem is run
        with scale[p] threads. If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        if isinstance(scale,list):
            c_max=min(len(scale),len(self.__problems))
        else:
            c_max=len(self.__problems)
        self.__scale=scale
        self.__results=[]
        for c in range(c_max):
            r=self.__problems[c]
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            if s>0:
                print "run %s with %s threads"%(r.__class__,s)
                for p in self.__options:
                    setNumberOfThreads(s)
                    row.append(r.run(p))
            self.__results.append(row)

    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            for o in self.__options:
                if len(rn)==0:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                elif len(rn)==1:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                else:
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(len(rn),str(o))
                    h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            c=0
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list): out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c]
                for col in self.__results[r]:
                    for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
                c+=1
            out+="</TABLE>"
        if level==1:
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out

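# Illustrative sketch (added example, not part of the original module): run a
# benchmark with a per-problem thread list; entries smaller than 1 skip the
# corresponding problem.
def _example_scaled_run(benchmark):
    # problem 0 on 1 thread, problem 1 on 2 threads, problem 2 omitted
    benchmark.run(scale=[1,2,0])
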
class BenchmarkProblem(object):
    """
    something that can be run and returns a list of run characteristics such as timing, MFLOPS, error, etc.
    """
    def __init__(self,name=None):
        """
        sets up a benchmark problem

        @param name: name of the problem. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkProblem,self).__init__()
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the benchmark problem

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def run(self,options=None):
        """
        runs the problem and returns a list of run characteristics

        @param options: the options that are used for the run. Note that the number of OpenMP
        threads is controlled by the L{Benchmark} the problem is run in.
        @type options: L{Options}
        @return: run characteristics
        @rtype: any type that can be read by the L{BenchmarkFilter} applied to it.
        @remark: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError

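# Illustrative subclass (added example with hypothetical names): a problem that
# times a pure-Python matrix-vector product and returns the elapsed time
# together with a placeholder error of 0 (there is no reference solution here).
class _MatMulProblem(BenchmarkProblem):
    def run(self,options=None):
        import time
        n=300
        A=[[1.0]*n for i in range(n)]
        x=[1.0]*n
        t0=time.time()
        y=[sum([A_i[j]*x[j] for j in range(n)]) for A_i in A]
        return time.time()-t0,0.0
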
class BenchmarkFilter(object):
    """
    object to filter the characteristics returned by L{Benchmark} runs.
    """
    def __init__(self):
        """
        sets up a filter
        """
        pass

    def getResultNames(self):
        """
        returns the names of the results produced when run() is called.

        @return: the list of names to be used when the results of the run() call are printed
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError

    def __call__(self,result):
        """
        filters the values returned as characteristics of a problem run

        @param result: values to be filtered
        @type result: any type that is produced by the L{BenchmarkProblem} it is applied to
        @return: a list of strings selected from result
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError

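# Illustrative subclass (added example with hypothetical names): assumes the
# problem's run() returns a (time,error) pair, as _MatMulProblem above does,
# and formats both values for the result table.
class _TimeErrorFilter(BenchmarkFilter):
    def getResultNames(self):
        return ["time [sec]","error"]
    def __call__(self,result):
        return ["%.4e"%result[0],"%.4e"%result[1]]
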
class Options(object):
    """
    defines a set of options to be used to run a L{BenchmarkProblem}
    """
    def __init__(self,name=None):
        """
        sets up the options

        @param name: name of the options. If no name is given the class name is used.
        @type name: C{str}
        """
        super(Options,self).__init__()
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the options

        @return: name
        @rtype: C{str}
        """
        return self.__name

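# Illustrative subclass (added example with hypothetical names): options
# carrying solver settings that a BenchmarkProblem.run implementation may
# inspect; the name shown in table headers encodes the settings.
class _SolverOptions(Options):
    def __init__(self,tolerance=1.e-8,preconditioner="jacobi"):
        super(_SolverOptions,self).__init__(name="%s(tol=%s)"%(preconditioner,tolerance))
        self.tolerance=tolerance
        self.preconditioner=preconditioner
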
if __name__=="__main__":

    class OptionsTest1(Options):
        pass
    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            import time
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            import time
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]
        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    bm=Benchmark("Example")
    bm.addProblem(BenchmarkProblemTest1())
    bm.addProblem(BenchmarkProblemTest2())
    bm.addOptions(OptionsTest1())
    bm.addOptions(OptionsTest2())

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    bms.run()
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=4)
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=[1,2])
    print bms.getHTML(filter=SimpleFilter())
