# $Id:$

#
# COPYRIGHT ACcESS 2004 - All Rights Reserved
#
# This software is the property of ACcESS. No part of this code
# may be copied in any form or by any means without the expressed written
# consent of ACcESS. Copying, use or modification of this software
# by any unauthorised person is illegal unless that
# person has a software license agreement with ACcESS.
#

"""
A simple framework to run benchmarks under OpenMP and to summarize the results in tables, for instance in HTML.

@var __author__: name of author
@var __licence__: licence agreement
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"
__licence__="contact: esys@access.uq.edu.au"
__url__="http://www.iservo.edu.au/esys/escript"
__version__="$Revision:$"
__date__="$Date:$"

import os,socket,time,sys

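# Typical usage (a minimal sketch; the framework classes below are defined in
# this module, but MyProblem, MyOptions and MyFilter are assumed to be
# user-defined subclasses, see the example in the __main__ section at the end
# of this file):
#
#   bm=Benchmark("my benchmark")
#   bm.addProblem(MyProblem())      # a user-defined BenchmarkProblem subclass
#   bm.addOptions(MyOptions())      # a user-defined Options subclass
#   suite=BenchmarkSuite("my suite")
#   suite.addBenchmark(bm)
#   suite.run(scale=4)              # run with 4 OpenMP threads
#   print suite.getHTML(filter=MyFilter())
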
class BenchmarkSuite(object):
    """
    framework to run a bunch of L{Benchmark}s with the objective of creating a table of statistics.

    @var MAX_LEVEL: maximum number of levels in headers for output
    """
    MAX_LEVEL=5
    def __init__(self,name=None):
        """
        sets up a suite of benchmarks

        @param name: name of the benchmark suite. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the benchmark suite

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addBenchmark(self,benchmark):
        """
        adds a L{Benchmark} to the suite

        @param benchmark: the new L{Benchmark} to be added to the suite
        @type benchmark: L{Benchmark}
        """
        self.__benchmarks.append(benchmark)

    def __len__(self):
        """
        returns the number of benchmarks in the suite

        @return: number of benchmarks
        @rtype: C{int}
        """
        return len(self.__benchmarks)

    def __getitem__(self,i):
        """
        returns the i-th benchmark in the suite through self[i]

        @param i: index of the requested benchmark
        @type i: C{int}
        @return: i-th benchmark
        @rtype: L{Benchmark}
        """
        return self.__benchmarks[i]

    def run(self,scale=1):
        """
        runs all benchmarks

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar, all benchmarks
                      are run with scale threads. If scale is a C{list}, the p-th problem in each of the benchmarks
                      in the suite is run with scale[p] threads. In this case, len(scale) has to be greater than or
                      equal to the number of problems in the largest benchmark of the suite.
        @type scale: C{int} or C{list} of C{int}s.
        """
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale=scale)

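    # Example (illustrative only): suite.run(scale=4) uses 4 threads for every
    # problem, while suite.run(scale=[1,2,4]) runs the first problem of each
    # benchmark with 1 thread, the second with 2 and the third with 4.
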
    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
            out+="</p>\n"
        if level==1:
            out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out


class Benchmark(object):
    """
    runs a bunch of similar L{BenchmarkProblem}s with a bunch of L{Options}
    """
    def __init__(self,name=None,description=None):
        """
        sets up a benchmark

        @param name: name of the benchmark. If no name is given the class name is used.
        @type name: C{str}
        @param description: description of the benchmark.
        @type description: C{str} or C{None}
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        returns the name of the benchmark

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addProblem(self,problem):
        """
        adds a problem to the benchmark

        @param problem: the new problem to be added to the benchmark
        @type problem: L{BenchmarkProblem}
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        adds a set of options to the benchmark

        @param options: the new set of options to be added to the benchmark
        @type options: L{Options}
        """
        self.__options.append(options)

    def run(self,scale=1):
        """
        runs all problems with all options.

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar, all problems
                      are run with scale threads. If scale is a C{list}, the p-th problem of the benchmark
                      is run with scale[p] threads. In this case, len(scale) has to be greater than or equal
                      to the number of problems in the benchmark.
        @type scale: C{int} or C{list} of C{int}s.
        """
        if isinstance(scale,list):
            if len(scale)<len(self.__problems):
                raise ValueError,"scale list is too small. It must have at least as many entries as there are problems in the benchmark."
        self.__scale=scale
        self.__results=[]
        c=0
        for r in self.__problems:
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            for p in self.__options:
                os.putenv("OMP_NUM_THREADS",str(s))
                row.append(r.run(p))
            self.__results.append(row)
            c+=1

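    # Note (illustrative): the requested thread count is passed on through the
    # OMP_NUM_THREADS environment variable; whether it takes effect depends on
    # when the underlying OpenMP runtime reads that variable. For example,
    #
    #   bm.run(scale=[1,2])   # first problem on 1 thread, second on 2 threads
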
    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            # assemble the two header rows: h0 holds the option names, h1 the result names
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            for o in self.__options:
                if len(rn)==0:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                elif len(rn)==1:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                else:
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(len(rn),str(o))
                h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            # one table row per problem, with the filtered results of each option
            c=0
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list): out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c]
                for col in self.__results[r]:
                    for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
                c+=1
            out+="</TABLE>"
        if level==1:
            out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out

class BenchmarkProblem(object):
    """
    something that can be run and returns a list of run characteristics such as timing, Mflops, error, etc.
    """
    def __init__(self,name=None):
        """
        sets up a benchmark problem

        @param name: name of the problem. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkProblem,self).__init__()
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the problem

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def run(self,options=None):
        """
        runs the problem and returns a list of run characteristics

        @param options: the options that are used for the run. Note that the number of OpenMP threads is controlled
                        by the L{Benchmark} the problem is run in.
        @type options: L{Options}
        @return: run characteristics
        @rtype: any type that can be read by the L{BenchmarkFilter} applied to it.
        @remark: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError
        return []

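# A minimal sketch of a user-defined problem (hypothetical; LaplaceTest and the
# commented solve step are placeholders for whatever is actually benchmarked):
#
#   class LaplaceTest(BenchmarkProblem):
#       def run(self,options=None):
#           t0=time.time()
#           # ... set up and solve the problem using the given options ...
#           return time.time()-t0
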
class BenchmarkFilter(object):
    """
    object to filter the characteristics returned by L{BenchmarkProblem} runs.
    """
    def __init__(self):
        """
        sets up a filter
        """
        pass

    def getResultNames(self):
        """
        returns the names of the results produced by the filter.

        @return: the list of names to be used when the results of a run are printed
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError
        return []

    def __call__(self,result):
        """
        filters the values returned as characteristics of a problem run

        @param result: values to be filtered
        @type result: any type that is produced by the L{BenchmarkProblem} it is applied to
        @return: a list of strings selected from result
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError
        return []

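# A minimal sketch of a user-defined filter (hypothetical; it assumes that the
# corresponding problem returns a (time, error) tuple from its run() method):
#
#   class TimeAndErrorFilter(BenchmarkFilter):
#       def getResultNames(self):
#           return ["time [sec]","error"]
#       def __call__(self,result):
#           return [str(result[0]),str(result[1])]
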
class Options(object):
    """
    defines a set of options to be used to run a L{BenchmarkProblem}
    """
    def __init__(self,name=None):
        """
        sets up the options

        @param name: name of the options. If no name is given the class name is used.
        @type name: C{str}
        """
        super(Options,self).__init__()
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the options

        @return: name
        @rtype: C{str}
        """
        return self.__name

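# An Options subclass typically just carries attributes that the problem's
# run() method inspects (a sketch; the attribute names are made up here):
#
#   class PCGOptions(Options):
#       def __init__(self):
#           super(PCGOptions,self).__init__(name="PCG")
#           self.solver="PCG"
#           self.tolerance=1.e-8
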
if __name__=="__main__":

    class OptionsTest1(Options):
        pass
    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            import time
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            import time
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]
        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    bm=Benchmark("Example")
    bm.addProblem(BenchmarkProblemTest1())
    bm.addProblem(BenchmarkProblemTest2())
    bm.addOptions(OptionsTest1())
    bm.addOptions(OptionsTest2())

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    bms.run()
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=4)
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=[1,2])
    print bms.getHTML(filter=SimpleFilter())
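
# The report could also be written to a file instead of being printed to
# stdout, e.g. (a hypothetical variation on the example above):
#
#   f=open("benchmark_report.html","w")
#   f.write(bms.getHTML(filter=SimpleFilter()))
#   f.close()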
