/[escript]/trunk/escript/py_src/benchmark.py
# $Id:$

#
# COPYRIGHT ACcESS 2004 - All Rights Reserved
#
# This software is the property of ACcESS. No part of this code
# may be copied in any form or by any means without the expressed written
# consent of ACcESS. Copying, use or modification of this software
# by any unauthorised person is illegal unless that
# person has a software license agreement with ACcESS.
#

"""
A simple framework to run benchmarks under OpenMP and to summarize the results in tables, for instance in HTML.

@var __author__: name of author
@var __licence__: licence agreement
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"
__licence__="contact: esys@access.uq.edu.au"
__url__="http://www.iservo.edu.au/esys/escript"
__version__="$Revision:$"
__date__="$Date:$"

import os,socket,time,sys

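# Overview: a BenchmarkSuite collects Benchmark objects. Each Benchmark runs its
# BenchmarkProblem instances against every registered Options set and records the
# returned characteristics; a BenchmarkFilter then turns those characteristics into
# the table entries of the HTML report produced by getHTML().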
class BenchmarkSuite(object):
    """
    framework to run a bunch of L{Benchmark}s with the objective of creating a table of statistics.

    @var MAX_LEVEL: maximum number of levels in headers for output
    """
    MAX_LEVEL=5
    def __init__(self,name=None):
        """
        sets up a suite of benchmarks

        @param name: name of the benchmark suite. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the benchmark suite

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addBenchmark(self,benchmark):
        """
        adds a L{Benchmark} to the suite

        @param benchmark: the new L{Benchmark} to be added to the suite
        @type benchmark: L{Benchmark}
        """
        self.__benchmarks.append(benchmark)

    def __len__(self):
        """
        returns the number of benchmarks in the suite

        @return: number of benchmarks
        @rtype: C{int}
        """
        return len(self.__benchmarks)

    def __getitem__(self,i):
        """
        returns the i-th benchmark in the suite through self[i]

        @param i: index of the requested benchmark
        @type i: C{int}
        @return: i-th benchmark
        @rtype: L{Benchmark}
        """
        return self.__benchmarks[i]

    def run(self,scale=1):
        """
        runs all benchmarks

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar, all benchmarks
        are run with scale threads. If scale is a C{list}, the p-th problem in each of the benchmarks
        in the suite is run with scale[p] threads. In this case, len(scale) has to be greater than or equal
        to the number of problems in the largest benchmark of the suite.
        @type scale: C{int} or C{list} of C{int}s.
        """
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale=scale)
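
    # getHTML recurses into each benchmark's own getHTML, nesting its report one
    # heading level deeper than the suite heading (capped at MAX_LEVEL).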
    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
            out+="<p>\n"
        if level==1:
            out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out


class Benchmark(object):
    """
    runs a bunch of similar L{BenchmarkProblem}s with a bunch of L{Options}
    """
    def __init__(self,name=None,description=None):
        """
        sets up a benchmark

        @param name: name of the benchmark. If no name is given the class name is used.
        @type name: C{str}
        @param description: description of the benchmark.
        @type description: C{str} or C{None}
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        returns the name of the benchmark

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addProblem(self,problem):
        """
        adds a problem to the benchmark

        @param problem: the new problem to be added to the benchmark
        @type problem: L{BenchmarkProblem}
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        adds a set of options to the benchmark

        @param options: the new options to be added to the benchmark
        @type options: L{Options}
        """
        self.__options.append(options)

    def run(self,scale=1):
        """
        runs all problems with all options.

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar, all problems
        are run with scale threads. If scale is a C{list}, the p-th problem of the benchmark is run with
        scale[p] threads. In this case, len(scale) has to be greater than or equal to the number of problems
        in the benchmark.
        @type scale: C{int} or C{list} of C{int}s.
        """
        if isinstance(scale,list):
            if len(scale)<len(self.__problems):
                raise ValueError,"scale list is too small. It must have at least as many entries as there are problems in the benchmark."
        self.__scale=scale
        self.__results=[]
        c=0
        for r in self.__problems:
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            for p in self.__options:
                os.environ['OMP_NUM_THREADS']=str(s)
                row.append(r.run(p))
            self.__results.append(row)
            c+=1
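
    # The results of the last run are kept as a nested list, one row per problem and
    # one entry per Options set; getHTML below emits one table cell for every string
    # the filter extracts from each entry.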
    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            for o in self.__options:
                if len(rn)==0:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                elif len(rn)==1:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                    empty_h1=False
                else:
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(len(rn),str(o))
                h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            c=0
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list): out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c]
                for col in self.__results[r]:
                    for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
                c+=1
            out+="</TABLE>"
        if level==1:
            out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out

class BenchmarkProblem(object):
    """
    something that can be run and returns a list of characteristics such as timing, Mflops, error, etc.
    """
    def __init__(self,name=None):
        """
        sets up a benchmark problem

        @param name: name of the problem. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkProblem,self).__init__()
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the problem

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def run(self,options=None):
        """
        runs the problem and returns a list of run characteristics

        @param options: the options that are used for the run. Note that the number of OpenMP threads is
        controlled by the L{Benchmark} the problem is run in.
        @type options: L{Options}
        @return: run characteristics
        @rtype: any type that can be read by the L{BenchmarkFilter} applied to it.
        @remark: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError
        return []

class BenchmarkFilter(object):
    """
    object to filter the characteristics returned by L{Benchmark} runs.
    """
    def __init__(self):
        """
        sets up a filter
        """
        pass

    def getResultNames(self):
        """
        returns the names of the results produced when run() is called.

        @return: the list of names to be used when the results of the run() call are printed
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError
        return []

    def __call__(self,result):
        """
        filters the values returned as characteristics of a problem run

        @param result: values to be filtered
        @type result: any type that is produced by the L{BenchmarkProblem} it is applied to
        @return: a list of strings selected from result
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError
        return []

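# Note: getResultNames() and __call__() should return lists of the same length, as
# Benchmark.getHTML emits one header column per result name and one table cell per
# filtered value.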

class Options(object):
    """
    defines a set of options to be used to run a L{BenchmarkProblem}
    """
    def __init__(self,name=None):
        """
        sets up the options

        @param name: name of the options. If no name is given the class name is used.
        @type name: C{str}
        """
        super(Options,self).__init__()
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the options

        @return: name
        @rtype: C{str}
        """
        return self.__name

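# The block below is a small self-test: it defines two trivial problems, two empty
# Options sets and a simple filter, and prints the resulting HTML reports when the
# module is executed directly.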
if __name__=="__main__":

    class OptionsTest1(Options):
        pass
    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            import time
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            import time
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]
        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    bm=Benchmark("Example")
    bm.addProblem(BenchmarkProblemTest1())
    bm.addProblem(BenchmarkProblemTest2())
    bm.addOptions(OptionsTest1())
    bm.addOptions(OptionsTest2())

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    bms.run()
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=4)
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=[1,2])
    print bms.getHTML(filter=SimpleFilter())
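
    # Running this file directly prints the three HTML reports above to stdout: one
    # with the default thread count, one with 4 threads, and one with per-problem
    # thread counts [1,2]. For example,
    #   python benchmark.py > results.html
    # collects them in a single file that can be viewed in a browser.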
