# $Id:$

#
# COPYRIGHT ACcESS 2004 - All Rights Reserved
#
# This software is the property of ACcESS. No part of this code
# may be copied in any form or by any means without the expressed written
# consent of ACcESS. Copying, use or modification of this software
# by any unauthorised person is illegal unless that
# person has a software license agreement with ACcESS.
#

"""
A simple framework to run benchmarks under OpenMP and to summarize the results in tables, for instance in HTML.

@var __author__: name of author
@var __licence__: licence agreement
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"
__licence__="contact: esys@access.uq.edu.au"
__url__="http://www.iservo.edu.au/esys/escript"
__version__="$Revision:$"
__date__="$Date:$"

import os,socket,time,sys

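# A minimal usage sketch (it mirrors the self-test in the __main__ block at the
# end of this file; MyProblem and MyOptions are hypothetical user-defined
# subclasses of BenchmarkProblem and Options):
#
#     bm=Benchmark("my benchmark")
#     bm.addProblem(MyProblem())
#     bm.addOptions(MyOptions())
#     suite=BenchmarkSuite("my suite")
#     suite.addBenchmark(bm)
#     suite.run(scale=4)          # run each problem with 4 OpenMP threads
#     print suite.getHTML()       # render the collected results as an HTML table
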
class BenchmarkSuite(object):
    """
    framework to run a bunch of L{Benchmark}s with the aim of creating a table of statistics.
    @var MAX_LEVEL: maximum header level used in the output
    """
    MAX_LEVEL=5
    def __init__(self,name=None):
        """
        sets up a suite of benchmarks

        @param name: name of the benchmark suite. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the benchmark suite

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addBenchmark(self,benchmark):
        """
        adds a L{Benchmark} to the suite

        @param benchmark: the new benchmark to be added to the suite
        @type benchmark: L{Benchmark}
        """
        self.__benchmarks.append(benchmark)

    def __len__(self):
        """
        returns the number of benchmarks in the suite

        @return: number of benchmarks
        @rtype: C{int}
        """
        return len(self.__benchmarks)

    def __getitem__(self,i):
        """
        returns the i-th benchmark in the suite through self[i]

        @param i: index of the requested benchmark
        @type i: C{int}
        @return: i-th benchmark
        @rtype: L{Benchmark}
        """
        return self.__benchmarks[i]

    def run(self,scale=1):
        """
        runs all benchmarks

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar, all benchmarks
        are run with scale threads. If scale is a C{list}, the p-th problem of each benchmark
        in the suite is run with scale[p] threads. In this case, len(scale) has to be greater than or equal to
        the number of problems in the largest benchmark of the suite.
        @type scale: C{int} or C{list} of C{int}s.
        """
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale)

    def getHTML(self,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(min(level+1,self.MAX_LEVEL))
            out+="<p>\n"
        if level==1:
            out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out


class Benchmark(object):
    """
    runs a bunch of similar L{BenchmarkProblem}s with a bunch of L{Options}
    """
    def __init__(self,name=None,description=None):
        """
        sets up a benchmark

        @param name: name of the benchmark. If no name is given the class name is used.
        @type name: C{str}
        @param description: description of the benchmark.
        @type description: C{str} or C{None}
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        returns the name of the benchmark

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addProblem(self,problem):
        """
        adds a problem to the benchmark

        @param problem: the new problem to be added to the benchmark
        @type problem: L{BenchmarkProblem}
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        adds a set of options to the benchmark

        @param options: the new options to be added to the benchmark
        @type options: L{Options}
        """
        self.__options.append(options)

    def run(self,scale=1):
        """
        runs all problems with all options.

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar, all problems
        are run with scale threads. If scale is a C{list}, the p-th problem of the benchmark is run with
        scale[p] threads. In this case, len(scale) has to be greater than or equal to the number of problems
        in the benchmark.
        @type scale: C{int} or C{list} of C{int}s.
        """
        if isinstance(scale,list):
            if len(scale)<len(self.__problems):
                raise ValueError,"scale list is too small. It must have at least one entry for each problem in the benchmark."
        self.__scale=scale
        self.__results=[]
        c=0
        for r in self.__problems:
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            for p in self.__options:
                # set the number of OpenMP threads for this run
                os.putenv("OMP_NUM_THREADS",str(s))
                row.append(r.run(p))
            self.__results.append(row)
            c+=1

    def getHTML(self,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            # build the two header rows: one for the option names, one for the result names
            h1_seg=""
            rn=self.__problems[0].getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            for o in self.__options:
                if len(rn)==0:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                elif len(rn)==1:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                else:
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(len(rn),str(o))
                    h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            # one table row per problem
            c=0
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list): out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c]
                for col in self.__results[r]:
                    for e in col: out+="<TD ALIGN=\"right\">%s</TD>"%str(e)
                out+="</TR>\n"
                c+=1
            out+="</TABLE>"
        if level==1:
            out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out

class BenchmarkProblem(object):
    """
    something that can be run and returns a list of characteristics such as timing, Mflops, error, etc.
    """
    def __init__(self,name=None):
        """
        sets up a benchmark problem

        @param name: name of the problem. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkProblem,self).__init__()
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def getResultNames(self):
        """
        returns the names of the results produced when run() is called.

        @return: list of names to be used when the results of the run() call are printed
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError
        return []

    def __str__(self):
        """
        returns the name of the benchmark problem

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def run(self,options=None):
        """
        runs the problem and returns a list of run characteristics

        @param options: the options that are used for the run. Note that the number of OpenMP threads is controlled
        by the L{Benchmark} the problem is run in.
        @type options: L{Options}
        @return: list of run characteristics
        @rtype: C{list}
        @remark: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError
        return []

class Options(object):
    """
    defines a set of options to be used to run a L{BenchmarkProblem}
    """
    def __init__(self,name=None):
        """
        sets up the options

        @param name: name of the options set. If no name is given the class name is used.
        @type name: C{str}
        """
        super(Options,self).__init__()
        if name==None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the options set

        @return: name
        @rtype: C{str}
        """
        return self.__name

if __name__=="__main__":

    class OptionsTest1(Options):
        pass
    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest(BenchmarkProblem):
        def __init__(self,name=None):
            super(BenchmarkProblemTest,self).__init__(name)
        def getResultNames(self):
            return ["r0","r1"]

    class BenchmarkProblemTest1(BenchmarkProblemTest):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            import time
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblemTest):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            import time
            return -time.time(),"B"

    bm=Benchmark("Example")
    bm.addProblem(BenchmarkProblemTest1())
    bm.addProblem(BenchmarkProblemTest2())
    bm.addOptions(OptionsTest1())
    bm.addOptions(OptionsTest2())

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    bms.run()
    print bms.getHTML()

    bms.run(scale=4)
    print bms.getHTML()

    bms.run(scale=[1,2])
    print bms.getHTML()
