/trunk/escript/py_src/benchmark.py
Revision 387 (Tue Dec 20 00:37:20 2005 UTC) by gross: scale now allows switching certain problems on and off.
# $Id:$

#
# COPYRIGHT ACcESS 2004 - All Rights Reserved
#
# This software is the property of ACcESS. No part of this code
# may be copied in any form or by any means without the expressed written
# consent of ACcESS. Copying, use or modification of this software
# by any unauthorised person is illegal unless that
# person has a software license agreement with ACcESS.
#

"""
A simple framework to run benchmarks under OpenMP and to summarize the results in tables, for instance in HTML.

@var __author__: name of author
@var __licence__: licence agreement
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"
__licence__="contact: esys@access.uq.edu.au"
__url__="http://www.iservo.edu.au/esys/escript"
__version__="$Revision:$"
__date__="$Date:$"
import os,socket,time,sys

class BenchmarkSuite(object):
    """
    framework to run a set of L{Benchmark}s with the objective of creating a table of statistics.

    @var MAX_LEVEL: maximum number of levels in headers for output
    """
    MAX_LEVEL=5
    def __init__(self,name=None):
        """
        sets up a suite of benchmarks

        @param name: name of the benchmark suite. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the benchmark suite

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addBenchmark(self,benchmark):
        """
        adds a L{Benchmark} to the suite

        @param benchmark: the new L{Benchmark} to be added to the suite
        @type benchmark: L{Benchmark}
        """
        self.__benchmarks.append(benchmark)

    def __len__(self):
        """
        returns the number of benchmarks in the suite

        @return: number of benchmarks
        @rtype: C{int}
        """
        return len(self.__benchmarks)

    def __getitem__(self,i):
        """
        returns the i-th benchmark in the suite through self[i]

        @param i: index of the requested benchmark
        @type i: C{int}
        @return: i-th benchmark
        @rtype: L{Benchmark}
        """
        return self.__benchmarks[i]

    def run(self,scale=1):
        """
        runs all benchmarks

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar, all benchmarks
        are run with scale threads. If scale is a C{list}, the p-th problem in each of the benchmarks
        in the suite is run with scale[p] threads. If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale=scale)
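    # Usage sketch (hedged; the suite instance and the thread counts are
    # hypothetical): run the first problem of every benchmark serially,
    # the second with 4 threads, and omit the third:
    #
    #     suite.run(scale=[1,4,0])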
    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
            out+="<p>\n"
        if level==1:
            out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out
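    # Usage sketch (hedged; the suite, filter and file name are hypothetical):
    # at level 1 the returned string is a complete HTML document, so it can be
    # written straight to a file:
    #
    #     open("results.html","w").write(suite.getHTML(filter=myFilter))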


class Benchmark(object):
    """
    runs a set of similar L{BenchmarkProblem}s with a set of L{Options}
    """
    def __init__(self,name=None,description=None):
        """
        sets up a benchmark

        @param name: name of the benchmark. If no name is given the class name is used.
        @type name: C{str}
        @param description: description of the benchmark.
        @type description: C{str} or C{None}
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        returns the name of the benchmark

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def addProblem(self,problem):
        """
        adds a problem to the benchmark

        @param problem: the new problem to be added to the benchmark
        @type problem: L{BenchmarkProblem}
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        adds a set of options to the benchmark

        @param options: the new options to be added to the benchmark
        @type options: L{Options}
        """
        self.__options.append(options)

    def run(self,scale=1):
        """
        runs all problems with all options.

        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar, all problems
        are run with scale threads. If scale is a C{list}, the p-th problem is run with scale[p] threads.
        If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        if isinstance(scale,list):
            c_max=min(len(scale),len(self.__problems))
        else:
            c_max=len(self.__problems)
        self.__scale=scale
        self.__results=[]
        for c in range(c_max):
            r=self.__problems[c]
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            if s>0:
                for p in self.__options:
                    os.environ['OMP_NUM_THREADS']=str(s)
                    row.append(r.run(p))
            self.__results.append(row)
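    # Usage note (a hedged sketch; the benchmark instance and the thread
    # counts are hypothetical): OMP_NUM_THREADS is exported before each
    # problem run, so a list-valued scale can mix serial and threaded cases
    # within one benchmark:
    #
    #     bm.run(scale=[1,2,4])   # problem 0 serial, problem 1 with 2 threads,
    #                             # problem 2 with 4 threads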
    def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.

        @param filter: filter to be applied to the results
        @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
        @rtype: C{str}
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            for o in self.__options:
                if len(rn)<=1:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                else:
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(len(rn),str(o))
                    h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            c=0
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list): out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c]
                for col in self.__results[r]:
                    for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
                c+=1
            out+="</TABLE>"
        if level==1:
            out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out


class BenchmarkProblem(object):
    """
    something that can be run and that returns a list of characteristics such as timing, Mflops, error, etc.
    """
    def __init__(self,name=None):
        """
        sets up a benchmark problem

        @param name: name of the problem. If no name is given the class name is used.
        @type name: C{str}
        """
        super(BenchmarkProblem,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the problem

        @return: name
        @rtype: C{str}
        """
        return self.__name

    def run(self,options=None):
        """
        runs the problem and returns a list of run characteristics

        @param options: the options that are used for the run. Note that the number of OpenMP threads is controlled
        by the L{Benchmark} the problem is run in.
        @type options: L{Options}
        @return: run characteristics
        @rtype: any type that can be read by the L{BenchmarkFilter} applied to it.
        @remark: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError
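    # A minimal subclass sketch (the class name and the timed work are
    # hypothetical): report the elapsed wall-clock time as the single run
    # characteristic:
    #
    #     class MySolveProblem(BenchmarkProblem):
    #         def run(self,options=None):
    #             t0=time.time()
    #             # ... set up and solve the actual problem here ...
    #             return [time.time()-t0]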

class BenchmarkFilter(object):
    """
    object to filter the characteristics returned by L{Benchmark} runs.
    """
    def __init__(self):
        """
        sets up a filter
        """
        pass

    def getResultNames(self):
        """
        returns the names of the results produced when run() is called.

        @return: the list of names to be used when the results of the run() call are printed
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError

    def __call__(self,result):
        """
        filters the values returned as characteristics of a problem run

        @param result: values to be filtered
        @type result: any type that is produced by the L{BenchmarkProblem} it is applied to
        @return: a list of strings selected from result
        @rtype: C{list} of C{str}
        @remark: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError
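    # A minimal subclass sketch (names hypothetical): select only the first
    # characteristic, e.g. a timing, out of each result:
    #
    #     class TimeOnlyFilter(BenchmarkFilter):
    #         def getResultNames(self):
    #             return ["time/sec"]
    #         def __call__(self,result):
    #             return [str(result[0])]
    #
    # A complete example, SimpleFilter, appears in the __main__ section below.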


class Options(object):
    """
    defines a set of options to be used to run a L{BenchmarkProblem}
    """
    def __init__(self,name=None):
        """
        sets up the options

        @param name: name of the options. If no name is given the class name is used.
        @type name: C{str}
        """
        super(Options,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        returns the name of the options

        @return: name
        @rtype: C{str}
        """
        return self.__name
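    # A hedged sketch (class and attribute names are hypothetical): options are
    # typically subclassed with attributes that a problem's run() inspects,
    # while the name labels the table column:
    #
    #     class SolverOptions(Options):
    #         def __init__(self,tolerance=1.e-8):
    #             super(SolverOptions,self).__init__(name="tol=%g"%tolerance)
    #             self.tolerance=tolerance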

if __name__=="__main__":

    class OptionsTest1(Options):
        pass
    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            import time
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            import time
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]
        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    bm=Benchmark("Example")
    bm.addProblem(BenchmarkProblemTest1())
    bm.addProblem(BenchmarkProblemTest2())
    bm.addOptions(OptionsTest1())
    bm.addOptions(OptionsTest2())

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    bms.run()
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=4)
    print bms.getHTML(filter=SimpleFilter())

    bms.run(scale=[1,2])
    print bms.getHTML(filter=SimpleFilter())
