/[escript]/trunk/escript/py_src/benchmark.py
ViewVC logotype

Contents of /trunk/escript/py_src/benchmark.py

Parent Directory Parent Directory | Revision Log Revision Log


Revision 4154 - (show annotations)
Tue Jan 22 09:30:23 2013 UTC (6 years, 7 months ago) by jfenwick
File MIME type: text/x-python
File size: 14586 byte(s)
Round 1 of copyright fixes
1
2 ##############################################################################
3 #
4 # Copyright (c) 2003-2013 by University of Queensland
5 # http://www.uq.edu.au
6 #
7 # Primary Business: Queensland, Australia
8 # Licensed under the Open Software License version 3.0
9 # http://www.opensource.org/licenses/osl-3.0.php
10 #
11 # Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 # Development since 2012 by School of Earth Sciences
13 #
14 ##############################################################################
15
16 __copyright__="""Copyright (c) 2003-2013 by University of Queensland
17 http://www.uq.edu.au
18 Primary Business: Queensland, Australia"""
19 __license__="""Licensed under the Open Software License version 3.0
20 http://www.opensource.org/licenses/osl-3.0.php"""
21 __url__="https://launchpad.net/escript-finley"
22
# $Id:$
24
25 """
26 A simple framework to run benchmarks under OpenMP and to summarize the results
27 in tables for instance in HTML
28
29 :var __author__: name of author
30 :var __license__: licence agreement
31 :var __copyright__: copyrights
32 :var __url__: url entry point on documentation
33 :var __version__: version
34 :var __date__: date of the version
35 """
36
37 __author__="Lutz Gross, l.gross@uq.edu.au"
38
39 import os,socket,time,sys,traceback
40 from esys.escript import setNumberOfThreads
41
class BenchmarkSuite(object):
    """
    Framework to run a bunch of `Benchmark` s using the object and creating a
    table of statistics.

    :cvar MAX_LEVEL: maximum number of level in headers for output
    """
    MAX_LEVEL=5

    def __init__(self,name=None):
        """
        Sets up a suite of benchmarks.

        :param name: name of the benchmark suite. If no name is given the class
                     name is used.
        :type name: ``str``
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        # fall back to the class name when no explicit name was supplied
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        Returns the name of the benchmark suite.

        :return: the name
        :rtype: ``str``
        """
        return self.__name

    def addBenchmark(self,benchmark):
        """
        Adds a new `Benchmark` to the suite.

        :param benchmark: the benchmark to add
        :type benchmark: `Benchmark`
        """
        self.__benchmarks.append(benchmark)

    def __len__(self):
        """
        Returns the number of benchmarks in the suite.

        :return: number of benchmarks
        :rtype: ``int``
        """
        return len(self.__benchmarks)

    def __getitem__(self,i):
        """
        Returns the i-th benchmark in the suite through self[i].

        :param i: index of the requested benchmark
        :type i: ``int``
        :return: i-th benchmark
        :rtype: `Benchmark`
        """
        return self.__benchmarks[i]

    def run(self,scale=1):
        """
        Runs all benchmarks.

        :param scale: defines the number of (OpenMP) threads to be used. If
                      ``scale`` is a scalar all benchmarks are run with ``scale``
                      number of threads. If ``scale`` is a ``list``, the p-th
                      problem in each of the benchmarks in the suite is run with
                      ``scale[p]`` threads. If ``scale[p]`` <1 the p-th problem is
                      omitted.
        :type scale: ``int`` or ``list`` of ``int``
        """
        # remember the scale so getHTML() can report the thread count
        self.__scale=scale
        for i in range(len(self)): self[i].run(scale=scale)

    def getHTML(self,filter,level=1):
        """
        Returns the results of the last benchmark run in HTML format.

        :param filter: filter to be applied to the results
        :type filter: `BenchmarkFilter`
        :param level: level used in header <H?> tags
        :type level: ``int``
        :return: HTML document
        :rtype: ``str``
        """
        out=""
        # level 1 means this call produces the complete document, so emit the
        # surrounding <HTML> skeleton, platform info and footer as well
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        for i in range(len(self)):
            out+="<p>\n"
            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
            out+="<p>\n"
        if level==1:
            # os.getlogin() raises OSError when there is no controlling
            # terminal (e.g. cron/batch jobs); fall back to a nameless footer
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out
153
154
class Benchmark(object):
    """
    Runs a bunch of similar `BenchmarkProblem` s with a bunch of `Options`.
    """
    def __init__(self,name=None,description=None):
        """
        Sets up a benchmark.

        :param name: name of the benchmark. If no name is given the class name
                     is used.
        :type name: ``str``
        :param description: description of the benchmark
        :type description: ``str`` or ``None``
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        Returns the name of the benchmark.

        :return: the name
        :rtype: ``str``
        """
        return self.__name

    def addProblem(self,problem):
        """
        Adds a problem to the benchmark.

        :param problem: the problem to be added
        :type problem: `BenchmarkProblem`
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        Adds options to the benchmark.

        :param options: the options to be added to the benchmark. If
                        options is None the options are left unchanged.
        :type options: `Options`
        """
        if options is not None: self.__options.append(options)

    def run(self,scale=1):
        """
        Runs all problems with all options.

        :param scale: defines the number of (OpenMP) threads to be used. If
                      ``scale`` is a scalar all benchmarks are run with ``scale``
                      number of threads. If ``scale`` is a ``list`` , the p-th
                      problem in each of the benchmarks in the suite is run with
                      ``scale[p]`` threads. If ``scale[p]`` <1 the p-th problem is
                      omitted.
        :type scale: ``int`` or ``list`` of ``int`` s
        """
        # with a per-problem scale list only as many problems can be run as
        # there are entries in the list
        if isinstance(scale,list):
            c_max=min(len(scale),len(self.__problems))
        else:
            c_max=len(self.__problems)
        self.__scale=scale
        self.__results=[]
        for c in range(c_max):
            r=self.__problems[c]
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            # a thread count below 1 means "skip this problem" (empty row)
            if s>0:
                t0=time.time()
                print("%s with %s threads started."%(r.__class__,s))
                for p in self.__options:
                    setNumberOfThreads(s)
                    # a failing problem must not abort the whole benchmark;
                    # record None and keep going (SystemExit/KeyboardInterrupt
                    # still propagate)
                    try:
                        row.append(r.run(p))
                    except Exception:
                        traceback.print_exc(file=sys.stdout)
                        row.append(None)
                t0=time.time()-t0
                print("%s with %s threads finished (walltime=%s sec)."%(r.__class__,s,t0))
            self.__results.append(row)

    def getHTML(self,filter,level=1):
        """
        Returns the results of the last benchmark run in HTML format.

        :param filter: filter to be applied to the results
        :type filter: `BenchmarkFilter`
        :param level: level used in header <H?> tags
        :type level: ``int``
        :return: HTML document
        :rtype: ``str``
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            # h1_seg is the per-option segment of the second header row,
            # listing the result names produced by the filter
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            for o in self.__options:
                # with fewer than two result columns each option occupies a
                # single table column; otherwise it spans one per result name
                if len(rn)<2:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                    colspan=1
                else:
                    colspan=len(rn)
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(colspan,str(o))
                h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            c=0
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list):
                    out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c]
                for col in self.__results[r]:
                    # None marks a run that raised an exception
                    if col is None:
                        out+="<TD ALIGN=\"center\" COLSPAN=%s>failed.</TD>"%colspan
                    else:
                        for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
                c+=1
            out+="</TABLE>"
        if level==1:
            # tolerate environments without a controlling terminal, matching
            # the footer handling in BenchmarkSuite.getHTML
            try:
                name=os.getlogin()
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out
310
class BenchmarkProblem(object):
    """
    Represents a benchmark problem that can be run and which returns a list of
    characteristics such as timing, MFlops, error, etc.
    """
    def __init__(self,name=None):
        """
        Sets up a benchmark problem.

        :param name: name of the problem. If no name is given the class name
                     is used.
        :type name: ``str``
        """
        super(BenchmarkProblem,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        Returns the name of the problem.

        :return: the name
        :rtype: ``str``
        """
        return self.__name

    def run(self,options=None):
        """
        Runs the problem and returns a list of run characteristics.

        :param options: the options that are used for the run. Note that the
                        number of OpenMP threads is controlled by the
                        `Benchmark` the problem is run in.
        :type options: `Options`
        :return: run characteristics
        :rtype: any type that can be read by the `BenchmarkFilter` applied
                to it
        :note: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError
354
class BenchmarkFilter(object):
    """
    Object to filter the characteristics returned by Benchmark runs.
    """
    def __init__(self):
        """
        Sets up a filter.
        """
        pass

    def getResultNames(self):
        """
        Returns the names of the results produced when ``run()`` is called.

        :return: the list of the names to be used when the results of
                 the ``run()`` call are printed
        :rtype: ``list`` of ``str``
        :note: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError

    def __call__(self,result):
        """
        Filters out results returned as characteristics of a problem run.

        :param result: values to be filtered
        :type result: any type that is produced by the `BenchmarkProblem`
                      it is applied to
        :return: a list of strings selected from result
        :rtype: ``list`` of ``str``
        :note: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError
391
392
class Options(object):
    """
    Defines a set of options to be used to run a `BenchmarkProblem`.
    """
    def __init__(self,name=None):
        """
        Sets up the options.

        :param name: name of the option. If no name is given the class name
                     is used.
        :type name: ``str``
        """
        super(Options,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        Returns the name of this options object.

        :return: the name
        :rtype: ``str``
        """
        return self.__name
419
if __name__=="__main__":

    # Minimal self-test: two trivial option sets, two trivial problems and a
    # filter that turns each (timestamp, tag) result into two strings.
    class OptionsTest1(Options):
        pass

    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            import time
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            import time
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]
        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    bm=Benchmark("Example")
    for problem in (BenchmarkProblemTest1(),BenchmarkProblemTest2()):
        bm.addProblem(problem)
    for opt in (OptionsTest1(),OptionsTest2()):
        bm.addOptions(opt)

    bms=BenchmarkSuite("A Test")
    bms.addBenchmark(bm)

    # exercise scalar scales as well as a per-problem scale list
    for scale in (1,4,[1,2]):
        bms.run(scale=scale)
        print(bms.getHTML(filter=SimpleFilter()))
464

  ViewVC Help
Powered by ViewVC 1.1.26