/[escript]/trunk/escriptcore/py_src/benchmark.py
ViewVC logotype

Contents of /trunk/escriptcore/py_src/benchmark.py

Parent Directory Parent Directory | Revision Log Revision Log


Revision 5706 - (show annotations)
Mon Jun 29 03:41:36 2015 UTC (4 years, 2 months ago) by sshaw
File MIME type: text/x-python
File size: 14719 byte(s)
all python files now force use of python3 prints and division syntax to stop sneaky errors appearing in py3 environs
1
2 ##############################################################################
3 #
4 # Copyright (c) 2003-2015 by The University of Queensland
5 # http://www.uq.edu.au
6 #
7 # Primary Business: Queensland, Australia
8 # Licensed under the Open Software License version 3.0
9 # http://www.opensource.org/licenses/osl-3.0.php
10 #
11 # Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 # Development 2012-2013 by School of Earth Sciences
13 # Development from 2014 by Centre for Geoscience Computing (GeoComp)
14 #
15 ##############################################################################
16
17 from __future__ import print_function, division
18
19 __copyright__="""Copyright (c) 2003-2015 by The University of Queensland
20 http://www.uq.edu.au
21 Primary Business: Queensland, Australia"""
22 __license__="""Licensed under the Open Software License version 3.0
23 http://www.opensource.org/licenses/osl-3.0.php"""
24 __url__="https://launchpad.net/escript-finley"
25
# $Id:$
27
28 """
29 A simple framework to run benchmarks under OpenMP and to summarize the results
30 in tables for instance in HTML
31
32 :var __author__: name of author
33 :var __license__: licence agreement
34 :var __copyright__: copyrights
35 :var __url__: url entry point on documentation
36 :var __version__: version
37 :var __date__: date of the version
38 """
39
40 __author__="Lutz Gross, l.gross@uq.edu.au"
41
42 import os, socket, sys, time, traceback
43 from . import escriptcpp as esc
44
class BenchmarkSuite(object):
    """
    Framework to run a collection of `Benchmark` s and to render a table of
    statistics for the runs.

    :cvar MAX_LEVEL: deepest header level used in <H?> tags of the output
    """
    MAX_LEVEL=5

    def __init__(self,name=None):
        """
        Sets up a suite of benchmarks.

        :param name: name of the benchmark suite. If no name is given the
                     class name is used.
        :type name: ``str``
        """
        super(BenchmarkSuite,self).__init__()
        self.__benchmarks=[]
        self.__scale=1
        self.__name=self.__class__.__name__ if name is None else name

    def __str__(self):
        """
        Returns the name of the benchmark suite.

        :return: the name
        :rtype: ``str``
        """
        return self.__name

    def addBenchmark(self,benchmark):
        """
        Appends a new `Benchmark` to the suite.

        :param benchmark: the benchmark to add
        :type benchmark: `Benchmark`
        """
        self.__benchmarks.append(benchmark)

    def __len__(self):
        """
        Returns the number of benchmarks in the suite.

        :return: number of benchmarks
        :rtype: ``int``
        """
        return len(self.__benchmarks)

    def __getitem__(self,i):
        """
        Returns the i-th benchmark in the suite through self[i].

        :param i: index of the requested benchmark
        :type i: ``int``
        :return: i-th benchmark
        :rtype: `Benchmark`
        """
        return self.__benchmarks[i]

    def run(self,scale=1):
        """
        Runs all benchmarks.

        :param scale: defines the number of (OpenMP) threads to be used. If
                      ``scale`` is a scalar all benchmarks are run with
                      ``scale`` threads. If ``scale`` is a ``list``, the p-th
                      problem in each benchmark is run with ``scale[p]``
                      threads; problems with ``scale[p]`` <1 are omitted.
        :type scale: ``int`` or ``list`` of ``int``
        """
        self.__scale=scale
        for bm in self.__benchmarks:
            bm.run(scale=scale)

    def getHTML(self,filter,level=1):
        """
        Returns the results of the last benchmark run in HTML format.

        :param filter: filter to be applied to the results
        :type filter: `BenchmarkFilter`
        :param level: level used in header <H?> tags
        :type level: ``int``
        :return: HTML document
        :rtype: ``str``
        """
        parts=[]
        if level==1:
            parts.append("<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self))
        parts.append("<H%s>%s</H%s>\n"%(level,str(self),level))
        if level==1:
            # report the thread count only for a scalar scale > 1
            threads=""
            if isinstance(self.__scale,int) and self.__scale>1:
                threads=" (%s threads)"%self.__scale
            parts.append("<p>platform: %s%s</p>\n"%(socket.gethostname(),threads))
        sub_level=min(level+1,self.MAX_LEVEL)
        for bm in self.__benchmarks:
            parts.append("<p>\n")
            parts.append(bm.getHTML(filter=filter,level=sub_level))
            parts.append("<p>\n")
        if level==1:
            stamp=time.strftime('%X %x %Z')
            try:
                # os.getlogin() can raise OSError without a controlling terminal
                parts.append("<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),stamp))
            except OSError:
                parts.append("<hr><p align=\"center\">%s</p>\n"%stamp)
            parts.append("</BODY></HTML>\n")
        return "".join(parts)
156
157
class Benchmark(object):
    """
    Runs a bunch of similar `BenchmarkProblem` s with a bunch of `Options`.
    """
    def __init__(self,name=None,description=None):
        """
        Sets up a benchmark.

        :param name: name of the benchmark. If no name is given the class name
                     is used.
        :type name: ``str``
        :param description: description of the benchmark
        :type description: ``str`` or ``None``
        """
        super(Benchmark,self).__init__()
        self.__options=[]
        self.__problems=[]
        self.__results=[]
        self.__scale=1
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name
        self.__description=description

    def __str__(self):
        """
        Returns the name of the benchmark.

        :return: the name
        :rtype: ``str``
        """
        return self.__name

    def addProblem(self,problem):
        """
        Adds a problem to the benchmark.

        :param problem: the problem to be added
        :type problem: `BenchmarkProblem`
        """
        self.__problems.append(problem)

    def addOptions(self,options):
        """
        Adds options to the benchmark.

        :param options: the options to be added to the benchmark. If
                        options is ``None`` the options are left unchanged.
        :type options: `Options`
        """
        # identity test (was "options!=None") avoids invoking a custom __eq__
        if options is not None:
            self.__options.append(options)

    def run(self,scale=1):
        """
        Runs all problems with all options and collects the results.

        :param scale: defines the number of (OpenMP) threads to be used. If
                      ``scale`` is a scalar all problems are run with ``scale``
                      threads. If ``scale`` is a ``list``, the p-th problem is
                      run with ``scale[p]`` threads; problems with
                      ``scale[p]`` <1 are omitted.
        :type scale: ``int`` or ``list`` of ``int``
        """
        if isinstance(scale,list):
            c_max=min(len(scale),len(self.__problems))
        else:
            c_max=len(self.__problems)
        # NOTE: a stray "self.__filter=filter" used to live here, storing the
        # *builtin* filter; it was never read anywhere and has been removed.
        self.__scale=scale
        self.__results=[]
        for c in range(c_max):
            problem=self.__problems[c]
            if isinstance(scale,list):
                s=scale[c]
            else:
                s=scale
            row=[]
            if s>0:
                t0=time.time()
                print("%s with %s threads started."%(problem.__class__,s))
                for p in self.__options:
                    esc.setNumberOfThreads(s)
                    try:
                        row.append(p and problem.run(p) or problem.run(p))
                    except Exception:
                        # record a failed run as None but keep going; unlike a
                        # bare "except:" this does not swallow
                        # KeyboardInterrupt/SystemExit
                        traceback.print_exc(file=sys.stdout)
                        row.append(None)
                t0=time.time()-t0
                print("%s with %s threads finished (walltime=%s sec)."%(problem.__class__,s,t0))
            self.__results.append(row)

    def getHTML(self,filter,level=1):
        """
        Returns the results of the last benchmark run in HTML format.

        :param filter: filter to be applied to the results
        :type filter: `BenchmarkFilter`
        :param level: level used in header <H?> tags
        :type level: ``int``
        :return: HTML document
        :rtype: ``str``
        """
        out=""
        if level==1: out+="<HTML><HEAD><TITLE>Benchmark: %s</TITLE></HEAD><BODY>\n"%str(self)
        out+="<H%s>%s</H%s>\n"%(level,str(self),level)
        if level==1:
            m=""
            if isinstance(self.__scale,int):
                if self.__scale>1:
                    m=" (%s threads)"%self.__scale
            out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
        if self.__description: out+="<p>%s</p>\n"%str(self.__description)
        if len(self.__problems)>0:
            out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
            # second header row segment: one cell per result name (or one empty cell)
            h1_seg=""
            rn=filter.getResultNames()
            if len(rn)==0:
                h1_seg+="<TD></TD>"
            else:
                for n in rn: h1_seg+="<TD ALIGN=\"center\">%s</TD>"%n
            h0="<TR><TH ALIGN=\"center\" ROWSPAN=2>Case</TH>"
            h1="<TR>"
            if isinstance(self.__scale,list): h0+="<TH ALIGN=\"center\" ROWSPAN=2>Threads</TH>"
            colspan=1   # initialized so "failed." cells are well-defined even with no options
            for o in self.__options:
                if len(rn)<=1:
                    h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
                    colspan=1
                else:
                    colspan=len(rn)
                    h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(colspan,str(o))
                h1+=h1_seg
            out+=h0+"</TR>\n"+h1+"</TR>\n"
            for r in range(len(self.__results)):
                out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
                if isinstance(self.__scale,list):
                    out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[r]
                for col in self.__results[r]:
                    if col is None:
                        # run() records a failed problem/option pair as None
                        out+="<TD ALIGN=\"center\" COLSPAN=%s>failed.</TD>"%colspan
                    else:
                        for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
                out+="</TR>\n"
            out+="</TABLE>"
        if level==1:
            # os.getlogin() raises OSError without a controlling terminal
            # (e.g. batch jobs); fall back to the timestamp only, consistent
            # with BenchmarkSuite.getHTML in this file
            try:
                out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
            except OSError:
                out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
            out+="</BODY></HTML>\n"
        return out
313
class BenchmarkProblem(object):
    """
    Represents a benchmark problem that can be run and which returns a list of
    characteristics such as timing, MFlops, error, etc.
    """
    def __init__(self,name=None):
        """
        Sets up a benchmark problem.

        :param name: name of the problem. If no name is given the class name
                     is used.
        :type name: ``str``
        """
        super(BenchmarkProblem,self).__init__()
        if name is None:
            self.__name=self.__class__.__name__
        else:
            self.__name=name

    def __str__(self):
        """
        Returns the name of the problem.

        :return: the name
        :rtype: ``str``
        """
        return self.__name

    def run(self,options=None):
        """
        Runs the problem and returns a list of run characteristics.

        :param options: the options that are used for the run. Note that the
                        number of OpenMP threads is controlled by the
                        `Benchmark` the problem is run in.
        :type options: `Options`
        :return: run characteristics
        :rtype: any type that can be read by the `BenchmarkFilter` applied
                to it
        :note: this function has to be overwritten by a particular problem
        """
        # the unreachable "return []" that followed this raise was removed
        raise NotImplementedError
356 return []
357
class BenchmarkFilter(object):
    """
    Object to filter the characteristics returned by Benchmark runs.
    """
    def __init__(self):
        """
        Sets up a filter.
        """
        pass

    def getResultNames(self):
        """
        Returns the names of the results produced when ``run()`` is called.

        :return: the list of the names to be used when the results of
                 the ``run()`` call are printed
        :rtype: ``list`` of ``str``
        :note: this function has to be overwritten by a particular filter
        """
        # the unreachable "return []" that followed this raise was removed
        raise NotImplementedError

    def __call__(self,result):
        """
        Filters out results returned as characteristics of a problem run.

        :param result: values to be filtered
        :type result: any type that is produced by the `BenchmarkProblem`
                      it is applied to
        :return: a list of strings selected from result
        :rtype: ``list`` of ``str``
        :note: this function has to be overwritten by a particular filter
        """
        raise NotImplementedError
394
395
class Options(object):
    """
    Defines a set of options to be used to run a `BenchmarkProblem`.
    """
    def __init__(self,name=None):
        """
        Sets up the options.

        :param name: name of the option. If no name is given the class name
                     is used.
        :type name: ``str``
        """
        super(Options,self).__init__()
        self.__name=self.__class__.__name__ if name is None else name

    def __str__(self):
        """
        Returns the name of this options object.

        :return: the name
        :rtype: ``str``
        """
        return self.__name
422
if __name__=="__main__":

    # Minimal self-test: two trivial option sets, two trivial problems and a
    # filter that renders both components of each problem's result.
    class OptionsTest1(Options):
        pass

    class OptionsTest2(Options):
        pass

    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")

        def run(self,options=None):
            import time
            return time.time(),"A"

    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")

        def run(self,options=None):
            import time
            return -time.time(),"B"

    class SimpleFilter(BenchmarkFilter):
        def getResultNames(self):
            return ["r0","r1"]

        def __call__(self,result):
            return [str(result[0]),str(result[1])]

    example=Benchmark("Example")
    example.addProblem(BenchmarkProblemTest1())
    example.addProblem(BenchmarkProblemTest2())
    example.addOptions(OptionsTest1())
    example.addOptions(OptionsTest2())

    suite=BenchmarkSuite("A Test")
    suite.addBenchmark(example)

    # exercise scalar scale (default and explicit) and a per-problem list
    for n_threads in (1,4,[1,2]):
        suite.run(scale=n_threads)
        print(suite.getHTML(filter=SimpleFilter()))
467

  ViewVC Help
Powered by ViewVC 1.1.26