
Diff of /trunk/escript/py_src/benchmark.py


trunk/escript/py_src/benchmark.py, revision 364 by gross, Thu Dec 15 04:38:28 2005 UTC
temp/escript/py_src/benchmark.py, revision 1387 by trankine, Fri Jan 11 07:45:26 2008 UTC

Lines prefixed with "-" below were removed from revision 364, lines prefixed with "+" were added in revision 1387, and unprefixed lines are unchanged context.
# Line 1 (r364) / Line 1 (r1387)

  # $Id:$
- #
- #      COPYRIGHT ACcESS 2004 -  All Rights Reserved
- #
- #   This software is the property of ACcESS.  No part of this code
- #   may be copied in any form or by any means without the expressed written
- #   consent of ACcESS.  Copying, use or modification of this software
- #   by any unauthorised person is illegal unless that
- #   person has a software license agreement with ACcESS.
- #

  """
  A simple framework to run benchmarks under OPENMP and to summarize the results in tables, for instance in HTML

  @var __author__: name of author
- @var __licence__: licence agreement
- var __url__: url entry point on documentation
+ @var __license__: licence agreement
+ @var __copyright__: copyrights
+ @var __url__: url entry point on documentation
  @var __version__: version
  @var __date__: date of the version
  """

  __author__="Lutz Gross, l.gross@uq.edu.au"
- __licence__="contact: esys@access.uq.edu.au"
+ __copyright__="""  Copyright (c) 2006 by ACcESS MNRF
+                    http://www.access.edu.au
+                Primary Business: Queensland, Australia"""
+ __license__="""Licensed under the Open Software License version 3.0
+              http://www.opensource.org/licenses/osl-3.0.php"""
  __url__="http://www.iservo.edu.au/esys/escript"
- __version__="$Revision:$"
- __date__="$Date:$"
+ __version__="$Revision$"
+ __date__="$Date$"

- import os,socket,time,sys
+ import os,socket,time,sys,traceback
+ from esys.escript import setNumberOfThreads

  class BenchmarkSuite(object):
     """
     framework to run a bunch of L{Benchmark}s with the object to create a table of statistics.
-    @var MAX_LEVEL: maximum number of level in headers for output
+
+    @cvar MAX_LEVEL: maximum number of level in headers for output
     """
     MAX_LEVEL=5
     def __init__(self,name=None):
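To orient the reader, here is a minimal usage sketch of the r1387 interface as it appears in this diff. It is not part of either revision: the problem and filter classes are hypothetical, the module import path is assumed, and an esys.escript installation is required for the OpenMP thread control.

    # Hypothetical example assuming benchmark.py is importable as esys.escript.benchmark.
    from esys.escript.benchmark import BenchmarkSuite,Benchmark,BenchmarkProblem,\
                                       BenchmarkFilter,Options

    class MyProblem(BenchmarkProblem):
       def __init__(self):
           super(MyProblem,self).__init__(name="my problem")
       def run(self,options=None):
           import time
           t=time.time()
           s=sum(range(1000000))        # stand-in workload
           return time.time()-t,s       # any object the matching filter understands

    class MyFilter(BenchmarkFilter):
       def getResultNames(self):
           return ["time [sec]","result"]
       def __call__(self,result):
           return [str(result[0]),str(result[1])]

    bm=Benchmark("example")
    bm.addProblem(MyProblem())
    bm.addOptions(Options())            # at least one option set is needed, otherwise nothing is run
    suite=BenchmarkSuite("example suite")
    suite.addBenchmark(bm)
    suite.run(scale=1)                  # scale = number of OpenMP threads
    print suite.getHTML(filter=MyFilter())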
# Line 90 (r364) / Line 87 (r1387): class BenchmarkSuite(object)

 
        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar all benchmarks
                      are run with scale number of threads. If scale is a C{list}, the p-th problem in each of the benchmarks
-                     in the suite is run with scale[p] threads. In the case, len(scale) has to be less or equal to the
-                     largest benchmark in the suite.
+                     in the suite is run with scale[p] threads. If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        self.__scale=scale
-       for i in range(len(self)): self[i].run(scale)
+       for i in range(len(self)): self[i].run(scale=scale)
-   def getHTML(self,level=1):
+   def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.
 
+       @param filter: filter to be applied to the results
+       @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
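The scale handling documented above can be made concrete with a short, hypothetical call; suite is a BenchmarkSuite as in the sketch further up, and the thread counts are arbitrary.

    # every problem of every benchmark runs with 8 OpenMP threads
    suite.run(scale=8)
    print suite.getHTML(filter=MyFilter())

    # the p-th problem of each benchmark runs with scale[p] threads;
    # the entry 0 skips the third problem, and problems beyond len(scale)
    # are not run at all in r1387 (run() loops only up to c_max).
    suite.run(scale=[1,2,0,4])
    print suite.getHTML(filter=MyFilter())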
# Line 116 (r364) / Line 114 (r1387): class BenchmarkSuite(object)

             out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m)
         for i in range(len(self)):
             out+="<p>\n"
-            out+=self[i].getHTML(min(level+1,self.MAX_LEVEL))
+            out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL))
             out+="<p>\n"
         if level==1:
-            out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
+            try:
+               name=os.getlogin()
+               out+="<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
+            except OSError:
+               out+="<hr><p align=\"center\">%s</p>\n"%(time.strftime('%X %x %Z'))
+
             out+="</BODY></HTML>\n"
         return out
 
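The try/except introduced above protects getHTML() against os.getlogin() raising OSError when there is no controlling terminal (typical for cron or batch queue jobs). As a sketch of an alternative, not what the revision does, getpass.getuser() consults the LOGNAME/USER/USERNAME environment variables first and is therefore more forgiving:

    import getpass,time

    def footer():
        # hypothetical helper: build the same footer line without relying on a tty
        try:
            name=getpass.getuser()
            return "<hr><p align=\"center\">by %s at %s</p>\n"%(name,time.strftime('%X %x %Z'))
        except Exception:
            return "<hr><p align=\"center\">%s</p>\n"%time.strftime('%X %x %Z')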
# Line 166 (r364) / Line 169 (r1387): class Benchmark(object)

        """
        self.__problems.append(problem)
 
-   def addOptions(self,Options):
+   def addOptions(self,options):
        """
        adds options to the benchmark
 
-       @param options: adds a new option to the benchmark
+       @param options: adds a new option to the benchmark. If options==None it is ignored.
-       @type problem: L{Options}
+       @type options: L{Options}
        """
-       self.__options.append(Options)
+       if options!=None: self.__options.append(options)
 
    def run(self,scale=1):
        """
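Each option set added here later becomes one column group in the result table (one column per result name of the filter), and passing None is now silently ignored. A small hypothetical illustration, following the OptionsTest1/OptionsTest2 pattern used in the __main__ section further down:

    class PCG(Options):      # hypothetical option sets
        pass
    class GMRES(Options):
        pass

    bm=Benchmark("solvers")
    bm.addOptions(PCG())     # first column group of the HTML table
    bm.addOptions(GMRES())   # second column group
    bm.addOptions(None)      # ignored since r1387 instead of being appended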
# Line 182 (r364) / Line 185 (r1387): class Benchmark(object)

 
        @param scale: defines the number of (OpenMP) threads to be used. If scale is a scalar all benchmarks
                      are run with scale number of threads. If scale is a C{list}, the p-th problem in each of the benchmarks
-                     in the suite is run with scale[p] threads. In the case, len(scale) has to be less or equal to the
-                     largest benchmark in the suite.
+                     in the suite is run with scale[p] threads. If scale[p]<1 the p-th problem is omitted.
        @type scale: C{int} or C{list} of C{int}s.
        """
        if isinstance(scale,list):
-          if len(scale)<len(self.__problems):
-             raise ValueError,"scale list is too small. must be greater or equal to the number of problems in the benchmark"
+          c_max=min(len(scale),len(self.__problems))
+       else:
+          c_max=len(self.__problems)
+       self.__filter=filter
        self.__scale=scale
        self.__results=[]
-       c=0
-       for r in self.__problems:
+       for c in range(c_max):
+          r=self.__problems[c]
           if isinstance(scale,list):
              s=scale[c]
           else:
              s=scale
           row=[]
-          for p in self.__options:
-             os.putenv("OMP_NUM_TREADS",str(s))
-             row.append(r.run(p))
+          if s>0:
+             t0=time.time()
+             print "%s with %s threads started."%(r.__class__,s)
+             for p in self.__options:
+                 setNumberOfThreads(s)
+                 try:
+                    row.append(r.run(p))
+                 except:
+                    traceback.print_exc(file=sys.stdout)
+                    row.append(None)
+             t0=time.time()-t0
+             print "%s with %s threads finished (walltime =%s sec)."%(r.__class__,s,t0)
           self.__results.append(row)
-          c+=1
-   def getHTML(self,level=1):
+   def getHTML(self,filter,level=1):
        """
        returns the results of the last benchmark run in HTML format.
 
+       @param filter: filter to be applied to the results
+       @type filter: L{BenchmarkFilter}
        @param level: level used in header <H?> tags
        @type level: C{int}
        @return: HTML document
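Two behavioural changes in the rewritten run() are worth spelling out: the OpenMP thread count is now set through esys.escript's setNumberOfThreads() rather than the (misspelt) OMP_NUM_TREADS environment variable, and an exception in one problem no longer aborts the whole benchmark. A hypothetical problem that always fails shows what ends up in the results:

    class BrokenProblem(BenchmarkProblem):   # hypothetical
       def __init__(self):
           super(BrokenProblem,self).__init__(name="broken")
       def run(self,options=None):
           raise RuntimeError("solver blew up")

    bm=Benchmark("robustness")
    bm.addProblem(BrokenProblem())
    bm.addOptions(Options())
    bm.run(scale=1)     # prints the traceback, stores None for this entry;
                        # getHTML() later renders the cell as "failed."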
# Line 225 (r364) / Line 239 (r1387): class Benchmark(object)

        if len(self.__problems)>0:
           out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n"
           h1_seg=""
-          rn=self.__problems[0].getResultNames()
+          rn=filter.getResultNames()
           if len(rn)==0:
              h1_seg+="<TD></TD>"
           else:
# Line 236 (r364) / Line 250 (r1387): class Benchmark(object)

           for o in self.__options:
                  if len(rn)==0:
                      h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
+                     colspan=1
                  elif len(rn)==1:
                      h0+="<TH ALIGN=\"center\">%s</TH>"%str(o)
+                     colspan=1
                      empty_h1=False
                  else:
-                     h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(len(rn),str(o))
+                     colspan=len(rn)
+                     h0+="<TH ALIGN=\"center\" COLSPAN=%s>%s</TH>"%(colspan,str(o))
                  h1+=h1_seg
           out+=h0+"</TR>\n"+h1+"</TR>\n"
           c=0
           for r in range(len(self.__results)):
              out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r])
-             if isinstance(self.__scale,list): out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c]
-             for col in self.__results[r]:
-                   for e in col: out+="<TD ALIGN=\"right\">%s</TD>"%str(e)
+             if isinstance(self.__scale,list):
+                 out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c]
+             for col in self.__results[r]:
+                 if col==None:
+                    out+="<TD ALIGN=\"center\" COLSPAN=%s>failed.</TD>"%colspan
+                 else:
+                    for e in filter(col): out+="<TD ALIGN=\"right\">%s</TD>"%e
              out+="</TR>\n"
+             c+=1
           out+="</TABLE>"
-          c+=1
        if level==1:
           out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z'))
           out+="</BODY></HTML>\n"
# Line 274 (r364) / Line 295 (r1387): class BenchmarkProblem(object)

        else:
           self.__name=name
 
-   def getResultNames(self):
-       """
-       return the names of the results produced when run() is called.
-
-       @return: names the list of the names to be used when the results of the run() call are printed
-       @rtype: C{list} of C{str}
-       @remark: this function has to overwritten by a particular problem
-       """
-       raise NotImplementedError
-       return []
 
    def __str__(self):
        """
# Line 302 (r364) / Line 313 (r1387): class BenchmarkProblem(object)

        @param options: the options that are used for the run. Note that the number of OpenMP threads is controlled
                        by the L{Benchmark} the problem is run in.
        @type options: L{Options}
-       @return: list of run characteristics
-       @rtype: C{list}
-       @remark: this function has to overwritten by a particular problem
+       @return: run characteristics
+       @rtype: any type that can be read by the L{BenchmarkFilter} applied to it.
+       @note: this function has to be overwritten by a particular problem
        """
        raise NotImplementedError
        return []
 
+ class BenchmarkFilter(object):
+    """
+    object to filter the characteristics returned by Benchmark runs.
+    """
+    def __init__(self):
+        """
+        sets up a filter
+        """
+        pass
+
+    def getResultNames(self):
+        """
+        return the names of the results produced when run() is called.
+
+        @return: names the list of the names to be used when the results of the run() call are printed
+        @rtype: C{list} of C{str}
+        @note: this function has to be overwritten by a particular filter
+        """
+        raise NotImplementedError
+        return []
+
+    def __call__(self,result):
+        """
+        filters the values returned as characteristics of a problem run
+
+        @param result: values to be filtered
+        @type result: any type that is produced by the L{BenchmarkProblem} it is applied to
+        @return: a list of strings selected from result
+        @rtype: C{list} of C{str}
+        @note: this function has to be overwritten by a particular filter
+        """
+        raise NotImplementedError
+        return []
+
  class Options(object):
      """
      defines a set of options to be used to run a L{BenchmarkProblem}
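The new BenchmarkFilter class completes the split of responsibilities: a BenchmarkProblem returns raw run characteristics in whatever form is convenient, and a filter decides which of them are printed and how. A hedged sketch of a filter for a hypothetical problem that returns a dict with "time" and "iter" keys:

    class TimingFilter(BenchmarkFilter):     # hypothetical
       def getResultNames(self):
           return ["walltime [sec]","iterations"]
       def __call__(self,result):
           # result is whatever the matching BenchmarkProblem.run() returned
           return ["%.3f"%result["time"],str(result["iter"])]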
# Line 322 (r364) / Line 370 (r1387): class Options(object) (whitespace-only change)

        """
        super(Options,self).__init__()
        if name==None:
-           self.__name=self.__class__.__name__
+          self.__name=self.__class__.__name__
        else:
           self.__name=name
     def __str__(self):
# Line 341 (r364) / Line 389 (r1387): if __name__=="__main__":

     class OptionsTest2(Options):
         pass
 
-    class BenchmarkProblemTest(BenchmarkProblem):
-       def __init__(self,name=None):
-           super(BenchmarkProblemTest,self).__init__(name)
-       def getResultNames(self):
-            return ["r0","r1"]
-
-    class BenchmarkProblemTest1(BenchmarkProblemTest):
+    class BenchmarkProblemTest1(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest1,self).__init__(name="TEST1")
        def run(self,options=None):
            import time
            return time.time(),"A"
 
-    class BenchmarkProblemTest2(BenchmarkProblemTest):
+    class BenchmarkProblemTest2(BenchmarkProblem):
        def __init__(self):
            super(BenchmarkProblemTest2,self).__init__(name="TEST2")
        def run(self,options=None):
            import time
            return -time.time(),"B"
 
+    class SimpleFilter(BenchmarkFilter):
+       def getResultNames(self):
+            return ["r0","r1"]
+       def __call__(self,result):
+            return [str(result[0]),str(result[1])]
+
     bm=Benchmark("Example")
     bm.addProblem(BenchmarkProblemTest1())
     bm.addProblem(BenchmarkProblemTest2())
# Line 371 (r364) / Line 419 (r1387): if __name__=="__main__":

     bms.addBenchmark(bm)
 
     bms.run()
-    print bms.getHTML()
+    print bms.getHTML(filter=SimpleFilter())
 
     bms.run(scale=4)
-    print bms.getHTML()
+    print bms.getHTML(filter=SimpleFilter())
 
     bms.run(scale=[1,2])
-    print bms.getHTML()
+    print bms.getHTML(filter=SimpleFilter())
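Since getHTML() returns a complete HTML document when level==1, the report produced by the example above can be written to disk instead of printed; a small sketch, with bms and SimpleFilter as defined in the __main__ section and an arbitrary file name:

    report=bms.getHTML(filter=SimpleFilter())
    f=open("benchmark_results.html","w")
    f.write(report)
    f.close()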
