1 |
# $Id:$ |
# $Id:$
2 |
|
|
3 |
# |
# |
4 |
# COPYRIGHT ACcESS 2004 - All Rights Reserved |
# COPYRIGHT ACcESS 2004 - All Rights Reserved |
95 |
@type scale: C{int} or C{list} of C{int}s. |
@type scale: C{int} or C{list} of C{int}s. |
96 |
""" |
""" |
97 |
self.__scale=scale |
self.__scale=scale |
98 |
for i in range(len(self)): self[i].run(scale) |
for i in range(len(self)): self[i].run(scale=scale) |
99 |
def getHTML(self,level=1): |
def getHTML(self,filter,level=1): |
100 |
""" |
""" |
101 |
returns the results of the last benchmark run in HTML format. |
returns the results of the last benchmark run in HTML format. |
102 |
|
|
103 |
|
@param filter: filter to be applied to the results |
104 |
|
@type filter: L{BenchmarkFilter} |
105 |
@param level: level used in header <H?> tags |
@param level: level used in header <H?> tags |
106 |
@type level: C{int} |
@type level: C{int} |
107 |
@return: HTML document |
@return: HTML document |
118 |
out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m) |
out+="<p>platform: %s%s</p>\n"%(socket.gethostname(),m) |
119 |
for i in range(len(self)): |
for i in range(len(self)): |
120 |
out+="<p>\n" |
out+="<p>\n" |
121 |
out+=self[i].getHTML(min(level+1,self.MAX_LEVEL)) |
out+=self[i].getHTML(filter=filter,level=min(level+1,self.MAX_LEVEL)) |
122 |
out+="<p>\n" |
out+="<p>\n" |
123 |
if level==1: |
if level==1: |
124 |
out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z')) |
out+="<hr><p align=\"center\">by %s at %s</p>\n"%(os.getlogin(),time.strftime('%X %x %Z')) |
191 |
if isinstance(scale,list): |
if isinstance(scale,list): |
192 |
if len(scale)<len(self.__problems): |
if len(scale)<len(self.__problems): |
193 |
raise ValueError,"scale list is too small. must be greater or equal to the number of problems in the benchmark" |
raise ValueError,"scale list is too small. must be greater or equal to the number of problems in the benchmark" |
194 |
|
self.__filter=filter |
195 |
self.__scale=scale |
self.__scale=scale |
196 |
self.__results=[] |
self.__results=[] |
197 |
c=0 |
c=0 |
206 |
row.append(r.run(p)) |
row.append(r.run(p)) |
207 |
self.__results.append(row) |
self.__results.append(row) |
208 |
c+=1 |
c+=1 |
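As a quick illustration of the scale handling above, using the test objects defined near the end of this diff:

bm=Benchmark("Example")
bm.addProblem(BenchmarkProblemTest1())
bm.addProblem(BenchmarkProblemTest2())
bm.run(scale=4)        # a single int applies the same scale to every problem
bm.run(scale=[1,2])    # one entry per problem; a shorter list raises the ValueError above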
209 |
def getHTML(self,level=1): |
def getHTML(self,filter,level=1): |
210 |
""" |
""" |
211 |
returns the results of the last benchmark run in HTML format. |
returns the results of the last benchmark run in HTML format. |
212 |
|
|
213 |
|
@param filter: filter to be applied to the results |
214 |
|
@type filter: L{BenchmarkFilter} |
215 |
@param level: level used in header <H?> tags |
@param level: level used in header <H?> tags |
216 |
@type level: C{int} |
@type level: C{int} |
217 |
@return: HTML document |
@return: HTML document |
230 |
if len(self.__problems)>0: |
if len(self.__problems)>0: |
231 |
out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n" |
out+="<TABLE ALIGN=\"center\" BORDER=3 CELLPADDING=5 CELLSPACING=1>\n" |
232 |
h1_seg="" |
h1_seg="" |
233 |
rn=self.__problems[0].getResultNames() |
rn=filter.getResultNames() |
234 |
if len(rn)==0: |
if len(rn)==0: |
235 |
h1_seg+="<TD></TD>" |
h1_seg+="<TD></TD>" |
236 |
else: |
else: |
252 |
for r in range(len(self.__results)): |
for r in range(len(self.__results)): |
253 |
out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r]) |
out+="<TR><TH ALIGN=\"right\">%s</TH>"%str(self.__problems[r]) |
254 |
if isinstance(self.__scale,list): out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c] |
if isinstance(self.__scale,list): out+="<TD ALIGN=\"right\">%s</TD>"%self.__scale[c] |
255 |
for col in self.__results[r]: |
filtered_results=filter(self.__results[r]) |
256 |
for e in col: out+="<TD ALIGN=\"right\">%s</TD>"%str(e) |
for col in filtered_results: |
257 |
|
for e in col: out+="<TD ALIGN=\"right\">%s</TD>"%e |
258 |
out+="</TR>\n" |
out+="</TR>\n" |
259 |
out+="</TABLE>" |
out+="</TABLE>" |
260 |
c+=1 |
c+=1 |
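For reference, with a filter whose getResultNames() returns ["r0","r1"] (like the SimpleFilter further down), a single result row renders roughly as follows (the values are illustrative only):

<TR><TH ALIGN="right">TEST1</TH><TD ALIGN="right">1132573200.5</TD><TD ALIGN="right">A</TD></TR>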
280 |
else: |
else: |
281 |
self.__name=name |
self.__name=name |
282 |
|
|
|
def getResultNames(self): |
|
|
""" |
|
|
return the names of the results produced when run() is called. |
|
|
|
|
|
@return: the list of names to be used when the results of the run() call are printed
|
|
@rtype: C{list} of C{str} |
|
|
@remark: this function has to be overwritten by a particular problem
|
|
""" |
|
|
raise NotImplementedError |
|
|
return [] |
|
283 |
|
|
284 |
def __str__(self): |
def __str__(self): |
285 |
""" |
""" |
298 |
@param options: the options that are used for the run. Note that the number of OpenMP threads is controlled |
@param options: the options that are used for the run. Note that the number of OpenMP threads is controlled |
299 |
by the L{Benchmark} the problem is run in. |
by the L{Benchmark} the problem is run in. |
300 |
@type options: L{Options} |
@type options: L{Options} |
301 |
@return: list of run characteristics |
@return: run characteristics |
302 |
@rtype: C{list} |
@rtype: any type that can be read by the L{BenchmarkFilter} applied to it. |
303 |
@remark: this function has to be overwritten by a particular problem
@remark: this function has to be overwritten by a particular problem
304 |
""" |
""" |
305 |
raise NotImplementedError |
raise NotImplementedError |
306 |
return [] |
return [] |
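A concrete problem therefore only needs to override run(); a minimal sketch along the lines of the test problems at the end of this diff (the class name, the name string and the timing logic are placeholders):

class WallclockProblem(BenchmarkProblem):
    def __init__(self):
        super(WallclockProblem,self).__init__(name="wallclock")
    def run(self,options=None):
        import time
        t0=time.time()
        # ... the actual calculation, honouring options, would go here ...
        return time.time()-t0,"ok"      # any shape the chosen BenchmarkFilter can read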
307 |
|
|
308 |
|
class BenchmarkFilter(object): |
309 |
|
""" |
310 |
|
object to filter the characteristics returned by Benchmark runs.
311 |
|
|
312 |
|
""" |
313 |
|
def __init__(self): |
314 |
|
""" |
315 |
|
sets up a filter |
316 |
|
""" |
317 |
|
pass |
318 |
|
|
319 |
|
|
320 |
|
def getResultNames(self): |
321 |
|
""" |
322 |
|
return the names of the results produced when run() is called. |
323 |
|
|
324 |
|
@return: the list of names to be used when the results of the run() call are printed
325 |
|
@rtype: C{list} of C{str} |
326 |
|
@remark: this function has to be overwritten by a particular filter
327 |
|
""" |
328 |
|
raise NotImplementedError |
329 |
|
return [] |
330 |
|
|
331 |
|
def __call__(self,result): |
332 |
|
""" |
333 |
|
filters the values returned as characteristics of a problem run
334 |
|
|
335 |
|
@param result: values to be filtered |
336 |
|
@type result: any type that is produced by the L{BenchmarkProblem} it is applied to |
337 |
|
@return: a list of strings selected from result |
338 |
|
@rtype: C{list} of C{str} |
339 |
|
@remark: this function has to be overwritten by a particular filter
340 |
|
""" |
341 |
|
raise NotImplementedError |
342 |
|
return [] |
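A concrete filter simply pairs these two methods; the SimpleFilter in the test code below is the minimal example, used roughly like this (the result tuple is made up):

f=SimpleFilter()
print f.getResultNames()          # -> ["r0","r1"], the column headers
print f((1132573200.5,"A"))       # -> ["1132573200.5", "A"], the strings that end up in the table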
343 |
|
|
344 |
|
|
345 |
class Options(object): |
class Options(object): |
346 |
""" |
""" |
347 |
defines a set of options to be used to run a L{BenchmarkProblem} |
defines a set of options to be used to run a L{BenchmarkProblem} |
374 |
class OptionsTest2(Options): |
class OptionsTest2(Options): |
375 |
pass |
pass |
376 |
|
|
377 |
class BenchmarkProblemTest(BenchmarkProblem): |
class BenchmarkProblemTest1(BenchmarkProblem): |
|
def __init__(self,name=None): |
|
|
super(BenchmarkProblemTest,self).__init__(name) |
|
|
def getResultNames(self): |
|
|
return ["r0","r1"] |
|
|
|
|
|
class BenchmarkProblemTest1(BenchmarkProblemTest): |
|
378 |
def __init__(self): |
def __init__(self): |
379 |
super(BenchmarkProblemTest1,self).__init__(name="TEST1") |
super(BenchmarkProblemTest1,self).__init__(name="TEST1") |
380 |
def run(self,options=None): |
def run(self,options=None): |
381 |
import time |
import time |
382 |
return time.time(),"A" |
return time.time(),"A" |
383 |
|
|
384 |
class BenchmarkProblemTest2(BenchmarkProblemTest): |
class BenchmarkProblemTest2(BenchmarkProblem): |
385 |
def __init__(self): |
def __init__(self): |
386 |
super(BenchmarkProblemTest2,self).__init__(name="TEST2") |
super(BenchmarkProblemTest2,self).__init__(name="TEST2") |
387 |
def run(self,options=None): |
def run(self,options=None): |
388 |
import time |
import time |
389 |
return -time.time(),"B" |
return -time.time(),"B" |
390 |
|
|
391 |
|
class SimpleFilter(BenchmarkFilter): |
392 |
|
def getResultNames(self): |
393 |
|
return ["r0","r1"] |
394 |
|
def __call__(self,result): |
395 |
|
return [str(result[0]),str(result[1])] |
396 |
|
|
397 |
bm=Benchmark("Example") |
bm=Benchmark("Example") |
398 |
bm.addProblem(BenchmarkProblemTest1()) |
bm.addProblem(BenchmarkProblemTest1()) |
399 |
bm.addProblem(BenchmarkProblemTest2()) |
bm.addProblem(BenchmarkProblemTest2()) |
404 |
bms.addBenchmark(bm) |
bms.addBenchmark(bm) |
405 |
|
|
406 |
bms.run() |
bms.run() |
407 |
print bms.getHTML() |
print bms.getHTML(filter=SimpleFilter()) |
408 |
|
|
409 |
bms.run(scale=4) |
bms.run(scale=4) |
410 |
print bms.getHTML() |
print bms.getHTML(filter=SimpleFilter()) |
411 |
|
|
412 |
bms.run(scale=[1,2]) |
bms.run(scale=[1,2]) |
413 |
print bms.getHTML() |
print bms.getHTML(filter=SimpleFilter()) |