########################################################
#
# Copyright (c) 2003-2008 by University of Queensland
# Earth Systems Science Computational Center (ESSCC)
# http://www.uq.edu.au/esscc
#
# Primary Business: Queensland, Australia
# Licensed under the Open Software License version 3.0
# http://www.opensource.org/licenses/osl-3.0.php
#
########################################################

__copyright__="""Copyright (c) 2003-2008 by University of Queensland
Earth Systems Science Computational Center (ESSCC)
http://www.uq.edu.au/esscc
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Open Software License version 3.0
http://www.opensource.org/licenses/osl-3.0.php"""
__url__="http://www.uq.edu.au/esscc/escript-finley"

"""
Time series analysis

@var __author__: name of author
@var __copyright__: copyrights
@var __license__: licence agreement
@var __url__: url entry point on documentation
@var __version__: version
@var __date__: date of the version
"""

__author__="Lutz Gross, l.gross@uq.edu.au"

import numarray
from types import SliceType
DEFAULT_BUFFER_SIZE=1000
DEFAULT_FLOAT_TYPE=numarray.Float64

class TimeSeriesBase:
    """
    The TimeSeriesBase class is the base class for all classes of the TimeSeries
    module.
    """

    def __init__(self,debug=False,description="TimeSeriesBase"):
        self.__debug=debug
        self.setDescription(description)

    def __str__(self):
        return self.__description

    def setDescription(self,text):
        self.__description=text

    def setDebugOn(self):
        """
        Enables debugging mode.
        """
        self.__debug=True

    def setDebugOff(self):
        """
        Disables debugging mode.
        """
        self.__debug=False

    def setDebug(self,flag=False):
        """
        Sets debug mode to C{flag}.
        """
        if flag:
            self.setDebugOn()
        else:
            self.setDebugOff()

    def debug(self):
        """
        Returns True if debug mode is on, False otherwise.
        """
        return self.__debug
#==============================================================================
class TimeSeriesBaseDataset(TimeSeriesBase):
    """
    Provides an interface for accessing a set of linearly ordered data.
    """
    def __init__(self,buffer,offset=0,debug=False,description="TimeSeriesDataset"):
        TimeSeriesBase.__init__(self,debug,description)
        self.__buffer=buffer
        self.__offset=offset
        if self.debug(): print "Debug: %s: offset %d to buffer"%(self,self.getOffset())

    def __len__(self):
        """
        Needed to handle negative indexing in slicing.
        """
        return 0

    def getNumComponents(self):
        """
        Returns the number of components of the data (may be overwritten by
        subclasses).
        """
        return self.getBaseBuffer().getNumComponents()

    def getIdOfLastDatum(self):
        """
        Returns the identification number of the last datum in the data set (may
        be overwritten by subclasses).
        """
        return self.getBaseBuffer().getIdOfLastDatum()-self.getOffset()

    def getIdOfFirstDatum(self):
        """
        Returns the identification number of the first datum (may be overwritten
        by subclasses).
        """
        return self.getBaseBuffer().getIdOfFirstDatum()-self.getOffset()

    def getIdOfFirstAvailableDatum(self):
        """
        Returns the identification number of the first available datum (may be
        overwritten by subclasses).
        """
        return self.getBaseBuffer().getIdOfFirstAvailableDatum()-self.getOffset()

    def getOffsetInBaseBuffer(self):
        """
        Returns the offset to access elements in getBaseBuffer() (may be
        overwritten by subclasses).
        """
        return self.getOffset()

    def getIdOfLastUnreferencedDatum(self):
        """
        Returns the identification number of the last datum which has been
        unused by all TimeSeries referring to the TimeSeriesBaseDataset (may be
        overwritten by subclasses).
        """
        return self.getBaseBuffer().getIdOfLastUnreferencedDatum()-self.getOffset()

    def updateIdOfLastUnreferencedDatum(self,last_unreferenced_datum):
        """
        Updates the identification number of the last unused datum (to be
        overwritten by subclasses).
        """
        self.getBaseBuffer().updateIdOfLastUnreferencedDatum(last_unreferenced_datum+self.getOffset())

    def append(self,values):
        """
        Appends data to the buffer. If the buffer would be full the buffer is
        rearranged before the data are appended (to be overwritten by
        subclasses).
        """
        self.getBaseBuffer().append(values)

    def getBaseBufferSize(self):
        """
        Returns the size of the buffer (to be overwritten by subclasses).
        """
        return self.getBaseBuffer().getBaseBufferSize()

    def needsRearrangement(self,num_new_data=0):
        """
        Returns True if the buffer will be full after num_new_data values have
        been appended (to be overwritten by subclasses).
        """
        return self.getBaseBuffer().needsRearrangement(num_new_data)

    def isEmpty(self):
        """
        Returns True if no data are appended to the buffer.
        """
        return self.getNumData()<=0

    def getNumData(self):
        """
        Returns the number of data (not all of them are accessible).
        """
        return self.getIdOfLastDatum()-self.getIdOfFirstDatum()+1

    def getBaseBuffer(self):
        """
        Returns the buffer referenced by the TimeSeriesBaseDataset.
        """
        return self.__buffer

    def getOffset(self):
        """
        Returns the offset when referring to dataset elements.
        """
        return self.__offset

    def __getitem__(self,index):
        """
        Returns the datum with the given index.
        """
        if type(index)==SliceType:
            start=index.start
            end=index.stop
            if start==end:
                return self[start]
            else:
                if start<self.getIdOfFirstDatum() or start>self.getIdOfLastDatum() or \
                   end-1<self.getIdOfFirstDatum() or end-1>self.getIdOfLastDatum(): raise IndexError,"%s: Index [%d:%d] out of range"%(self,start,end)
                return self.getBaseBuffer()[start+self.getOffsetInBaseBuffer():end+self.getOffsetInBaseBuffer()]
        else:
            if index<self.getIdOfFirstDatum() or index>self.getIdOfLastDatum(): raise IndexError,"%s: Index %d out of range"%(self,index)
            return self.getBaseBuffer()[index+self.getOffsetInBaseBuffer()]

class TimeSeriesBaseBuffer(TimeSeriesBaseDataset):
    """
    An implementation of TimeSeriesBaseDataset which actually stores the data
    in a numarray buffer.
    """
    def __init__(self,buffer_size=DEFAULT_BUFFER_SIZE,numComponents=1,type=DEFAULT_FLOAT_TYPE,id_of_first_datum=0,debug=False,description="TimeSeriesBaseBuffer"):
        if numComponents<2:
            buffer=numarray.zeros((buffer_size,),type)
        else:
            buffer=numarray.zeros((buffer_size,numComponents),type)
        TimeSeriesBaseDataset.__init__(self,buffer,id_of_first_datum-1,debug,description)
        self.__num_data_in_buffer=0
        self.__id_last_unreferenced_datum=id_of_first_datum-1
        self.__id_last_datum=id_of_first_datum-1
        self.__id_first_datum=id_of_first_datum
        if self.debug(): print "Debug: %s : buffer of size %d with %d components allocated (first datum is %d)."% \
                               (self,self.getBaseBufferSize(),self.getNumComponents(),id_of_first_datum)

    def getBaseBufferSize(self):
        """
        Returns the size of the buffer.
        """
        return self.getBaseBuffer().shape[0]

    def getNumComponents(self):
        """
        Returns the number of components of the data (overwrites
        TimeSeriesBaseDataset method).
        """
        if self.getBaseBuffer().rank==1:
            return 1
        else:
            return self.getBaseBuffer().shape[1]

    def getNumDataInBaseBuffer(self):
        """
        Returns the number of data currently in the buffer.
        """
        return self.__num_data_in_buffer

    def getIdOfLastDatum(self):
        """
        Returns the identification number of the last datum in the data set
        (overwrites method from TimeSeriesBaseDataset).
        """
        return self.__id_last_datum

    def getIdOfFirstDatum(self):
        """
        Returns the identification number of the first datum (overwrites method
        from TimeSeriesBaseDataset).
        """
        return self.__id_first_datum

    def getOffsetInBaseBuffer(self):
        """
        Returns the offset to access elements in the buffer (overwrites method
        from TimeSeriesBaseDataset).
        """
        return -self.getIdOfLastDatum()+self.getNumDataInBaseBuffer()-1

    def getIdOfLastUnreferencedDatum(self):
        """
        Returns the identification number of the last datum which has been
        unused by all TimeSeries referring to the TimeSeriesBaseDataset
        (overwrites method from TimeSeriesBaseDataset).
        """
        return self.__id_last_unreferenced_datum

    def updateIdOfLastUnreferencedDatum(self,last_unreferenced_datum):
        """
        Updates the identification number of the last unused datum (overwrites
        TimeSeriesBaseDataset method).
        """
        if self.__id_last_unreferenced_datum>last_unreferenced_datum:
            self.__id_last_unreferenced_datum=last_unreferenced_datum
            if self.debug(): print "Debug: %s: last unused datum is now %s"%(self,last_unreferenced_datum)

    def needsRearrangement(self,num_new_data=0):
        """
        Returns True if the buffer will be full after num_new_data values have
        been appended.
        """
        return self.getNumDataInBaseBuffer()+num_new_data>self.getBaseBufferSize()

    def getIdOfFirstAvailableDatum(self):
        """
        Returns the identification number of the first available datum
        (overwrites TimeSeriesBaseDataset method).
        """
        return self.getIdOfLastDatum()-self.__num_data_in_buffer+1

    def append(self,data):
        """
        Appends data to the buffer. If the buffer would be full the buffer is
        rearranged before the data are appended (overwrites TimeSeriesBaseDataset
        method).
        """
        data=numarray.array(data)
        nc=self.getNumComponents()
        if data.rank==0:
            if nc==1:
                num_new_data=1
            else:
                raise ValueError,"%s: illegal data shape"%self
        elif data.rank==1:
            if nc==1:
                num_new_data=data.shape[0]
            else:
                num_new_data=1
        elif data.rank==2:
            if not nc==data.shape[1]: raise ValueError,"%s: illegal data shape"%self
            num_new_data=data.shape[0]
        else:
            raise ValueError,"%s: illegal rank"%self

        # check if the buffer will overflow when the data are appended:
        if self.needsRearrangement(num_new_data):
            nn=self.getNumDataInBaseBuffer()
            num_protected_data=self.getIdOfLastDatum()-self.getIdOfLastUnreferencedDatum()
            if num_protected_data+num_new_data>self.getBaseBufferSize():
                raise ValueError,"%s: buffer overflow: buffer size has to be bigger than %d"%(self,num_protected_data+num_new_data)
            if num_protected_data>0: self.getBaseBuffer()[0:num_protected_data]=self.getBaseBuffer()[nn-num_protected_data:nn]
            self.__num_data_in_buffer=num_protected_data
            self.__id_last_unreferenced_datum=self.__id_last_datum
            if self.debug():
                print "Debug: %s: rearrangement: first datum in buffer is %d."%(self,self.getIdOfLastDatum()-self.getNumDataInBaseBuffer()+1)
        # copy data over:
        nn=self.getNumDataInBaseBuffer()
        self.getBaseBuffer()[nn:nn+num_new_data]=data
        self.__num_data_in_buffer+=num_new_data
        self.__id_last_datum+=num_new_data
        self.__id_last_unreferenced_datum+=num_new_data
        if self.debug(): print "Debug: %s: %d data appended. Last unreferenced datum is now %d."%(self,num_new_data,self.__id_last_unreferenced_datum)

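# Example: a minimal sketch of how a TimeSeriesBaseBuffer and offset views work,
# mirroring the self-test at the bottom of this module. The helper name
# _example_buffer_views is illustrative only and not part of the original API.
def _example_buffer_views():
    bf=TimeSeriesBaseBuffer(buffer_size=5,numComponents=1,description="ExampleBuffer")
    view_l=TimeSeriesBaseDataset(bf,offset=1,description="offset 1")    # view_l[k] maps to bf[k+1]
    view_r=TimeSeriesBaseDataset(bf,offset=-1,description="offset -1")  # view_r[k] maps to bf[k-1]
    bf.append([1.,2.,3.,4.])
    # all three expressions refer to the same datum 2.:
    return view_l[0],bf[1],view_r[2]
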
# ======================================
class TimeSeriesControlerView(TimeSeriesBase):
    """
    A TimeSeriesControlerView is attached to a Controler and moves forward in
    time by increasing the id of the last processed datum.
    Any implementation of a TimeSeriesControlerView must provide the
    getControler method which returns the Controler.
    """
    def __init__(self,id_first_datum=0,debug=False,description="TimeSeries"):
        TimeSeriesBase.__init__(self,debug,description)
        self.__id_last_processed_datum=id_first_datum-1
        if self.debug(): print "Debug: %s created with first datum %d"%(str(self),id_first_datum)

    def getIdOfLastProcessedDatum(self):
        return self.__id_last_processed_datum

    def updateIdOfLastProcessedDatum(self,id_last_processed_datum):
        self.__id_last_processed_datum=id_last_processed_datum

    # def getControler(self):
    #    """
    #    Returns the Controler of the time series (to be overwritten by
    #    subclass)
    #    """
    #    pass

class TimeSeries(TimeSeriesBaseDataset,TimeSeriesControlerView):
    """
    Makes TimeSeriesBaseDataset look like a TimeSeries and introduces
    operations. Any implementation of a TimeSeriesControlerView must provide
    the getControler method which returns the Controler.
    """
    def __init__(self,dataset,debug=False,description="TimeSeries"):
        TimeSeriesControlerView.__init__(self,dataset.getIdOfFirstDatum(),debug,description)
        TimeSeriesBaseDataset.__init__(self,dataset,0,debug,description)

    def getDataset(self):
        """
        Returns the TimeSeriesBaseDataset of the time series.
        """
        return self.getBaseBuffer()

    # def getControler(self):
    #    """
    #    Returns the Controler of the time series (to be overwritten by
    #    subclass)
    #    """
    #    pass

    def __add__(self,arg):
        if isinstance(arg,TimeSeriesBaseDataset):
            return TimeSeriesAdd(self,arg)
        else:
            return TimeSeriesAddScalar(self,arg)

    def __sub__(self,arg):
        return self+(-1.)*arg

    def __mul__(self,arg):
        if isinstance(arg,TimeSeriesBaseDataset):
            return TimeSeriesMult(self,arg)
        else:
            return TimeSeriesMultScalar(self,arg)

    def __div__(self,arg):
        if isinstance(arg,TimeSeriesBaseDataset):
            return TimeSeriesDiv(self,arg)
        else:
            return TimeSeriesMultScalar(self,1./arg)

    def __pow__(self,arg):
        if isinstance(arg,TimeSeriesBaseDataset):
            return TimeSeriesPower(self,arg)
        else:
            return TimeSeriesPowerScalar(self,arg)

    def __radd__(self,arg):
        return self.__add__(arg)

    def __rsub__(self,arg):
        return arg+(-1.)*self

    def __rmul__(self,arg):
        return self.__mul__(arg)

    def __rdiv__(self,arg):
        if isinstance(arg,TimeSeriesBaseDataset):
            return TimeSeriesDiv(arg,self)
        else:
            return TimeSeriesDivScalar(self,arg)

    def __rpow__(self,arg):
        if isinstance(arg,TimeSeriesBaseDataset):
            return TimeSeriesPower(arg,self)
        else:
            return Exp(numarray.log(arg)*self)

    def __lshift__(self,arg):
        return TimeSeriesShift(self,-arg)

    def __rshift__(self,arg):
        return TimeSeriesShift(self,arg)

    def __neg__(self):
        return (-1.0)*self

    def __pos__(self):
        return (1.0)*self

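# Example: the overloaded operators above do not compute anything immediately;
# they build TimeSeriesFilter objects (defined further below) that are evaluated
# whenever the Controler flushes. A minimal sketch, assuming the Controler and
# scalar filter classes defined later in this module; the helper name
# _example_lazy_expression is illustrative only.
def _example_lazy_expression():
    c=Controler(buffer_size=15)
    c.setFlushRate(2)
    expr=2.*c+1.                # builds a TimeSeriesMultScalar followed by a TimeSeriesAddScalar
    for i in range(30): c.nextTime(i*1.)
    c.flush()
    return expr[28]             # 2.*28.+1.=57.
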
class TimeSeriesOperator(TimeSeriesControlerView):
    """
    A TimeSeriesOperator describes an operation acting on a list of TimeSeries
    time_series_args. Its output (if there is any) is updated through the
    C{update} method, which is overwritten by a particular implementation of
    the class. The C{update} method is called to process the data [start:end]
    using [start-left_wing_size:end+right_wing_size] of its arguments.
    """
    def __init__(self,controler,time_series_args=[],left_wing_size=0,right_wing_size=0,debug=False,description="TimeSeriesOperator"):
        id_first_datum=controler.getIdOfFirstDatum()
        for i in time_series_args: id_first_datum=max(id_first_datum,i.getIdOfFirstDatum())
        TimeSeriesControlerView.__init__(self,id_first_datum+left_wing_size,debug,description)
        self.__left_wing_size=left_wing_size
        self.__right_wing_size=right_wing_size
        self.__time_series_args=time_series_args
        self.__controler=controler
        controler.appendOperatorToUpdateList(self)
        if self.debug(): print "Debug: %s: with left/right wing size %d/%d and %d arguments."%(str(self),left_wing_size,right_wing_size,len(time_series_args))

    def __del__(self):
        self.getControler().removeOperatorFromUpdateList(self)

    def getControler(self):
        """
        Returns the Controler updating the TimeSeriesOperator.
        """
        return self.__controler

    def getLeftWingSize(self):
        """
        Returns the left wing size.
        """
        return self.__left_wing_size

    def getRightWingSize(self):
        """
        Returns the right wing size.
        """
        return self.__right_wing_size

    def getArguments(self,index=None):
        """
        Returns the list of arguments or, if C{index} is present, the
        argument with the given index. In the latter case C{None} is returned
        if no arguments are present.
        """
        if index==None:
            return self.__time_series_args
        else:
            if len(self.__time_series_args)>0:
                return self.__time_series_args[index]
            else:
                return None

    def getArgumentDataset(self,index):
        """
        Returns the dataset of the argument with the given index.
        """
        arg=self.getArguments(index)
        if arg==None:
            return None
        else:
            return self.getArguments(index).getDataset()

    def flush(self):
        """
        Calls the C{update} method with the maximum processable range. It
        also updates the id of unused data for all arguments.
        """
        start=self.getIdOfLastProcessedDatum()+1
        end=self.getControler().getIdOfLastDatum()
        for i in self.getArguments(): end=min(end,i.getIdOfLastDatum())
        if start<=end-self.getRightWingSize():
            if self.debug(): print "Debug: %s: range [%d:%d] is updated."%(self,start,end-self.getRightWingSize())
            self.update(start,end-self.getRightWingSize()+1)
            for i in self.getArguments(): i.updateIdOfLastUnreferencedDatum(end-self.getLeftWingSize())
            self.updateIdOfLastProcessedDatum(end)

    def update(self,start,end):
        """
        Updates the data [start:end] using [start-left_wing_size:end+right_wing_size]
        of its arguments (is overwritten by a particular TimeSeriesOperator).
        """
        pass


class TimeSeriesFilter(TimeSeries,TimeSeriesOperator):
    """
    A TimeSeriesFilter is a TimeSeries that is created through a
    TimeSeriesOperator.
    """
    def __init__(self,controler,dataset,time_series_args=[],left_wing_size=0,right_wing_size=0,debug=False,description="TimeSeriesFilter"):
        TimeSeriesOperator.__init__(self,controler,time_series_args,left_wing_size,right_wing_size,debug,description)
        TimeSeries.__init__(self,dataset,debug,description)

    def update(self,start,end):
        """
        Appends zeros to the dataset. This method should be overwritten by a
        particular TimeSeriesFilter.
        """
        nc=self.getNumComponents()
        if nc>1:
            self.getDataset().append(numarray.zeros([end-start,nc]))
        else:
            self.getDataset().append(numarray.zeros(end-start))

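# Example: a minimal sketch of a custom TimeSeriesFilter, modeled on the scalar
# filters defined below. The class _ExampleScaleFilter is illustrative only and
# not part of the original module; it multiplies its argument by a fixed factor
# whenever the controler flushes.
class _ExampleScaleFilter(TimeSeriesFilter):
    def __init__(self,time_serie,factor=2.):
        dsc="scale(%s,%s)"%(time_serie,factor)
        dbg=time_serie.debug()
        cntrl=time_serie.getControler()
        TimeSeriesFilter.__init__(self,cntrl, \
                TimeSeriesBaseBuffer(cntrl.getBaseBufferSize(),time_serie.getNumComponents(),DEFAULT_FLOAT_TYPE,time_serie.getIdOfFirstDatum(),dbg,"buffer for "+dsc), \
                [time_serie],0,0,dbg,dsc)
        self.__factor=factor

    def update(self,start,end):
        # scale the new data [start:end] of the argument and append them to this series
        self.append(self.getArgumentDataset(0)[start:end]*self.__factor)
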
class Controler(TimeSeries):
    """
    Controls a set of TimeSeries.
    """
    def __init__(self,buffer_size=DEFAULT_BUFFER_SIZE,debug=False,description="TimeSeriesControler"):
        TimeSeries.__init__(self,TimeSeriesBaseBuffer(buffer_size,1,DEFAULT_FLOAT_TYPE,0,debug,"node buffer of "+description),debug,"nodes of "+description)
        self.setFlushRate()
        self.__update_time_series=list()

    def getControler(self):
        """
        Returns the C{Controler} of the time series (overwrites method of
        TimeSeries).
        """
        return self

    def setFlushRate(self,rate=50):
        """
        Sets the flush rate, i.e. the C{flush} method is called after C{rate}
        new time nodes have been checked in.
        """
        self.__flush_rate=rate
        if self.debug(): print "Debug: %s: flush rate is set to %d"%(self,rate)

    def needsFlushing(self):
        """
        Returns True if the dependent TimeSeriesFilters need to be flushed
        because the time node buffer is full or the flush rate has been reached.
        """
        return self.needsRearrangement(1) or (self.getNumData()+1)%self.__flush_rate==0

    def flush(self):
        """
        Flushes all dependent TimeSeriesFilters by calling their C{flush}
        method.
        """
        if self.debug(): print "Debug: %s: start flushing"%self
        for time_serie in self.__update_time_series: time_serie.flush()

    def appendOperatorToUpdateList(self,time_serie):
        if not time_serie.getControler()==self: raise ValueError,"%s: TimeSeries %s is not defined on this controler."%(self,time_serie)
        if not self.isEmpty(): raise ValueError,"%s: a time series can only be checked in while the controler is empty."%self
        self.__update_time_series.append(time_serie)
        if self.debug(): print "Debug: %s: %s has been added to update list."%(self,time_serie)

    def removeOperatorFromUpdateList(self,time_serie):
        self.__update_time_series.remove(time_serie)
        if self.debug(): print "Debug: %s: %s has been removed from update list."%(self,time_serie)

    def nextTime(self,value):
        if self.needsFlushing(): self.flush()
        self.getDataset().append(value)
        if self.debug(): print "Debug: %s: new time node %e has been added."%(self,value)

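# Example: a minimal sketch of driving a Controler, mirroring the self-test at
# the bottom of this module. New time nodes are checked in with nextTime();
# dependent filters are evaluated on flush(). The helper name _example_controler
# is illustrative only.
def _example_controler():
    c=Controler(buffer_size=15)
    c.setFlushRate(2)
    ahead=c<<1                  # view: the value at t[n] is the time node t[n+1]
    total=c+ahead               # TimeSeriesAdd filter, evaluated on flush
    for i in range(30):
        c.nextTime(i*1.)
    c.flush()
    return total[28]            # 28.+29.=57.
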
class TimeSeriesShift(TimeSeries):
    """
    Creates a shifted view of a time series: the datum v[n] at time node t[n]
    appears at time node t[n+shift] in the output, i.e. the value of the output
    at t[n] is v[n-shift].
    """
    def __init__(self,time_serie,shift=1):
        if shift<0:
            dsc="(%s)<<%d"%(time_serie,-shift)
        else:
            dsc="(%s)>>%d"%(time_serie,shift)
        self.__controler=time_serie.getControler()
        TimeSeries.__init__(self,TimeSeriesBaseDataset(time_serie.getDataset(),-shift,time_serie.debug(),"buffer view to "+dsc),time_serie.debug(),dsc)

    def getControler(self):
        return self.__controler

class TimeSeriesAdd(TimeSeriesFilter):
    """
    Adds two TimeSeries.
    """
    def __init__(self,time_serie_1,time_serie_2):
        dsc="(%s)+(%s)"%(time_serie_1,time_serie_2)
        dbg=time_serie_1.debug() or time_serie_2.debug()
        cntrl=time_serie_1.getControler()
        if not cntrl==time_serie_2.getControler():
            raise ValueError("TimeSeriesAdd: %s and %s have different controler."%(time_serie_1,time_serie_2))
        id_first_datum=max(time_serie_1.getIdOfFirstDatum(),time_serie_2.getIdOfFirstDatum())
        TimeSeriesFilter.__init__(self,cntrl, \
                TimeSeriesBaseBuffer(cntrl.getBaseBufferSize(),time_serie_1.getNumComponents(),DEFAULT_FLOAT_TYPE,id_first_datum,dbg,"buffer for "+dsc), \
                [time_serie_1,time_serie_2],0,0,dbg,dsc)

    def update(self,start,end):
        self.append(self.getArgumentDataset(0)[start:end]+self.getArgumentDataset(1)[start:end])

class TimeSeriesAddScalar(TimeSeriesFilter):
    """
    Adds a single value to a TimeSeries.
    """
    def __init__(self,time_serie,scalar):
        dsc="(%s)+(%s)"%(time_serie,scalar)
        dbg=time_serie.debug()
        cntrl=time_serie.getControler()
        id_first_datum=time_serie.getIdOfFirstDatum()
        TimeSeriesFilter.__init__(self,cntrl, \
                TimeSeriesBaseBuffer(cntrl.getBaseBufferSize(),time_serie.getNumComponents(),DEFAULT_FLOAT_TYPE,id_first_datum,dbg,"buffer for "+dsc), \
                [time_serie],0,0,dbg,dsc)
        self.__scalar=scalar

    def update(self,start,end):
        self.append(self.getArgumentDataset(0)[start:end]+self.__scalar)

class TimeSeriesMult(TimeSeriesFilter):
    """
    Multiplies two TimeSeries.
    """
    def __init__(self,time_serie_1,time_serie_2):
        dsc="(%s)*(%s)"%(time_serie_1,time_serie_2)
        dbg=time_serie_1.debug() or time_serie_2.debug()
        cntrl=time_serie_1.getControler()
        if not cntrl==time_serie_2.getControler():
            raise ValueError("TimeSeriesMult: %s and %s have different controler."%(time_serie_1,time_serie_2))
        id_first_datum=max(time_serie_1.getIdOfFirstDatum(),time_serie_2.getIdOfFirstDatum())
        TimeSeriesFilter.__init__(self,cntrl, \
                TimeSeriesBaseBuffer(cntrl.getBaseBufferSize(),time_serie_1.getNumComponents(),DEFAULT_FLOAT_TYPE,id_first_datum,dbg,"buffer for "+dsc), \
                [time_serie_1,time_serie_2],0,0,dbg,dsc)

    def update(self,start,end):
        self.append(self.getArgumentDataset(0)[start:end]*self.getArgumentDataset(1)[start:end])

class TimeSeriesMultScalar(TimeSeriesFilter):
    """
    Multiplies a TimeSeries with a single (scalar) value.
    """
    def __init__(self,time_serie,scalar):
        dsc="(%s)*%s"%(time_serie,scalar)
        dbg=time_serie.debug()
        cntrl=time_serie.getControler()
        id_first_datum=time_serie.getIdOfFirstDatum()
        TimeSeriesFilter.__init__(self,cntrl, \
                TimeSeriesBaseBuffer(cntrl.getBaseBufferSize(),time_serie.getNumComponents(),DEFAULT_FLOAT_TYPE,id_first_datum,dbg,"buffer for "+dsc), \
                [time_serie],0,0,dbg,dsc)
        self.__scalar=scalar

    def update(self,start,end):
        self.append(self.getArgumentDataset(0)[start:end]*self.__scalar)

class TimeSeriesDiv(TimeSeriesFilter):
    """
    Divides two TimeSeries.
    """
    def __init__(self,time_serie_1,time_serie_2):
        dsc="(%s)/(%s)"%(time_serie_1,time_serie_2)
        dbg=time_serie_1.debug() or time_serie_2.debug()
        cntrl=time_serie_1.getControler()
        if not cntrl==time_serie_2.getControler():
            raise ValueError("TimeSeriesDiv: %s and %s have different controler."%(time_serie_1,time_serie_2))
        id_first_datum=max(time_serie_1.getIdOfFirstDatum(),time_serie_2.getIdOfFirstDatum())
        TimeSeriesFilter.__init__(self,cntrl, \
                TimeSeriesBaseBuffer(cntrl.getBaseBufferSize(),time_serie_1.getNumComponents(),DEFAULT_FLOAT_TYPE,id_first_datum,dbg,"buffer for "+dsc), \
                [time_serie_1,time_serie_2],0,0,dbg,dsc)

    def update(self,start,end):
        self.append(self.getArgumentDataset(0)[start:end]/self.getArgumentDataset(1)[start:end])

class TimeSeriesDivScalar(TimeSeriesFilter):
    """
    Divides a scalar by a TimeSeries.
    """
    def __init__(self,time_serie,scalar):
        dsc="(%s)/(%s)"%(scalar,time_serie)
        dbg=time_serie.debug()
        cntrl=time_serie.getControler()
        id_first_datum=time_serie.getIdOfFirstDatum()
        TimeSeriesFilter.__init__(self,cntrl, \
                TimeSeriesBaseBuffer(cntrl.getBaseBufferSize(),time_serie.getNumComponents(),DEFAULT_FLOAT_TYPE,id_first_datum,dbg,"buffer for "+dsc), \
                [time_serie],0,0,dbg,dsc)
        self.__scalar=scalar

    def update(self,start,end):
        self.append(self.__scalar/self.getArgumentDataset(0)[start:end])

class TimeSeriesPower(TimeSeriesFilter):
    """
    Raises one TimeSeries to the power of another TimeSeries.
    """
    def __init__(self,time_serie_1,time_serie_2):
        dsc="(%s)**(%s)"%(time_serie_1,time_serie_2)
        dbg=time_serie_1.debug() or time_serie_2.debug()
        cntrl=time_serie_1.getControler()
        if not cntrl==time_serie_2.getControler():
            raise ValueError("TimeSeriesPower: %s and %s have different controler."%(time_serie_1,time_serie_2))
        id_first_datum=max(time_serie_1.getIdOfFirstDatum(),time_serie_2.getIdOfFirstDatum())
        TimeSeriesFilter.__init__(self,cntrl, \
                TimeSeriesBaseBuffer(cntrl.getBaseBufferSize(),time_serie_1.getNumComponents(),DEFAULT_FLOAT_TYPE,id_first_datum,dbg,"buffer for "+dsc), \
                [time_serie_1,time_serie_2],0,0,dbg,dsc)

    def update(self,start,end):
        self.append(self.getArgumentDataset(0)[start:end]**self.getArgumentDataset(1)[start:end])

class TimeSeriesPowerScalar(TimeSeriesFilter):
    """
    Raises a TimeSeries to the power of a scalar.
    """
    def __init__(self,time_serie,scalar):
        dsc="(%s)**(%s)"%(time_serie,scalar)
        dbg=time_serie.debug()
        cntrl=time_serie.getControler()
        id_first_datum=time_serie.getIdOfFirstDatum()
        TimeSeriesFilter.__init__(self,cntrl, \
                TimeSeriesBaseBuffer(cntrl.getBaseBufferSize(),time_serie.getNumComponents(),DEFAULT_FLOAT_TYPE,id_first_datum,dbg,"buffer for "+dsc), \
                [time_serie],0,0,dbg,dsc)
        self.__scalar=scalar

    def update(self,start,end):
        self.append(self.getArgumentDataset(0)[start:end]**self.__scalar)

class Exp(TimeSeriesFilter):
    """
    Computes M{exp(TimeSeries)}.
    """
    def __init__(self,time_serie):
        dsc="exp(%s)"%(time_serie)
        dbg=time_serie.debug()
        cntrl=time_serie.getControler()
        id_first_datum=time_serie.getIdOfFirstDatum()
        TimeSeriesFilter.__init__(self,cntrl, \
                TimeSeriesBaseBuffer(cntrl.getBaseBufferSize(),time_serie.getNumComponents(),DEFAULT_FLOAT_TYPE,id_first_datum,dbg,"buffer for "+dsc), \
                [time_serie],0,0,dbg,dsc)

    def update(self,start,end):
        self.append(numarray.exp(self.getArgumentDataset(0)[start:end]))

class Writer(TimeSeriesOperator):
    """
    Writes the time series into an output stream C{ostream} which must have
    the C{writelines} method. The values are separated by the string
    C{seperator}.
    """
    def __init__(self,time_serie,ostream,seperator=",",commend_tag="#"):
        dsc="write %s to %s"%(time_serie,ostream)
        dbg=time_serie.debug()
        cntrl=time_serie.getControler()
        self.__ostream=ostream
        self.__seperator=seperator
        TimeSeriesOperator.__init__(self,cntrl,[time_serie],0,0,dbg,dsc)
        ostream.writelines("%s time series %s\n"%(commend_tag,str(self)))

    def update(self,start,end):
        cntrl=self.getControler()
        arg=self.getArguments(0)
        n=arg.getNumComponents()
        if n<2:
            for i in range(start,end): self.__ostream.writelines("%s%s%s\n"%(cntrl[i],self.__seperator,arg[i]))
        else:
            for i in range(start,end):
                l="%s"%cntrl[i]
                for j in range(n): l=l+"%s%s"%(self.__seperator,arg[i][j])
                self.__ostream.writelines("%s\n"%l)

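# Example: a minimal sketch of writing a time series to a CSV file, mirroring
# the self-test at the bottom of this module. DataCatcher is defined just
# below; the helper name _example_writer and the file name are illustrative only.
def _example_writer(file_name="/tmp/timeseries_example.csv"):
    c=Controler(buffer_size=15)
    c.setFlushRate(2)
    values=DataCatcher(c)
    out=file(file_name,"w")
    w=Writer(values,out)            # writes "t<seperator>value" rows on every flush
    for i in range(30):
        c.nextTime(i*1.)
        values.nextValue(i*2.)
    c.flush()
    out.close()
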
class DataCatcher(TimeSeries):
    """
    Collects data into a time series.
    """
    def __init__(self,controler,numComponents=1,description="DataCatcher"):
        self.__controler=controler
        dbg=controler.debug()
        TimeSeries.__init__(self,TimeSeriesBaseBuffer(controler.getBaseBufferSize(),numComponents,DEFAULT_FLOAT_TYPE,controler.getIdOfFirstDatum(),dbg,"buffer for "+description),dbg,description)

    def getControler(self):
        return self.__controler

    def nextValue(self,value):
        """
        Appends a value to the time series.
        """
        id_last=self.getIdOfLastDatum()
        id_current=self.getControler().getIdOfLastDatum()
        if id_last+1==id_current:
            self.getDataset().append(value)
        elif id_last+1<id_current:
            if self.isEmpty():
                self.getDataset().append(value)
                id_last+=1
            t_last=self.getControler()[id_last]
            t_current=self.getControler()[id_current]
            value_last=self[id_last]
            out=(value_last-value)/(t_last-t_current)*(self.getControler()[id_last+1:id_current+1]-t_current)+value
            self.getDataset().append(out)
        else:
            raise ValueError,"%s: a new time node must be introduced before a new value can be added."%self
        self.updateIdOfLastUnreferencedDatum(id_last)

class TimeSeriesCumulativeSum(TimeSeriesFilter):
    """
    Cumulative sum of the time series values.
    """
    def __init__(self,time_serie):
        dsc="cumsum(%s)"%(time_serie)
        dbg=time_serie.debug()
        cntrl=time_serie.getControler()
        id_first_datum=time_serie.getIdOfFirstDatum()
        TimeSeriesFilter.__init__(self,cntrl, \
                TimeSeriesBaseBuffer(cntrl.getBaseBufferSize(),time_serie.getNumComponents(),DEFAULT_FLOAT_TYPE,id_first_datum,dbg,"buffer for "+dsc), \
                [time_serie],0,0,dbg,dsc)
        self.__last_value=0

    def update(self,start,end):
        out=numarray.cumsum(self.getArgumentDataset(0)[start:end])+self.__last_value
        self.__last_value=out[end-start-1]
        self.append(out)

class Reader(TimeSeriesBase):
    """
    Reads a list of input streams and creates a time series for each input
    stream, all sharing the same C{Controler}, where the first column is used
    to create the time nodes.
    """
    def __init__(self,list_of_istreams,buffer_size=DEFAULT_BUFFER_SIZE,seperator=",",commend_tag="#",debug=False):
        TimeSeriesBase.__init__(self,debug=debug,description="reader")
        if not isinstance(list_of_istreams,list):
            self.__list_of_istreams=[list_of_istreams]
        else:
            self.__list_of_istreams=list_of_istreams
        self.__cntrl=Controler(buffer_size,debug,"reader controler")
        self.__seperator=seperator
        self.__commend_tag=commend_tag
        self.__time_series={}
        self.__t={}
        self.__v={}
        # set up the time series:
        for i in self.__list_of_istreams:
            line=self.__commend_tag
            while not line=="" and line[0]==self.__commend_tag:
                line=i.readline().strip()
            if line=="":
                self.__list_of_istreams.remove(i)
            else:
                d=line.split(self.__seperator)
                self.__t[i]=float(d[0])
                tmp=[]
                for j in d[1:]: tmp.append(float(j))
                self.__v[i]=numarray.array(tmp)
                self.__time_series[i]=DataCatcher(self.__cntrl,len(d)-1,str(i))

    #
    def run(self):
        while len(self.__list_of_istreams)>0:
            if len(self.__time_series)>0:
                # find all input streams whose current time node is the minimum:
                tminargs=[]
                for i in self.__time_series:
                    if len(tminargs)==0:
                        tminargs.append(i)
                    elif abs(self.__t[tminargs[0]]-self.__t[i])<1.e-8*abs(self.__t[i]):
                        tminargs.append(i)
                    elif self.__t[i]<self.__t[tminargs[0]]:
                        tminargs=[i]
                # check in the minimum time node and hand the values to the corresponding time series:
                self.__cntrl.nextTime(self.__t[tminargs[0]])
                for i in tminargs:
                    self.__time_series[i].nextValue(self.__v[i])
                    # find the next line without leading "#"
                    line="#"
                    while not line=="" and line[0]==self.__commend_tag:
                        line=i.readline().strip()
                    # if eof is reached the istream is removed from the search
                    if line=="":
                        self.__list_of_istreams.remove(i)
                    else:
                        d=line.split(self.__seperator)
                        self.__t[i]=float(d[0])
                        tmp=[]
                        for j in d[1:]: tmp.append(float(j))
                        self.__v[i]=numarray.array(tmp)

    def getControler(self):
        """
        Returns the Controler shared by all time series created through the
        input streams.
        """
        return self.__cntrl

    def getTimeSeries(self,istream=None):
        """
        Returns the time series as a tuple. If C{istream} is present its time
        series is returned.
        """
        if istream==None:
            out=self.__time_series.values()
            if len(out)>1:
                return tuple(out)
            elif len(out)>0:
                return out[0]
            else:
                return None
        else:
            return self.__time_series[istream]

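# Example: a minimal sketch of reading a CSV file back into a time series. It
# assumes a file in the format produced by Writer (e.g. by _example_writer
# above); the helper name _example_reader and the file name are illustrative only.
def _example_reader(file_name="/tmp/timeseries_example.csv"):
    rin=Reader(file(file_name,"r"),buffer_size=15)
    rin.run()                    # feeds all rows into the shared Controler
    return rin.getTimeSeries()   # the DataCatcher holding the value column
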
class Plotter(TimeSeriesOperator):
    def __init__(self,time_series,window_size=DEFAULT_BUFFER_SIZE/4,file_name=None,format=None):
        if isinstance(time_series,list):
            dbg=time_series[0].getControler().debug()
            text=""
            for i in time_series:
                if len(text)==0:
                    text=str(i)
                else:
                    text=text+","+str(i)
            TimeSeriesOperator.__init__(self,time_series[0].getControler(),time_series,window_size,0,dbg,"plot(%s)"%text)
        else:
            dbg=time_series.getControler().debug()
            text=str(time_series)
            TimeSeriesOperator.__init__(self,time_series.getControler(),[time_series],window_size,0,dbg,"plot(%s)"%text)
        from pyvisi.renderers.gnuplot import LinePlot,Scene,PsImage
        self.__renderer=Scene()
        self.__line_plot=LinePlot(self.__renderer)
        self.__line_plot.setTitle(text)
        self.__line_plot.setLineStyle("lines")
        self.__line_plot.setXLabel("time")
        self.__line_plot.setYLabel("values")
        self.__file_name=file_name
        if format==None:
            self.__format=PsImage()
        else:
            self.__format=format
        self.__window_size=window_size

    def update(self,start,end):
        s=max(end-self.__window_size,self.getControler().getIdOfFirstAvailableDatum())
        args=[self.getControler()[s:end]]
        for arg in self.getArguments(): args.append(arg[s:end])
        self.__line_plot.setData(*args)
        self.__line_plot.render()
        if self.__file_name==None:
            raise SystemError,"Online viewing is not available yet!"
        else:
            self.__renderer.save(fname=self.__file_name, format=self.__format)

def viewer(time_serie,seperator=","):
    """
    Creates a viewer for a time series.
    """
    import sys
    return Writer(time_serie,sys.stdout,seperator)

def differential(time_serie):
    """
    Calculates the derivative M{Dv} of the time series v as the mean of the
    forward and backward difference quotients:

    M{Dv[n]=((v[n+1]-v[n])/(t[n+1]-t[n])+(v[n]-v[n-1])/(t[n]-t[n-1]))/2}
    """
    out=(((time_serie<<1)-time_serie)/((time_serie.getControler()<<1)-time_serie.getControler())+ \
         ((time_serie>>1)-time_serie)/((time_serie.getControler()>>1)-time_serie.getControler()))/2.
    out.setDescription("d(%s)/dt"%str(time_serie))
    out.setDebug(time_serie.debug())
    return out

def integral(time_serie):
    """
    Calculates the integral M{Iv} of the time series v using the trapezoidal
    rule:

    M{Iv[n]=int_{t_0}^{t_n} v dt ~ sum_{0<i<=n} (v[i]+v[i-1])/2*(t[i]-t[i-1])}

    """
    out=TimeSeriesCumulativeSum(((time_serie>>1)+time_serie)/2.*(time_serie.getControler()-(time_serie.getControler()>>1)))
    out.setDescription("I (%s) dt"%str(time_serie))
    out.setDebug(time_serie.debug())
    return out

def smooth(time_serie,range=5):
    """
    Smooths a time series using the previous and next C{range} values at each
    time node.
    """
    i=integral(time_serie)
    out=((i>>range)-(i<<range))/((time_serie.getControler()>>range)-(time_serie.getControler()<<range))
    out.setDescription("smooth(%s,-%d:%d) dt"%(str(time_serie),range,range))
    out.setDebug(time_serie.debug())
    return out

def leakySmooth(time_serie,l=0.99):
    """
    Leaky smoother:

    M{s(t)=int_{t_0}^{t} v(r) l^{t-r} dr/ int_{t_0}^{t} l^{t-r} dr}
    """
    w=l**(-time_serie.getControler())
    out=integral(time_serie*w)/integral(w)
    out.setDescription("leaky smoother(%s)"%str(time_serie))
    return out

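# Example: a minimal sketch of the calculus helpers above, mirroring the
# self-test below. The expressions must be built before time nodes are checked
# in; the helper name _example_calculus is illustrative only.
def _example_calculus():
    c=Controler(buffer_size=15)
    c.setFlushRate(2)
    rate=differential(c)        # ~1. for equally spaced unit time steps
    area=integral(2.*c)         # trapezoidal rule, ~t**2
    for i in range(30): c.nextTime(i*1.)
    c.flush()
    return rate[28],area[28]    # approximately 1. and 28.**2
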
# test

if __name__=="__main__":
    # tests the interfaces to data sets:
    print "Test of Datasets:"
    print "================="
    bf=TimeSeriesBaseBuffer(buffer_size=5,numComponents=1,debug=True,description="TestBaseBuffer")
    bfv_l=TimeSeriesBaseDataset(bf,offset=1,debug=True,description="offset 1")
    bfv_r=TimeSeriesBaseDataset(bf,offset=-1,debug=True,description="offset -1")
    bf.append([1.,2.,3.,4.])
    print "should be all 2. :",bfv_l[0]
    print bf[1]
    print bfv_r[2]
    bf.append([5.,6.,7.])
    print "should be all 5. :",bfv_l[3],bf[4],bfv_r[5]
    print "should be all 6. :",bfv_l[4],bf[5],bfv_r[6]
    print "should be all 7. :",bfv_l[5],bf[6],bfv_r[7]
    print "should be all [6., 7.] :",bfv_l[4:6],bf[5:7],bfv_r[6:8]

    print "Test of Controler"
    print "================="
    b=Controler(buffer_size=15,debug=True)
    s3=b>>3
    s1=b>>1
    s_3=b<<3
    print s_3
    print b
    print b+s3
    sum=(s_3+b)+(b+s3)

    for i in range(30):
        b.nextTime(i*1.)
    b.flush()
    print "should be all 28. :",s_3.getDataset()[25],b.getDataset()[28],s3.getDataset()[31]
    print "should be all 29. :",s_3.getDataset()[26],b.getDataset()[29],s3.getDataset()[32]
    print "should be all 96. :",sum.getDataset()[24]

    print "Test of operators"
    print "================="
    b=Controler(buffer_size=15,debug=True)
    b.setFlushRate(2)
    q=DataCatcher(b)
    b1=b<<1
    a=b+b1
    a_s=b1+1.
    s_a=1.+b1
    d=b-b1
    d_s=b1-1.
    s_d=1.-b1
    m=b*b1
    m_s=b1*2.
    s_m=2.*b1
    dv=b/b1
    dv_s=b1/2.
    s_dv=2./b1
    p=b**b1
    p_s=b1**2.
    s_p=2.**b1
    pb=+b
    mb=-b
    sum=TimeSeriesCumulativeSum(b)
    diff=differential(b)
    smt=smooth(b,2)
    int=integral(b*2)
    fl=file("/tmp/test.csv","w")
    w=Writer(q,fl)
    v=viewer(q)
    plo=Plotter([a,a_s],window_size=4,file_name="s.ps")
    for i in range(30):
        b.nextTime(i*1.)
        if i%2==1: q.nextValue(i*28.)
    b.flush()
    print "a[28] should be %e: %e"%(28.+29.,a[28])
    print "a_s[28] should be %e: %e"%(29.+1.,a_s[28])
    print "s_a[28] should be %e: %e"%(29.+1.,s_a[28])
    print "d[28] should be %e: %e"%(28.-29.,d[28])
    print "d_s[28] should be %e: %e"%(29.-1.,d_s[28])
    print "s_d[28] should be %e: %e"%(1.-29.,s_d[28])
    print "m[28] should be %e: %e"%(28.*29.,m[28])
    print "m_s[28] should be %e: %e"%(29.*2.,m_s[28])
    print "s_m[28] should be %e: %e"%(29.*2.,s_m[28])
    print "dv[28] should be %e: %e"%(28./29.,dv[28])
    print "dv_s[28] should be %e: %e"%(29./2.,dv_s[28])
    print "s_dv[28] should be %e: %e"%(2./29.,s_dv[28])
    print "p[28] should be %e: %e"%(28.**29.,p[28])
    print "p_s[28] should be %e: %e"%(29.**2,p_s[28])
    print "s_p[28] should be %e: %e"%(2.**29.,s_p[28])
    print "pb[28] should be %e: %e"%(28.,pb[28])
    print "mb[28] should be %e: %e"%(-28.,mb[28])
    print "sum[28] should be %e: %e"%(28*29./2,sum[28])
    print "diff[28] should be %e: %e"%(1.,diff[28])
    print "smt[27] should be %e: %e"%(27.,smt[27])
    print "int[28] should be %e: %e"%(28.**2,int[28])
    print "q[27] should be %e: %e"%(27*28.,q[27])
    print "q[28] should be %e: %e"%(28*28.,q[28])
    print "q[29] should be %e: %e"%(29*28.,q[29])
    fl.flush()

    rin=Reader(file("/tmp/test.csv","r+"),buffer_size=15,debug=True)
    rin.run()
    inp=rin.getTimeSeries()
    print "inp[27] should be %e: %e"%(27*28.,inp[27])
    print "inp[28] should be %e: %e"%(28*28.,inp[28])
    print "inp[29] should be %e: %e"%(29*28.,inp[29])