1 \chapter{The module \pyvisi}
2 \label{PYVISI CHAP}
3 \declaremodule{extension}{esys.pyvisi}
4 \modulesynopsis{Python Visualization Interface}
5
6 \section{Introduction}
\pyvisi is a Python module used to generate 2D and 3D visualizations
for escript and its PDE solvers: finley and bruce. The module provides
an easy-to-use interface to the \VTK library (\VTKUrl). Pyvisi can be used to
render (generate) surface maps and contours for scalar fields, arrows and
streamlines for vector fields, and ellipsoids for tensor fields.
There are three approaches to rendering an object: (1) Online - the object is
rendered on-screen with interaction capability (e.g. zoom and rotate),
(2) Offline - the object is rendered off-screen (no pop-up window) and
(3) Display - the object is rendered on-screen but with no interaction
capability (on-the-fly animation). All three approaches have the option of
saving the rendered object as an image (e.g. jpg).
19
20 The following outlines the general steps to use Pyvisi:
21
22 \begin{enumerate}
\item Create a \Scene instance - a window in which objects are
rendered.
25 \item Create a data input instance (i.e. \DataCollector or \ImageReader) -
26 reads and loads the source data for visualization.
27 \item Create a data visualization instance (i.e. \Map, \Velocity, \Ellipsoid,
\Contour, \Carpet, \StreamLine or \Image) - processes and manipulates
29 the source data.
30 \item Create a \Camera or \Light instance - controls the viewing angle and
31 lighting effects.
32 \item Render the object - using either the Online, Offline or Display approach.
33 \end{enumerate}
34 \begin{center}
35 \begin{math}
36 scene \rightarrow data \; input \rightarrow data \; visualization \rightarrow
37 camera \, / \, light \rightarrow render
38 \end{math}
39 \end{center}
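
A minimal sketch following these five steps is given below (a sketch only; it
assumes the example data set \texttt{interior\_3D.xml} and the scalar field
\texttt{temperature}, which are also used in the examples later in this
chapter):

\begin{python}
# Minimal Pyvisi pipeline: scene -> data input -> data visualization ->
# camera -> render. File and field names are assumptions (see the text).
from esys.pyvisi import Scene, DataCollector, Map, Camera
from esys.pyvisi.constant import *

s = Scene(renderer = Renderer.ONLINE, num_viewport = 1, x_size = 800,
        y_size = 600)

dc = DataCollector(source = Source.XML)
dc.setFileName(file_name = "interior_3D.xml")
dc.setActiveScalar(scalar = "temperature")

m = Map(scene = s, data_collector = dc, viewport = Viewport.SOUTH_WEST,
        lut = Lut.COLOR, cell_to_point = False, outline = True)

c = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
c.isometricView()

# Render on-screen; pass an image_name to also save the result as an image.
s.render()
\end{python}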
40
41 \section{\pyvisi Classes}
42 The following subsections give a brief overview of the important classes
43 and some of their corresponding methods. Please refer to \ReferenceGuide for
44 full details.
45
46
47 %#############################################################################
48
49
50 \subsection{Scene Classes}
51 This subsection details the instances used to setup the viewing environment.
52
53 \subsubsection{\Scene class}
54
55 \begin{classdesc}{Scene}{renderer = Renderer.ONLINE, num_viewport = 1,
56 x_size = 1152, y_size = 864}
A scene is a window in which objects are rendered. Only one scene needs
to be created. However, a scene may be divided into four smaller windows
called viewports (if needed), each of which can render a different object.
61 \end{classdesc}
62
63 The following are some of the methods available:
64 \begin{methoddesc}[Scene]{setBackground}{color}
65 Set the background color of the scene.
66 \end{methoddesc}
67
68 \begin{methoddesc}[Scene]{render}{image_name = None}
69 Render the object using either the Online, Offline or Display mode.
70 \end{methoddesc}
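
For example (a sketch; \texttt{Color.WHITE} is assumed to be one of the named
colors provided by \texttt{esys.pyvisi.constant}):

\begin{python}
# Sketch: set a white background and save the (still empty) scene as a jpg.
from esys.pyvisi import Scene
from esys.pyvisi.constant import *

s = Scene(renderer = Renderer.ONLINE_JPG, num_viewport = 1, x_size = 1152,
        y_size = 864)
s.setBackground(color = Color.WHITE)
s.render(image_name = "background.jpg")
\end{python}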
71
72 \subsubsection{\Camera class}
73
74 \begin{classdesc}{Camera}{scene, viewport = Viewport.SOUTH_WEST}
75 A camera controls the display angle of the rendered object and one is
76 usually created for a \Scene. However, if a \Scene has four viewports, then a
77 separate camera may be created for each viewport.
78 \end{classdesc}
79
80 The following are some of the methods available:
81 \begin{methoddesc}[Camera]{setFocalPoint}{position}
82 Set the focal point of the camera.
83 \end{methoddesc}
84
85 \begin{methoddesc}[Camera]{setPosition}{position}
86 Set the position of the camera.
87 \end{methoddesc}
88
89 \begin{methoddesc}[Camera]{azimuth}{angle}
Rotate the camera to the left or right.
91 \end{methoddesc}
92
93 \begin{methoddesc}[Camera]{elevation}{angle}
Rotate the camera up or down (the angle is restricted to between -90 and 90).
95 \end{methoddesc}
96
97 \begin{methoddesc}[Camera]{backView}{}
98 Rotate the camera to view the back of the rendered object.
99 \end{methoddesc}
100
101 \begin{methoddesc}[Camera]{topView}{}
102 Rotate the camera to view the top of the rendered object.
103 \end{methoddesc}
104
105 \begin{methoddesc}[Camera]{bottomView}{}
106 Rotate the camera to view the bottom of the rendered object.
107 \end{methoddesc}
108
109 \begin{methoddesc}[Camera]{leftView}{}
110 Rotate the camera to view the left side of the rendered object.
111 \end{methoddesc}
112
113 \begin{methoddesc}[Camera]{rightView}{}
114 Rotate the camera to view the right side of the rendered object.
115 \end{methoddesc}
116
117 \begin{methoddesc}[Camera]{isometricView}{}
Rotate the camera to an isometric view of the rendered object.
119 \end{methoddesc}
120
121 \begin{methoddesc}[Camera]{dolly}{distance}
Move the camera towards the rendered object (for a distance value greater
than 1). Note that the camera cannot be moved away from the rendered object.
124 \end{methoddesc}
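
As an illustration (a sketch only, assuming a \Scene instance \texttt{s} has
already been created):

\begin{python}
# Sketch: position the camera explicitly, then fine-tune the view.
# Assumes a Scene instance 's' created beforehand; all values are arbitrary.
from esys.pyvisi import Camera, GlobalPosition
from esys.pyvisi.constant import *

c = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
c.setFocalPoint(GlobalPosition(0.5, 0.5, 0.5))
c.setPosition(GlobalPosition(0.5, 0.5, 6.0))
c.azimuth(angle = 30)     # rotate to the side
c.elevation(angle = -20)  # look slightly from below
c.dolly(distance = 1.2)   # move closer to the object
\end{python}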
125
126 \subsubsection{\Light class}
127
128 \begin{classdesc}{Light}{scene, viewport = Viewport.SOUTH_WEST}
129 A light controls the lighting effect for the rendered object and works in
130 a similar way to \Camera.
131 \end{classdesc}
132
133 The following are some of the methods available:
134 \begin{methoddesc}[Light]{setColor}{color}
135 Set the light color.
136 \end{methoddesc}
137
138 \begin{methoddesc}[Light]{setFocalPoint}{position}
139 Set the focal point of the light.
140 \end{methoddesc}
141
142 \begin{methoddesc}[Light]{setPosition}{position}
143 Set the position of the light.
144 \end{methoddesc}
145
146 \begin{methoddesc}[Light]{setAngle}{elevation = 0, azimuth = 0}
An alternative way of setting the position and focal point of the light,
using the elevation and azimuth angles.
149 \end{methoddesc}
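
For example (a sketch, again assuming an existing \Scene instance \texttt{s}):

\begin{python}
# Sketch: place a light above and slightly to the side of the object.
from esys.pyvisi import Light
from esys.pyvisi.constant import *

lt = Light(scene = s, viewport = Viewport.SOUTH_WEST)
lt.setAngle(elevation = 45, azimuth = 20)
\end{python}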
150
151
152 %##############################################################################
153
154
155 \subsection{Input Classes}
156 \label{INPUT SEC}
157 This subsection details the instances used to read and load the source data
158 for visualization.
159
160 \subsubsection{\DataCollector class}
161 \begin{classdesc}{DataCollector}{source = Source.XML}
A data collector is used to read data either from an XML file (using
\texttt{setFileName()}) or from an escript object directly (using
\texttt{setData()}). Writing XML files is expensive, but this approach has
the advantage that the results can easily be analyzed after the
simulation has completed.
167 \end{classdesc}
168
169 The following are some of the methods available:
170 \begin{methoddesc}[DataCollector]{setFileName}{file_name}
171 Set the XML file name to read.
172 \end{methoddesc}
173
174 \begin{methoddesc}[DataCollector]{setData}{**args}
Create data using \textless name\textgreater = \textless data\textgreater
keyword pairs. It is assumed that the data is given in the appropriate
format.
178 \end{methoddesc}
179
180 \begin{methoddesc}[DataCollector]{setActiveScalar}{scalar}
181 Specify the scalar field to load.
182 \end{methoddesc}
183
184 \begin{methoddesc}[DataCollector]{setActiveVector}{vector}
185 Specify the vector field to load.
186 \end{methoddesc}
187
188 \begin{methoddesc}[DataCollector]{setActiveTensor}{tensor}
189 Specify the tensor field to load.
190 \end{methoddesc}
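
The two input modes described above are sketched below (\texttt{output.xml}
and the escript object \texttt{T} are placeholders):

\begin{python}
# Sketch: the two ways of feeding data into Pyvisi.
from esys.pyvisi import DataCollector
from esys.pyvisi.constant import *

# (a) Read a previously written XML file and select a scalar field.
dc_xml = DataCollector(source = Source.XML)
dc_xml.setFileName(file_name = "output.xml")       # placeholder file name
dc_xml.setActiveScalar(scalar = "temperature")

# (b) Read directly from an escript object (no file is written).
dc_esc = DataCollector(source = Source.ESCRIPT)
dc_esc.setData(temp = T)    # 'T' is an escript object computed elsewhere
\end{python}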
191
192 \subsubsection{\ImageReader class}
193
194 \begin{classdesc}{ImageReader}{format}
195 An image reader is used to read data from an image in a variety of formats.
196 \end{classdesc}
197
198 The following are some of the methods available:
199 \begin{methoddesc}[ImageReader]{setImageName}{image_name}
200 Set the image name to be read.
201 \end{methoddesc}
202
203 \subsubsection{\TextTwoD class}
204
205 \begin{classdesc}{Text2D}{scene, text, viewport = Viewport.SOUTH_WEST}
Two-dimensional text is used to annotate the rendered object
(e.g. to insert titles, authors and labels).
208 \end{classdesc}
209
210 The following are some of the methods available:
211 \begin{methoddesc}[Text2D]{setFontSize}{size}
212 Set the 2D text size.
213 \end{methoddesc}
214
215 \begin{methoddesc}[Text2D]{boldOn}{}
Render the 2D text in bold.
217 \end{methoddesc}
218
219 \begin{methoddesc}[Text2D]{setColor}{color}
220 Set the color of the 2D text.
221 \end{methoddesc}
222
Methods from \ActorTwoD are also available.
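
A short sketch (assuming an existing \Scene instance \texttt{s};
\texttt{Color.BLACK} is assumed to be provided by
\texttt{esys.pyvisi.constant}):

\begin{python}
# Sketch: place a bold title in the lower left corner of the viewport.
from esys.pyvisi import Text2D, LocalPosition
from esys.pyvisi.constant import *

t = Text2D(scene = s, text = "Temperature distribution",
        viewport = Viewport.SOUTH_WEST)
t.setFontSize(size = 20)
t.boldOn()
t.setColor(color = Color.BLACK)
t.setPosition(LocalPosition(10, 10))
\end{python}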
224
225
226 %##############################################################################
227
228
229 \subsection{Data Visualization Classes}
230 \label{DATAVIS SEC}
This subsection details the instances used to process and manipulate the
source data. Typical usage of some of the classes is also shown.

One point to note is that the source can contain either point data or cell
data. If the source contains cell data, a conversion to point data may or may
not be required for the object to be rendered correctly.
If such a conversion is needed, the 'cell_to_point' flag (see below) must
be set to 'True', otherwise to 'False' (the default). On occasion, an
inaccurate object may be rendered from cell data even after conversion.
240
241 \subsubsection{\Map class}
242
243 \begin{classdesc}{Map}{scene, data_collector,
244 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
245 outline = True}
246 Class that shows a scalar field on a domain surface. The domain surface
247 can either be colored or grey-scaled, depending on the lookup table used.
248 \end{classdesc}
249
250 The following are some of the methods available:\\
251 Methods from \ActorThreeD and \DataSetMapper.
252
253 A typical usage of \Map is shown below.
254
255 \begin{python}
256 # Import the necessary modules.
257 from esys.pyvisi import Scene, DataCollector, Map, Camera
258 from esys.pyvisi.constant import *
259 import os
260
261 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
262 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
263 X_SIZE = 800
264 Y_SIZE = 800
265
266 SCALAR_FIELD_POINT_DATA = "temperature"
267 SCALAR_FIELD_CELL_DATA = "temperature_cell"
268 FILE_3D = "interior_3D.xml"
269 IMAGE_NAME = "map.jpg"
270 JPG_RENDERER = Renderer.ONLINE_JPG
271
272 # Create a Scene with four viewports.
273 s = Scene(renderer = JPG_RENDERER, num_viewport = 4, x_size = X_SIZE,
274 y_size = Y_SIZE)
275
# Create a DataCollector reading from an XML file.
277 dc1 = DataCollector(source = Source.XML)
278 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
279 dc1.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA)
280
281 # Create a Map for the first viewport.
282 m1 = Map(scene = s, data_collector = dc1, viewport = Viewport.SOUTH_WEST,
283 lut = Lut.COLOR, cell_to_point = False, outline = True)
284 m1.setRepresentationToWireframe()
285
286 # Create a Camera for the first viewport
287 c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
288 c1.isometricView()
289
290 # Create a second DataCollector reading from the same XML file but specifying
291 # a different scalar field.
292 dc2 = DataCollector(source = Source.XML)
293 dc2.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
294 dc2.setActiveScalar(scalar = SCALAR_FIELD_CELL_DATA)
295
296 # Create a Map for the third viewport.
297 m2 = Map(scene = s, data_collector = dc2, viewport = Viewport.NORTH_EAST,
298 lut = Lut.COLOR, cell_to_point = True, outline = True)
299
300 # Create a Camera for the third viewport
301 c2 = Camera(scene = s, viewport = Viewport.NORTH_EAST)
302
303 # Render the object.
304 s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME))
305 \end{python}
306
307 \subsubsection{\MapOnPlaneCut class}
308
309 \begin{classdesc}{MapOnPlaneCut}{scene, data_collector,
310 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
311 outline = True}
312 This class works in a similar way to \Map, except that it shows a scalar
313 field cut using a plane. The plane can be translated and rotated along the
314 X, Y and Z axes.
315 \end{classdesc}
316
317 The following are some of the methods available:\\
318 Methods from \ActorThreeD, \Transform and \DataSetMapper.
319
320 \subsubsection{\MapOnPlaneClip class}
321
322 \begin{classdesc}{MapOnPlaneClip}{scene, data_collector,
323 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
324 outline = True}
325 This class works in a similar way to \MapOnPlaneCut, except that it shows a
326 scalar field clipped using a plane.
327 \end{classdesc}
328
329 The following are some of the methods available:\\
330 Methods from \ActorThreeD, \Transform, \Clipper and \DataSetMapper.
331
332 \subsubsection{\MapOnScalarClip class}
333
334 \begin{classdesc}{MapOnScalarClip}{scene, data_collector,
335 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
336 outline = True}
337 This class works in a similar way to \Map, except that it shows a scalar
338 field clipped using a scalar value.
339 \end{classdesc}
340
341 The following are some of the methods available:\\
342 Methods from \ActorThreeD, \Clipper and \DataSetMapper.
343
344 \subsubsection{\Velocity class}
345
346 \begin{classdesc}{Velocity}{scene, data_collector, arrow = Arrow.TWO_D,
347 color_mode = ColorMode.VECTOR, viewport = Viewport.SOUTH_WEST,
348 lut = Lut.COLOR, cell_to_point = False, outline = True}
349 Class that shows a vector field using arrows. The arrows can either be
350 colored or grey-scaled, depending on the lookup table used. If the arrows
351 are colored, there are two possible coloring modes, either using vector data or
scalar data. Similarly, there are two possible types of arrows, either
two-dimensional or three-dimensional.
354 \end{classdesc}
355
356 The following are some of the methods available:\\
357 Methods from \ActorThreeD, \GlyphThreeD, \MaskPoints and \DataSetMapper.
358
359 \subsubsection{\VelocityOnPlaneCut class}
360
361 \begin{classdesc}{VelocityOnPlaneCut}{scene, data_collector,
362 arrow = Arrow.TWO_D, color_mode = ColorMode.VECTOR,
363 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR,
364 cell_to_point = False, outline = True}
365 This class works in a similar way to \MapOnPlaneCut, except that
366 it shows a vector field using arrows cut using a plane.
367 \end{classdesc}
368
369 The following are some of the methods available:\\
370 Methods from \ActorThreeD, \GlyphThreeD, \Transform, \MaskPoints and
371 \DataSetMapper.
372
373 A typical usage of \VelocityOnPlaneCut is shown below.
374
375 \begin{python}
376 # Import the necessary modules
377 from esys.pyvisi import Scene, DataCollector, VelocityOnPlaneCut, Camera
378 from esys.pyvisi.constant import *
379 import os
380
381 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
382 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
383 X_SIZE = 400
384 Y_SIZE = 400
385
386 VECTOR_FIELD_CELL_DATA = "velocity"
387 FILE_3D = "interior_3D.xml"
388 IMAGE_NAME = "velocity.jpg"
389 JPG_RENDERER = Renderer.ONLINE_JPG
390
# Create a Scene.
392 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
393 y_size = Y_SIZE)
394
# Create a DataCollector reading from an XML file.
396 dc1 = DataCollector(source = Source.XML)
397 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
398 dc1.setActiveVector(vector = VECTOR_FIELD_CELL_DATA)
399
400 # Create VelocityOnPlaneCut.
401 vopc1 = VelocityOnPlaneCut(scene = s, data_collector = dc1,
402 viewport = Viewport.SOUTH_WEST, color_mode = ColorMode.VECTOR,
403 arrow = Arrow.THREE_D, lut = Lut.COLOR, cell_to_point = False,
404 outline = True)
405 vopc1.setScaleFactor(scale_factor = 0.5)
406 vopc1.setPlaneToXY(offset = 0.5)
407 vopc1.setRatio(2)
408 vopc1.randomOn()
409
410 # Create a Camera.
411 c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
412 c1.isometricView()
413 c1.elevation(angle = -20)
414
415 # Render the object.
416 s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME))
417 \end{python}
418
419 \subsubsection{\VelocityOnPlaneClip class}
420
421 \begin{classdesc}{VelocityOnPlaneClip}{scene, data_collector,
422 arrow = Arrow.TWO_D, color_mode = ColorMode.VECTOR,
423 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR,
cell_to_point = False, outline = True}
425 This class works in a similar way to \MapOnPlaneClip, except that it shows a
426 vector field using arrows clipped using a plane.
427 \end{classdesc}
428
429 The following are some of the methods available:\\
430 Methods from \ActorThreeD, \GlyphThreeD, \Transform, \Clipper,
431 \MaskPoints and \DataSetMapper.
432
433 \subsubsection{\Ellipsoid class}
434
435 \begin{classdesc}{Ellipsoid}{scene, data_collector,
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
437 outline = True}
438 Class that shows a tensor field using ellipsoids. The ellipsoids can either be
439 colored or grey-scaled, depending on the lookup table used.
440 \end{classdesc}
441
442 The following are some of the methods available:\\
443 Methods from \ActorThreeD, \Sphere, \TensorGlyph, \MaskPoints and
444 \DataSetMapper.
445
446 \subsubsection{\EllipsoidOnPlaneCut class}
447
448 \begin{classdesc}{EllipsoidOnPlaneCut}{scene, data_collector,
449 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
450 outline = True}
451 This class works in a similar way to \MapOnPlaneCut, except that it shows
452 a tensor field using ellipsoids cut using a plane.
453 \end{classdesc}
454
455 The following are some of the methods available:\\
456 Methods from \ActorThreeD, \Sphere, \TensorGlyph, \Transform,
457 \MaskPoints and \DataSetMapper.
458
459 \subsubsection{\EllipsoidOnPlaneClip class}
460
461 \begin{classdesc}{EllipsoidOnPlaneClip}{scene, data_collector,
462 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
463 outline = True}
464 This class works in a similar way to \MapOnPlaneClip, except that it shows a
465 tensor field using ellipsoids clipped using a plane.
466 \end{classdesc}
467
468 The following are some of the methods available:\\
469 Methods from \ActorThreeD, \Sphere, \TensorGlyph, \Transform, \Clipper,
470 \MaskPoints and \DataSetMapper.
471
472 A typical usage of \EllipsoidOnPlaneClip is shown below.
473
474 \begin{python}
475 # Import the necessary modules
476 from esys.pyvisi import Scene, DataCollector, EllipsoidOnPlaneClip, Camera
477 from esys.pyvisi.constant import *
478 import os
479
480 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
481 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
482 X_SIZE = 400
483 Y_SIZE = 400
484
485 TENSOR_FIELD_CELL_DATA = "stress_cell"
486 FILE_3D = "interior_3D.xml"
487 IMAGE_NAME = "ellipsoid.jpg"
488 JPG_RENDERER = Renderer.ONLINE_JPG
489
490 # Create a Scene.
491 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
492 y_size = Y_SIZE)
493
# Create a DataCollector reading from an XML file.
495 dc1 = DataCollector(source = Source.XML)
496 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
497 dc1.setActiveTensor(tensor = TENSOR_FIELD_CELL_DATA)
498
# Create an EllipsoidOnPlaneClip.
500 eopc1 = EllipsoidOnPlaneClip(scene = s, data_collector = dc1,
501 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = True,
502 outline = True)
503 eopc1.setPlaneToXY()
504 eopc1.setScaleFactor(scale_factor = 0.2)
505 eopc1.rotateX(angle = 10)
506
507 # Create a camera.
508 c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
509 c1.bottomView()
510 c1.azimuth(angle = -90)
511 c1.elevation(angle = 10)
512
513 # Render the object.
514 s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME))
515 \end{python}
516
517 \subsubsection{\Contour class}
518
519 \begin{classdesc}{Contour}{scene, data_collector,
520 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
521 outline = True}
522 Class that shows a scalar field using contour surfaces. The contour surfaces can
523 either be colored or grey-scaled, depending on the lookup table used. This
524 class can also be used to generate iso surfaces.
525 \end{classdesc}
526
527 The following are some of the methods available:\\
528 Methods from \ActorThreeD, \ContourModule and \DataSetMapper.
529
530 A typical usage of \Contour is shown below.
531
532 \begin{python}
533 # Import the necessary modules
534 from esys.pyvisi import Scene, DataCollector, Contour, Camera
535 from esys.pyvisi.constant import *
536 import os
537
538 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
539 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
540 X_SIZE = 400
541 Y_SIZE = 400
542
543 SCALAR_FIELD_POINT_DATA = "temperature"
544 FILE_3D = "interior_3D.xml"
545 IMAGE_NAME = "contour.jpg"
546 JPG_RENDERER = Renderer.ONLINE_JPG
547
548 # Create a Scene.
549 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
550 y_size = Y_SIZE)
551
# Create a DataCollector reading from an XML file.
553 dc1 = DataCollector(source = Source.XML)
554 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
555 dc1.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA)
556
557 # Create a Contour.
558 ctr1 = Contour(scene = s, data_collector = dc1, viewport = Viewport.SOUTH_WEST,
559 lut = Lut.COLOR, cell_to_point = False, outline = True)
560 ctr1.generateContours(contours = 3)
561
562 # Create a Camera.
563 cam1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
564 cam1.elevation(angle = -40)
565
566 # Render the object.
567 s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME))
568 \end{python}
569
570 \subsubsection{\ContourOnPlaneCut class}
571
572 \begin{classdesc}{ContourOnPlaneCut}{scene, data_collector,
573 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
574 outline = True}
575 This class works in a similar way to \MapOnPlaneCut, except that it shows a
576 scalar field using contour surfaces cut using a plane.
577 \end{classdesc}
578
579 The following are some of the methods available:\\
580 Methods from \ActorThreeD, \ContourModule, \Transform and \DataSetMapper.
581
582 \subsubsection{\ContourOnPlaneClip class}
583
584 \begin{classdesc}{ContourOnPlaneClip}{scene, data_collector,
585 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
586 outline = True}
587 This class works in a similar way to \MapOnPlaneClip, except that it shows a
588 scalar field using contour surfaces clipped using a plane.
589 \end{classdesc}
590
591 The following are some of the methods available:\\
592 Methods from \ActorThreeD, \ContourModule, \Transform, \Clipper and
593 \DataSetMapper.
594
595 \subsubsection{\StreamLine class}
596
597 \begin{classdesc}{StreamLine}{scene, data_collector,
598 viewport = Viewport.SOUTH_WEST, color_mode = ColorMode.VECTOR, lut = Lut.COLOR,
599 cell_to_point = False, outline = True}
600 Class that shows the direction of particles of a vector field using streamlines.
601 The streamlines can either be colored or grey-scaled, depending on the lookup
602 table used. If the streamlines are colored, there are two possible coloring
603 modes, either using vector data or scalar data.
604 \end{classdesc}
605
606 The following are some of the methods available:\\
607 Methods from \ActorThreeD, \PointSource, \StreamLineModule, \Tube and
608 \DataSetMapper.
609
610 A typical usage of \StreamLine is shown below.
611
612 \begin{python}
613 # Import the necessary modules.
614 from esys.pyvisi import Scene, DataCollector, StreamLine, Camera
615 from esys.pyvisi.constant import *
616 import os
617
618 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
619 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
620 X_SIZE = 400
621 Y_SIZE = 400
622
623 VECTOR_FIELD_CELL_DATA = "temperature"
624 FILE_3D = "interior_3D.xml"
625 IMAGE_NAME = "streamline.jpg"
626 JPG_RENDERER = Renderer.ONLINE_JPG
627
628 # Create a Scene.
629 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
630 y_size = Y_SIZE)
631
# Create a DataCollector reading from an XML file.
633 dc1 = DataCollector(source = Source.XML)
634 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
635
636 # Create a Streamline.
637 sl1 = StreamLine(scene = s, data_collector = dc1,
638 viewport = Viewport.SOUTH_WEST, color_mode = ColorMode.SCALAR,
639 lut = Lut.COLOR, cell_to_point = False, outline = True)
640 sl1.setTubeRadius(radius = 0.02)
641 sl1.setTubeNumberOfSides(3)
642 sl1.setTubeRadiusToVaryByScalar()
643
644 # Create a Camera.
645 c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
646 c1.isometricView()
647
648 # Render the object.
649 s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME))
650 \end{python}
651
652 \subsubsection{\Carpet class}
653
654 \begin{classdesc}{Carpet}{scene, data_collector,
viewport = Viewport.SOUTH_WEST, warp_mode = WarpMode.SCALAR,
656 lut = Lut.COLOR, cell_to_point = False, outline = True}
This class works in a similar way to \MapOnPlaneCut, except that it shows a
scalar field cut by a plane and deformed (warped) along the plane normal. The
plane can either be colored or grey-scaled, depending on the lookup table used.
Similarly, the plane can be deformed using either scalar data or vector data.
661 \end{classdesc}
662
663 The following are some of the methods available:\\
664 Methods from \ActorThreeD, \Warp, \Transform and \DataSetMapper.
665
666 A typical usage of \Carpet is shown below.
667
668 \begin{python}
669 # Import the necessary modules.
670 from esys.pyvisi import Scene, DataCollector, Carpet, Camera
671 from esys.pyvisi.constant import *
672 import os
673
674 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
675 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
676 X_SIZE = 400
677 Y_SIZE = 400
678
679 SCALAR_FIELD_CELL_DATA = "temperature_cell"
680 FILE_3D = "interior_3D.xml"
681 IMAGE_NAME = "carpet.jpg"
682 JPG_RENDERER = Renderer.ONLINE_JPG
683
684 # Create a Scene.
685 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
686 y_size = Y_SIZE)
687
# Create a DataCollector reading from an XML file.
689 dc1 = DataCollector(source = Source.XML)
690 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
691 dc1.setActiveScalar(scalar = SCALAR_FIELD_CELL_DATA)
692
693 # Create a Carpet.
694 cpt1 = Carpet(scene = s, data_collector = dc1, viewport = Viewport.SOUTH_WEST,
695 warp_mode = WarpMode.SCALAR, lut = Lut.COLOR, cell_to_point = True,
696 outline = True)
697 cpt1.setPlaneToXY(0.2)
698 cpt1.setScaleFactor(1.9)
699
700 # Create a Camera.
701 c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
702 c1.isometricView()
703
704 # Render the object.
705 s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME))
706 \end{python}
707
708 \subsubsection{\Legend class}
709
710 \begin{classdesc}{Legend}{scene, data_collector,
711 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, legend = LegendType.SCALAR}
Class that shows a color legend (scalar bar) relating the values of a data
field to colors, depending on the lookup table used.
715
716 The following are some of the methods available:\\
717 Methods from \ActorThreeD, \ScalarBar and \DataSetMapper.
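
A short sketch (assuming the \Scene \texttt{s} and \DataCollector \texttt{dc1}
from the earlier examples):

\begin{python}
# Sketch: add a horizontal scalar legend to an existing scene.
from esys.pyvisi import Legend, LocalPosition
from esys.pyvisi.constant import *

lg = Legend(scene = s, data_collector = dc1, viewport = Viewport.SOUTH_WEST,
        lut = Lut.COLOR, legend = LegendType.SCALAR)
lg.setTitle("Temperature")
lg.setOrientationToHorizontal()
lg.setPosition(LocalPosition(85, 5))
\end{python}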
718
719 \subsubsection{\Rectangle class}
720
721 \begin{classdesc}{Rectangle}{scene, viewport = Viewport.SOUTH_WEST}
Class that generates a rectangular box.
723 \end{classdesc}
724
725 The following are some of the methods available:\\
726 Methods from \ActorThreeD, \CubeSource and \DataSetMapper.
727
728 \subsubsection{\Image class}
729
730 \begin{classdesc}{Image}{scene, image_reader, viewport = Viewport.SOUTH_WEST}
Class that displays an image which can be scaled (up and down) and
has interaction capability. The image can also be translated and rotated along
the X, Y and Z axes. One of the most common uses of this feature is pasting an
image onto a surface map.
735 \end{classdesc}
736
737 The following are some of the methods available:\\
738 Methods from \ActorThreeD, \PlaneSource and \Transform.
739
740 A typical usage of \Image is shown below.
741
742 \begin{python}
743 # Import the necessary modules.
744 from esys.pyvisi import Scene, DataCollector, Map, ImageReader, Image, Camera
745 from esys.pyvisi import GlobalPosition
746 from esys.pyvisi.constant import *
747 import os
748
749 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
750 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
751 X_SIZE = 400
752 Y_SIZE = 400
753
754 SCALAR_FIELD_POINT_DATA = "temperature"
755 FILE_3D = "interior_3D.xml"
756 LOAD_IMAGE_NAME = "flinders.jpg"
757 SAVE_IMAGE_NAME = "image.jpg"
758 JPG_RENDERER = Renderer.ONLINE_JPG
759
760 # Create a Scene.
761 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
762 y_size = Y_SIZE)
763
# Create a DataCollector reading from an XML file.
765 dc1 = DataCollector(source = Source.XML)
766 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
767
768 # Create a Map.
769 m1 = Map(scene = s, data_collector = dc1, viewport = Viewport.SOUTH_WEST,
770 lut = Lut.COLOR, cell_to_point = False, outline = True)
771 m1.setOpacity(0.3)
772
773 # Create an ImageReader (in place of DataCollector).
774 ir = ImageReader(ImageFormat.JPG)
775 ir.setImageName(image_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, \
776 LOAD_IMAGE_NAME))
777
778 # Create an Image.
779 i = Image(scene = s, image_reader = ir, viewport = Viewport.SOUTH_WEST)
780 i.setOpacity(opacity = 0.9)
781 i.translate(0,0,-1)
782 i.setPoint1(GlobalPosition(2,0,0))
783 i.setPoint2(GlobalPosition(0,2,0))
784
785 # Create a Camera.
786 c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
787
788 # Render the image.
789 s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, SAVE_IMAGE_NAME))
790 \end{python}
791
792 \subsubsection{\Logo class}
793
794 \begin{classdesc}{Logo}{scene, image_reader, viewport = Viewport.SOUTH_WEST}
Class that displays a static image, in particular a logo
(e.g. a company symbol), and has NO interaction capability. The position and
size of the logo can be specified.
798 \end{classdesc}
799
800 The following are some of the methods available:\\
801 Methods from \ImageReslice and \ActorTwoD.
802
803 \subsubsection{\Movie class}
804
805 \begin{classdesc}{Movie}{parameter_file = "make_movie"}
806 Class that creates a file called 'make_movie' by default (if a parameter
file name is not specified) which contains a list of parameters required
808 by the 'ppmtompeg' command to generate a movie from a series of images.
809 \end{classdesc}
810
811 The following are some of the methods available:\\
812 \begin{methoddesc}[Movie]{imageRange}{input_directory, first_image, last_image}
The range of images from which the movie is to be generated.
814 \end{methoddesc}
815
816 \begin{methoddesc}[Movie]{imageList}{input_directory, image_list}
The list of images from which the movie is to be generated.
818 \end{methoddesc}
819
820 \begin{methoddesc}[Movie]{makeMovie}{movie}
821 Generate the movie.
822 \end{methoddesc}
823
824 A typical usage of \Movie is shown below.
825
826 \begin{python}
827 # Import the necessary modules.
828 from esys.pyvisi import Scene, DataCollector, Map, Camera, Velocity, Legend
829 from esys.pyvisi import Movie, LocalPosition
830 from esys.pyvisi.constant import *
831 import os
832
833 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
834 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
835 X_SIZE = 800
836 Y_SIZE = 800
837
838 SCALAR_FIELD_POINT_DATA = "temp"
839 FILE_2D = "tempvel-"
840 IMAGE_NAME = "movie"
841 JPG_RENDERER = Renderer.ONLINE_JPG
842
843 # Create a Scene.
844 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
845 y_size = Y_SIZE)
846
# Create a DataCollector reading from an XML file.
848 dc1 = DataCollector(source = Source.XML)
849 dc1.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA)
850
851 # Create a Map.
852 m1 = Map(scene = s, data_collector = dc1,
853 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
854 outline = True)
855
856 # Create a Camera.
857 cam1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
858
859 # Create a movie.
860 mov = Movie()
861 #lst = []
862
# Read in the files one after another and render the object.
for i in range(938, 949):
    dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, \
            FILE_2D + "%06d.vtu") % i)

    s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, \
            IMAGE_NAME + "%06d.jpg") % i)

    #lst.append(IMAGE_NAME + "%06d.jpg" % i)
872
873 # Images (first and last inclusive) from which the movie is to be generated.
874 mov.imageRange(input_directory = PYVISI_EXAMPLE_IMAGES_PATH,
875 first_image = IMAGE_NAME + "000938.jpg",
876 last_image = IMAGE_NAME + "000948.jpg")
877
878 # Alternatively, a list of images can be specified.
879 #mov.imageList(input_directory = PYVISI_EXAMPLE_IMAGES_PATH, image_list = lst)
880
881 # Generate the movie.
882 mov.makeMovie(os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, "movie.mpg"))
883 \end{python}
884
885
886 %##############################################################################
887
888
889 \subsection{Coordinate Classes}
890 This subsection details the instances used to position the rendered object.
891
892 \subsubsection{\LocalPosition class}
893
894 \begin{classdesc}{LocalPosition}{x_coor, y_coor}
895 Class that defines the local positioning (X and Y) coordinate system (2D).
896 \end{classdesc}
897
898 \subsubsection{\GlobalPosition class}
899
900 \begin{classdesc}{GlobalPosition}{x_coor, y_coor, z_coor}
901 Class that defines the global positioning (X, Y and Z) coordinate system (3D).
902 \end{classdesc}
903
904
905 %##############################################################################
906
907
908 \subsection{Supporting Classes}
909 This subsection details the supporting classes and their corresponding methods
910 inherited by the input (see Section \ref{INPUT SEC}) and data
911 visualization classes (see Section \ref{DATAVIS SEC}).
912
913 \subsubsection{\ActorThreeD class}
914
915 The following are some of the methods available:
916
917 \begin{methoddesc}[Actor3D]{setOpacity}{opacity}
918 Set the opacity (transparency) of the 3D actor.
919 \end{methoddesc}
920
921 \begin{methoddesc}[Actor3D]{setColor}{color}
922 Set the color of the 3D actor.
923 \end{methoddesc}
924
925 \begin{methoddesc}[Actor3D]{setRepresentationToWireframe}{}
926 Set the representation of the 3D actor to wireframe.
927 \end{methoddesc}
928
929 \subsubsection{\ActorTwoD class}
930
931 The following are some of the methods available:
932
933 \begin{methoddesc}[Actor2D]{setPosition}{position}
Set the position (XY) of the 2D actor. The default position is the lower
left-hand corner of the window / viewport.
936 \end{methoddesc}
937
938 \subsubsection{\Clipper class}
939
940 The following are some of the methods available:
941
942 \begin{methoddesc}[Clipper]{setInsideOutOn}{}
943 Clips one side of the rendered object.
944 \end{methoddesc}
945
946 \begin{methoddesc}[Clipper]{setInsideOutOff}{}
947 Clips the other side of the rendered object.
948 \end{methoddesc}
949
950 \begin{methoddesc}[Clipper]{setClipValue}{value}
951 Set the scalar clip value (instead of using a plane) for the clipper.
952 \end{methoddesc}
953
954 \subsubsection{\ContourModule class}
955
956 The following are some of the methods available:
957
958 \begin{methoddesc}[ContourModule]{generateContours}{contours = None,
959 lower_range = None, upper_range = None}
960 Generate the specified number of contours within the specified range.
961 In order to generate an iso surface, the 'lower_range' and 'upper_range'
962 must be equal.
963 \end{methoddesc}
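
For example, to generate a single iso surface at a fixed value (a sketch,
assuming the \Contour instance \texttt{ctr1} from the earlier example; the
value 0.5 is arbitrary):

\begin{python}
# Sketch: equal lower and upper ranges produce an iso surface at that value.
ctr1.generateContours(contours = 1, lower_range = 0.5, upper_range = 0.5)
\end{python}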
964
965 \subsubsection{\GlyphThreeD class}
966
967 The following are some of the methods available:
968
969 \begin{methoddesc}[Glyph3D]{setScaleModeByVector}{}
970 Set the 3D glyph to scale according to the vector data.
971 \end{methoddesc}
972
973 \begin{methoddesc}[Glyph3D]{setScaleModeByScalar}{}
974 Set the 3D glyph to scale according to the scalar data.
975 \end{methoddesc}
976
977 \begin{methoddesc}[Glyph3D]{setScaleFactor}{scale_factor}
978 Set the 3D glyph scale factor.
979 \end{methoddesc}
980
981 \subsubsection{\TensorGlyph class}
982
983 The following are some of the methods available:
984
985 \begin{methoddesc}[TensorGlyph]{setScaleFactor}{scale_factor}
986 Set the scale factor for the tensor glyph.
987 \end{methoddesc}
988
989 \begin{methoddesc}[TensorGlyph]{setMaxScaleFactor}{max_scale_factor}
990 Set the maximum allowable scale factor for the tensor glyph.
991 \end{methoddesc}
992
993 \subsubsection{\PlaneSource class}
994
995 The following are some of the methods available:
996
997 \begin{methoddesc}[PlaneSource]{setPoint1}{position}
998 Set the first point from the origin of the plane source.
999 \end{methoddesc}
1000
1001 \begin{methoddesc}[PlaneSource]{setPoint2}{position}
1002 Set the second point from the origin of the plane source.
1003 \end{methoddesc}
1004
1005 \subsubsection{\PointSource class}
1006
1007 The following are some of the methods available:
1008
1009 \begin{methoddesc}[PointSource]{setPointSourceRadius}{radius}
1010 Set the radius of the sphere.
1011 \end{methoddesc}
1012
1013 \begin{methoddesc}[PointSource]{setPointSourceCenter}{center}
1014 Set the center of the sphere.
1015 \end{methoddesc}
1016
1017 \begin{methoddesc}[PointSource]{setPointSourceNumberOfPoints}{points}
1018 Set the number of points to generate within the sphere (the larger the
1019 number of points, the more streamlines are generated).
1020 \end{methoddesc}
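
For example (a sketch, assuming the \StreamLine instance \texttt{sl1} from the
earlier example and that the center is given as a \GlobalPosition; all values
are arbitrary):

\begin{python}
# Sketch: seed the streamlines from a small sphere near the domain center.
from esys.pyvisi import GlobalPosition

sl1.setPointSourceRadius(radius = 0.1)
sl1.setPointSourceCenter(center = GlobalPosition(0.5, 0.5, 0.5))
sl1.setPointSourceNumberOfPoints(points = 10)
\end{python}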
1021
1022 \subsubsection{\Sphere class}
1023
1024 The following are some of the methods available:
1025
1026 \begin{methoddesc}[Sphere]{setThetaResolution}{resolution}
1027 Set the theta resolution of the sphere.
1028 \end{methoddesc}
1029
1030 \begin{methoddesc}[Sphere]{setPhiResolution}{resolution}
1031 Set the phi resolution of the sphere.
1032 \end{methoddesc}
1033
1034 \subsubsection{\StreamLineModule class}
1035
1036 The following are some of the methods available:
1037
1038 \begin{methoddesc}[StreamLineModule]{setMaximumPropagationTime}{time}
1039 Set the maximum length of the streamline expressed in elapsed time.
1040 \end{methoddesc}
1041
1042 \begin{methoddesc}[StreamLineModule]{setIntegrationToBothDirections}{}
Set the integration to occur in both directions: forward (where the streamline
goes) and backward (where the streamline came from).
1045 \end{methoddesc}
1046
1047 \subsubsection{\Transform class}
1048
1049 The following are some of the methods available:
1050
1051 \begin{methoddesc}[Transform]{translate}{x_offset, y_offset, z_offset}
1052 Translate the rendered object along the x, y and z-axes.
1053 \end{methoddesc}
1054
\begin{methoddesc}[Transform]{rotateX}{angle}
Rotate the plane about the x-axis.
\end{methoddesc}

\begin{methoddesc}[Transform]{rotateY}{angle}
Rotate the plane about the y-axis.
\end{methoddesc}

\begin{methoddesc}[Transform]{rotateZ}{angle}
Rotate the plane about the z-axis.
\end{methoddesc}
1066
1067 \begin{methoddesc}[Transform]{setPlaneToXY}{offset = 0}
1068 Set the plane orthogonal to the z-axis.
1069 \end{methoddesc}
1070
1071 \begin{methoddesc}[Transform]{setPlaneToYZ}{offset = 0}
1072 Set the plane orthogonal to the x-axis.
1073 \end{methoddesc}
1074
1075 \begin{methoddesc}[Transform]{setPlaneToXZ}{offset = 0}
1076 Set the plane orthogonal to the y-axis.
1077 \end{methoddesc}
1078
1079 \subsubsection{\Tube class}
1080
1081 The following are some of the methods available:
1082
1083 \begin{methoddesc}[Tube]{setTubeRadius}{radius}
1084 Set the radius of the tube.
1085 \end{methoddesc}
1086
1087 \begin{methoddesc}[Tube]{setTubeRadiusToVaryByVector}{}
1088 Set the radius of the tube to vary by vector data.
1089 \end{methoddesc}
1090
1091 \begin{methoddesc}[Tube]{setTubeRadiusToVaryByScalar}{}
1092 Set the radius of the tube to vary by scalar data.
1093 \end{methoddesc}
1094
1095 \subsubsection{\Warp class}
1096
1097 The following are some of the methods available:
1098
1099 \begin{methoddesc}[Warp]{setScaleFactor}{scale_factor}
1100 Set the displacement scale factor.
1101 \end{methoddesc}
1102
1103 \subsubsection{\MaskPoints class}
1104
1105 The following are some of the methods available:
1106
1107 \begin{methoddesc}[MaskPoints]{setRatio}{ratio}
1108 Mask every n'th point.
1109 \end{methoddesc}
1110
1111 \begin{methoddesc}[MaskPoints]{randomOn}{}
Enable the randomization of the points selected for masking.
1113 \end{methoddesc}
1114
1115
1116 \subsubsection{\ScalarBar class}
1117
1118 The following are some of the methods available:
1119
1120 \begin{methoddesc}[ScalarBar]{setTitle}{title}
1121 Set the title of the scalar bar.
1122 \end{methoddesc}
1123
1124 \begin{methoddesc}[ScalarBar]{setPosition}{position}
1125 Set the local position of the scalar bar.
1126 \end{methoddesc}
1127
1128 \begin{methoddesc}[ScalarBar]{setOrientationToHorizontal}{}
1129 Set the orientation of the scalar bar to horizontal.
1130 \end{methoddesc}
1131
1132 \begin{methoddesc}[ScalarBar]{setOrientationToVertical}{}
1133 Set the orientation of the scalar bar to vertical.
1134 \end{methoddesc}
1135
1136 \begin{methoddesc}[ScalarBar]{setHeight}{height}
1137 Set the height of the scalar bar.
1138 \end{methoddesc}
1139
1140 \begin{methoddesc}[ScalarBar]{setWidth}{width}
1141 Set the width of the scalar bar.
1142 \end{methoddesc}
1143
1144 \begin{methoddesc}[ScalarBar]{setLabelColor}{color}
1145 Set the color of the scalar bar's label.
1146 \end{methoddesc}
1147
1148 \begin{methoddesc}[ScalarBar]{setTitleColor}{color}
1149 Set the color of the scalar bar's title.
1150 \end{methoddesc}
1151
1152 \subsubsection{\ImageReslice class}
1153
1154 The following are some of the methods available:
1155
1156 \begin{methoddesc}[ImageReslice]{setSize}{size}
1157 Set the size of the image (logo in particular), between 0 and 2. Size 1 (one)
1158 displays the image in its original size (which is the default).
1159 \end{methoddesc}
1160
1161 \subsubsection{\DataSetMapper class}
1162
1163 The following are some of the methods available:
1164
1165 \begin{methoddesc}[DataSetMapper]{setScalarRange}{lower_range, upper_range}
Set the minimum and maximum scalar range for the data set mapper. This
1167 method is called when the range has been specified by the user.
1168 Therefore, the scalar range read from the source will be ignored.
1169 \end{methoddesc}
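
For instance, to force a fixed color range instead of the range read from the
source (a sketch, assuming the \Map instance \texttt{m1} from the earlier
example; the values are arbitrary):

\begin{python}
# Sketch: fix the color range used by the data set mapper.
m1.setScalarRange(lower_range = 0.0, upper_range = 1.0)
\end{python}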
1170
1171 \subsubsection{\CubeSource class}
1172
1173 The following are some of the methods available:
1174
1175 \begin{methoddesc}[CubeSource]{setCenter}{center}
1176 Set the cube source center.
1177 \end{methoddesc}
1178
1179 \begin{methoddesc}[CubeSource]{setXLength}{length}
1180 Set the cube source length along the x-axis.
1181 \end{methoddesc}
1182
1183 \begin{methoddesc}[CubeSource]{setYLength}{length}
1184 Set the cube source length along the y-axis.
1185 \end{methoddesc}
1186
1187 \begin{methoddesc}[CubeSource]{setZLength}{length}
1188 Set the cube source length along the z-axis.
1189 \end{methoddesc}
1190
1191
1192 % #############################################################################
1193
1194
1195 \section{More Examples}
1196 This section shows more examples.
1197
1198 \textsf{Reading A Series of Files}
1199
1200 \begin{python}
1201 # Import the necessary modules.
1202 from esys.pyvisi import Scene, DataCollector, Contour, Camera
1203 from esys.pyvisi.constant import *
1204 import os
1205
1206 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
1207 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
1208 X_SIZE = 400
1209 Y_SIZE = 300
1210
1211 SCALAR_FIELD_POINT_DATA_1 = "lava"
1212 SCALAR_FIELD_POINT_DATA_2 = "talus"
1213 FILE_2D = "phi_talus_lava."
1214
1215 IMAGE_NAME = "seriesofreads"
1216 JPG_RENDERER = Renderer.ONLINE_JPG
1217
1218 # Create a Scene.
1219 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
1220 y_size = Y_SIZE)
1221
# Create a DataCollector reading from an XML file.
1223 dc1 = DataCollector(source = Source.XML)
1224 dc1.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA_1)
1225
1226 # Create a Contour.
1227 mosc1 = Contour(scene = s, data_collector = dc1,
1228 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
1229 outline = True)
1230 mosc1.generateContours(0)
1231
1232 # Create a second DataCollector reading from the same XML file.
1233 dc2 = DataCollector(source = Source.XML)
1234 dc2.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA_2)
1235
1236 # Create a second Contour.
1237 mosc2 = Contour(scene = s, data_collector = dc2,
1238 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
1239 outline = True)
1240 mosc2.generateContours(0)
1241
1242 # Create a Camera.
1243 cam1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
1244
# Read in the files one after another and render the object.
for i in range(99, 104):
    dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, \
            FILE_2D + "%04d.vtu") % i)
    dc2.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, \
            FILE_2D + "%04d.vtu") % i)

    s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, \
            IMAGE_NAME + "%04d.jpg") % i)
1254 \end{python}
1255
\textsf{Manipulating A Single File with A Series of Translations}

The sketch below (an illustrative sketch only, reusing the
\texttt{interior\_3D.xml} data set and \texttt{temperature} scalar field from
the earlier examples) cuts the domain with a plane and translates the cut
along the z-axis, rendering an image after each translation.

\begin{python}
# Import the necessary modules.
from esys.pyvisi import Scene, DataCollector, MapOnPlaneCut, Camera
from esys.pyvisi.constant import *
import os

PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
X_SIZE = 400
Y_SIZE = 400

SCALAR_FIELD_POINT_DATA = "temperature"
FILE_3D = "interior_3D.xml"
IMAGE_NAME = "seriesoftranslations"
JPG_RENDERER = Renderer.ONLINE_JPG

# Create a Scene.
s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
        y_size = Y_SIZE)

# Create a DataCollector reading from an XML file.
dc1 = DataCollector(source = Source.XML)
dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
dc1.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA)

# Create a MapOnPlaneCut and set the initial cut plane.
mopc1 = MapOnPlaneCut(scene = s, data_collector = dc1,
        viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
        outline = True)
mopc1.setPlaneToXY()

# Create a Camera.
c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
c1.isometricView()

# Translate the cut plane along the z-axis and render after each step.
for i in range(0, 5):
    mopc1.translate(0, 0, 0.2)
    s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, \
            IMAGE_NAME + "%02d.jpg") % i)
\end{python}
1261
1262 \textsf{Reading Data Directly from Escript Objects}
1263
1264 \begin{python}
1265 # Import the necessary modules.
1266 from esys.escript import *
1267 from esys.escript.linearPDEs import LinearPDE
1268 from esys.finley import Rectangle
1269 from esys.pyvisi import Scene, DataCollector, Map, Camera
1270 from esys.pyvisi.constant import *
1271 import os
1272
1273 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
1274 X_SIZE = 400
1275 Y_SIZE = 400
1276 JPG_RENDERER = Renderer.ONLINE_JPG
1277
1278 #... set some parameters ...
1279 xc=[0.02,0.002]
1280 r=0.001
1281 qc=50.e6
1282 Tref=0.
1283 rhocp=2.6e6
1284 eta=75.
1285 kappa=240.
1286 tend=5.
1287 # ... time, time step size and counter ...
1288 t=0
1289 h=0.1
1290 i=0
1291
1292 #... generate domain ...
1293 mydomain = Rectangle(l0=0.05,l1=0.01,n0=250, n1=50)
1294 #... open PDE ...
1295 mypde=LinearPDE(mydomain)
1296 mypde.setSymmetryOn()
1297 mypde.setValue(A=kappa*kronecker(mydomain),D=rhocp/h,d=eta,y=eta*Tref)
1298 # ... set heat source: ....
1299 x=mydomain.getX()
1300 qH=qc*whereNegative(length(x-xc)-r)
1301 # ... set initial temperature ....
1302 T=Tref
1303
1304 # Create a Scene.
1305 s = Scene(renderer = JPG_RENDERER, x_size = X_SIZE, y_size = Y_SIZE)
1306
1307 # Create a DataCollector reading directly from escript objects.
1308 dc = DataCollector(source = Source.ESCRIPT)
1309
1310 # Create a Map.
1311 m = Map(scene = s, data_collector = dc, \
1312 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, \
1313 cell_to_point = False, outline = True)
1314
1315 # Create a Camera.
1316 c = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
1317
1318 # ... start iteration:
while t<0.4:
    i+=1
    t+=h
    mypde.setValue(Y=qH+rhocp/h*T)
    T=mypde.getSolution()

    dc.setData(temp = T)

    # Render the object.
    s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, \
            "diffusion%02d.jpg") % i)
1330 \end{python}
1331
1332 \newpage
1333
1334 \section{Useful Keys}
This section lists some useful keys for interacting with the rendered
object (in the Online approach).
1337
1338 \begin{table}[ht]
1339 \begin{center}
1340 \begin{tabular}{| c | p{13cm} |}
1341 \hline
1342 \textbf{Key} & \textbf{Description} \\ \hline
1343 Keypress 'c' / 'a' & Toggle between the camera ('c') and object ('a') mode. In
1344 camera mode, mouse events affect the camera position and focal point. In
1345 object mode, mouse events affect the rendered object's element (i.e.
1346 cut surface map, clipped velocity field, streamline, etc) that is under the
1347 mouse pointer.\\ \hline
1348 Mouse button 1 & Rotate the camera around its focal point (if in camera mode)
1349 or rotate the rendered object's element (if in object mode).\\ \hline
Mouse button 2 & Pan the camera (if in camera mode) or translate the rendered
1351 object's element (if in object mode). \\ \hline
1352 Mouse button 3 & Zoom the camera (if in camera mode) or scale the rendered
1353 object's element (if in object mode). \\ \hline
Keypress '3' & Toggle the render window in and out of stereo mode. By default,
1355 red-blue stereo pairs are created. \\ \hline
1356 Keypress 'e' / 'q' & Exit the application if only one file is to be read, or
1357 read and display the next file if multiple files are to be read. \\ \hline
1358 Keypress 's' & Modify the representation of the rendered object to surfaces.
1359 \\ \hline
1360 Keypress 'w' & Modify the representation of the rendered object to wireframe.
1361 \\ \hline
1362 Keypress 'r' & Reset the position of the rendered object to the center.
1363 \\ \hline
1364 \end{tabular}
1365 \caption{Useful keys}
1366 \end{center}
1367 \end{table}
1368
1369
1370 % ############################################################################
1371
1372
1373 \newpage
1374
1375 \section{Sample Output}
This section displays some sample output generated by Pyvisi.
1377
1378 \begin{table}[ht]
1379 \begin{tabular}{c c c}
1380 \includegraphics[width=\thumbnailwidth]{figures/Map} &
1381 \includegraphics[width=\thumbnailwidth]{figures/MapOnPlaneCut} &
1382 \includegraphics[width=\thumbnailwidth]{figures/MapOnPlaneClip} \\
1383 Map & MapOnPlaneCut & MapOnPlaneClip \\
1384 \includegraphics[width=\thumbnailwidth]{figures/MapOnScalarClip} &
1385 \includegraphics[width=\thumbnailwidth]{figures/Velocity} &
1386 \includegraphics[width=\thumbnailwidth]{figures/VelocityOnPlaneCut} \\
1387 MapOnScalarClip & Velocity & VelocityOnPlaneCut \\
1388 \includegraphics[width=\thumbnailwidth]{figures/VelocityOnPlaneClip} &
1389 \includegraphics[width=\thumbnailwidth]{figures/Ellipsoid} &
1390 \includegraphics[width=\thumbnailwidth]{figures/EllipsoidOnPlaneCut} \\
1391 VelocityOnPlaneClip & Ellipsoid & EllipsoidOnPlaneCut \\
1392 \includegraphics[width=\thumbnailwidth]{figures/EllipsoidOnPlaneClip} &
1393 \includegraphics[width=\thumbnailwidth]{figures/Contour} &
1394 \includegraphics[width=\thumbnailwidth]{figures/ContourOnPlaneCut} \\
1395 EllipsoidOnPlaneClip & Contour & ContourOnPlaneCut \\
1396 \end{tabular}
1397 \caption{Sample output}
1398 \end{table}
1399
1400 \begin{table}[t]
1401 \begin{tabular}{c c c}
1402 \includegraphics[width=\thumbnailwidth]{figures/ContourOnPlaneClip} &
1403 \includegraphics[width=\thumbnailwidth]{figures/StreamLine} &
1404 \includegraphics[width=\thumbnailwidth]{figures/Carpet} \\
1405 ContourOnPlaneClip & StreamLine & Carpet \\
1406 \includegraphics[width=\thumbnailwidth]{figures/Rectangle} &
1407 \includegraphics[width=\thumbnailwidth]{figures/Text} &
1408 \includegraphics[width=\thumbnailwidth]{figures/Logo} \\
1409 Rectangle & Text & Logo \\
1410 \includegraphics[width=\thumbnailwidth]{figures/Image} &
1411 \includegraphics[width=\thumbnailwidth]{figures/Legend} \\
1412 Image & Legend \\
1413 \end{tabular}
\caption{Sample output (continued)}
1415 \end{table}
1416
1417
