1 \chapter{The module \pyvisi}
2 \label{PYVISI CHAP}
3 \declaremodule{extension}{esys.pyvisi}
4 \modulesynopsis{Python Visualization Interface}
5
6 \section{Introduction}
\pyvisi is a Python module used to generate 2D and 3D visualizations
for escript and its PDE solvers: finley and bruce. The module provides
an easy-to-use interface to the \VTK library (\VTKUrl). There are three
approaches to rendering an object: (1) Online - the object is rendered
on-screen with interaction capability (i.e. zooming and rotating),
(2) Offline - the object is rendered off-screen (no pop-up window), and
(3) Display - the object is rendered on-screen but without interaction
capability (which makes on-the-fly animation possible). All three approaches
have the option of saving the rendered object as an image (e.g. jpg).
16
The following outlines the general steps for using \pyvisi:
18
19 \begin{enumerate}
\item Create a \Scene instance, the window in which objects are rendered.
\item Create a data input instance (e.g. \DataCollector or \ImageReader), which
reads and loads the source data for visualization.
\item Create a data visualization instance (e.g. \Map, \Velocity, \Ellipsoid,
\Contour, \Carpet, \StreamLine or \Image), which processes and manipulates the
source data.
26 \item Create a \Camera or \Light instance, which controls the viewing angle and
27 lighting effects.
28 \item Render the object using either the Online, Offline or Display approach.
29 \end{enumerate}
30 \begin{center}
31 \begin{math}
32 scene \rightarrow data \; input \rightarrow data \; visualization \rightarrow
camera \, / \, light \rightarrow render
34 \end{math}
35 \end{center}
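
As an illustration of these steps, a minimal script might look as follows.
This is only a sketch: the file name \texttt{mesh.xml} is a placeholder, and
complete, runnable examples are given in the sections below.

\begin{python}
# Import the necessary modules.
from esys.pyvisi import Scene, DataCollector, Map, Camera
from esys.pyvisi.constant import *

# (1) Create a Scene (one viewport, rendered on-screen with interaction).
s = Scene(renderer = Renderer.ONLINE, num_viewport = 1, x_size = 800,
        y_size = 600)

# (2) Create a DataCollector and read the source data from an XML file.
dc = DataCollector(source = Source.XML)
dc.setFileName(file_name = "mesh.xml")

# (3) Create a Map to visualize the scalar field.
m = Map(scene = s, data_collector = dc)

# (4) Create a Camera to control the viewing angle.
c = Camera(scene = s)

# (5) Render the object.
s.render()
\end{python}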
36
37 \section{\pyvisi Classes}
38 The following subsections give a brief overview of the important classes
39 and some of their corresponding methods. Please refer to \ReferenceGuide for
40 full details.
41
42
43 %#############################################################################
44
45
46 \subsection{Scene Classes}
47 This subsection details the instances used to setup the viewing environment.
48
49 \subsubsection{\Scene class}
50
51 \begin{classdesc}{Scene}{renderer = Renderer.ONLINE, num_viewport = 1,
52 x_size = 1152, y_size = 864}
A scene is the window in which objects are rendered. Only
54 one scene needs to be created. However, a scene may be divided into four
55 smaller windows called viewports (if needed). Each viewport in turn can
56 render a different object.
57 \end{classdesc}
58
59 The following are some of the methods available:
60 \begin{methoddesc}[Scene]{setBackground}{color}
61 Set the background color of the scene.
62 \end{methoddesc}
63
64 \begin{methoddesc}[Scene]{render}{image_name = None}
65 Render the object using either the Online, Offline or Display mode.
66 \end{methoddesc}
67
68 \subsubsection{\Camera class}
69
70 \begin{classdesc}{Camera}{scene, viewport = Viewport.SOUTH_WEST}
71 A camera controls the display angle of the rendered object and one is
72 usually created for a \Scene. However, if a \Scene has four viewports, then a
73 separate camera may be created for each viewport.
74 \end{classdesc}
75
76 The following are some of the methods available:
77 \begin{methoddesc}[Camera]{setFocalPoint}{position}
78 Set the focal point of the camera.
79 \end{methoddesc}
80
81 \begin{methoddesc}[Camera]{setPosition}{position}
82 Set the position of the camera.
83 \end{methoddesc}
84
85 \begin{methoddesc}[Camera]{azimuth}{angle}
86 Rotate the camera to the left and right.
87 \end{methoddesc}
88
89 \begin{methoddesc}[Camera]{elevation}{angle}
Rotate the camera up and down (the angle must be between -90 and 90).
91 \end{methoddesc}
92
93 \begin{methoddesc}[Camera]{backView}{}
94 Rotate the camera to view the back of the rendered object.
95 \end{methoddesc}
96
97 \begin{methoddesc}[Camera]{topView}{}
98 Rotate the camera to view the top of the rendered object.
99 \end{methoddesc}
100
101 \begin{methoddesc}[Camera]{bottomView}{}
102 Rotate the camera to view the bottom of the rendered object.
103 \end{methoddesc}
104
105 \begin{methoddesc}[Camera]{leftView}{}
106 Rotate the camera to view the left side of the rendered object.
107 \end{methoddesc}
108
109 \begin{methoddesc}[Camera]{rightView}{}
110 Rotate the camera to view the right side of the rendered object.
111 \end{methoddesc}
112
113 \begin{methoddesc}[Camera]{isometricView}{}
114 Rotate the camera to view the isometric angle of the rendered object.
115 \end{methoddesc}
116
117 \begin{methoddesc}[Camera]{dolly}{distance}
Move the camera towards (distance greater than 1) or away from (distance less
than 1) the rendered object.
120 \end{methoddesc}
121
122 \subsubsection{\Light class}
123
124 \begin{classdesc}{Light}{scene, viewport = Viewport.SOUTH_WEST}
125 A light controls the lighting effect for the rendered object and works in
126 a similar way to \Camera.
127 \end{classdesc}
128
129 The following are some of the methods available:
130 \begin{methoddesc}[Light]{setColor}{color}
131 Set the light color.
132 \end{methoddesc}
133
134 \begin{methoddesc}[Light]{setFocalPoint}{position}
135 Set the focal point of the light.
136 \end{methoddesc}
137
138 \begin{methoddesc}[Light]{setPosition}{position}
139 Set the position of the light.
140 \end{methoddesc}
141
142 \begin{methoddesc}[Light]{setAngle}{elevation = 0, azimuth = 0}
An alternative way of setting the position and focal point of the light, using
the elevation and azimuth angles.
145 \end{methoddesc}
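
\Light does not appear in the examples below, so a minimal usage sketch is
given here. It assumes a \Scene \texttt{s} has already been created and that
\Light and \texttt{GlobalPosition} are imported from \texttt{esys.pyvisi}; the
coordinates and angles are purely illustrative.

\begin{python}
from esys.pyvisi import Light, GlobalPosition
from esys.pyvisi.constant import *

# Create a Light for the default (south-west) viewport.
lt = Light(scene = s, viewport = Viewport.SOUTH_WEST)
# Aim the light at the centre of the domain (illustrative coordinates).
lt.setFocalPoint(position = GlobalPosition(0.5, 0.5, 0.5))
# Alternatively, place the light using elevation and azimuth angles.
lt.setAngle(elevation = 30, azimuth = -40)
\end{python}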
146
147
148 %##############################################################################
149
150
151 \subsection{Input Classes}
152 This subsection details the instances used to read and load the source data
153 for visualization.
154
155 \subsubsection{\DataCollector class}
156 \label{DATACOLLECTOR SEC}
157 \begin{classdesc}{DataCollector}{source = Source.XML}
A data collector is used to read data either from an XML file (using
\texttt{setFileName()}) or directly from an escript object (using
\texttt{setData()}). Writing XML files is expensive, but this approach has
the advantage that the results can easily be analyzed after the simulation
has completed.
163 \end{classdesc}
164
165 The following are some of the methods available:
166 \begin{methoddesc}[DataCollector]{setFileName}{file_name}
167 Set the XML file name to read.
168 \end{methoddesc}
169
170 \begin{methoddesc}[DataCollector]{setData}{**args}
Create data using \textless name\textgreater=\textless data\textgreater
pairs. It is assumed that the data is given in the appropriate format.
174 \end{methoddesc}
175
176 \begin{methoddesc}[DataCollector]{setActiveScalar}{scalar}
177 Specify the scalar field to load.
178 \end{methoddesc}
179
180 \begin{methoddesc}[DataCollector]{setActiveVector}{vector}
181 Specify the vector field to load.
182 \end{methoddesc}
183
184 \begin{methoddesc}[DataCollector]{setActiveTensor}{tensor}
185 Specify the tensor field to load.
186 \end{methoddesc}
187
188 \subsubsection{\ImageReader class}
189
190 \begin{classdesc}{ImageReader}{format}
191 An image reader is used to read data from an image in a variety of formats.
192 \end{classdesc}
193
194 The following are some of the methods available:
195 \begin{methoddesc}[ImageReader]{setImageName}{image_name}
196 Set the image name to be read.
197 \end{methoddesc}
198
199 \subsubsection{\TextTwoD class}
200
201 \begin{classdesc}{Text2D}{scene, text, viewport = Viewport.SOUTH_WEST}
Two-dimensional text is used to annotate the rendered object
(e.g. to insert titles, authors and labels).
204 \end{classdesc}
205
206 The following are some of the methods available:
207 \begin{methoddesc}[Text2D]{setFontSize}{size}
208 Set the 2D text size.
209 \end{methoddesc}
210
211 \begin{methoddesc}[Text2D]{boldOn}{}
212 Bold the 2D text.
213 \end{methoddesc}
214
215 \begin{methoddesc}[Text2D]{setColor}{color}
216 Set the color of the 2D text.
217 \end{methoddesc}
218
Methods from \ActorTwoD are also available.
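
A minimal sketch of \TextTwoD usage is given below. It assumes a \Scene
\texttt{s} has already been created and that \TextTwoD and
\texttt{LocalPosition} are imported from \texttt{esys.pyvisi}; the text, font
size and position are purely illustrative.

\begin{python}
from esys.pyvisi import Text2D, LocalPosition
from esys.pyvisi.constant import *

# Create a 2D text annotation in the default viewport.
t = Text2D(scene = s, text = "Temperature distribution",
        viewport = Viewport.SOUTH_WEST)
t.setFontSize(size = 18)
t.boldOn()
# Place the text near the top of the window (pixel coordinates), using the
# setPosition method inherited from Actor2D.
t.setPosition(LocalPosition(50, 370))
\end{python}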
220
221
222 %##############################################################################
223
224
225 \subsection{Data Visualization Classes}
226 This subsection details the instances used to process and manipulate the source
data. The typical usage of some of the classes is also shown.
228
One point to note is that the source data can be either point or cell data. If
the source is cell data, a conversion to point data may be required in order
for the object to be rendered correctly. If a conversion is needed, the
'cell_to_point' flag (see below) must be set to 'True', otherwise 'False'
(the default).
234
235 \subsubsection{\Map class}
236
237 \begin{classdesc}{Map}{scene, data_collector,
238 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
239 outline = True}
240 Class that shows a scalar field on a domain surface. The domain surface
241 can either be colored or grey-scaled, depending on the lookup table used.
242 \end{classdesc}
243
244 The following are some of the methods available:\\
245 Methods from \ActorThreeD.
246
247 A typical usage of \Map is shown below.
248
249 \begin{python}
250 # Import the necessary modules.
251 from esys.pyvisi import Scene, DataCollector, Map, Camera
252 from esys.pyvisi.constant import *
253 import os
254
255 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
256 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
257 X_SIZE = 800
258 Y_SIZE = 800
259
260 SCALAR_FIELD_POINT_DATA = "temperature"
261 SCALAR_FIELD_CELL_DATA = "temperature_cell"
262 FILE_3D = "interior_3D.xml"
263 IMAGE_NAME = "map.jpg"
264 JPG_RENDERER = Renderer.ONLINE_JPG
265
266 # Create a Scene with four viewports.
267 s = Scene(renderer = JPG_RENDERER, num_viewport = 4, x_size = X_SIZE,
268 y_size = Y_SIZE)
269
# Create a DataCollector reading from an XML file.
271 dc1 = DataCollector(source = Source.XML)
272 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
273 dc1.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA)
274
275 # Create a Map for the first viewport.
276 m1 = Map(scene = s, data_collector = dc1, viewport = Viewport.SOUTH_WEST,
277 lut = Lut.COLOR, cell_to_point = False, outline = True)
278 m1.setRepresentationToWireframe()
279
# Create a Camera for the first viewport.
281 c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
282 c1.isometricView()
283
284 # Create a second DataCollector reading from the same XML file but specifying
285 # a different scalar field.
286 dc2 = DataCollector(source = Source.XML)
287 dc2.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
288 dc2.setActiveScalar(scalar = SCALAR_FIELD_CELL_DATA)
289
290 # Create a Map for the third viewport.
291 m2 = Map(scene = s, data_collector = dc2, viewport = Viewport.NORTH_EAST,
292 lut = Lut.COLOR, cell_to_point = True, outline = True)
293
# Create a Camera for the third viewport.
c2 = Camera(scene = s, viewport = Viewport.NORTH_EAST)
296
297 # Render the object.
298 s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME))
299 \end{python}
300
301 \subsubsection{\MapOnPlaneCut class}
302
303 \begin{classdesc}{MapOnPlaneCut}{scene, data_collector,
304 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
305 outline = True}
306 This class works in a similar way to \Map, except that it shows a scalar
307 field cut using a plane. The plane can be translated and rotated along the
308 X, Y and Z axes.
309 \end{classdesc}
310
311 The following are some of the methods available:\\
312 Methods from \ActorThreeD and \Transform.
313
314 \subsubsection{\MapOnPlaneClip class}
315
316 \begin{classdesc}{MapOnPlaneClip}{scene, data_collector,
317 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
318 outline = True}
319 This class works in a similar way to \MapOnPlaneCut, except that it shows a
320 scalar field clipped using a plane.
321 \end{classdesc}
322
323 The following are some of the methods available:\\
324 Methods from \ActorThreeD, \Transform and \Clipper.
325
326 \subsubsection{\MapOnScalarClip class}
327
328 \begin{classdesc}{MapOnScalarClip}{scene, data_collector,
329 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
330 outline = True}
331 This class works in a similar way to \Map, except that it shows a scalar
332 field clipped using a scalar value.
333 \end{classdesc}
334
335 The following are some of the methods available:\\
336 Methods from \ActorThreeD and \Clipper.
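
A minimal sketch of \MapOnScalarClip is given below, reusing the \Scene
\texttt{s} and \DataCollector \texttt{dc1} from the \Map example above; the
clip value is purely illustrative.

\begin{python}
from esys.pyvisi import MapOnScalarClip
from esys.pyvisi.constant import *

mosc = MapOnScalarClip(scene = s, data_collector = dc1,
        viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
        outline = True)
# Clip using a scalar value instead of a plane (illustrative value).
mosc.setClipValue(value = 1.0)
# Select which side of the clip value is retained (method from Clipper).
mosc.setInsideOutOn()
\end{python}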
337
338 \subsubsection{\Velocity class}
339
340 \begin{classdesc}{Velocity}{scene, data_collector, arrow = Arrow.TWO_D,
341 color_mode = ColorMode.VECTOR, viewport = Viewport.SOUTH_WEST,
342 lut = Lut.COLOR, cell_to_point = False, outline = True}
343 Class that shows a vector field using arrows. The arrows can either be
344 colored or grey-scaled, depending on the lookup table used. If the arrows
345 are colored, there are two possible coloring modes, either using vector data or
scalar data. Similarly, there are two possible types of arrows, either
two-dimensional or three-dimensional.
348 \end{classdesc}
349
350 The following are some of the methods available:\\
351 Methods from \ActorThreeD, \GlyphThreeD and \MaskPoints.
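
A minimal sketch of \Velocity is given below. It assumes a \Scene \texttt{s}
and a \DataCollector \texttt{dc1} whose active vector field has already been
set (see \texttt{setActiveVector()} above); the scale factor and masking ratio
are purely illustrative.

\begin{python}
from esys.pyvisi import Velocity
from esys.pyvisi.constant import *

v = Velocity(scene = s, data_collector = dc1, arrow = Arrow.THREE_D,
        color_mode = ColorMode.VECTOR, viewport = Viewport.SOUTH_WEST,
        lut = Lut.COLOR, cell_to_point = False, outline = True)
# Scale the arrows by the vector data (methods from Glyph3D).
v.setScaleModeByVector()
v.setScaleFactor(scale_factor = 0.3)
# Show only every second point, selected at random (methods from MaskPoints).
v.setRatio(2)
v.randomOn()
\end{python}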
352
353 \subsubsection{\VelocityOnPlaneCut class}
354
355 \begin{classdesc}{VelocityOnPlaneCut}{scene, data_collector,
356 arrow = Arrow.TWO_D, color_mode = ColorMode.VECTOR,
357 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR,
358 cell_to_point = False, outline = True}
359 This class works in a similar way to \MapOnPlaneCut, except that
360 it shows a vector field using arrows cut using a plane.
361 \end{classdesc}
362
363 The following are some of the methods available:\\
364 Methods from \ActorThreeD, \GlyphThreeD, \Transform and \MaskPoints.
365
366 A typical usage of \VelocityOnPlaneCut is shown below.
367
368 \begin{python}
369 # Import the necessary modules
370 from esys.pyvisi import Scene, DataCollector, VelocityOnPlaneCut, Camera
371 from esys.pyvisi.constant import *
372 import os
373
374 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
375 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
376 X_SIZE = 400
377 Y_SIZE = 400
378
379 VECTOR_FIELD_CELL_DATA = "velocity"
380 FILE_3D = "interior_3D.xml"
381 IMAGE_NAME = "velocity.jpg"
382 JPG_RENDERER = Renderer.ONLINE_JPG
383
# Create a Scene.
385 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
386 y_size = Y_SIZE)
387
# Create a DataCollector reading from an XML file.
389 dc1 = DataCollector(source = Source.XML)
390 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
391 dc1.setActiveVector(vector = VECTOR_FIELD_CELL_DATA)
392
393 # Create VelocityOnPlaneCut.
394 vopc1 = VelocityOnPlaneCut(scene = s, data_collector = dc1,
395 viewport = Viewport.SOUTH_WEST, color_mode = ColorMode.VECTOR,
396 arrow = Arrow.THREE_D, lut = Lut.COLOR, cell_to_point = False,
397 outline = True)
398 vopc1.setScaleFactor(scale_factor = 0.5)
399 vopc1.setPlaneToXY(offset = 0.5)
400 vopc1.setRatio(2)
401 vopc1.randomOn()
402
403 # Create a Camera.
404 c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
405 c1.isometricView()
406 c1.elevation(angle = -20)
407
408 # Render the object.
409 s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME))
410 \end{python}
411
412 \subsubsection{\VelocityOnPlaneClip class}
413
414 \begin{classdesc}{VelocityOnPlaneClip}{scene, data_collector,
415 arrow = Arrow.TWO_D, color_mode = ColorMode.VECTOR,
416 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR,
cell_to_point = False, outline = True}
418 This class works in a similar way to \MapOnPlaneClip, except that it shows a
419 vector field using arrows clipped using a plane.
420 \end{classdesc}
421
422 The following are some of the methods available:\\
423 Methods from \ActorThreeD, \GlyphThreeD, \Transform, \Clipper and
424 \MaskPoints.
425
426 \subsubsection{\Ellipsoid class}
427
428 \begin{classdesc}{Ellipsoid}{scene, data_collector,
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
430 outline = True}
431 Class that shows a tensor field using ellipsoids. The ellipsoids can either be
432 colored or grey-scaled, depending on the lookup table used.
433 \end{classdesc}
434
435 The following are some of the methods available:\\
436 Methods from \ActorThreeD, \Sphere, \TensorGlyph and \MaskPoints.
437
438 \subsubsection{\EllipsoidOnPlaneCut class}
439
440 \begin{classdesc}{EllipsoidOnPlaneCut}{scene, data_collector,
441 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
442 outline = True}
443 This class works in a similar way to \MapOnPlaneCut, except that it shows
444 a tensor field using ellipsoids cut using a plane.
445 \end{classdesc}
446
447 The following are some of the methods available:\\
448 Methods from \ActorThreeD, \Sphere, \TensorGlyph, \Transform and
449 \MaskPoints.
450
451 \subsubsection{\EllipsoidOnPlaneClip class}
452
453 \begin{classdesc}{EllipsoidOnPlaneClip}{scene, data_collector,
454 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
455 outline = True}
456 This class works in a similar way to \MapOnPlaneClip, except that it shows a
457 tensor field using ellipsoids clipped using a plane.
458 \end{classdesc}
459
460 The following are some of the methods available:\\
461 Methods from \ActorThreeD, \Sphere, \TensorGlyph, \Transform, \Clipper
462 and \MaskPoints.
463
464 A typical usage of \EllipsoidOnPlaneClip is shown below.
465
466 \begin{python}
467 # Import the necessary modules
468 from esys.pyvisi import Scene, DataCollector, EllipsoidOnPlaneClip, Camera
469 from esys.pyvisi.constant import *
470 import os
471
472 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
473 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
474 X_SIZE = 400
475 Y_SIZE = 400
476
477 TENSOR_FIELD_CELL_DATA = "stress_cell"
478 FILE_3D = "interior_3D.xml"
479 IMAGE_NAME = "ellipsoid.jpg"
480 JPG_RENDERER = Renderer.ONLINE_JPG
481
482 # Create a Scene.
483 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
484 y_size = Y_SIZE)
485
# Create a DataCollector reading from an XML file.
487 dc1 = DataCollector(source = Source.XML)
488 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
489 dc1.setActiveTensor(tensor = TENSOR_FIELD_CELL_DATA)
490
# Create an EllipsoidOnPlaneClip.
492 eopc1 = EllipsoidOnPlaneClip(scene = s, data_collector = dc1,
493 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = True,
494 outline = True)
495 eopc1.setPlaneToXY()
496 eopc1.setScaleFactor(scale_factor = 0.2)
497 eopc1.rotateX(angle = 10)
498
499 # Create a camera.
500 c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
501 c1.bottomView()
502 c1.azimuth(angle = -90)
503 c1.elevation(angle = 10)
504
505 # Render the object.
506 s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME))
507 \end{python}
508
509 \subsubsection{\Contour class}
510
511 \begin{classdesc}{Contour}{scene, data_collector,
512 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
513 outline = True}
514 Class that shows a scalar field using contour surfaces. The contour surfaces can
515 either be colored or grey-scaled, depending on the lookup table used. This
516 class can also be used to generate iso surfaces.
517 \end{classdesc}
518
519 The following are some of the methods available:\\
520 Methods from \ActorThreeD and \ContourModule.
521
522 A typical usage of \Contour is shown below.
523
524 \begin{python}
525 # Import the necessary modules
526 from esys.pyvisi import Scene, DataCollector, Contour, Camera
527 from esys.pyvisi.constant import *
528 import os
529
530 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
531 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
532 X_SIZE = 400
533 Y_SIZE = 400
534
535 SCALAR_FIELD_POINT_DATA = "temperature"
536 FILE_3D = "interior_3D.xml"
537 IMAGE_NAME = "contour.jpg"
538 JPG_RENDERER = Renderer.ONLINE_JPG
539
540 # Create a Scene.
541 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
542 y_size = Y_SIZE)
543
# Create a DataCollector reading from an XML file.
545 dc1 = DataCollector(source = Source.XML)
546 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
547 dc1.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA)
548
549 # Create a Contour.
550 ctr1 = Contour(scene = s, data_collector = dc1, viewport = Viewport.SOUTH_WEST,
551 lut = Lut.COLOR, cell_to_point = False, outline = True)
552 ctr1.generateContours(contours = 3)
553
554 # Create a Camera.
555 cam1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
556 cam1.elevation(angle = -40)
557
558 # Render the object.
559 s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME))
560 \end{python}
561
562 \subsubsection{\ContourOnPlaneCut class}
563
564 \begin{classdesc}{ContourOnPlaneCut}{scene, data_collector,
565 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
566 outline = True}
567 This class works in a similar way to \MapOnPlaneCut, except that it shows a
568 scalar field using contour surfaces cut using a plane.
569 \end{classdesc}
570
571 The following are some of the methods available:\\
572 Methods from \ActorThreeD, \ContourModule and \Transform.
573
574 \subsubsection{\ContourOnPlaneClip class}
575
576 \begin{classdesc}{ContourOnPlaneClip}{scene, data_collector,
577 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
578 outline = True}
579 This class works in a similar way to \MapOnPlaneClip, except that it shows a
580 scalar field using contour surfaces clipped using a plane.
581 \end{classdesc}
582
583 The following are some of the methods available:\\
584 Methods from \ActorThreeD, \ContourModule, \Transform and \Clipper.
585
586 \subsubsection{\StreamLine class}
587
588 \begin{classdesc}{StreamLine}{scene, data_collector,
589 viewport = Viewport.SOUTH_WEST, color_mode = ColorMode.VECTOR, lut = Lut.COLOR,
590 cell_to_point = False, outline = True}
591 Class that shows the direction of particles of a vector field using streamlines.
592 The streamlines can either be colored or grey-scaled, depending on the lookup
593 table used. If the streamlines are colored, there are two possible coloring
594 modes, either using vector data or scalar data.
595 \end{classdesc}
596
597 The following are some of the methods available:\\
598 Methods from \ActorThreeD, \PointSource, \StreamLineModule and \Tube.
599
600 A typical usage of \StreamLine is shown below.
601
602 \begin{python}
603 # Import the necessary modules.
604 from esys.pyvisi import Scene, DataCollector, StreamLine, Camera
605 from esys.pyvisi.constant import *
606 import os
607
608 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
609 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
610 X_SIZE = 400
611 Y_SIZE = 400
612
613 VECTOR_FIELD_CELL_DATA = "temperature"
614 FILE_3D = "interior_3D.xml"
615 IMAGE_NAME = "streamline.jpg"
616 JPG_RENDERER = Renderer.ONLINE_JPG
617
618 # Create a Scene.
619 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
620 y_size = Y_SIZE)
621
# Create a DataCollector reading from an XML file.
623 dc1 = DataCollector(source = Source.XML)
624 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
625
# Create a StreamLine.
627 sl1 = StreamLine(scene = s, data_collector = dc1,
628 viewport = Viewport.SOUTH_WEST, color_mode = ColorMode.SCALAR,
629 lut = Lut.COLOR, cell_to_point = False, outline = True)
630 sl1.setTubeRadius(radius = 0.02)
631
632 # Create a Camera.
633 c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
634 c1.isometricView()
635
636 # Render the object.
637 s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME))
638 \end{python}
639
640 \subsubsection{\Carpet class}
641
642 \begin{classdesc}{Carpet}{scene, data_collector,
viewport = Viewport.SOUTH_WEST, warp_mode = WarpMode.SCALAR,
644 lut = Lut.COLOR, cell_to_point = False, outline = True}
This class works in a similar way to \MapOnPlaneCut, except that it shows a
scalar field cut by a plane and deformed (warped) along the normal. The
plane can either be colored or grey-scaled, depending on the lookup table used.
Similarly, the plane can be deformed using either scalar data or vector data.
649 \end{classdesc}
650
651 The following are some of the methods available:\\
652 Methods from \ActorThreeD, \Warp and \Transform.
653
654 A typical usage of \Carpet is shown below.
655
656 \begin{python}
657 # Import the necessary modules.
658 from esys.pyvisi import Scene, DataCollector, Carpet, Camera
659 from esys.pyvisi.constant import *
660 import os
661
662 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
663 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
664 X_SIZE = 400
665 Y_SIZE = 400
666
667 SCALAR_FIELD_CELL_DATA = "temperature_cell"
668 FILE_3D = "interior_3D.xml"
669 IMAGE_NAME = "carpet.jpg"
670 JPG_RENDERER = Renderer.ONLINE_JPG
671
672 # Create a Scene.
673 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
674 y_size = Y_SIZE)
675
# Create a DataCollector reading from an XML file.
677 dc1 = DataCollector(source = Source.XML)
678 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
679 dc1.setActiveScalar(scalar = SCALAR_FIELD_CELL_DATA)
680
681 # Create a Carpet.
682 cpt1 = Carpet(scene = s, data_collector = dc1, viewport = Viewport.SOUTH_WEST,
683 warp_mode = WarpMode.SCALAR, lut = Lut.COLOR, cell_to_point = True,
684 outline = True)
685 cpt1.setPlaneToXY(0.2)
686 cpt1.setScaleFactor(1.9)
687
688 # Create a Camera.
689 c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
690 c1.isometricView()
691
692 # Render the object.
693 s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME))
694 \end{python}
695
696 \subsubsection{\Image class}
697
698 \begin{classdesc}{Image}{scene, image_reader, viewport = Viewport.SOUTH_WEST}
Class that displays an image, which can be scaled (up and down) and has
interaction capability. The image can also be translated and rotated along
the X, Y and Z axes. One of the most common uses of this feature is to paste
an image onto a surface map.
703 \end{classdesc}
704
705 The following are some of the methods available:\\
706 Methods from \ActorThreeD, \PlaneSource and \Transform.
707
708 A typical usage of \Image is shown below.
709
710 \begin{python}
711 # Import the necessary modules.
712 from esys.pyvisi import Scene, DataCollector, Map, ImageReader, Image, Camera
713 from esys.pyvisi import GlobalPosition
714 from esys.pyvisi.constant import *
715 import os
716
717 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
718 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
719 X_SIZE = 400
720 Y_SIZE = 400
721
722 SCALAR_FIELD_POINT_DATA = "temperature"
723 FILE_3D = "interior_3D.xml"
724 LOAD_IMAGE_NAME = "flinders.jpg"
725 SAVE_IMAGE_NAME = "image.jpg"
726 JPG_RENDERER = Renderer.ONLINE_JPG
727
728 # Create a Scene.
729 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
730 y_size = Y_SIZE)
731
# Create a DataCollector reading from an XML file.
733 dc1 = DataCollector(source = Source.XML)
734 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
735
736 # Create a Map.
737 m1 = Map(scene = s, data_collector = dc1, viewport = Viewport.SOUTH_WEST,
738 lut = Lut.COLOR, cell_to_point = False, outline = True)
739 m1.setOpacity(0.3)
740
741 # Create an ImageReader (in place of DataCollector).
742 ir = ImageReader(ImageFormat.JPG)
743 ir.setImageName(image_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, \
744 LOAD_IMAGE_NAME))
745
746 # Create an Image.
747 i = Image(scene = s, image_reader = ir, viewport = Viewport.SOUTH_WEST)
748 i.setOpacity(opacity = 0.9)
749 i.translate(0,0,-1)
750 i.setPoint1(GlobalPosition(2,0,0))
751 i.setPoint2(GlobalPosition(0,2,0))
752
753 # Create a Camera.
754 c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
755
756 # Render the image.
757 s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, SAVE_IMAGE_NAME))
758 \end{python}
759
760 \subsubsection{\Logo class}
761
762 \begin{classdesc}{Logo}{scene, image_reader, viewport = Viewport.SOUTH_WEST}
Class that displays a static image, in particular a logo
(e.g. a company symbol), and has no interaction capability. The position and
size of the logo can be specified.
766 \end{classdesc}
767
768 The following are some of the methods available:\\
769 Methods from \ImageReslice and \ActorTwoD.
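
A minimal sketch of \Logo is given below. It assumes a \Scene \texttt{s} has
already been created and that a logo image \texttt{logo.jpg} (placeholder
name) is available; the size and position are purely illustrative.

\begin{python}
from esys.pyvisi import ImageReader, Logo, LocalPosition
from esys.pyvisi.constant import *

# Read the logo image.
ir = ImageReader(ImageFormat.JPG)
ir.setImageName(image_name = "logo.jpg")

# Create the Logo and place it in the lower left corner of the window.
lg = Logo(scene = s, image_reader = ir, viewport = Viewport.SOUTH_WEST)
lg.setSize(size = 0.3)                 # method from ImageReslice
lg.setPosition(LocalPosition(10, 10))  # method from Actor2D
\end{python}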
770
771
772 %##############################################################################
773
774
775 \subsection{Coordinate Classes}
776 This subsection details the instances used to position the rendered object.
777
778 \begin{classdesc}{LocalPosition}{x_coor, y_coor}
779 Class that defines the local positioning coordinate system (2D).
780 \end{classdesc}
781
782 \begin{classdesc}{GlobalPosition}{x_coor, y_coor, z_coor}
783 Class that defines the global positioning coordinate system (3D).
784 \end{classdesc}
785
786
787 %##############################################################################
788
789
790 \subsection{Supporting Classes}
791 This subsection details the supporting classes inherited by the data
792 visualization classes (see Section \ref{DATACOLLECTOR SEC}) and their
793 available methods.
794
795 \subsubsection{\ActorThreeD class}
796
797 The following are some of the methods available:
798
799 \begin{methoddesc}[Actor3D]{setOpacity}{opacity}
800 Set the opacity (transparency) of the 3D actor.
801 \end{methoddesc}
802
803 \begin{methoddesc}[Actor3D]{setColor}{color}
804 Set the color of the 3D actor.
805 \end{methoddesc}
806
807 \begin{methoddesc}[Actor3D]{setRepresentationToWireframe}{}
808 Set the representation of the 3D actor to wireframe.
809 \end{methoddesc}
810
811 \subsubsection{\ActorTwoD class}
812
813 The following are some of the methods available:
814
815 \begin{methoddesc}[Actor2D]{setPosition}{position}
816 Set the position (XY) of the 2D actor. Default position is the lower left hand
817 corner of the window / viewport.
818 \end{methoddesc}
819
820 \subsubsection{\Clipper class}
821
822 The following are some of the methods available:
823
824 \begin{methoddesc}[Clipper]{setInsideOutOn}{}
825 Clips one side of the rendered object.
826 \end{methoddesc}
827
828 \begin{methoddesc}[Clipper]{setInsideOutOff}{}
829 Clips the other side of the rendered object.
830 \end{methoddesc}
831
832 \begin{methoddesc}[Clipper]{setClipValue}{value}
833 Set the scalar clip value (instead of using a plane) for the clipper.
834 \end{methoddesc}
835
836 \subsubsection{\ContourModule class}
837
838 The following are some of the methods available:
839
840 \begin{methoddesc}[ContourModule]{generateContours}{contours = None,
841 lower_range = None, upper_range = None}
842 Generate the specified number of contours within the specified range.
843 In order to generate an iso surface, the 'lower_range' and 'upper_range'
844 must be equal.
845 \end{methoddesc}
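
For example, a single iso surface at an (illustrative) scalar value of 0.5 can
be generated on a \Contour instance \texttt{ctr1}, as created in the \Contour
example above, with:

\begin{python}
# Generate one iso surface at the scalar value 0.5 (illustrative value).
ctr1.generateContours(contours = 1, lower_range = 0.5, upper_range = 0.5)
\end{python}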
846
847 \subsubsection{\GlyphThreeD class}
848
849 The following are some of the methods available:
850
851 \begin{methoddesc}[Glyph3D]{setScaleModeByVector}{}
852 Set the 3D glyph to scale according to the vector data.
853 \end{methoddesc}
854
855 \begin{methoddesc}[Glyph3D]{setScaleModeByScalar}{}
856 Set the 3D glyph to scale according to the scalar data.
857 \end{methoddesc}
858
859 \begin{methoddesc}[Glyph3D]{setScaleFactor}{scale_factor}
860 Set the 3D glyph scale factor.
861 \end{methoddesc}
862
863 \subsubsection{\TensorGlyph class}
864
865 The following are some of the methods available:
866
867 \begin{methoddesc}[TensorGlyph]{setScaleFactor}{scale_factor}
868 Set the scale factor for the tensor glyph.
869 \end{methoddesc}
870
871 \begin{methoddesc}[TensorGlyph]{setMaxScaleFactor}{max_scale_factor}
872 Set the maximum allowable scale factor for the tensor glyph.
873 \end{methoddesc}
874
875 \subsubsection{\PlaneSource class}
876
877 The following are some of the methods available:
878
879 \begin{methoddesc}[PlaneSource]{setPoint1}{position}
880 Set the first point from the origin of the plane source.
881 \end{methoddesc}
882
883 \begin{methoddesc}[PlaneSource]{setPoint2}{position}
884 Set the second point from the origin of the plane source.
885 \end{methoddesc}
886
887 \subsubsection{\PointSource class}
888
889 The following are some of the methods available:
890
891 \begin{methoddesc}[PointSource]{setPointSourceRadius}{radius}
892 Set the radius of the sphere.
893 \end{methoddesc}
894
895 \begin{methoddesc}[PointSource]{setPointSourceCenter}{center}
896 Set the center of the sphere.
897 \end{methoddesc}
898
899 \begin{methoddesc}[PointSource]{setPointSourceNumberOfPoints}{points}
900 Set the number of points to generate within the sphere (the larger the
901 number of points, the more streamlines are generated).
902 \end{methoddesc}
903
904 \subsubsection{\Sphere class}
905
906 The following are some of the methods available:
907
908 \begin{methoddesc}[Sphere]{setThetaResolution}{resolution}
909 Set the theta resolution of the sphere.
910 \end{methoddesc}
911
912 \begin{methoddesc}[Sphere]{setPhiResolution}{resolution}
913 Set the phi resolution of the sphere.
914 \end{methoddesc}
915
916 \subsubsection{\StreamLineModule class}
917
918 The following are some of the methods available:
919
920 \begin{methoddesc}[StreamLineModule]{setMaximumPropagationTime}{time}
921 Set the maximum length of the streamline expressed in elapsed time.
922 \end{methoddesc}
923
924 \begin{methoddesc}[StreamLineModule]{setIntegrationToBothDirections}{}
Set the integration to occur in both directions: forward (where the streamline
goes) and backward (where the streamline came from).
927 \end{methoddesc}
928
929 \subsubsection{\Transform class}
930
931 The following are some of the methods available:
932
933 \begin{methoddesc}[Transform]{translate}{x_offset, y_offset, z_offset}
934 Translate the rendered object along the x, y and z-axes.
935 \end{methoddesc}
936
937 \begin{methoddesc}[Transform]{rotateX}{angle}
938 Rotate the plane along the x-axis.
939 \end{methoddesc}
940
941 \begin{methoddesc}[Transform]{rotateY}{angle}
942 Rotate the plane along the y-axis.
943 \end{methoddesc}
944
945 \begin{methoddesc}[Transform]{rotateZ}{angle}
946 Rotate the plane along the z-axis.
947 \end{methoddesc}
948
949 \begin{methoddesc}[Transform]{setPlaneToXY}{offset = 0}
950 Set the plane orthogonal to the z-axis.
951 \end{methoddesc}
952
953 \begin{methoddesc}[Transform]{setPlaneToYZ}{offset = 0}
954 Set the plane orthogonal to the x-axis.
955 \end{methoddesc}
956
957 \begin{methoddesc}[Transform]{setPlaneToXZ}{offset = 0}
958 Set the plane orthogonal to the y-axis.
959 \end{methoddesc}
960
961 \subsubsection{\Tube class}
962
963 The following are some of the methods available:
964
965 \begin{methoddesc}[Tube]{setTubeRadius}{radius}
966 Set the radius of the tube.
967 \end{methoddesc}
968
969 \begin{methoddesc}[Tube]{setTubeRadiusToVaryByVector}{}
970 Set the radius of the tube to vary by vector data.
971 \end{methoddesc}
972
973 \begin{methoddesc}[Tube]{setTubeRadiusToVaryByScalar}{}
974 Set the radius of the tube to vary by scalar data.
975 \end{methoddesc}
976
977 \subsubsection{\Warp class}
978
979 The following are some of the methods available:
980
981 \begin{methoddesc}[Warp]{setScaleFactor}{scale_factor}
982 Set the displacement scale factor.
983 \end{methoddesc}
984
985 \subsubsection{\MaskPoints class}
986
987 The following are some of the methods available:
988
989 \begin{methoddesc}[MaskPoints]{setRatio}{ratio}
990 Mask every nth point.
991 \end{methoddesc}
992
993 \begin{methoddesc}[MaskPoints]{randomOn}{}
Enable randomization of the points selected for masking.
995 \end{methoddesc}
996
997 \subsubsection{\ImageReslice class}
998
999 The following are some of the methods available:
1000
1001 \begin{methoddesc}[ImageReslice]{setSize}{size}
Set the size of the image (in particular the logo), using a value between 0
and 2. A size of 1 displays the image at its original size (the default).
1004 \end{methoddesc}
1005
1006
1007 % #############################################################################
1008
1009
1010 \section{More Examples}
1011 This section shows more examples.
1012
1013 \textsf{Reading A Series of Files}
1014
1015 \begin{python}
1016 # Import the necessary modules.
1017 from esys.pyvisi import Scene, DataCollector, Contour, Camera
1018 from esys.pyvisi.constant import *
1019 import os
1020
1021 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
1022 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
1023 X_SIZE = 400
1024 Y_SIZE = 300
1025
1026 SCALAR_FIELD_POINT_DATA_1 = "lava"
1027 SCALAR_FIELD_POINT_DATA_2 = "talus"
1028 FILE_2D = "phi_talus_lava."
1029 FIRST_FILE_NAME = "phi_talus_lava.0099.vtu"
1030
1031 IMAGE_NAME = "seriesofreads"
1032 JPG_RENDERER = Renderer.ONLINE_JPG
1033
1034 # Create a Scene.
1035 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
1036 y_size = Y_SIZE)
1037
# Create a DataCollector reading from an XML file. An initial file must always
1039 # be assigned when the DataCollector is created, although the same file is
1040 # read again in the for-loop.
1041 dc1 = DataCollector(source = Source.XML)
1042 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, \
1043 FIRST_FILE_NAME))
1044 dc1.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA_1)
1045
1046 # Create a Contour.
1047 mosc1 = Contour(scene = s, data_collector = dc1,
1048 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
1049 outline = True)
1050 mosc1.generateContours(0)
1051
1052 # Create a second DataCollector reading from the same XML file.
1053 dc2 = DataCollector(source = Source.XML)
1054 dc2.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, \
1055 FIRST_FILE_NAME))
1056 dc2.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA_2)
1057
1058 # Create a second Contour.
1059 mosc2 = Contour(scene = s, data_collector = dc2,
1060 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
1061 outline = True)
1062 mosc2.generateContours(0)
1063
1064 # Create a Camera.
1065 cam1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
1066
# Read in the files one after another and render the object.
for i in range(99, 104):
    dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, \
            FILE_2D + "%04d.vtu") % i)
    dc2.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, \
            FILE_2D + "%04d.vtu") % i)

    s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, \
            IMAGE_NAME + "%04d.jpg") % i)
1076 \end{python}
1077
\textsf{Manipulating A Single File with A Series of Translations}
1079
1080 \begin{python}
1081 # Import the necessary modules.
1082 from esys.pyvisi import Scene, DataCollector, MapOnPlaneCut, Camera
1083 from esys.pyvisi.constant import *
1084 import os
1085
1086 PYVISI_EXAMPLE_MESHES_PATH = "data_meshes"
1087 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
1088 X_SIZE = 400
1089 Y_SIZE = 400
1090
1091 SCALAR_FIELD_POINT_DATA = "temperature"
1092 FILE_3D = "interior_3D.xml"
1093 IMAGE_NAME = "seriesofcuts"
1094 JPG_RENDERER = Renderer.ONLINE_JPG
1095
1096 # Create a Scene.
1097 s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE,
1098 y_size = Y_SIZE)
1099
# Create a DataCollector reading from an XML file.
1101 dc1 = DataCollector(source = Source.XML)
1102 dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D))
1103 dc1.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA)
1104
1105 # Create a MapOnPlaneCut.
1106 mopc1 = MapOnPlaneCut(scene = s, data_collector = dc1,
1107 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False,
1108 outline = True)
1109 mopc1.setPlaneToYZ(offset = 0.1)
1110
1111 # Create a Camera.
1112 c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
1113 c1.isometricView()
1114
# Render the object with multiple cuts from a series of translations.
for i in range(0, 5):
    s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME +
            "%02d.jpg") % i)
    mopc1.translate(0.6,0,0)
1120 \end{python}
1121
1122 \textsf{Reading Data Directly from Escript Objects}
1123
1124 \begin{python}
1125 # Import the necessary modules.
1126 from esys.escript import *
1127 from esys.escript.linearPDEs import LinearPDE
1128 from esys.finley import Rectangle
1129 from esys.pyvisi import Scene, DataCollector, Map, Camera
1130 from esys.pyvisi.constant import *
1131 import os
1132
1133 PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images"
1134 X_SIZE = 400
1135 Y_SIZE = 400
1136 JPG_RENDERER = Renderer.ONLINE_JPG
1137
1138 #... Set some parameters ...
1139 xc=[0.02,0.002]
1140 r=0.001
1141 qc=50.e6
1142 Tref=0.
1143 rhocp=2.6e6
1144 eta=75.
1145 kappa=240.
1146 tend=5.
1147 # ... Time, time step size and counter ...
1148 t=0
1149 h=0.1
1150 i=0
1151
1152 #... Generate domain ...
1153 mydomain = Rectangle(l0=0.05,l1=0.01,n0=250, n1=50)
1154 #... open PDE ...
1155 mypde=LinearPDE(mydomain)
1156 mypde.setSymmetryOn()
1157 mypde.setValue(A=kappa*kronecker(mydomain),D=rhocp/h,d=eta,y=eta*Tref)
1158 # ... Set heat source: ....
1159 x=mydomain.getX()
1160 qH=qc*whereNegative(length(x-xc)-r)
1161 # ... set initial temperature ....
1162 T=Tref
1163
1164 # Create a Scene.
1165 s = Scene(renderer = JPG_RENDERER, x_size = X_SIZE, y_size = Y_SIZE)
1166
1167 # Create a DataCollector reading directly from escript objects.
1168 dc = DataCollector(source = Source.ESCRIPT)
1169
1170 # Create a Map.
1171 m = Map(scene = s, data_collector = dc, \
1172 viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, \
1173 cell_to_point = False, outline = True)
1174
1175 # Create a Camera.
1176 c = Camera(scene = s, viewport = Viewport.SOUTH_WEST)
1177
1178 # ... Start iteration:
while t<tend:
    i+=1
    t+=h
    mypde.setValue(Y=qH+rhocp/h*T)
    T=mypde.getSolution()

    dc.setData(temp = T)

    # Render the object.
    s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, \
            "diffusion%02d.jpg") % i)
1190 \end{python}
1191
1192 \newpage
1193
1194 \section{Useful Keys}
This section lists some of the useful keys for interacting with the rendered
object.
1197
1198 \begin{table}[ht]
1199 \begin{center}
1200 \begin{tabular}{| c | p{13cm} |}
1201 \hline
1202 \textbf{Key} & \textbf{Description} \\ \hline
1203 Keypress 'c' / 'a' & Toggle between the camera ('c') and object ('a') mode. In
1204 camera mode, mouse events affect the camera position and focal point. In
object mode, mouse events affect the rendered object's element (e.g. a
cut surface map, clipped velocity field or streamline) that is under the
1207 mouse pointer.\\ \hline
1208 Mouse button 1 & Rotate the camera around its focal point (if in camera mode)
1209 or rotate the rendered object's element (if in object mode).\\ \hline
Mouse button 2 & Pan the camera (if in camera mode) or translate the rendered
1211 object's element (if in object mode). \\ \hline
1212 Mouse button 3 & Zoom the camera (if in camera mode) or scale the rendered
1213 object's element (if in object mode). \\ \hline
Keypress '3' & Toggle the render window in and out of stereo mode. By default,
1215 red-blue stereo pairs are created. \\ \hline
1216 Keypress 'e' / 'q' & Exit the application if only one file is to be read, or
1217 read and display the next file if multiple files are to be read. \\ \hline
1218 Keypress 's' & Modify the representation of the rendered object to surfaces.
1219 \\ \hline
1220 Keypress 'w' & Modify the representation of the rendered object to wireframe.
1221 \\ \hline
1222 \end{tabular}
1223 \caption{Useful keys}
1224 \end{center}
1225 \end{table}
1226
1227
1228 % ############################################################################
1229
1230
1231 \newpage
1232
1233 \section{Sample Output}
1234 This section displays thumbnails of sample output.
1235
1236 \begin{table}[ht]
1237 \begin{tabular}{c c c}
1238 \includegraphics[width=\thumbnailwidth]{figures/Map} &
1239 \includegraphics[width=\thumbnailwidth]{figures/MapOnPlaneCut} &
1240 \includegraphics[width=\thumbnailwidth]{figures/MapOnPlaneClip} \\
1241 Map & MapOnPlaneCut & MapOnPlaneClip \\
1242 \includegraphics[width=\thumbnailwidth]{figures/MapOnScalarClip} &
1243 \includegraphics[width=\thumbnailwidth]{figures/Velocity} &
1244 \includegraphics[width=\thumbnailwidth]{figures/VelocityOnPlaneCut} \\
1245 MapOnScalarClip & Velocity & VelocityOnPlaneCut \\
1246 \includegraphics[width=\thumbnailwidth]{figures/VelocityOnPlaneClip} &
1247 \includegraphics[width=\thumbnailwidth]{figures/Ellipsoid} &
1248 \includegraphics[width=\thumbnailwidth]{figures/EllipsoidOnPlaneCut} \\
1249 VelocityOnPlaneClip & Ellipsoid & EllipsoidOnPlaneCut \\
1250 \includegraphics[width=\thumbnailwidth]{figures/EllipsoidOnPlaneClip} &
1251 \includegraphics[width=\thumbnailwidth]{figures/Contour} &
1252 \includegraphics[width=\thumbnailwidth]{figures/ContourOnPlaneCut} \\
1253 EllipsoidOnPlaneClip & Contour & ContourOnPlaneCut \\
1254 \includegraphics[width=\thumbnailwidth]{figures/ContourOnPlaneClip} &
1255 \includegraphics[width=\thumbnailwidth]{figures/StreamLine} &
1256 \includegraphics[width=\thumbnailwidth]{figures/Carpet} \\
1257 ContourOnPlaneClip & StreamLine & Carpet \\
1258 \end{tabular}
1259 \caption{Sample output}
1260 \end{table}
1261
1262
1263
