1 |
|
2 |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% |
3 |
% |
4 |
% Copyright (c) 2003-2008 by University of Queensland |
5 |
% Earth Systems Science Computational Center (ESSCC) |
6 |
% http://www.uq.edu.au/esscc |
7 |
% |
8 |
% Primary Business: Queensland, Australia |
9 |
% Licensed under the Open Software License version 3.0 |
10 |
% http://www.opensource.org/licenses/osl-3.0.php |
11 |
% |
12 |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% |
13 |
|
14 |
|
15 |
\chapter{The Module \pyvisi} |
16 |
\label{PYVISI CHAP} |
17 |
\declaremodule{extension}{esys.pyvisi} |
18 |
\modulesynopsis{Python Visualization Interface} |
19 |
|
20 |
\section{Introduction} |
21 |
\pyvisi is a Python module that is used to generate 2D and 3D visualizations |
22 |
for escript and its PDE solvers finley and bruce. The module provides |
23 |
an easy to use interface to the \VTK library (\VTKUrl) to render (generate) |
24 |
surface maps and contours for scalar fields, arrows and streamlines for vector |
25 |
fields, and ellipsoids for tensor fields. There are three approaches for |
26 |
rendering an object. (1) Online - object is rendered on-screen with |
27 |
interaction capability (i.e. zoom and rotate), (2) Offline - object is |
28 |
rendered off-screen (no pop-up window) and (3) Display - object is rendered |
29 |
on-screen but with no interaction capability (on-the-fly animation). All three |
30 |
approaches have the option to save the rendered object as an image (e.g. jpeg) |
31 |
and subsequently to convert a series of images into a movie (mpeg). |
32 |
|
33 |
The following outlines the general steps to use Pyvisi: |
34 |
|
35 |
\begin{enumerate} |
36 |
\item Create a \Scene instance - a window in which objects will be rendered on. |
37 |
\item Create a data input instance (i.e. \DataCollector or \ImageReader) - |
38 |
reads the source data for visualization. |
39 |
\item Create a data visualization object (i.e. \Map, \Velocity, \Ellipsoid, |
40 |
\Contour, \Carpet, \StreamLine, etc.) - creates a visual representation of |
41 |
the source data. |
42 |
\item Create a \Camera or \Light instance - controls the viewing angle and |
43 |
lighting effects. |
44 |
\item Render the object - using either the Online, Offline or Display approach. |
45 |
\item Generate movie - converts a series of images into a movie. (optional) |
46 |
\end{enumerate} |
47 |
\begin{center} |
48 |
\begin{math} |
49 |
scene \rightarrow data \; input \rightarrow data \; visualization \rightarrow |
50 |
camera \, / \, light \rightarrow render \rightarrow movie |
51 |
\end{math} |
52 |
\end{center} |
53 |
|
54 |
\section{\pyvisi Classes} |
55 |
The following subsections give a brief overview of the important classes |
56 |
and some of their corresponding methods. Please refer to \ReferenceGuide for |
57 |
full details. |
58 |
|
59 |
|
60 |
%############################################################################# |
61 |
|
62 |
|
63 |
\subsection{Scene Classes} |
64 |
This section details the instances used to setup the viewing environment. |
65 |
|
66 |
\subsubsection{\Scene class} |
67 |
|
68 |
\begin{classdesc}{Scene}{renderer = Renderer.ONLINE, num_viewport = 1, |
69 |
x_size = 1152, y_size = 864} |
70 |
A scene is a window in which objects are to be rendered on. Only |
71 |
one scene needs to be created. However, a scene may be divided into four |
72 |
smaller windows called viewports (if needed). Each viewport in turn can |
73 |
render a different object. |
74 |
\end{classdesc} |
75 |
|
76 |
The following are some of the methods available: |
77 |
\begin{methoddesc}[Scene]{setBackground}{color} |
78 |
Set the background color of the scene. |
79 |
\end{methoddesc} |
80 |
|
81 |
\begin{methoddesc}[Scene]{render}{image_name = None} |
82 |
Render the object using either the Online, Offline or Display mode. |
83 |
\end{methoddesc} |
84 |
|
85 |
\subsubsection{\Camera class} |
86 |
|
87 |
\begin{classdesc}{Camera}{scene, viewport = Viewport.SOUTH_WEST} |
88 |
A camera controls the display angle of the rendered object and one is |
89 |
usually created for a \Scene. However, if a \Scene has four viewports, then a |
90 |
separate camera may be created for each viewport. |
91 |
\end{classdesc} |
92 |
|
93 |
The following are some of the methods available: |
94 |
\begin{methoddesc}[Camera]{setFocalPoint}{position} |
95 |
Set the focal point of the camera. |
96 |
\end{methoddesc} |
97 |
|
98 |
\begin{methoddesc}[Camera]{setPosition}{position} |
99 |
Set the position of the camera. |
100 |
\end{methoddesc} |
101 |
|
102 |
\begin{methoddesc}[Camera]{azimuth}{angle} |
103 |
Rotate the camera to the left and right. The angle parameter is in degrees. |
104 |
\end{methoddesc} |
105 |
|
106 |
\begin{methoddesc}[Camera]{elevation}{angle} |
107 |
Rotate the camera up and down (angle must be between -90 and 90). |
108 |
\end{methoddesc} |
109 |
|
110 |
\begin{methoddesc}[Camera]{backView}{} |
111 |
Rotate the camera to view the back of the rendered object. |
112 |
\end{methoddesc} |
113 |
|
114 |
\begin{methoddesc}[Camera]{topView}{} |
115 |
Rotate the camera to view the top of the rendered object. |
116 |
\end{methoddesc} |
117 |
|
118 |
\begin{methoddesc}[Camera]{bottomView}{} |
119 |
Rotate the camera to view the bottom of the rendered object. |
120 |
\end{methoddesc} |
121 |
|
122 |
\begin{methoddesc}[Camera]{leftView}{} |
123 |
Rotate the camera to view the left side of the rendered object. |
124 |
\end{methoddesc} |
125 |
|
126 |
\begin{methoddesc}[Camera]{rightView}{} |
127 |
Rotate the camera to view the right side of the rendered object. |
128 |
\end{methoddesc} |
129 |
|
130 |
\begin{methoddesc}[Camera]{isometricView}{} |
131 |
Rotate the camera to view an isometric projection of the rendered object. |
132 |
\end{methoddesc} |
133 |
|
134 |
\begin{methoddesc}[Camera]{dolly}{distance} |
135 |
Move the camera towards the rendered object (distance greater than 1). However, |
136 |
it is not possible to move the camera away from the rendered object with this |
137 |
method. |
138 |
\end{methoddesc} |
139 |
|
140 |
\subsubsection{\Light class} |
141 |
|
142 |
\begin{classdesc}{Light}{scene, viewport = Viewport.SOUTH_WEST} |
143 |
A light controls the lighting effect for the rendered object and is set up in |
144 |
a similar way to \Camera. |
145 |
\end{classdesc} |
146 |
|
147 |
The following are some of the methods available: |
148 |
\begin{methoddesc}[Light]{setColor}{color} |
149 |
Set the light color. |
150 |
\end{methoddesc} |
151 |
|
152 |
\begin{methoddesc}[Light]{setFocalPoint}{position} |
153 |
Set the focal point of the light. |
154 |
\end{methoddesc} |
155 |
|
156 |
\begin{methoddesc}[Light]{setPosition}{position} |
157 |
Set the position of the light. |
158 |
\end{methoddesc} |
159 |
|
160 |
\begin{methoddesc}[Light]{setAngle}{elevation = 0, azimuth = 0} |
161 |
An alternative to set the position and focal point of the light by using |
162 |
elevation and azimuth. |
163 |
\end{methoddesc} |
164 |
|
165 |
|
166 |
%############################################################################## |
167 |
|
168 |
|
169 |
\subsection{Input Classes} |
170 |
\label{INPUT SEC} |
171 |
This subsection details the instances used to read and load the source data |
172 |
for visualization. |
173 |
|
174 |
\subsubsection{\DataCollector class} |
175 |
\begin{classdesc}{DataCollector}{source = Source.XML} |
176 |
A data collector is used to read data either from an XML file (using |
177 |
\texttt{setFileName()}) or from an escript object directly (using |
178 |
\texttt{setData()}). Writing XML files is expensive but has the advantage |
179 |
that the results can be analyzed easily after the simulation has completed. |
180 |
\end{classdesc} |
181 |
|
182 |
The following are some of the methods available: |
183 |
\begin{methoddesc}[DataCollector]{setFileName}{file_name} |
184 |
Set the XML file name to read. |
185 |
\end{methoddesc} |
186 |
|
187 |
\begin{methoddesc}[DataCollector]{setData}{**args} |
188 |
Create data using the \textless name\textgreater=\textless data\textgreater |
189 |
pairing. The method assumes that the data is given in the appropriate format. |
190 |
\end{methoddesc} |
191 |
|
192 |
\begin{methoddesc}[DataCollector]{setActiveScalar}{scalar} |
193 |
Specify the scalar field to load. |
194 |
\end{methoddesc} |
195 |
|
196 |
\begin{methoddesc}[DataCollector]{setActiveVector}{vector} |
197 |
Specify the vector field to load. |
198 |
\end{methoddesc} |
199 |
|
200 |
\begin{methoddesc}[DataCollector]{setActiveTensor}{tensor} |
201 |
Specify the tensor field to load. |
202 |
\end{methoddesc} |
203 |
|
204 |
\subsubsection{\ImageReader class} |
205 |
|
206 |
\begin{classdesc}{ImageReader}{format} |
207 |
An image reader is used to read data from an image in a variety of formats. |
208 |
\end{classdesc} |
209 |
|
210 |
The following is one of the methods available: |
211 |
\begin{methoddesc}[ImageReader]{setImageName}{image_name} |
212 |
Set the filename of the image to be loaded. |
213 |
\end{methoddesc} |
214 |
|
215 |
\subsubsection{\TextTwoD class} |
216 |
|
217 |
\begin{classdesc}{Text2D}{scene, text, viewport = Viewport.SOUTH_WEST} |
218 |
This class is used to insert two-dimensional text for annotations |
219 |
(e.g. titles, authors and labels). |
220 |
\end{classdesc} |
221 |
|
222 |
The following are some of the methods available: |
223 |
\begin{methoddesc}[Text2D]{setFontSize}{size} |
224 |
Set the 2D text size. |
225 |
\end{methoddesc} |
226 |
|
227 |
\begin{methoddesc}[Text2D]{boldOn}{} |
228 |
Use bold font style for the text. |
229 |
\end{methoddesc} |
230 |
|
231 |
\begin{methoddesc}[Text2D]{setColor}{color} |
232 |
Set the color of the 2D text. |
233 |
\end{methoddesc} |
234 |
|
235 |
Including methods from \ActorTwoD. |
236 |
|
237 |
|
238 |
%############################################################################## |
239 |
|
240 |
|
241 |
\subsection{Data Visualization Classes} |
242 |
\label{DATAVIS SEC} |
243 |
This subsection details the instances used to process and manipulate the source |
244 |
data. The typical usage of some of the classes is also shown. See \Sec{SAMPLEOUTPUT SEC} for sample images generated with these classes. |
245 |
|
246 |
One point to note is that the source can either be point or cell data. If the |
247 |
source is cell data, a conversion to point data may or may not be |
248 |
required, in order for the object to be rendered correctly. |
249 |
If a conversion is needed, the \texttt{cell\_to\_point} flag (see below) must |
249 |
be set to \texttt{True}, otherwise to \texttt{False} (which is the default). On occasions, an |
251 |
inaccurate object may be rendered from cell data even after conversion. |
252 |
|
253 |
\subsubsection{\Map class} |
254 |
|
255 |
\begin{classdesc}{Map}{scene, data_collector, |
256 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False, |
257 |
outline = True} |
258 |
Class that shows a scalar field on a domain surface. The domain surface |
259 |
can either be color or gray-scale, depending on the lookup table used. |
260 |
\end{classdesc} |
261 |
|
262 |
The following are some of the methods available:\\ |
263 |
Methods from \ActorThreeD and \DataSetMapper. |
264 |
|
265 |
A typical usage of \Map is shown below. |
266 |
|
267 |
\begin{python} |
268 |
""" |
269 |
Author: John Ngui, john.ngui@uq.edu.au |
270 |
""" |
271 |
|
272 |
# Import the necessary modules. |
273 |
from esys.pyvisi import Scene, DataCollector, Map, Camera |
274 |
from esys.pyvisi.constant import * |
275 |
import os |
276 |
|
277 |
PYVISI_EXAMPLE_MESHES_PATH = "data_meshes" |
278 |
PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images" |
279 |
X_SIZE = 800 |
280 |
Y_SIZE = 800 |
281 |
|
282 |
SCALAR_FIELD_POINT_DATA = "temperature" |
283 |
SCALAR_FIELD_CELL_DATA = "temperature_cell" |
284 |
FILE_3D = "interior_3D.xml" |
285 |
IMAGE_NAME = "map.jpg" |
286 |
JPG_RENDERER = Renderer.ONLINE_JPG |
287 |
|
288 |
# Create a Scene with four viewports. |
289 |
s = Scene(renderer = JPG_RENDERER, num_viewport = 4, x_size = X_SIZE, |
290 |
y_size = Y_SIZE) |
291 |
|
292 |
# Create a DataCollector reading from a XML file. |
293 |
dc1 = DataCollector(source = Source.XML) |
294 |
dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D)) |
295 |
dc1.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA) |
296 |
|
297 |
# Create a Map for the first viewport. |
298 |
m1 = Map(scene = s, data_collector = dc1, viewport = Viewport.SOUTH_WEST, |
299 |
lut = Lut.COLOR, cell_to_point = False, outline = True) |
300 |
m1.setRepresentationToWireframe() |
301 |
|
302 |
# Create a Camera for the first viewport |
303 |
c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST) |
304 |
c1.isometricView() |
305 |
|
306 |
# Create a second DataCollector reading from the same XML file but specifying |
307 |
# a different scalar field. |
308 |
dc2 = DataCollector(source = Source.XML) |
309 |
dc2.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D)) |
310 |
dc2.setActiveScalar(scalar = SCALAR_FIELD_CELL_DATA) |
311 |
|
312 |
# Create a Map for the third viewport. |
313 |
m2 = Map(scene = s, data_collector = dc2, viewport = Viewport.NORTH_EAST, |
314 |
lut = Lut.COLOR, cell_to_point = True, outline = True) |
315 |
|
316 |
# Create a Camera for the third viewport |
317 |
c2 = Camera(scene = s, viewport = Viewport.NORTH_EAST) |
318 |
|
319 |
# Render the object. |
320 |
s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME)) |
321 |
\end{python} |
322 |
|
323 |
\subsubsection{\MapOnPlaneCut class} |
324 |
|
325 |
\begin{classdesc}{MapOnPlaneCut}{scene, data_collector, |
326 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False, |
327 |
outline = True} |
328 |
This class works in a similar way to \Map, except that the result is a slice of |
329 |
the scalar field produced by cutting the map with a plane. The plane can be |
330 |
translated and rotated to its desired position. |
331 |
\end{classdesc} |
332 |
|
333 |
The following are some of the methods available:\\ |
334 |
Methods from \ActorThreeD, \Transform and \DataSetMapper. |
335 |
|
336 |
\subsubsection{\MapOnPlaneClip class} |
337 |
|
338 |
\begin{classdesc}{MapOnPlaneClip}{scene, data_collector, |
339 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False, |
340 |
outline = True} |
341 |
This class works in a similar way to \MapOnPlaneCut, except that the defined |
342 |
plane is used to clip the scalar field. |
343 |
\end{classdesc} |
344 |
|
345 |
The following are some of the methods available:\\ |
346 |
Methods from \ActorThreeD, \Transform, \Clipper and \DataSetMapper. |
347 |
|
348 |
\subsubsection{\MapOnScalarClip class} |
349 |
|
350 |
\begin{classdesc}{MapOnScalarClip}{scene, data_collector, |
351 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False, |
352 |
outline = True} |
353 |
This class works in a similar way to \Map, except that it only shows parts of |
354 |
the scalar field matching a scalar value. |
355 |
\end{classdesc} |
356 |
|
357 |
The following are some of the methods available:\\ |
358 |
Methods from \ActorThreeD, \Clipper and \DataSetMapper. |
359 |
|
360 |
\subsubsection{\MapOnScalarClipWithRotation class} |
361 |
|
362 |
\begin{classdesc}{MapOnScalarClipWithRotation}{scene, data_collector, |
363 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False} |
364 |
This class works in a similar way to \Map except that it |
365 |
shows a 2D scalar field clipped using a scalar value and subsequently |
366 |
rotated around the z-axis to create a 3D looking effect. This class should |
367 |
only be used with 2D data sets and NOT 3D. |
368 |
\end{classdesc} |
369 |
|
370 |
The following are some of the methods available:\\ |
371 |
Methods from \ActorThreeD, \Clipper, \Rotation and \DataSetMapper. |
372 |
|
373 |
\subsubsection{\Velocity class} |
374 |
|
375 |
\begin{classdesc}{Velocity}{scene, data_collector, arrow = Arrow.TWO_D, |
376 |
color_mode = ColorMode.VECTOR, viewport = Viewport.SOUTH_WEST, |
377 |
lut = Lut.COLOR, cell_to_point = False, outline = True} |
378 |
This class is used to display a vector field using arrows. The arrows can |
379 |
either be color or gray-scale, depending on the lookup table used. If the |
380 |
arrows are colored, there are two possible coloring modes, either using vector |
381 |
data or scalar data. Similarly, there are two possible types of arrows, either |
382 |
two-dimensional or three-dimensional. |
383 |
\end{classdesc} |
384 |
|
385 |
The following are some of the methods available:\\ |
386 |
Methods from \ActorThreeD, \GlyphThreeD, \MaskPoints and \DataSetMapper. |
387 |
|
388 |
\subsubsection{\VelocityOnPlaneCut class} |
389 |
|
390 |
\begin{classdesc}{VelocityOnPlaneCut}{scene, data_collector, |
391 |
arrow = Arrow.TWO_D, color_mode = ColorMode.VECTOR, |
392 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, |
393 |
cell_to_point = False, outline = True} |
394 |
This class works in a similar way to \MapOnPlaneCut, except that it shows a |
395 |
vector field using arrows cut using a plane. |
396 |
\end{classdesc} |
397 |
|
398 |
The following are some of the methods available:\\ |
399 |
Methods from \ActorThreeD, \GlyphThreeD, \Transform, \MaskPoints and |
400 |
\DataSetMapper. |
401 |
|
402 |
A typical usage of \VelocityOnPlaneCut is shown below. |
403 |
|
404 |
\begin{python} |
405 |
""" |
406 |
Author: John Ngui, john.ngui@uq.edu.au |
407 |
""" |
408 |
|
409 |
# Import the necessary modules |
410 |
from esys.pyvisi import Scene, DataCollector, VelocityOnPlaneCut, Camera |
411 |
from esys.pyvisi.constant import * |
412 |
import os |
413 |
|
414 |
PYVISI_EXAMPLE_MESHES_PATH = "data_meshes" |
415 |
PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images" |
416 |
X_SIZE = 400 |
417 |
Y_SIZE = 400 |
418 |
|
419 |
VECTOR_FIELD_CELL_DATA = "velocity" |
420 |
FILE_3D = "interior_3D.xml" |
421 |
IMAGE_NAME = "velocity.jpg" |
422 |
JPG_RENDERER = Renderer.ONLINE_JPG |
423 |
|
424 |
# Create a Scene. |
425 |
s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE, |
426 |
y_size = Y_SIZE) |
427 |
|
428 |
# Create a DataCollector reading from a XML file. |
429 |
dc1 = DataCollector(source = Source.XML) |
430 |
dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D)) |
431 |
dc1.setActiveVector(vector = VECTOR_FIELD_CELL_DATA) |
432 |
|
433 |
# Create VelocityOnPlaneCut. |
434 |
vopc1 = VelocityOnPlaneCut(scene = s, data_collector = dc1, |
435 |
viewport = Viewport.SOUTH_WEST, color_mode = ColorMode.VECTOR, |
436 |
arrow = Arrow.THREE_D, lut = Lut.COLOR, cell_to_point = False, |
437 |
outline = True) |
438 |
vopc1.setScaleFactor(scale_factor = 0.5) |
439 |
vopc1.setPlaneToXY(offset = 0.5) |
440 |
vopc1.setRatio(2) |
441 |
vopc1.randomOn() |
442 |
|
443 |
# Create a Camera. |
444 |
c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST) |
445 |
c1.isometricView() |
446 |
c1.elevation(angle = -20) |
447 |
|
448 |
# Render the object. |
449 |
s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME)) |
450 |
\end{python} |
451 |
|
452 |
\subsubsection{\VelocityOnPlaneClip class} |
453 |
|
454 |
\begin{classdesc}{VelocityOnPlaneClip}{scene, data_collector, |
455 |
arrow = Arrow.TWO_D, color_mode = ColorMode.VECTOR, |
456 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, |
457 |
cell_to_point = False, outline = True} |
458 |
This class works in a similar way to \MapOnPlaneClip, except that it shows a |
459 |
vector field using arrows clipped using a plane. |
460 |
\end{classdesc} |
461 |
|
462 |
The following are some of the methods available:\\ |
463 |
Methods from \ActorThreeD, \GlyphThreeD, \Transform, \Clipper, |
464 |
\MaskPoints and \DataSetMapper. |
465 |
|
466 |
\subsubsection{\Ellipsoid class} |
467 |
|
468 |
\begin{classdesc}{Ellipsoid}{scene, data_collector, |
469 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False, |
470 |
outline = True} |
471 |
Class that shows a tensor field using ellipsoids. The ellipsoids can either be |
472 |
color or gray-scale, depending on the lookup table used. |
473 |
\end{classdesc} |
474 |
|
475 |
The following are some of the methods available:\\ |
476 |
Methods from \ActorThreeD, \Sphere, \TensorGlyph, \MaskPoints and |
477 |
\DataSetMapper. |
478 |
|
479 |
\subsubsection{\EllipsoidOnPlaneCut class} |
480 |
|
481 |
\begin{classdesc}{EllipsoidOnPlaneCut}{scene, data_collector, |
482 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False, |
483 |
outline = True} |
484 |
This class works in a similar way to \MapOnPlaneCut, except that it shows |
485 |
a tensor field using ellipsoids cut using a plane. |
486 |
\end{classdesc} |
487 |
|
488 |
The following are some of the methods available:\\ |
489 |
Methods from \ActorThreeD, \Sphere, \TensorGlyph, \Transform, |
490 |
\MaskPoints and \DataSetMapper. |
491 |
|
492 |
\subsubsection{\EllipsoidOnPlaneClip class} |
493 |
|
494 |
\begin{classdesc}{EllipsoidOnPlaneClip}{scene, data_collector, |
495 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False, |
496 |
outline = True} |
497 |
This class works in a similar way to \MapOnPlaneClip, except that it shows a |
498 |
tensor field using ellipsoids clipped using a plane. |
499 |
\end{classdesc} |
500 |
|
501 |
The following are some of the methods available:\\ |
502 |
Methods from \ActorThreeD, \Sphere, \TensorGlyph, \Transform, \Clipper, |
503 |
\MaskPoints and \DataSetMapper. |
504 |
|
505 |
A typical usage of \EllipsoidOnPlaneClip is shown below. |
506 |
|
507 |
\begin{python} |
508 |
""" |
509 |
Author: John Ngui, john.ngui@uq.edu.au |
510 |
""" |
511 |
|
512 |
# Import the necessary modules |
513 |
from esys.pyvisi import Scene, DataCollector, EllipsoidOnPlaneClip, Camera |
514 |
from esys.pyvisi.constant import * |
515 |
import os |
516 |
|
517 |
PYVISI_EXAMPLE_MESHES_PATH = "data_meshes" |
518 |
PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images" |
519 |
X_SIZE = 400 |
520 |
Y_SIZE = 400 |
521 |
|
522 |
TENSOR_FIELD_CELL_DATA = "stress_cell" |
523 |
FILE_3D = "interior_3D.xml" |
524 |
IMAGE_NAME = "ellipsoid.jpg" |
525 |
JPG_RENDERER = Renderer.ONLINE_JPG |
526 |
|
527 |
# Create a Scene. |
528 |
s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE, |
529 |
y_size = Y_SIZE) |
530 |
|
531 |
# Create a DataCollector reading from a XML file. |
532 |
dc1 = DataCollector(source = Source.XML) |
533 |
dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D)) |
534 |
dc1.setActiveTensor(tensor = TENSOR_FIELD_CELL_DATA) |
535 |
|
536 |
# Create an EllipsoidOnPlaneClip. |
537 |
eopc1 = EllipsoidOnPlaneClip(scene = s, data_collector = dc1, |
538 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = True, |
539 |
outline = True) |
540 |
eopc1.setPlaneToXY() |
541 |
eopc1.setScaleFactor(scale_factor = 0.2) |
542 |
eopc1.rotateX(angle = 10) |
543 |
|
544 |
# Create a Camera. |
545 |
c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST) |
546 |
c1.bottomView() |
547 |
c1.azimuth(angle = -90) |
548 |
c1.elevation(angle = 10) |
549 |
|
550 |
# Render the object. |
551 |
s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME)) |
552 |
\end{python} |
553 |
|
554 |
\subsubsection{\Contour class} |
555 |
|
556 |
\begin{classdesc}{Contour}{scene, data_collector, |
557 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False, |
558 |
outline = True} |
559 |
Class that shows a scalar field using contour surfaces. The contour surfaces |
560 |
can either be color or gray-scale, depending on the lookup table used. This |
561 |
class can also be used to generate isosurfaces. |
562 |
\end{classdesc} |
563 |
|
564 |
The following are some of the methods available:\\ |
565 |
Methods from \ActorThreeD, \ContourModule and \DataSetMapper. |
566 |
|
567 |
A typical usage of \Contour is shown below. |
568 |
|
569 |
\begin{python} |
570 |
""" |
571 |
Author: John Ngui, john.ngui@uq.edu.au |
572 |
""" |
573 |
|
574 |
# Import the necessary modules |
575 |
from esys.pyvisi import Scene, DataCollector, Contour, Camera |
576 |
from esys.pyvisi.constant import * |
577 |
import os |
578 |
|
579 |
PYVISI_EXAMPLE_MESHES_PATH = "data_meshes" |
580 |
PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images" |
581 |
X_SIZE = 400 |
582 |
Y_SIZE = 400 |
583 |
|
584 |
SCALAR_FIELD_POINT_DATA = "temperature" |
585 |
FILE_3D = "interior_3D.xml" |
586 |
IMAGE_NAME = "contour.jpg" |
587 |
JPG_RENDERER = Renderer.ONLINE_JPG |
588 |
|
589 |
# Create a Scene. |
590 |
s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE, |
591 |
y_size = Y_SIZE) |
592 |
|
593 |
# Create a DataCollector reading a XML file. |
594 |
dc1 = DataCollector(source = Source.XML) |
595 |
dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D)) |
596 |
dc1.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA) |
597 |
|
598 |
# Create three contours. |
599 |
ctr1 = Contour(scene = s, data_collector = dc1, viewport = Viewport.SOUTH_WEST, |
600 |
lut = Lut.COLOR, cell_to_point = False, outline = True) |
601 |
ctr1.generateContours(contours = 3) |
602 |
|
603 |
# Create a Camera. |
604 |
cam1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST) |
605 |
cam1.elevation(angle = -40) |
606 |
|
607 |
# Render the object. |
608 |
s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME)) |
609 |
\end{python} |
610 |
|
611 |
\subsubsection{\ContourOnPlaneCut class} |
612 |
|
613 |
\begin{classdesc}{ContourOnPlaneCut}{scene, data_collector, |
614 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False, |
615 |
outline = True} |
616 |
This class works in a similar way to \MapOnPlaneCut, except that it shows a |
617 |
scalar field using contour surfaces cut using a plane. |
618 |
\end{classdesc} |
619 |
|
620 |
The following are some of the methods available:\\ |
621 |
Methods from \ActorThreeD, \ContourModule, \Transform and \DataSetMapper. |
622 |
|
623 |
\subsubsection{\ContourOnPlaneClip class} |
624 |
|
625 |
\begin{classdesc}{ContourOnPlaneClip}{scene, data_collector, |
626 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False, |
627 |
outline = True} |
628 |
This class works in a similar way to \MapOnPlaneClip, except that it shows a |
629 |
scalar field using contour surfaces clipped using a plane. |
630 |
\end{classdesc} |
631 |
|
632 |
The following are some of the methods available:\\ |
633 |
Methods from \ActorThreeD, \ContourModule, \Transform, \Clipper and |
634 |
\DataSetMapper. |
635 |
|
636 |
\subsubsection{\StreamLine class} |
637 |
|
638 |
\begin{classdesc}{StreamLine}{scene, data_collector, |
639 |
viewport = Viewport.SOUTH_WEST, color_mode = ColorMode.VECTOR, lut = Lut.COLOR, |
640 |
cell_to_point = False, outline = True} |
641 |
Class that shows the direction of particles of a vector field using streamlines. |
642 |
The streamlines can either be color or gray-scale, depending on the lookup |
643 |
table used. If the streamlines are colored, there are two possible coloring |
644 |
modes, either using vector data or scalar data. |
645 |
\end{classdesc} |
646 |
|
647 |
The following are some of the methods available:\\ |
648 |
Methods from \ActorThreeD, \PointSource, \StreamLineModule, \Tube and |
649 |
\DataSetMapper. |
650 |
|
651 |
A typical usage of \StreamLine is shown below. |
652 |
|
653 |
\begin{python} |
654 |
""" |
655 |
Author: John Ngui, john.ngui@uq.edu.au |
656 |
""" |
657 |
|
658 |
# Import the necessary modules. |
659 |
from esys.pyvisi import Scene, DataCollector, StreamLine, Camera |
660 |
from esys.pyvisi.constant import * |
661 |
import os |
662 |
|
663 |
PYVISI_EXAMPLE_MESHES_PATH = "data_meshes" |
664 |
PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images" |
665 |
X_SIZE = 400 |
666 |
Y_SIZE = 400 |
667 |
|
668 |
VECTOR_FIELD_CELL_DATA = "temperature" |
669 |
FILE_3D = "interior_3D.xml" |
670 |
IMAGE_NAME = "streamline.jpg" |
671 |
JPG_RENDERER = Renderer.ONLINE_JPG |
672 |
|
673 |
# Create a Scene. |
674 |
s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE, |
675 |
y_size = Y_SIZE) |
676 |
|
677 |
# Create a DataCollector reading from a XML file. |
678 |
dc1 = DataCollector(source = Source.XML) |
679 |
dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D)) |
680 |
|
681 |
# Create streamlines. |
682 |
sl1 = StreamLine(scene = s, data_collector = dc1, |
683 |
viewport = Viewport.SOUTH_WEST, color_mode = ColorMode.SCALAR, |
684 |
lut = Lut.COLOR, cell_to_point = False, outline = True) |
685 |
sl1.setTubeRadius(radius = 0.02) |
686 |
sl1.setTubeNumberOfSides(3) |
687 |
sl1.setTubeRadiusToVaryByVector() |
688 |
sl1.setPointSourceRadius(0.9) |
689 |
|
690 |
# Create a Camera. |
691 |
c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST) |
692 |
c1.isometricView() |
693 |
|
694 |
# Render the object. |
695 |
s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME)) |
696 |
\end{python} |
697 |
|
698 |
\subsubsection{\Carpet class} |
699 |
|
700 |
\begin{classdesc}{Carpet}{scene, data_collector, |
701 |
viewport = Viewport.SOUTH_WEST, warp_mode = WarpMode.SCALAR, |
702 |
lut = Lut.COLOR, cell_to_point = False, outline = True} |
703 |
This class works in a similar way to \MapOnPlaneCut, except that it shows a |
704 |
scalar field cut on a plane and deformed (warped) along the normal. The |
705 |
plane can either be color or gray-scale, depending on the lookup table used. |
706 |
Similarly, the plane can be deformed either using scalar data or vector data. |
707 |
\end{classdesc} |
708 |
|
709 |
The following are some of the methods available:\\ |
710 |
Methods from \ActorThreeD, \Warp, \Transform and \DataSetMapper. |
711 |
|
712 |
A typical usage of \Carpet is shown below. |
713 |
|
714 |
\begin{python} |
715 |
""" |
716 |
Author: John Ngui, john.ngui@uq.edu.au |
717 |
""" |
718 |
|
719 |
# Import the necessary modules. |
720 |
from esys.pyvisi import Scene, DataCollector, Carpet, Camera |
721 |
from esys.pyvisi.constant import * |
722 |
import os |
723 |
|
724 |
PYVISI_EXAMPLE_MESHES_PATH = "data_meshes" |
725 |
PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images" |
726 |
X_SIZE = 400 |
727 |
Y_SIZE = 400 |
728 |
|
729 |
SCALAR_FIELD_CELL_DATA = "temperature_cell" |
730 |
FILE_3D = "interior_3D.xml" |
731 |
IMAGE_NAME = "carpet.jpg" |
732 |
JPG_RENDERER = Renderer.ONLINE_JPG |
733 |
|
734 |
# Create a Scene. |
735 |
s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE, |
736 |
y_size = Y_SIZE) |
737 |
|
738 |
# Create a DataCollector reading from a XML file. |
739 |
dc1 = DataCollector(source = Source.XML) |
740 |
dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D)) |
741 |
dc1.setActiveScalar(scalar = SCALAR_FIELD_CELL_DATA) |
742 |
|
743 |
# Create a Carpet. |
744 |
cpt1 = Carpet(scene = s, data_collector = dc1, viewport = Viewport.SOUTH_WEST, |
745 |
warp_mode = WarpMode.SCALAR, lut = Lut.COLOR, cell_to_point = True, |
746 |
outline = True) |
747 |
cpt1.setPlaneToXY(0.2) |
748 |
cpt1.setScaleFactor(1.9) |
749 |
|
750 |
# Create a Camera. |
751 |
c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST) |
752 |
c1.isometricView() |
753 |
|
754 |
# Render the object. |
755 |
s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME)) |
756 |
\end{python} |
757 |
|
758 |
\subsubsection{\Legend class} |
759 |
|
760 |
\begin{classdesc}{Legend}{scene, data_collector, |
761 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, legend = LegendType.SCALAR} |
762 |
Class that shows a scalar field on a domain surface. The domain surface |
763 |
can either be color or gray-scale, depending on the lookup table used. |
764 |
\end{classdesc} |
765 |
|
766 |
The following are some of the methods available:\\ |
767 |
Methods from \ActorThreeD, \ScalarBar and \DataSetMapper. |
768 |
|
769 |
\subsubsection{\Rectangle class} |
770 |
|
771 |
\begin{classdesc}{Rectangle}{scene, viewport = Viewport.SOUTH_WEST} |
772 |
Class that generates a rectangle box. |
773 |
\end{classdesc} |
774 |
|
775 |
The following are some of the methods available:\\ |
776 |
Methods from \ActorThreeD, \CubeSource and \DataSetMapper. |
777 |
|
778 |
\subsubsection{\Image class} |
779 |
|
780 |
\begin{classdesc}{Image}{scene, image_reader, viewport = Viewport.SOUTH_WEST} |
781 |
Class that displays an image which can be scaled (upwards and downwards) and |
782 |
has interaction capability. The image can also be translated and rotated along |
783 |
the X, Y and Z axes. One of the most common uses of this feature is pasting an |
784 |
image on a surface map. |
785 |
\end{classdesc} |
786 |
|
787 |
The following are some of the methods available:\\ |
788 |
Methods from \ActorThreeD, \PlaneSource and \Transform. |
789 |
|
790 |
A typical usage of \Image is shown below. |
791 |
|
792 |
\begin{python} |
793 |
""" |
794 |
Author: John Ngui, john.ngui@uq.edu.au |
795 |
""" |
796 |
|
797 |
# Import the necessary modules. |
798 |
from esys.pyvisi import Scene, DataCollector, Map, ImageReader, Image, Camera |
799 |
from esys.pyvisi import GlobalPosition |
800 |
from esys.pyvisi.constant import * |
801 |
import os |
802 |
|
803 |
PYVISI_EXAMPLE_MESHES_PATH = "data_meshes" |
804 |
PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images" |
805 |
X_SIZE = 400 |
806 |
Y_SIZE = 400 |
807 |
|
808 |
SCALAR_FIELD_POINT_DATA = "temperature" |
809 |
FILE_3D = "interior_3D.xml" |
810 |
LOAD_IMAGE_NAME = "flinders.jpg" |
811 |
SAVE_IMAGE_NAME = "image.jpg" |
812 |
JPG_RENDERER = Renderer.ONLINE_JPG |
813 |
|
814 |
# Create a Scene. |
815 |
s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE, |
816 |
y_size = Y_SIZE) |
817 |
|
818 |
# Create a DataCollector reading from an XML file.
819 |
dc1 = DataCollector(source = Source.XML) |
820 |
dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D)) |
821 |
|
822 |
# Create a Map. |
823 |
m1 = Map(scene = s, data_collector = dc1, viewport = Viewport.SOUTH_WEST, |
824 |
lut = Lut.COLOR, cell_to_point = False, outline = True) |
825 |
m1.setOpacity(0.3) |
826 |
|
827 |
# Create an ImageReader (in place of DataCollector). |
828 |
ir = ImageReader(ImageFormat.JPG) |
829 |
ir.setImageName(image_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, \ |
830 |
LOAD_IMAGE_NAME)) |
831 |
|
832 |
# Create an Image. |
833 |
i = Image(scene = s, image_reader = ir, viewport = Viewport.SOUTH_WEST) |
834 |
i.setOpacity(opacity = 0.9) |
835 |
i.translate(0,0,-1) |
836 |
i.setPoint1(GlobalPosition(2,0,0)) |
837 |
i.setPoint2(GlobalPosition(0,2,0)) |
838 |
|
839 |
# Create a Camera. |
840 |
c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST) |
841 |
|
842 |
# Render the image. |
843 |
s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, SAVE_IMAGE_NAME)) |
844 |
\end{python} |
845 |
|
846 |
\subsubsection{\Logo class} |
847 |
|
848 |
\begin{classdesc}{Logo}{scene, image_reader, viewport = Viewport.SOUTH_WEST} |
849 |
Class that displays a static image, in particular a logo |
850 |
(e.g. company symbol) and has NO interaction capability. The position and size |
851 |
of the logo can be specified. |
852 |
\end{classdesc} |
853 |
|
854 |
The following are some of the methods available:\\ |
855 |
Methods from \ImageReslice and \ActorTwoD. |
856 |
|
857 |
\subsubsection{\Movie class} |
858 |
|
859 |
\begin{classdesc}{Movie}{parameter_file = "make_movie"} |
860 |
This class is used to create movies out of a series of images. The parameter |
861 |
specifies the name of a file that will contain the required information for the |
862 |
'ppmtompeg' command which is used to generate the movie. |
863 |
\end{classdesc} |
864 |
|
865 |
The following are some of the methods available:\\ |
866 |
\begin{methoddesc}[Movie]{imageRange}{input_directory, first_image, last_image} |
867 |
Use this method to specify that the movie is to be generated from image files |
868 |
with filenames in a certain range (e.g. 'image000.jpg' to 'image050.jpg'). |
869 |
\end{methoddesc} |
870 |
|
871 |
\begin{methoddesc}[Movie]{imageList}{input_directory, image_list} |
872 |
Use this method to specify a list of arbitrary image filenames from which the |
873 |
movie is to be generated. |
874 |
\end{methoddesc} |
875 |
|
876 |
\begin{methoddesc}[Movie]{makeMovie}{movie} |
877 |
Generate the movie with the specified filename. |
878 |
\end{methoddesc} |
879 |
|
880 |
A typical usage of \Movie is shown below. |
881 |
|
882 |
\begin{python} |
883 |
""" |
884 |
Author: John Ngui, john.ngui@uq.edu.au |
885 |
""" |
886 |
|
887 |
# Import the necessary modules. |
888 |
from esys.pyvisi import Scene, DataCollector, Map, Camera, Velocity, Legend |
889 |
from esys.pyvisi import Movie, LocalPosition |
890 |
from esys.pyvisi.constant import * |
891 |
import os |
892 |
|
893 |
PYVISI_EXAMPLE_MESHES_PATH = "data_meshes" |
894 |
PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images" |
895 |
X_SIZE = 800 |
896 |
Y_SIZE = 800 |
897 |
|
898 |
SCALAR_FIELD_POINT_DATA = "temp" |
899 |
FILE_2D = "tempvel-" |
900 |
IMAGE_NAME = "movie" |
901 |
JPG_RENDERER = Renderer.OFFLINE_JPG |
902 |
|
903 |
# Create a Scene. |
904 |
s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE, |
905 |
y_size = Y_SIZE) |
906 |
|
907 |
# Create a DataCollector reading from an XML file.
908 |
dc1 = DataCollector(source = Source.XML) |
909 |
dc1.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA) |
910 |
|
911 |
# Create a Map. |
912 |
m1 = Map(scene = s, data_collector = dc1, |
913 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False, |
914 |
outline = True) |
915 |
|
916 |
# Create a Camera. |
917 |
cam1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST) |
918 |
|
919 |
# Create a movie. |
920 |
mov = Movie() |
921 |
lst = [] |
922 |
|
923 |
# Read in one file after another and render the object.
924 |
for i in range(938, 949): |
925 |
dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, \ |
926 |
FILE_2D + "%06d.vtu") % i) |
927 |
|
928 |
s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, \ |
929 |
IMAGE_NAME + "%06d.jpg" % i)) |
930 |
|
931 |
lst.append(IMAGE_NAME + "%06d.jpg" % i) |
932 |
|
933 |
# Images (first and last inclusive) from which the movie is to be generated. |
934 |
mov.imageRange(input_directory = PYVISI_EXAMPLE_IMAGES_PATH, |
935 |
first_image = IMAGE_NAME + "000938.jpg", |
936 |
last_image = IMAGE_NAME + "000948.jpg") |
937 |
|
938 |
# Alternatively, a list of images can be specified. |
939 |
#mov.imageList(input_directory = PYVISI_EXAMPLE_IMAGES_PATH, image_list = lst) |
940 |
|
941 |
# Generate the movie. |
942 |
mov.makeMovie(os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, "movie.mpg")) |
943 |
\end{python} |
944 |
|
945 |
|
946 |
%############################################################################## |
947 |
|
948 |
|
949 |
\subsection{Coordinate Classes} |
950 |
This subsection details the instances used to position rendered objects. |
951 |
|
952 |
\subsubsection{\LocalPosition class} |
953 |
|
954 |
\begin{classdesc}{LocalPosition}{x_coor, y_coor} |
955 |
Class that defines a position (X and Y) in the local 2D coordinate system. |
956 |
\end{classdesc} |
957 |
|
958 |
\subsubsection{\GlobalPosition class} |
959 |
|
960 |
\begin{classdesc}{GlobalPosition}{x_coor, y_coor, z_coor} |
961 |
Class that defines a position (X, Y and Z) in the global 3D coordinate system. |
962 |
\end{classdesc} |
963 |
|
964 |
|
965 |
%############################################################################## |
966 |
|
967 |
|
968 |
\subsection{Supporting Classes} |
969 |
This subsection details the supporting classes and their corresponding methods |
970 |
inherited by the input (see \Sec{INPUT SEC}) and data |
971 |
visualization classes (see \Sec{DATAVIS SEC}). |
972 |
|
973 |
\subsubsection{\ActorThreeD class} |
974 |
Class that defines a 3D actor. \\ |
975 |
|
976 |
The following are some of the methods available: |
977 |
|
978 |
\begin{methoddesc}[Actor3D]{setOpacity}{opacity} |
979 |
Set the opacity (transparency) of the 3D actor. |
980 |
\end{methoddesc} |
981 |
|
982 |
\begin{methoddesc}[Actor3D]{setColor}{color} |
983 |
Set the color of the 3D actor. |
984 |
\end{methoddesc} |
985 |
|
986 |
\begin{methoddesc}[Actor3D]{setRepresentationToWireframe}{} |
987 |
Set the representation of the 3D actor to wireframe. |
988 |
\end{methoddesc} |
989 |
|
990 |
\subsubsection{\ActorTwoD class} |
991 |
Class that defines a 2D actor. \\ |
992 |
|
993 |
The following are some of the methods available: |
994 |
|
995 |
\begin{methoddesc}[Actor2D]{setPosition}{position} |
996 |
Set the position (XY) of the 2D actor. Default position is the lower left hand |
997 |
corner of the window / viewport. |
998 |
\end{methoddesc} |
999 |
|
1000 |
\subsubsection{\Clipper class} |
1001 |
Class that defines a clipper. \\ |
1002 |
|
1003 |
The following are some of the methods available: |
1004 |
|
1005 |
\begin{methoddesc}[Clipper]{setInsideOutOn}{} |
1006 |
Clips one side of the rendered object. |
1007 |
\end{methoddesc} |
1008 |
|
1009 |
\begin{methoddesc}[Clipper]{setInsideOutOff}{} |
1010 |
Clips the other side of the rendered object. |
1011 |
\end{methoddesc} |
1012 |
|
1013 |
\begin{methoddesc}[Clipper]{setClipValue}{value} |
1014 |
Set the scalar clip value (instead of using a plane) for the clipper. |
1015 |
\end{methoddesc} |
1016 |
|
1017 |
\subsubsection{\ContourModule class} |
1018 |
Class that defines the contour module. \\ |
1019 |
|
1020 |
The following are some of the methods available: |
1021 |
|
1022 |
\begin{methoddesc}[ContourModule]{generateContours}{contours = None, |
1023 |
lower_range = None, upper_range = None} |
1024 |
Generate the specified number of contours within the specified range. |
1025 |
In order to generate a single isosurface, the 'lower_range' and 'upper_range' |
1026 |
must be set to the same value. |
1027 |
\end{methoddesc} |
1028 |
|
1029 |
\subsubsection{\GlyphThreeD class} |
1030 |
Class that defines 3D glyphs. \\ |
1031 |
|
1032 |
The following are some of the methods available: |
1033 |
|
1034 |
\begin{methoddesc}[Glyph3D]{setScaleModeByVector}{} |
1035 |
Set the 3D glyph to scale according to the vector data. |
1036 |
\end{methoddesc} |
1037 |
|
1038 |
\begin{methoddesc}[Glyph3D]{setScaleModeByScalar}{} |
1039 |
Set the 3D glyph to scale according to the scalar data. |
1040 |
\end{methoddesc} |
1041 |
|
1042 |
\begin{methoddesc}[Glyph3D]{setScaleFactor}{scale_factor} |
1043 |
Set the 3D glyph scale factor. |
1044 |
\end{methoddesc} |
1045 |
|
1046 |
\subsubsection{\TensorGlyph class} |
1047 |
Class that defines tensor glyphs. \\ |
1048 |
|
1049 |
The following are some of the methods available: |
1050 |
|
1051 |
\begin{methoddesc}[TensorGlyph]{setScaleFactor}{scale_factor} |
1052 |
Set the scale factor for the tensor glyph. |
1053 |
\end{methoddesc} |
1054 |
|
1055 |
\begin{methoddesc}[TensorGlyph]{setMaxScaleFactor}{max_scale_factor} |
1056 |
Set the maximum allowable scale factor for the tensor glyph. |
1057 |
\end{methoddesc} |
1058 |
|
1059 |
\subsubsection{\PlaneSource class} |
1060 |
Class that defines a plane source. A plane source is defined by an origin |
1061 |
and two other points, which form the axes (X and Y). \\ |
1062 |
|
1063 |
The following are some of the methods available: |
1064 |
|
1065 |
\begin{methoddesc}[PlaneSource]{setOrigin}{position} |
1066 |
Set the origin of the plane source. |
1067 |
\end{methoddesc} |
1068 |
|
1069 |
\begin{methoddesc}[PlaneSource]{setPoint1}{position} |
1070 |
Set the first point from the origin of the plane source. |
1071 |
\end{methoddesc} |
1072 |
|
1073 |
\begin{methoddesc}[PlaneSource]{setPoint2}{position} |
1074 |
Set the second point from the origin of the plane source. |
1075 |
\end{methoddesc} |
1076 |
|
1077 |
\subsubsection{\PointSource class} |
1078 |
Class that defines the source (location) to generate points. The points are |
1079 |
generated within the radius of a sphere. \\ |
1080 |
|
1081 |
The following are some of the methods available: |
1082 |
|
1083 |
\begin{methoddesc}[PointSource]{setPointSourceRadius}{radius} |
1084 |
Set the radius of the sphere. |
1085 |
\end{methoddesc} |
1086 |
|
1087 |
\begin{methoddesc}[PointSource]{setPointSourceCenter}{center} |
1088 |
Set the center of the sphere. |
1089 |
\end{methoddesc} |
1090 |
|
1091 |
\begin{methoddesc}[PointSource]{setPointSourceNumberOfPoints}{points} |
1092 |
Set the number of points to generate within the sphere (the larger the |
1093 |
number of points, the more streamlines are generated). |
1094 |
\end{methoddesc} |
1095 |
|
1096 |
\subsubsection{\Sphere class} |
1097 |
Class that defines a sphere. \\ |
1098 |
|
1099 |
The following are some of the methods available: |
1100 |
|
1101 |
\begin{methoddesc}[Sphere]{setThetaResolution}{resolution} |
1102 |
Set the theta resolution of the sphere. |
1103 |
\end{methoddesc} |
1104 |
|
1105 |
\begin{methoddesc}[Sphere]{setPhiResolution}{resolution} |
1106 |
Set the phi resolution of the sphere. |
1107 |
\end{methoddesc} |
1108 |
|
1109 |
\subsubsection{\StreamLineModule class} |
1110 |
Class that defines the streamline module. \\ |
1111 |
|
1112 |
The following are some of the methods available: |
1113 |
|
1114 |
\begin{methoddesc}[StreamLineModule]{setMaximumPropagationTime}{time} |
1115 |
Set the maximum length of the streamline expressed in elapsed time. |
1116 |
\end{methoddesc} |
1117 |
|
1118 |
\begin{methoddesc}[StreamLineModule]{setIntegrationToBothDirections}{} |
1119 |
Set the integration to occur on both sides: forward (where the streamline
1120 |
goes) and backward (where the streamline came from). |
1121 |
\end{methoddesc} |
1122 |
|
1123 |
\subsubsection{\Transform class} |
1124 |
Class that defines the orientation of planes. \\ |
1125 |
|
1126 |
The following are some of the methods available: |
1127 |
|
1128 |
\begin{methoddesc}[Transform]{translate}{x_offset, y_offset, z_offset} |
1129 |
Translate the rendered object along the x, y and z-axes. |
1130 |
\end{methoddesc} |
1131 |
|
1132 |
\begin{methoddesc}[Transform]{rotateX}{angle} |
1133 |
Rotate the plane around the x-axis. |
1134 |
\end{methoddesc} |
1135 |
|
1136 |
\begin{methoddesc}[Transform]{rotateY}{angle} |
1137 |
Rotate the plane around the y-axis. |
1138 |
\end{methoddesc} |
1139 |
|
1140 |
\begin{methoddesc}[Transform]{rotateZ}{angle} |
1141 |
Rotate the plane around the z-axis. |
1142 |
\end{methoddesc} |
1143 |
|
1144 |
\begin{methoddesc}[Transform]{setPlaneToXY}{offset = 0} |
1145 |
Set the plane orthogonal to the z-axis. |
1146 |
\end{methoddesc} |
1147 |
|
1148 |
\begin{methoddesc}[Transform]{setPlaneToYZ}{offset = 0} |
1149 |
Set the plane orthogonal to the x-axis. |
1150 |
\end{methoddesc} |
1151 |
|
1152 |
\begin{methoddesc}[Transform]{setPlaneToXZ}{offset = 0} |
1153 |
Set the plane orthogonal to the y-axis. |
1154 |
\end{methoddesc} |
1155 |
|
1156 |
\subsubsection{\Tube class} |
1157 |
Class that defines the tube wrapped around the streamlines. \\ |
1158 |
|
1159 |
The following are some of the methods available: |
1160 |
|
1161 |
\begin{methoddesc}[Tube]{setTubeRadius}{radius} |
1162 |
Set the radius of the tube. |
1163 |
\end{methoddesc} |
1164 |
|
1165 |
\begin{methoddesc}[Tube]{setTubeRadiusToVaryByVector}{} |
1166 |
Set the radius of the tube to vary by vector data. |
1167 |
\end{methoddesc} |
1168 |
|
1169 |
\begin{methoddesc}[Tube]{setTubeRadiusToVaryByScalar}{} |
1170 |
Set the radius of the tube to vary by scalar data. |
1171 |
\end{methoddesc} |
1172 |
|
1173 |
\subsubsection{\Warp class} |
1174 |
Class that defines the deformation of a scalar field. \\ |
1175 |
|
1176 |
The following are some of the methods available: |
1177 |
|
1178 |
\begin{methoddesc}[Warp]{setScaleFactor}{scale_factor} |
1179 |
Set the displacement scale factor. |
1180 |
\end{methoddesc} |
1181 |
|
1182 |
\subsubsection{\MaskPoints class} |
1183 |
Class that defines masking of points. This is useful to prevent the |
1184 |
rendered object from being cluttered with arrows or ellipsoids. \\ |
1185 |
|
1186 |
The following are some of the methods available: |
1187 |
|
1188 |
\begin{methoddesc}[MaskPoints]{setRatio}{ratio} |
1189 |
Mask every n'th point. |
1190 |
\end{methoddesc} |
1191 |
|
1192 |
\begin{methoddesc}[MaskPoints]{randomOn}{} |
1193 |
Enables randomization of the points selected for masking. |
1194 |
\end{methoddesc} |
1195 |
|
1196 |
\subsubsection{\ScalarBar class} |
1197 |
Class that defines a scalar bar. \\ |
1198 |
|
1199 |
The following are some of the methods available: |
1200 |
|
1201 |
\begin{methoddesc}[ScalarBar]{setTitle}{title} |
1202 |
Set the title of the scalar bar. |
1203 |
\end{methoddesc} |
1204 |
|
1205 |
\begin{methoddesc}[ScalarBar]{setPosition}{position} |
1206 |
Set the local position of the scalar bar. |
1207 |
\end{methoddesc} |
1208 |
|
1209 |
\begin{methoddesc}[ScalarBar]{setOrientationToHorizontal}{} |
1210 |
Set the orientation of the scalar bar to horizontal. |
1211 |
\end{methoddesc} |
1212 |
|
1213 |
\begin{methoddesc}[ScalarBar]{setOrientationToVertical}{} |
1214 |
Set the orientation of the scalar bar to vertical. |
1215 |
\end{methoddesc} |
1216 |
|
1217 |
\begin{methoddesc}[ScalarBar]{setHeight}{height} |
1218 |
Set the height of the scalar bar. |
1219 |
\end{methoddesc} |
1220 |
|
1221 |
\begin{methoddesc}[ScalarBar]{setWidth}{width} |
1222 |
Set the width of the scalar bar. |
1223 |
\end{methoddesc} |
1224 |
|
1225 |
\begin{methoddesc}[ScalarBar]{setLabelColor}{color} |
1226 |
Set the color of the scalar bar's label. |
1227 |
\end{methoddesc} |
1228 |
|
1229 |
\begin{methoddesc}[ScalarBar]{setTitleColor}{color} |
1230 |
Set the color of the scalar bar's title. |
1231 |
\end{methoddesc} |
1232 |
|
1233 |
\subsubsection{\ImageReslice class} |
1234 |
Class that defines an image reslice which is used to resize static |
1235 |
(no interaction capability) images (i.e. logo). \\ |
1236 |
|
1237 |
The following are some of the methods available: |
1238 |
|
1239 |
\begin{methoddesc}[ImageReslice]{setSize}{size} |
1240 |
Set the size factor of the image. The value must be between 0 and 2. |
1241 |
Size 1 (one) keeps the image in its original size (which is the default). |
1242 |
\end{methoddesc} |
1243 |
|
1244 |
\subsubsection{\DataSetMapper class} |
1245 |
Class that defines a data set mapper. \\ |
1246 |
|
1247 |
The following are some of the methods available: |
1248 |
|
1249 |
\begin{methoddesc}[DataSetMapper]{setScalarRange}{lower_range, upper_range} |
1250 |
Set the minimum and maximum scalar range for the data set mapper. This |
1251 |
method is called when the range has been specified by the user. |
1252 |
Therefore, the scalar range read from the source will be ignored. |
1253 |
\end{methoddesc} |
1254 |
|
1255 |
\subsubsection{\CubeSource class} |
1256 |
Class that defines a cube source. The center of the cube source defines |
1257 |
the point from which the cube is to be generated and the X, Y |
1258 |
and Z lengths define the length of the cube from the center point. If |
1259 |
X length is 3, then the X length to the left and right of the center |
1260 |
point is 1.5 on each side.\\
1261 |
|
1262 |
The following are some of the methods available: |
1263 |
|
1264 |
\begin{methoddesc}[CubeSource]{setCenter}{center} |
1265 |
Set the cube source center. |
1266 |
\end{methoddesc} |
1267 |
|
1268 |
\begin{methoddesc}[CubeSource]{setXLength}{length} |
1269 |
Set the cube source length along the x-axis. |
1270 |
\end{methoddesc} |
1271 |
|
1272 |
\begin{methoddesc}[CubeSource]{setYLength}{length} |
1273 |
Set the cube source length along the y-axis. |
1274 |
\end{methoddesc} |
1275 |
|
1276 |
\begin{methoddesc}[CubeSource]{setZLength}{length} |
1277 |
Set the cube source length along the z-axis. |
1278 |
\end{methoddesc} |
1279 |
|
1280 |
\subsubsection{\Rotation class} |
1281 |
Class that sweeps 2D data around the z-axis to create a 3D looking effect. \\ |
1282 |
|
1283 |
The following are some of the methods available: |
1284 |
|
1285 |
\begin{methoddesc}[Rotation]{setResolution}{resolution} |
1286 |
Set the resolution of the sweep for the rotation, which controls the |
1287 |
number of intermediate points. |
1288 |
\end{methoddesc} |
1289 |
|
1290 |
\begin{methoddesc}[Rotation]{setAngle}{angle} |
1291 |
Set the angle of rotation. |
1292 |
\end{methoddesc} |
1293 |
|
1294 |
|
1295 |
% ############################################################################# |
1296 |
|
1297 |
|
1298 |
\section{More Examples} |
1299 |
This section provides examples for some common tasks. |
1300 |
|
1301 |
\subsection{Reading a Series of Files} |
1302 |
The following script shows how to generate images from a time series using |
1303 |
two data sources. |
1304 |
|
1305 |
\begin{python} |
1306 |
""" |
1307 |
Author: John Ngui, john.ngui@uq.edu.au |
1308 |
""" |
1309 |
|
1310 |
# Import the necessary modules. |
1311 |
from esys.pyvisi import Scene, DataCollector, Contour, Camera |
1312 |
from esys.pyvisi.constant import * |
1313 |
import os |
1314 |
|
1315 |
PYVISI_EXAMPLE_MESHES_PATH = "data_meshes" |
1316 |
PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images" |
1317 |
X_SIZE = 400 |
1318 |
Y_SIZE = 300 |
1319 |
|
1320 |
SCALAR_FIELD_POINT_DATA_1 = "lava" |
1321 |
SCALAR_FIELD_POINT_DATA_2 = "talus" |
1322 |
FILE_2D = "phi_talus_lava." |
1323 |
|
1324 |
IMAGE_NAME = "seriesofreads" |
1325 |
JPG_RENDERER = Renderer.ONLINE_JPG |
1326 |
|
1327 |
# Create a Scene. |
1328 |
s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE, |
1329 |
y_size = Y_SIZE) |
1330 |
|
1331 |
# Create a DataCollector reading from an XML file. |
1332 |
dc1 = DataCollector(source = Source.XML) |
1333 |
dc1.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA_1) |
1334 |
|
1335 |
# Create a Contour. |
1336 |
mosc1 = Contour(scene = s, data_collector = dc1, |
1337 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False, |
1338 |
outline = True) |
1339 |
mosc1.generateContours(0) |
1340 |
|
1341 |
# Create a second DataCollector reading from the same XML file |
1342 |
# but specifying a different scalar field. |
1343 |
dc2 = DataCollector(source = Source.XML) |
1344 |
dc2.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA_2) |
1345 |
|
1346 |
# Create a second Contour. |
1347 |
mosc2 = Contour(scene = s, data_collector = dc2, |
1348 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False, |
1349 |
outline = True) |
1350 |
mosc2.generateContours(0) |
1351 |
|
1352 |
# Create a Camera. |
1353 |
cam1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST) |
1354 |
|
1355 |
# Read in one file after another and render the object. |
1356 |
for i in range(99, 104): |
1357 |
dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, \ |
1358 |
FILE_2D + "%04d.vtu") % i) |
1359 |
dc2.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, \ |
1360 |
FILE_2D + "%04d.vtu") % i) |
1361 |
|
1362 |
s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, \ |
1363 |
IMAGE_NAME + "%04d.jpg") % i) |
1364 |
\end{python} |
1365 |
|
1366 |
\subsection{Creating Slices of a Data Source} |
1367 |
The following script shows how to save a series of images that slice the |
1368 |
data at different points by gradually translating the cut plane. |
1369 |
|
1370 |
\begin{python} |
1371 |
""" |
1372 |
Author: John Ngui, john.ngui@uq.edu.au |
1373 |
""" |
1374 |
|
1375 |
# Import the necessary modules. |
1376 |
from esys.pyvisi import Scene, DataCollector, MapOnPlaneCut, Camera |
1377 |
from esys.pyvisi.constant import * |
1378 |
import os |
1379 |
|
1380 |
PYVISI_EXAMPLE_MESHES_PATH = "data_meshes" |
1381 |
PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images" |
1382 |
X_SIZE = 400 |
1383 |
Y_SIZE = 400 |
1384 |
|
1385 |
SCALAR_FIELD_POINT_DATA = "temperature" |
1386 |
FILE_3D = "interior_3D.xml" |
1387 |
IMAGE_NAME = "seriesofcuts" |
1388 |
JPG_RENDERER = Renderer.ONLINE_JPG |
1389 |
|
1390 |
# Create a Scene. |
1391 |
s = Scene(renderer = JPG_RENDERER, num_viewport = 1, x_size = X_SIZE, |
1392 |
y_size = Y_SIZE) |
1393 |
|
1394 |
# Create a DataCollector reading from an XML file. |
1395 |
dc1 = DataCollector(source = Source.XML) |
1396 |
dc1.setFileName(file_name = os.path.join(PYVISI_EXAMPLE_MESHES_PATH, FILE_3D)) |
1397 |
dc1.setActiveScalar(scalar = SCALAR_FIELD_POINT_DATA) |
1398 |
|
1399 |
# Create a MapOnPlaneCut. |
1400 |
mopc1 = MapOnPlaneCut(scene = s, data_collector = dc1, |
1401 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, cell_to_point = False, |
1402 |
outline = True) |
1403 |
mopc1.setPlaneToYZ(offset = 0.1) |
1404 |
|
1405 |
# Create a Camera. |
1406 |
c1 = Camera(scene = s, viewport = Viewport.SOUTH_WEST) |
1407 |
c1.isometricView() |
1408 |
|
1409 |
# Render the object with multiple cuts using a series of translations. |
1410 |
for i in range(0, 5): |
1411 |
s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, IMAGE_NAME + |
1412 |
"%02d.jpg") % i) |
1413 |
mopc1.translate(0.6,0,0) |
1414 |
\end{python} |
1415 |
|
1416 |
\subsection{Reading Data Directly from escript Objects} |
1417 |
The following script shows how to combine Pyvisi code with escript code to |
1418 |
generate visualizations on the fly. |
1419 |
|
1420 |
\begin{python} |
1421 |
""" |
1422 |
Author: Lutz Gross, l.gross@uq.edu.au |
1423 |
Author: John Ngui, john.ngui@uq.edu.au |
1424 |
""" |
1425 |
|
1426 |
# Import the necessary modules. |
1427 |
from esys.escript import * |
1428 |
from esys.escript.linearPDEs import LinearPDE |
1429 |
from esys.finley import Rectangle |
1430 |
from esys.pyvisi import Scene, DataCollector, Map, Camera |
1431 |
from esys.pyvisi.constant import * |
1432 |
import os |
1433 |
|
1434 |
PYVISI_EXAMPLE_IMAGES_PATH = "data_sample_images" |
1435 |
X_SIZE = 400 |
1436 |
Y_SIZE = 400 |
1437 |
JPG_RENDERER = Renderer.ONLINE_JPG |
1438 |
|
1439 |
#... set some parameters ... |
1440 |
xc = [0.02,0.002] |
1441 |
r = 0.001 |
1442 |
qc = 50.e6 |
1443 |
Tref = 0. |
1444 |
rhocp = 2.6e6 |
1445 |
eta = 75. |
1446 |
kappa = 240. |
1447 |
tend = 5. |
1448 |
# initialize time, time step size and counter ... |
1449 |
t=0 |
1450 |
h=0.1 |
1451 |
i=0 |
1452 |
|
1453 |
# generate domain ... |
1454 |
mydomain = Rectangle(l0=0.05, l1=0.01, n0=250, n1=50) |
1455 |
# open PDE ... |
1456 |
mypde = LinearPDE(mydomain) |
1457 |
mypde.setSymmetryOn() |
1458 |
mypde.setValue(A=kappa*kronecker(mydomain), D=rhocp/h, d=eta, y=eta*Tref) |
1459 |
# set heat source: ... |
1460 |
x = mydomain.getX() |
1461 |
qH = qc*whereNegative(length(x-xc)-r) |
1462 |
|
1463 |
# set initial temperature .... |
1464 |
T=Tref |
1465 |
|
1466 |
# Create a Scene. |
1467 |
s = Scene(renderer = JPG_RENDERER, x_size = X_SIZE, y_size = Y_SIZE) |
1468 |
|
1469 |
# Create a DataCollector reading directly from escript objects. |
1470 |
dc = DataCollector(source = Source.ESCRIPT) |
1471 |
|
1472 |
# Create a Map. |
1473 |
m = Map(scene = s, data_collector = dc, \ |
1474 |
viewport = Viewport.SOUTH_WEST, lut = Lut.COLOR, \ |
1475 |
cell_to_point = False, outline = True) |
1476 |
|
1477 |
# Create a Camera. |
1478 |
c = Camera(scene = s, viewport = Viewport.SOUTH_WEST) |
1479 |
|
1480 |
# start iteration |
1481 |
while t < 0.4: |
1482 |
i += 1 |
1483 |
t += h |
1484 |
mypde.setValue(Y=qH+rhocp/h*T) |
1485 |
T = mypde.getSolution() |
1486 |
|
1487 |
dc.setData(temp = T) |
1488 |
|
1489 |
# Render the object. |
1490 |
s.render(image_name = os.path.join(PYVISI_EXAMPLE_IMAGES_PATH, \ |
1491 |
"diffusion%02d.jpg") % i) |
1492 |
\end{python} |
1493 |
|
1494 |
\newpage |
1495 |
|
1496 |
\section{Useful Keys} |
1497 |
This section lists keyboard shortcuts available when interacting with rendered |
1498 |
objects using the Online approach. |
1499 |
|
1500 |
\begin{table}[ht] |
1501 |
\begin{center} |
1502 |
\begin{tabular}{| c | p{13cm} |} |
1503 |
\hline |
1504 |
\textbf{Key} & \textbf{Description} \\ \hline |
1505 |
Keypress 'c' / 'a' & Toggle between the camera ('c') and object ('a') mode. In |
1506 |
camera mode, mouse events affect the camera position and focal point. In |
1507 |
object mode, mouse events affect the rendered object's element (i.e. |
1508 |
cut surface map, clipped velocity field, streamline, etc) that is under the |
1509 |
mouse pointer.\\ \hline |
1510 |
Mouse button 1 & Rotate the camera around its focal point (if in camera mode) |
1511 |
or rotate the rendered object's element (if in object mode).\\ \hline |
1512 |
Mouse button 2 & Pan the camera (if in camera mode) or translate the rendered |
1513 |
object's element (if in object mode). \\ \hline |
1514 |
Mouse button 3 & Zoom the camera (if in camera mode) or scale the rendered |
1515 |
object's element (if in object mode). \\ \hline |
1516 |
Keypress 3 & Toggle the render window in and out of stereo mode. By default, |
1517 |
red-blue stereo pairs are created. \\ \hline |
1518 |
Keypress 'e' / 'q' & Exit the application if only one file is to be read, or |
1519 |
read and display the next file if multiple files are to be read. \\ \hline |
1520 |
Keypress 's' & Modify the representation of the rendered object to surfaces. |
1521 |
\\ \hline |
1522 |
Keypress 'w' & Modify the representation of the rendered object to wireframe. |
1523 |
\\ \hline |
1524 |
Keypress 'r' & Reset the position of the rendered object to the center. |
1525 |
\\ \hline |
1526 |
\end{tabular} |
1527 |
\caption{Useful keys in Online render mode} |
1528 |
\end{center} |
1529 |
\end{table} |
1530 |
|
1531 |
|
1532 |
% ############################################################################ |
1533 |
|
1534 |
|
1535 |
\newpage |
1536 |
\renewcommand{\textfraction}{0.06} % force first table to stay with the text |
1537 |
\section{Sample Output} |
1538 |
\label{SAMPLEOUTPUT SEC} |
1539 |
This section shows sample images produced with the various classes of Pyvisi. |
1540 |
The source code to produce these images is included in the Pyvisi distribution. |
1541 |
% |
1542 |
\begin{table}[hb] |
1543 |
\begin{tabular}{c c c} |
1544 |
\includegraphics[width=\thumbnailwidth]{figures/Map} & |
1545 |
\includegraphics[width=\thumbnailwidth]{figures/MapOnPlaneCut} & |
1546 |
\includegraphics[width=\thumbnailwidth]{figures/MapOnPlaneClip} \\ |
1547 |
Map & MapOnPlaneCut & MapOnPlaneClip \\ |
1548 |
\includegraphics[width=\thumbnailwidth]{figures/MapOnScalarClip} & |
1549 |
\includegraphics[width=\thumbnailwidth]{figures/MapOnScalarClipWithRotation} & |
1550 |
\includegraphics[width=\thumbnailwidth]{figures/StreamLine} \\ |
1551 |
MapOnScalarClip & MapOnScalarClipWithRotation & StreamLine \\ \\ \\
1552 |
\includegraphics[width=\thumbnailwidth]{figures/Velocity} & |
1553 |
\includegraphics[width=\thumbnailwidth]{figures/VelocityOnPlaneCut} & |
1554 |
\includegraphics[width=\thumbnailwidth]{figures/VelocityOnPlaneClip} \\ |
1555 |
Velocity & VelocityOnPlaneCut & VelocityOnPlaneClip \\ \\ \\ |
1556 |
\includegraphics[width=\thumbnailwidth]{figures/Ellipsoid} & |
1557 |
\includegraphics[width=\thumbnailwidth]{figures/EllipsoidOnPlaneCut} & |
1558 |
\includegraphics[width=\thumbnailwidth]{figures/EllipsoidOnPlaneClip} \\ |
1559 |
Ellipsoid & EllipsoidOnPlaneCut & EllipsoidOnPlaneClip \\ \\ |
1560 |
\end{tabular} |
1561 |
%\caption{Sample output} |
1562 |
\end{table} |
1563 |
% |
1564 |
\newpage |
1565 |
% |
1566 |
\begin{table}[t] |
1567 |
\begin{tabular}{c c c} |
1568 |
\includegraphics[width=\thumbnailwidth]{figures/Contour} & |
1569 |
\includegraphics[width=\thumbnailwidth]{figures/ContourOnPlaneCut} & |
1570 |
\includegraphics[width=\thumbnailwidth]{figures/ContourOnPlaneClip} \\ |
1571 |
Contour & ContourOnPlaneCut & ContourOnPlaneClip \\ \\ \\ |
1572 |
\includegraphics[width=\thumbnailwidth]{figures/Carpet} & |
1573 |
\includegraphics[width=\thumbnailwidth]{figures/Rectangle} & |
1574 |
\includegraphics[width=\thumbnailwidth]{figures/Image} \\ |
1575 |
Carpet & Rectangle & Image \\ \\ \\ \\ \\ |
1576 |
\includegraphics[width=\thumbnailwidth]{figures/Text} & |
1577 |
\includegraphics[width=\thumbnailwidth]{figures/Logo} & |
1578 |
\includegraphics[width=\thumbnailwidth]{figures/Legend} \\ |
1579 |
Text & Logo & Legend \\ \\ |
1580 |
\end{tabular} |
1581 |
%\caption{Sample Output (continued)} |
1582 |
\end{table} |
1583 |
|
1584 |
|