author     Adrian Kummerlaender   2021-06-17 23:27:31 +0200
committer  Adrian Kummerlaender   2021-06-17 23:27:31 +0200
commit     b8d9e36baf889f93aff5761f49fa7eb30874d283 (patch)
tree       c8b7e67454de34e77c34fb0f326b86cec3254fd9 /lbm.org
parent     73fdd53ba37a09092d2bee9de03707aeb3cd6b2e (diff)
Reimplement camera controller
Diffstat (limited to 'lbm.org')
-rw-r--r--  lbm.org  150
1 file changed, 86 insertions(+), 64 deletions(-)
diff --git a/lbm.org b/lbm.org
index 3e934ad..c953031 100644
--- a/lbm.org
+++ b/lbm.org
@@ -135,8 +135,9 @@ to decouple program exposition from the strict confines of machine-targeted lang
LiterateLB utilizes the literate programming framework offered by [[https://emacs.org][Emacs's]] [[https://orgmode.org][Org Mode]].
-The easiest way to tangle and compile the project is to use the [[https::/nixos.org][Nix package manager]].
-On CUDA-enabled NixOS hosts the following commands are all that is needed to tangle, compile and run one of the simulation examples:
+The easiest way to tangle and compile the project is to use the [[https://nixos.org][Nix package manager]].
+On CUDA-enabled NixOS hosts the following commands are all that is needed to tangle,
+compile and run one of the simulation examples:
#+BEGIN_SRC sh :eval no
git clone https://code.kummerlaender.eu/LiterateLB
@@ -151,8 +152,8 @@ On other systems the dependencies
+ Nvidia CUDA 10.2 or later
+ SFML 2.5 or later and ImGui-SFML
+ Python with SymPy, NumPy, SciPy and Mako
-will have to be provided manually. Note that the current tangle output is included so strictly speaking compiling and testing
-the examples requires neither Emacs nor Python.
+will have to be provided manually. Note that the current tangle output is included, so strictly
+speaking compiling and testing the examples requires neither Emacs nor Python.
Note that I started developing this as a beginner in both Emacs and Org mode, so some aspects of this document
may be more clunky than necessary. Most of the complexity stems from maintaining a Python session for the
@@ -4095,11 +4096,7 @@ __device__ float2 getNormalizedScreenPos(float w, float h, float x, float y) {
);
}
-__device__ float3 getEyeRayDir(float2 screen_pos, float3 eye_pos, float3 eye_target) {
- const float3 forward = normalize(eye_target - eye_pos);
- const float3 right = normalize(cross(make_float3(0.f, 0.f, -1.f), forward));
- const float3 up = normalize(cross(forward, right));
-
+__device__ float3 getEyeRayDir(float2 screen_pos, float3 forward, float3 right, float3 up) {
return normalize(screen_pos.x*right + screen_pos.y*up + 4*forward);
}
#+END_SRC
@@ -4115,9 +4112,9 @@ absorption up to the current point. This way samples that are closer to the view
of the same magnitude that are farther away, which of course is also a basic version of how a real volume looks, hence
the name.
-We are now going to implement a speed optimized version of this volume rendering equation to determine
+We are now going to implement an optimized version of this volume rendering equation to determine
the color of a pixel in location =screen_pos= given a normalized ray direction =ray_dir= and an eye position
-=eye_pos= as the starting point.
+=camera_position= as the starting point.
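+
+Before turning to the optimized version, the following is a minimal sketch of the
+front-to-back compositing rule the renderer is built around. This fragment is purely
+illustrative and not part of the tangle; the =sampleColor= and =sampleOpacity= lookup
+helpers are hypothetical stand-ins and =sample_pos=, =ray_dir=, =n_samples= and
+=config.delta= are assumed from the surrounding renderer:
+
+#+BEGIN_SRC cpp :eval no :main no
+float3 color = make_float3(0.f); // emission accumulated so far
+float  a     = 0.f;              // accumulated opacity, i.e. 1 - transmittance
+for (unsigned i=0; i < n_samples; ++i) {
+  const float3 c = sampleColor(sample_pos);   // hypothetical emission lookup
+  const float  s = sampleOpacity(sample_pos); // hypothetical absorption lookup
+  color += (1 - a) * s * c; // samples closer to the viewer carry more weight
+  a     += (1 - a) * s;     // the remaining transmittance shrinks monotonically
+  sample_pos += config.delta * ray_dir;       // advance along the view ray
+}
+#+END_SRC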
#+NAME: volumetric-render-config
#+BEGIN_SRC cpp
@@ -4132,8 +4129,10 @@ struct VolumetricRenderConfig {
float brightness = 1;
float3 background = make_float3(22.f / 255.f);
- float3 eye_pos;
- float3 eye_dir;
+ float3 camera_position;
+ float3 camera_forward;
+ float3 camera_right;
+ float3 camera_up;
cudaSurfaceObject_t canvas;
uint2 canvas_size;
@@ -4155,13 +4154,13 @@ float a = 0;
float tmin = 0;
float tmax = 4000;
-if (aabb(config.eye_pos, ray_dir, config.cuboid, tmin, tmax)) {
+if (aabb(config.camera_position, ray_dir, config.cuboid, tmin, tmax)) {
float volume_dist = tmax - tmin;
- float3 geometry_pos = config.eye_pos + tmin*ray_dir;
+ float3 geometry_pos = config.camera_position + tmin*ray_dir;
float geometry_dist = approximateDistance(geometry, geometry_pos, ray_dir, 0, volume_dist);
geometry_pos += geometry_dist * ray_dir;
- float jitter = config.align_slices_to_view * (floor(fabs(dot(config.eye_dir, tmin*ray_dir)) / config.delta) * config.delta - tmin)
+ float jitter = config.align_slices_to_view * (floor(fabs(dot(config.camera_forward, tmin*ray_dir)) / config.delta) * config.delta - tmin)
+ config.apply_noise * config.delta * noiseFromTexture(config.noise, threadIdx.x, threadIdx.y);
tmin += jitter;
@@ -4169,7 +4168,7 @@ if (aabb(config.eye_pos, ray_dir, config.cuboid, tmin, tmax)) {
geometry_dist -= jitter;
if (volume_dist > config.delta) {
- float3 sample_pos = config.eye_pos + tmin * ray_dir;
+ float3 sample_pos = config.camera_position + tmin * ray_dir;
unsigned n_samples = floor(geometry_dist / config.delta);
for (unsigned i=0; i < n_samples; ++i) {
<<volumetric-renderer-body>>
@@ -4243,7 +4242,7 @@ __global__ void raymarch(
}
const float2 screen_pos = getNormalizedScreenPos(config.canvas_size.x, config.canvas_size.y, x, y);
- const float3 ray_dir = getEyeRayDir(screen_pos, config.eye_pos, config.eye_pos + config.eye_dir);
+ const float3 ray_dir = getEyeRayDir(screen_pos, config.camera_forward, config.camera_right, config.camera_up);
<<volumetric-renderer>>
@@ -4487,35 +4486,52 @@ print(', '.join([ str(pdf(x).evalf()) for x in range(-3,4) ]))
: 0.00443184841193801, 0.0539909665131881, 0.241970724519143, 0.398942280401433, 0.241970724519143, 0.0539909665131881, 0.00443184841193801
*** Camera Controller
-A convenient way of interactively controlling the view that is provided by a pinhole camera is to implement an /Arcball/ camera.
-i.e. a camera that can be rotated around a central target point using the mouse.
+A convenient way of interactively controlling the view parameters of a pinhole camera is to implement
+an orbiting camera. This type of camera can be rotated around a central target point using the mouse.
+
+While translation is easily realized by adding a shift vector to the current camera position, rotation
+is more complex. We are going to accumulate all rotation operations in a single quaternion variable
+=_rotation=. This 4D vector is then converted into a rotation matrix for mapping the screen space
+pinhole parameters into world space.
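+
+As a brief illustration of this conversion (a hypothetical standalone snippet that is
+not part of the tangle), GLM's =glm::mat3_cast= performs exactly this quaternion to
+matrix mapping:
+
+#+BEGIN_SRC cpp :eval no :main no
+#include <glm/glm.hpp>
+#include <glm/gtc/quaternion.hpp>
+
+// quarter turn around the z axis expressed as a unit quaternion
+glm::quat q = glm::angleAxis(glm::radians(90.f), glm::vec3(0, 0, 1));
+// the equivalent 3x3 rotation matrix
+glm::mat3 m = glm::mat3_cast(q);
+// rotating the x axis yields (up to rounding) the y axis
+glm::vec3 v = m * glm::vec3(1, 0, 0);
+#+END_SRC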
-One straight forward way of parametrizing this kind of camera is to use spherical coordinates with respect to a target point:
-#+NAME: arcball-camera-spherical-coordinates
+#+NAME: camera-spherical-coordinates
#+BEGIN_SRC cpp
-float _distance;
-float _phi;
-float _psi;
-float3 _target;
+glm::quat _rotation;
+glm::mat3 _matrix;
#+END_SRC
-While these are the parameters that the user can change by dragging the mouse, our pinhole camera uses a eye position and direction.
-We will automatically update these values each time the spherical coordinates change.
-#+NAME: arcball-camera-cartesian-coordinates
+Using the rotation matrix we can compute the pinhole camera parameters. Since they only
+change when a rotation is performed, we store them in a set of variables.
+
+#+NAME: camera-cartesian-coordinates
#+BEGIN_SRC cpp
-float3 _eye;
-float3 _direction;
+float3 _target;
+float3 _position;
+float3 _forward;
+float3 _right;
+float3 _up;
+float _distance;
#+END_SRC
-This update operation is simply the projection of our spherical coordinates into the space of cartesian coordinates.
-#+NAME: arcball-camera-projection
+The =update= function projects the screen space forward, right and up vectors into world space
+by simply multiplying them by the rotation matrix.
+
+#+NAME: camera-projection
#+BEGIN_SRC cpp :eval no :main no
-_eye = _target + make_float3(_distance*sin(_psi)*cos(_phi), _distance*sin(_psi)*sin(_phi), _distance*cos(_psi));
-_direction = normalize(_target - _eye);
+glm::vec3 position = _matrix * glm::vec3(0, _distance, 0);
+_position = _target + make_float3(position[0], position[1], position[2]);
+_forward = normalize(_target - _position);
+
+// the 3x3 rotation matrix acts on 3D vectors, so the screen space
+// axes are mapped into world space as plain glm::vec3 values
+glm::vec3 right = _matrix * glm::vec3(-1, 0, 0);
+_right = make_float3(right[0], right[1], right[2]);
+// the screen space up axis is cross((0,1,0), (-1,0,0)) = (0,0,1)
+glm::vec3 up = _matrix * glm::cross(glm::vec3(0, 1, 0), glm::vec3(-1, 0, 0));
+_up = make_float3(up[0], up[1], up[2]);
#+END_SRC
-Finally we need to handle user input events to change the spherical coordinates.
-#+NAME: arcball-camera-event-handling
+Finally we need to handle user input events to change the translation vector
+and rotation state.
+
+#+NAME: camera-event-handling
#+BEGIN_SRC cpp :eval no :main no
switch (event.type) {
case sf::Event::MouseWheelMoved:
@@ -4542,41 +4558,35 @@ case sf::Event::MouseMoved:
float2 mouse = make_float2(event.mouseMove.x, event.mouseMove.y);
float2 delta = mouse - _lastMouse;
_lastMouse = mouse;
- _phi += 0.4*delta.x * 2*M_PI/360;
- if (delta.y > 0 && _psi <= M_PI-2*M_PI/60) {
- _psi += 0.4*delta.y * M_PI/180;
- } else if (delta.y < 0 && _psi >= 2*M_PI/60) {
- _psi += 0.4*delta.y * M_PI/180;
- }
+
+// express the mouse drag as Euler deltas about the camera-local x and z axes
+glm::quat rotation_z = glm::conjugate(_rotation) * glm::vec3(0,0,0.01*delta.x);
+glm::quat rotation_x = glm::conjugate(_rotation) * glm::vec3(0.01*delta.y,0,0);
+
+// accumulate all rotations in the quaternion state and cache its matrix form
+_rotation = glm::normalize(_rotation * glm::cross(rotation_x, rotation_z));
+_matrix = glm::mat3_cast(_rotation);
}
if (_moving) {
float2 mouse = make_float2(event.mouseMove.x, event.mouseMove.y);
float2 delta = mouse - _lastMouse;
_lastMouse = mouse;
- float3 forward = normalize(_target - _eye);
- float3 right = normalize(cross(make_float3(0.f, 0.f, -1.f), forward));
- float3 up = cross(right, forward);
- _target += 0.4*right*delta.x - 0.4*up*delta.y;
+ _target += 0.4*_right*delta.x + 0.4*_up*delta.y;
}
break;
}
#+END_SRC
-#+NAME: arcball-camera
+#+NAME: camera
#+BEGIN_SRC cpp
class Camera {
private:
- <<arcball-camera-spherical-coordinates>>
- <<arcball-camera-cartesian-coordinates>>
+ <<camera-spherical-coordinates>>
+ <<camera-cartesian-coordinates>>
bool _dragging;
bool _moving;
float2 _lastMouse;
public:
- Camera(float3 target, float distance, float phi, float psi):
+  Camera(float3 target, float distance):
+    _rotation(1, 0, 0, 0), // identity quaternion (w, x, y, z)
+    _matrix(1.0f),         // identity rotation matrix
_distance(distance),
- _phi(phi),
- _psi(psi),
_target(target),
_dragging(false),
_moving(false) {
@@ -4584,20 +4594,25 @@ public:
}
void update() {
- <<arcball-camera-projection>>
+ <<camera-projection>>
}
void handle(sf::Event& event) {
- <<arcball-camera-event-handling>>
+ <<camera-event-handling>>
update();
}
- float3 getDirection() const {
- return _direction;
+ float3 getPosition() const {
+ return _position;
}
-
- float3 getEyePosition() const {
- return _eye;
+ float3 getForward() const {
+ return _forward;
+ }
+ float3 getRight() const {
+ return _right;
+ }
+ float3 getUp() const {
+ return _up;
}
};
#+END_SRC
@@ -4606,7 +4621,12 @@ public:
#include <cuda-samples/Common/helper_math.h>
#include "SFML/Window/Event.hpp"
-<<arcball-camera>>
+#include <glm/glm.hpp>
+#include <glm/gtc/quaternion.hpp>
+#include <glm/gtx/quaternion.hpp>
+#include <glm/common.hpp>
+
+<<camera>>
#+END_SRC
*** Samplers
@@ -4738,8 +4758,10 @@ Any input events that are not captured by the UI framework are used to control c
#+NAME: volumetric-example-handle-events
#+BEGIN_SRC cpp
_camera.handle(event);
-_config.eye_pos = _camera.getEyePosition();
-_config.eye_dir = _camera.getDirection();
+_config.camera_position = _camera.getPosition();
+_config.camera_forward = _camera.getForward();
+_config.camera_right = _camera.getRight();
+_config.camera_up = _camera.getUp();
_config.canvas_size = make_uint2(this->getRenderView().width, this->getRenderView().height);
#+END_SRC
@@ -4770,7 +4792,7 @@ int _samples_per_second = 30;
public:
VolumetricExample(descriptor::CuboidD<3> cuboid):
RenderWindow("LiterateLB"),
- _camera(make_float3(cuboid.nX/2,cuboid.nY/2,cuboid.nZ/2), cuboid.nX, M_PI/2, M_PI/2),
+ _camera(make_float3(cuboid.nX/2,cuboid.nY/2,cuboid.nZ/2), cuboid.nX),
_config(cuboid),
_palette(_config.palette),
_noise(_config.noise)