Blender  V3.3
COM_PlaneDistortCommonOperation.cc
Go to the documentation of this file.
1 /* SPDX-License-Identifier: GPL-2.0-or-later
2  * Copyright 2013 Blender Foundation. */
3 
#include "COM_PlaneDistortCommonOperation.h"

#include "BLI_jitter_2d.h"

#include "BKE_tracking.h"
9 
10 namespace blender::compositor {
11 
13  : motion_blur_samples_(1), motion_blur_shutter_(0.5f)
14 {
15 }
16 
17 void PlaneDistortBaseOperation::calculate_corners(const float corners[4][2],
18  bool normalized,
19  int sample)
20 {
22  MotionSample *sample_data = &samples_[sample];
23  if (normalized) {
24  for (int i = 0; i < 4; i++) {
25  sample_data->frame_space_corners[i][0] = corners[i][0] * this->get_width();
26  sample_data->frame_space_corners[i][1] = corners[i][1] * this->get_height();
27  }
28  }
29  else {
30  for (int i = 0; i < 4; i++) {
31  sample_data->frame_space_corners[i][0] = corners[i][0];
32  sample_data->frame_space_corners[i][1] = corners[i][1];
33  }
34  }
35 }
36 
37 /* ******** PlaneDistort WarpImage ******** */
38 
39 BLI_INLINE void warp_coord(float x, float y, float matrix[3][3], float uv[2], float deriv[2][2])
40 {
41  float vec[3] = {x, y, 1.0f};
42  mul_m3_v3(matrix, vec);
43  uv[0] = vec[0] / vec[2];
44  uv[1] = vec[1] / vec[2];
45 
46  /* Offset so that pixel center corresponds to a (0.5, 0.5), which helps keeping transformed
47  * image sharp. */
48  uv[0] += 0.5f;
49  uv[1] += 0.5f;
50 
51  deriv[0][0] = (matrix[0][0] - matrix[0][2] * uv[0]) / vec[2];
52  deriv[1][0] = (matrix[0][1] - matrix[0][2] * uv[1]) / vec[2];
53  deriv[0][1] = (matrix[1][0] - matrix[1][2] * uv[0]) / vec[2];
54  deriv[1][1] = (matrix[1][1] - matrix[1][2] * uv[1]) / vec[2];
55 }
56 
58 {
61  pixel_reader_ = nullptr;
62  flags_.complex = true;
63 }
64 
66  bool normalized,
67  int sample)
68 {
70 
72  const int width = image->get_width();
73  const int height = image->get_height();
74 
75  MotionSample *sample_data = &samples_[sample];
76 
77  /* If the image which is to be warped empty assume unit transform and don't attempt to calculate
78  * actual homography (otherwise homography solver will attempt to deal with singularity). */
79  if (width == 0 || height == 0) {
80  unit_m3(sample_data->perspective_matrix);
81  return;
82  }
83 
84  float frame_corners[4][2] = {
85  {0.0f, 0.0f}, {(float)width, 0.0f}, {(float)width, (float)height}, {0.0f, (float)height}};
87  sample_data->frame_space_corners, frame_corners, sample_data->perspective_matrix);
88 }
89 
91 {
93 }
94 
96 {
97  pixel_reader_ = nullptr;
98 }
99 
101  float x,
102  float y,
103  PixelSampler /*sampler*/)
104 {
105  float uv[2];
106  float deriv[2][2];
107  if (motion_blur_samples_ == 1) {
108  warp_coord(x, y, samples_[0].perspective_matrix, uv, deriv);
109  pixel_reader_->read_filtered(output, uv[0], uv[1], deriv[0], deriv[1]);
110  }
111  else {
112  zero_v4(output);
113  for (int sample = 0; sample < motion_blur_samples_; sample++) {
114  float color[4];
115  warp_coord(x, y, samples_[sample].perspective_matrix, uv, deriv);
116  pixel_reader_->read_filtered(color, uv[0], uv[1], deriv[0], deriv[1]);
118  }
119  mul_v4_fl(output, 1.0f / (float)motion_blur_samples_);
120  }
121 }
122 
124  const rcti &area,
126 {
127  const MemoryBuffer *input_img = inputs[0];
128  float uv[2];
129  float deriv[2][2];
130  BuffersIterator<float> it = output->iterate_with({}, area);
131  if (motion_blur_samples_ == 1) {
132  for (; !it.is_end(); ++it) {
133  warp_coord(it.x, it.y, samples_[0].perspective_matrix, uv, deriv);
134  input_img->read_elem_filtered(uv[0], uv[1], deriv[0], deriv[1], it.out);
135  }
136  }
137  else {
138  for (; !it.is_end(); ++it) {
139  zero_v4(it.out);
140  for (const int sample : IndexRange(motion_blur_samples_)) {
141  float color[4];
142  warp_coord(it.x, it.y, samples_[sample].perspective_matrix, uv, deriv);
143  input_img->read_elem_filtered(uv[0], uv[1], deriv[0], deriv[1], color);
144  add_v4_v4(it.out, color);
145  }
146  mul_v4_fl(it.out, 1.0f / (float)motion_blur_samples_);
147  }
148  }
149 }
150 
152  rcti *input, ReadBufferOperation *read_operation, rcti *output)
153 {
154  float min[2], max[2];
155  INIT_MINMAX2(min, max);
156 
157  for (int sample = 0; sample < motion_blur_samples_; sample++) {
158  float UVs[4][2];
159  float deriv[2][2];
160  MotionSample *sample_data = &samples_[sample];
161  /* TODO(sergey): figure out proper way to do this. */
162  warp_coord(input->xmin - 2, input->ymin - 2, sample_data->perspective_matrix, UVs[0], deriv);
163  warp_coord(input->xmax + 2, input->ymin - 2, sample_data->perspective_matrix, UVs[1], deriv);
164  warp_coord(input->xmax + 2, input->ymax + 2, sample_data->perspective_matrix, UVs[2], deriv);
165  warp_coord(input->xmin - 2, input->ymax + 2, sample_data->perspective_matrix, UVs[3], deriv);
166  for (int i = 0; i < 4; i++) {
167  minmax_v2v2_v2(min, max, UVs[i]);
168  }
169  }
170 
171  rcti new_input;
172 
173  new_input.xmin = min[0] - 1;
174  new_input.ymin = min[1] - 1;
175  new_input.xmax = max[0] + 1;
176  new_input.ymax = max[1] + 1;
177 
178  return NodeOperation::determine_depending_area_of_interest(&new_input, read_operation, output);
179 }
180 
182  const rcti &output_area,
183  rcti &r_input_area)
184 {
185  if (input_idx != 0) {
186  r_input_area = output_area;
187  return;
188  }
189 
190  /* TODO: figure out the area needed for warping and EWA filtering. */
191  r_input_area = get_input_operation(0)->get_canvas();
192 
193 /* Old implementation but resulting coordinates are way out of input operation bounds and in some
194  * cases the area result may incorrectly cause cropping. */
195 #if 0
196  float min[2], max[2];
197  INIT_MINMAX2(min, max);
198  for (int sample = 0; sample < motion_blur_samples_; sample++) {
199  float UVs[4][2];
200  float deriv[2][2];
201  MotionSample *sample_data = &samples_[sample];
202  /* TODO(sergey): figure out proper way to do this. */
203  warp_coord(
204  output_area.xmin - 2, output_area.ymin - 2, sample_data->perspective_matrix, UVs[0], deriv);
205  warp_coord(
206  output_area.xmax + 2, output_area.ymin - 2, sample_data->perspective_matrix, UVs[1], deriv);
207  warp_coord(
208  output_area.xmax + 2, output_area.ymax + 2, sample_data->perspective_matrix, UVs[2], deriv);
209  warp_coord(
210  output_area.xmin - 2, output_area.ymax + 2, sample_data->perspective_matrix, UVs[3], deriv);
211  for (int i = 0; i < 4; i++) {
212  minmax_v2v2_v2(min, max, UVs[i]);
213  }
214  }
215 
216  r_input_area.xmin = min[0] - 1;
217  r_input_area.ymin = min[1] - 1;
218  r_input_area.xmax = max[0] + 1;
219  r_input_area.ymax = max[1] + 1;
220 #endif
221 }
222 
223 /* ******** PlaneDistort Mask ******** */
224 
226 {
228 
229  /* Currently hardcoded to 8 samples. */
230  osa_ = 8;
231 }
232 
234 {
236 }
237 
239  float x,
240  float y,
241  PixelSampler /*sampler*/)
242 {
243  float point[2];
244  int inside_counter = 0;
245  if (motion_blur_samples_ == 1) {
246  MotionSample *sample_data = &samples_[0];
247  for (int sample = 0; sample < osa_; sample++) {
248  point[0] = x + jitter_[sample][0];
249  point[1] = y + jitter_[sample][1];
251  sample_data->frame_space_corners[0],
252  sample_data->frame_space_corners[1],
253  sample_data->frame_space_corners[2]) ||
255  sample_data->frame_space_corners[0],
256  sample_data->frame_space_corners[2],
257  sample_data->frame_space_corners[3])) {
258  inside_counter++;
259  }
260  }
261  output[0] = (float)inside_counter / osa_;
262  }
263  else {
264  for (int motion_sample = 0; motion_sample < motion_blur_samples_; motion_sample++) {
265  MotionSample *sample_data = &samples_[motion_sample];
266  for (int osa_sample = 0; osa_sample < osa_; osa_sample++) {
267  point[0] = x + jitter_[osa_sample][0];
268  point[1] = y + jitter_[osa_sample][1];
270  sample_data->frame_space_corners[0],
271  sample_data->frame_space_corners[1],
272  sample_data->frame_space_corners[2]) ||
274  sample_data->frame_space_corners[0],
275  sample_data->frame_space_corners[2],
276  sample_data->frame_space_corners[3])) {
277  inside_counter++;
278  }
279  }
280  }
281  output[0] = (float)inside_counter / (osa_ * motion_blur_samples_);
282  }
283 }
284 
286  const rcti &area,
288 {
289  for (BuffersIterator<float> it = output->iterate_with({}, area); !it.is_end(); ++it) {
290  int inside_count = 0;
291  for (const int motion_sample : IndexRange(motion_blur_samples_)) {
292  MotionSample &sample = samples_[motion_sample];
293  inside_count += get_jitter_samples_inside_count(it.x, it.y, sample);
294  }
295  *it.out = (float)inside_count / (osa_ * motion_blur_samples_);
296  }
297 }
298 
299 int PlaneDistortMaskOperation::get_jitter_samples_inside_count(int x,
300  int y,
301  MotionSample &sample_data)
302 {
303  float point[2];
304  int inside_count = 0;
305  for (int sample = 0; sample < osa_; sample++) {
306  point[0] = x + jitter_[sample][0];
307  point[1] = y + jitter_[sample][1];
309  sample_data.frame_space_corners[0],
310  sample_data.frame_space_corners[1],
311  sample_data.frame_space_corners[2]) ||
313  sample_data.frame_space_corners[0],
314  sample_data.frame_space_corners[2],
315  sample_data.frame_space_corners[3])) {
316  inside_count++;
317  }
318  }
319  return inside_count;
320 }
321 
322 } // namespace blender::compositor
typedef float(TangentPoint)[2]
void BKE_tracking_homography_between_two_quads(float reference_corners[4][2], float corners[4][2], float H[3][3])
#define BLI_assert(a)
Definition: BLI_assert.h:46
#define BLI_INLINE
void BLI_jitter_init(float(*jitarr)[2], int num)
Definition: jitter_2d.c:126
int isect_point_tri_v2(const float pt[2], const float v1[2], const float v2[2], const float v3[2])
Definition: math_geom.c:1516
void mul_m3_v3(const float M[3][3], float r[3])
Definition: math_matrix.c:926
void unit_m3(float m[3][3])
Definition: math_matrix.c:40
MINLINE void mul_v4_fl(float r[4], float f)
MINLINE void add_v4_v4(float r[4], const float a[4])
void minmax_v2v2_v2(float min[2], float max[2], const float vec[2])
Definition: math_vector.c:890
MINLINE void zero_v4(float r[4])
#define INIT_MINMAX2(min, max)
#define UNUSED(x)
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei height
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint y
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum const void *lists _GL_VOID_RET _GL_VOID const GLdouble *equation _GL_VOID_RET _GL_VOID GLdouble GLdouble blue _GL_VOID_RET _GL_VOID GLfloat GLfloat blue _GL_VOID_RET _GL_VOID GLint GLint blue _GL_VOID_RET _GL_VOID GLshort GLshort blue _GL_VOID_RET _GL_VOID GLubyte GLubyte blue _GL_VOID_RET _GL_VOID GLuint GLuint blue _GL_VOID_RET _GL_VOID GLushort GLushort blue _GL_VOID_RET _GL_VOID GLbyte GLbyte GLbyte alpha _GL_VOID_RET _GL_VOID GLdouble GLdouble GLdouble alpha _GL_VOID_RET _GL_VOID GLfloat GLfloat GLfloat alpha _GL_VOID_RET _GL_VOID GLint GLint GLint alpha _GL_VOID_RET _GL_VOID GLshort GLshort GLshort alpha _GL_VOID_RET _GL_VOID GLubyte GLubyte GLubyte alpha _GL_VOID_RET _GL_VOID GLuint GLuint GLuint alpha _GL_VOID_RET _GL_VOID GLushort GLushort GLushort alpha _GL_VOID_RET _GL_VOID GLenum mode _GL_VOID_RET _GL_VOID GLint GLsizei width
in reality light always falls off quadratically Particle Retrieve the data of the particle that spawned the object for example to give variation to multiple instances of an object Point Retrieve information about points in a point cloud Retrieve the edges of an object as it appears to Cycles topology will always appear triangulated Convert a blackbody temperature to an RGB value Normal Generate a perturbed normal from an RGB normal map image Typically used for faking highly detailed surfaces Generate an OSL shader from a file or text data block Image Sample an image file as a texture Sky Generate a procedural sky texture Noise Generate fractal Perlin noise Wave Generate procedural bands or rings with noise Voronoi Generate Worley noise based on the distance to random points Typically used to generate textures such as or biological cells Brick Generate a procedural texture producing bricks Texture Retrieve multiple types of texture coordinates nTypically used as inputs for texture nodes Vector Convert a point
Group Output data from inside of a node group A color picker Mix two input colors RGB to Convert a color s luminance to a grayscale value Generate a normal vector and a dot product Bright Control the brightness and contrast of the input color Vector Map an input vectors to used to fine tune the interpolation of the input Camera Retrieve information about the camera and how it relates to the current shading point s position Clamp a value between a minimum and a maximum Vector Perform vector math operation Invert a color
SIMD_FORCE_INLINE btVector3 normalized() const
Return a normalized version of this vector.
a MemoryBuffer contains access to the data of a chunk
void read_elem_filtered(float x, float y, float dx[2], float dy[2], float *out) const
NodeOperation contains calculation logic.
void add_output_socket(DataType datatype)
void read_filtered(float result[4], float x, float y, float dx[2], float dy[2])
SocketReader * get_input_socket_reader(unsigned int index)
NodeOperation * get_input_operation(int index)
virtual bool determine_depending_area_of_interest(rcti *input, ReadBufferOperation *read_operation, rcti *output)
void add_input_socket(DataType datatype, ResizeMode resize_mode=ResizeMode::Center)
virtual void calculate_corners(const float corners[4][2], bool normalized, int sample)
void execute_pixel_sampled(float output[4], float x, float y, PixelSampler sampler) override
calculate a single pixel
void update_memory_buffer_partial(MemoryBuffer *output, const rcti &area, Span< MemoryBuffer * > inputs) override
void execute_pixel_sampled(float output[4], float x, float y, PixelSampler sampler) override
calculate a single pixel
void update_memory_buffer_partial(MemoryBuffer *output, const rcti &area, Span< MemoryBuffer * > inputs) override
void calculate_corners(const float corners[4][2], bool normalized, int sample) override
void get_area_of_interest(int input_idx, const rcti &output_area, rcti &r_input_area) override
Get input operation area being read by this operation on rendering given output area.
bool determine_depending_area_of_interest(rcti *input, ReadBufferOperation *read_operation, rcti *output) override
depth_tx normal_tx diffuse_light_tx specular_light_tx volume_light_tx environment_tx ambient_occlusion_tx aov_value_tx in_weight_img image(1, GPU_R32F, Qualifier::WRITE, ImageType::FLOAT_2D_ARRAY, "out_weight_img") .image(3
ccl_global KernelShaderEvalInput ccl_global float * output
ccl_global KernelShaderEvalInput * input
static void area(int d1, int d2, int e1, int e2, float weights[2])
typename BuffersIteratorBuilder< T >::Iterator BuffersIterator
static void sample(SocketReader *reader, int x, int y, float color[4])
BLI_INLINE void warp_coord(float x, float y, float matrix[3][3], float uv[2], float deriv[2][2])
static bNodeSocketTemplate inputs[]
#define min(a, b)
Definition: sort.c:35
int ymin
Definition: DNA_vec_types.h:64
int ymax
Definition: DNA_vec_types.h:64
int xmin
Definition: DNA_vec_types.h:63
int xmax
Definition: DNA_vec_types.h:63
float max