image_drawing_mode.hh
/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2021 Blender Foundation. */

#pragma once

#include "BKE_image_partial_update.hh"

#include "IMB_imbuf_types.h"

#include "BLI_float4x4.hh"
#include "BLI_math_vec_types.hh"

#include "image_batches.hh"
#include "image_private.hh"

namespace blender::draw::image_engine {

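/* Tolerance used when comparing UV clipping bounds; differences smaller than this do not
 * mark a texture as dirty. */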
constexpr float EPSILON_UV_BOUNDS = 0.00001f;

/** Screen space method using a single texture spanning the whole screen. */
struct OneTextureMethod {
  IMAGE_InstanceData *instance_data;

  OneTextureMethod(IMAGE_InstanceData *instance_data) : instance_data(instance_data)
  {
  }

  /** Update the texture slot uv and screen space bounds. */
  void update_screen_space_bounds(const ARegion *region)
  {
    /* Create a single texture that covers the visible screen space. */
    BLI_rctf_init(
        &instance_data->texture_infos[0].clipping_bounds, 0, region->winx, 0, region->winy);
    instance_data->texture_infos[0].visible = true;

    /* Mark the other textures as invalid. */
    for (int i = 1; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
      BLI_rctf_init_minmax(&instance_data->texture_infos[i].clipping_bounds);
      instance_data->texture_infos[i].visible = false;
    }
  }

  void update_screen_uv_bounds()
  {
    for (int i = 0; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
      update_screen_uv_bounds(instance_data->texture_infos[i]);
    }
  }

  void update_screen_uv_bounds(TextureInfo &info)
  {
    /* Although this works, computing an inverted matrix adds precision issues and leads to
     * tearing artifacts. This should be modified to use the scaling and transformation from the
     * non-inverted matrix. */
    float4x4 mat(instance_data->ss_to_texture);
    float4x4 mat_inv = mat.inverted();
    float3 min_uv = mat_inv * float3(0.0f, 0.0f, 0.0f);
    float3 max_uv = mat_inv * float3(1.0f, 1.0f, 0.0f);
    rctf new_clipping_bounds;
    BLI_rctf_init(&new_clipping_bounds, min_uv[0], max_uv[0], min_uv[1], max_uv[1]);

    if (!BLI_rctf_compare(&info.clipping_uv_bounds, &new_clipping_bounds, EPSILON_UV_BOUNDS)) {
      info.clipping_uv_bounds = new_clipping_bounds;
      info.dirty = true;
    }
  }
};

using namespace blender::bke::image;

template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractDrawingMode {
 private:
  DRWPass *create_image_pass() const
  {
    DRWState state = static_cast<DRWState>(DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_ALWAYS |
                                           DRW_STATE_BLEND_ALPHA_PREMUL);
    return DRW_pass_create("Image", state);
  }

  DRWPass *create_depth_pass() const
  {
    /* Depth is needed for background overlay rendering. Near depth is used for the
     * transparency checker and far depth is used for indicating the image size. */
    DRWState state = static_cast<DRWState>(DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL);
    return DRW_pass_create("Depth", state);
  }

  void add_shgroups(const IMAGE_InstanceData *instance_data) const
  {
    const ShaderParameters &sh_params = instance_data->sh_params;
    GPUShader *shader = IMAGE_shader_image_get();
    DefaultTextureList *dtxl = DRW_viewport_texture_list_get();

    DRWShadingGroup *shgrp = DRW_shgroup_create(shader, instance_data->passes.image_pass);
    DRW_shgroup_uniform_vec2_copy(shgrp, "farNearDistances", sh_params.far_near);
    DRW_shgroup_uniform_vec4_copy(shgrp, "shuffle", sh_params.shuffle);
    DRW_shgroup_uniform_int_copy(shgrp, "drawFlags", sh_params.flags);
    DRW_shgroup_uniform_bool_copy(shgrp, "imgPremultiplied", sh_params.use_premul_alpha);
    DRW_shgroup_uniform_texture(shgrp, "depth_texture", dtxl->depth);
    float image_mat[4][4];
    unit_m4(image_mat);
    for (int i = 0; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
      const TextureInfo &info = instance_data->texture_infos[i];
      if (!info.visible) {
        continue;
      }

      DRWShadingGroup *shgrp_sub = DRW_shgroup_create_sub(shgrp);
      DRW_shgroup_uniform_texture_ex(shgrp_sub, "imageTexture", info.texture, GPU_SAMPLER_DEFAULT);
      DRW_shgroup_call_obmat(shgrp_sub, info.batch, image_mat);
    }
  }

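  /**
   * Add a shading group to the depth pass for every image tile that has a buffer, so the
   * depth buffer marks the screen area covered by actual image data.
   */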
  void add_depth_shgroups(IMAGE_InstanceData &instance_data,
                          Image *image,
                          ImageUser *image_user) const
  {
    GPUShader *shader = IMAGE_shader_depth_get();
    DRWShadingGroup *shgrp = DRW_shgroup_create(shader, instance_data.passes.depth_pass);

    float image_mat[4][4];
    unit_m4(image_mat);

    ImageUser tile_user = {0};
    if (image_user) {
      tile_user = *image_user;
    }

    for (int i = 0; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
      const TextureInfo &info = instance_data.texture_infos[i];
      if (!info.visible) {
        continue;
      }

      LISTBASE_FOREACH (ImageTile *, image_tile_ptr, &image->tiles) {
        const ImageTileWrapper image_tile(image_tile_ptr);
        const int tile_x = image_tile.get_tile_x_offset();
        const int tile_y = image_tile.get_tile_y_offset();
        tile_user.tile = image_tile.get_tile_number();

        /* NOTE: `BKE_image_has_ibuf` doesn't work as it fails for render results. That could be a
         * bug or a feature. For now we just acquire to determine if there is a texture. */
        void *lock;
        ImBuf *tile_buffer = BKE_image_acquire_ibuf(image, &tile_user, &lock);
        if (tile_buffer != nullptr) {
          instance_data.float_buffers.mark_used(tile_buffer);

          DRWShadingGroup *shsub = DRW_shgroup_create_sub(shgrp);
          float4 min_max_uv(tile_x, tile_y, tile_x + 1, tile_y + 1);
          DRW_shgroup_uniform_vec4_copy(shsub, "min_max_uv", min_max_uv);
          DRW_shgroup_call_obmat(shsub, info.batch, image_mat);
        }
        BKE_image_release_ibuf(image, tile_buffer, lock);
      }
    }
  }

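  /**
   * Update the GPU textures based on the changes detected in the image.
   *
   * Performs a partial update where possible; falls back to marking all texture slots dirty
   * (full update) when needed or when tiled/wrap-repeat drawing is active.
   */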
  void update_textures(IMAGE_InstanceData &instance_data,
                       Image *image,
                       ImageUser *image_user) const
  {
    PartialUpdateChecker<ImageTileData> checker(
        image, image_user, instance_data.partial_update.user);
    PartialUpdateChecker<ImageTileData>::CollectResult changes = checker.collect_changes();

    switch (changes.get_result_code()) {
      case ePartialUpdateCollectResult::FullUpdateNeeded:
        instance_data.mark_all_texture_slots_dirty();
        instance_data.float_buffers.clear();
        break;
      case ePartialUpdateCollectResult::NoChangesDetected:
        break;
      case ePartialUpdateCollectResult::PartialChangesDetected:
        /* Partial update is not supported when wrap-repeat (tile drawing) is enabled. */
        if (instance_data.flags.do_tile_drawing) {
          instance_data.float_buffers.clear();
          instance_data.mark_all_texture_slots_dirty();
        }
        else {
          do_partial_update(changes, instance_data);
        }
        break;
    }
    do_full_update_for_dirty_textures(instance_data, image_user);
  }

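  /**
   * Update the cached float buffer from the byte source buffer for the changed region
   * reported by the partial-update iterator.
   */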
  void do_partial_update_float_buffer(
      ImBuf *float_buffer, PartialUpdateChecker<ImageTileData>::CollectResult &iterator) const
  {
    ImBuf *src = iterator.tile_data.tile_buffer;
    BLI_assert(float_buffer->rect_float != nullptr);
    BLI_assert(float_buffer->rect == nullptr);
    BLI_assert(src->rect_float == nullptr);
    BLI_assert(src->rect != nullptr);

    /* Calculate the overlap between the updated region and the buffer size. The partial update
     * checker always returns a tile (256x256), which can lie partially outside the buffer when
     * different resolutions are used. */
    rcti buffer_rect;
    BLI_rcti_init(&buffer_rect, 0, float_buffer->x, 0, float_buffer->y);
    rcti clipped_update_region;
    const bool has_overlap = BLI_rcti_isect(
        &buffer_rect, &iterator.changed_region.region, &clipped_update_region);
    if (!has_overlap) {
      return;
    }

    IMB_float_from_rect_ex(float_buffer, src, &clipped_update_region);
  }

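  /**
   * Iterate over the regions that changed since the last draw and update only the affected
   * parts of each visible GPU texture.
   */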
  void do_partial_update(PartialUpdateChecker<ImageTileData>::CollectResult &iterator,
                         IMAGE_InstanceData &instance_data) const
  {
    while (iterator.get_next_change() == ePartialUpdateIterResult::ChangeAvailable) {
      /* Quick exit when tile_buffer isn't available. */
      if (iterator.tile_data.tile_buffer == nullptr) {
        continue;
      }
      ImBuf *tile_buffer = ensure_float_buffer(instance_data, iterator.tile_data.tile_buffer);
      if (tile_buffer != iterator.tile_data.tile_buffer) {
        do_partial_update_float_buffer(tile_buffer, iterator);
      }

      const float tile_width = static_cast<float>(iterator.tile_data.tile_buffer->x);
      const float tile_height = static_cast<float>(iterator.tile_data.tile_buffer->y);

      for (int i = 0; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
        const TextureInfo &info = instance_data.texture_infos[i];
        /* Dirty images will receive a full update. No need to do a partial one now. */
        if (info.dirty) {
          continue;
        }
        if (!info.visible) {
          continue;
        }
        GPUTexture *texture = info.texture;
        const float texture_width = GPU_texture_width(texture);
        const float texture_height = GPU_texture_height(texture);
        /* TODO: early bound check. */
        ImageTileWrapper tile_accessor(iterator.tile_data.tile);
        float tile_offset_x = static_cast<float>(tile_accessor.get_tile_x_offset());
        float tile_offset_y = static_cast<float>(tile_accessor.get_tile_y_offset());
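        /* Convert the changed region from tile texel space into (tile-offset) UV space so it can
         * be intersected with the texture's UV clipping bounds. */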
        rcti *changed_region_in_texel_space = &iterator.changed_region.region;
        rctf changed_region_in_uv_space;
        BLI_rctf_init(&changed_region_in_uv_space,
                      static_cast<float>(changed_region_in_texel_space->xmin) /
                              static_cast<float>(iterator.tile_data.tile_buffer->x) +
                          tile_offset_x,
                      static_cast<float>(changed_region_in_texel_space->xmax) /
                              static_cast<float>(iterator.tile_data.tile_buffer->x) +
                          tile_offset_x,
                      static_cast<float>(changed_region_in_texel_space->ymin) /
                              static_cast<float>(iterator.tile_data.tile_buffer->y) +
                          tile_offset_y,
                      static_cast<float>(changed_region_in_texel_space->ymax) /
                              static_cast<float>(iterator.tile_data.tile_buffer->y) +
                          tile_offset_y);
        rctf changed_overlapping_region_in_uv_space;
        const bool region_overlap = BLI_rctf_isect(&info.clipping_uv_bounds,
                                                   &changed_region_in_uv_space,
                                                   &changed_overlapping_region_in_uv_space);
        if (!region_overlap) {
          continue;
        }
        /* Convert the overlapping region to texel space and to ss_pixel space.
         * TODO: first convert to ss_pixel space as integer-based, and from there go back to texel
         * space. But perhaps this isn't needed and we could use an extraction offset somehow. */
        rcti gpu_texture_region_to_update;
        BLI_rcti_init(
            &gpu_texture_region_to_update,
            floor((changed_overlapping_region_in_uv_space.xmin - info.clipping_uv_bounds.xmin) *
                  texture_width / BLI_rctf_size_x(&info.clipping_uv_bounds)),
            floor((changed_overlapping_region_in_uv_space.xmax - info.clipping_uv_bounds.xmin) *
                  texture_width / BLI_rctf_size_x(&info.clipping_uv_bounds)),
            ceil((changed_overlapping_region_in_uv_space.ymin - info.clipping_uv_bounds.ymin) *
                 texture_height / BLI_rctf_size_y(&info.clipping_uv_bounds)),
            ceil((changed_overlapping_region_in_uv_space.ymax - info.clipping_uv_bounds.ymin) *
                 texture_height / BLI_rctf_size_y(&info.clipping_uv_bounds)));

        rcti tile_region_to_extract;
        BLI_rcti_init(
            &tile_region_to_extract,
            floor((changed_overlapping_region_in_uv_space.xmin - tile_offset_x) * tile_width),
            floor((changed_overlapping_region_in_uv_space.xmax - tile_offset_x) * tile_width),
            ceil((changed_overlapping_region_in_uv_space.ymin - tile_offset_y) * tile_height),
            ceil((changed_overlapping_region_in_uv_space.ymax - tile_offset_y) * tile_height));

        /* Create an image buffer with the size of the region to update and extract + scale the
         * changed tile data into it. */
        const int texture_region_width = BLI_rcti_size_x(&gpu_texture_region_to_update);
        const int texture_region_height = BLI_rcti_size_y(&gpu_texture_region_to_update);

        ImBuf extracted_buffer;
        IMB_initImBuf(
            &extracted_buffer, texture_region_width, texture_region_height, 32, IB_rectfloat);

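        /* Sample the tile buffer at the UV location of every texel in the region to update,
         * using nearest interpolation. */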
        int offset = 0;
        for (int y = gpu_texture_region_to_update.ymin; y < gpu_texture_region_to_update.ymax;
             y++) {
          float yf = y / (float)texture_height;
          float v = info.clipping_uv_bounds.ymax * yf + info.clipping_uv_bounds.ymin * (1.0 - yf) -
                    tile_offset_y;
          for (int x = gpu_texture_region_to_update.xmin; x < gpu_texture_region_to_update.xmax;
               x++) {
            float xf = x / (float)texture_width;
            float u = info.clipping_uv_bounds.xmax * xf +
                      info.clipping_uv_bounds.xmin * (1.0 - xf) - tile_offset_x;
            nearest_interpolation_color(tile_buffer,
                                        nullptr,
                                        &extracted_buffer.rect_float[offset * 4],
                                        u * tile_buffer->x,
                                        v * tile_buffer->y);
            offset++;
          }
        }
        IMB_gpu_clamp_half_float(&extracted_buffer);

        GPU_texture_update_sub(texture,
                               GPU_DATA_FLOAT,
                               extracted_buffer.rect_float,
                               gpu_texture_region_to_update.xmin,
                               gpu_texture_region_to_update.ymin,
                               0,
                               extracted_buffer.x,
                               extracted_buffer.y,
                               0);
        imb_freerectImbuf_all(&extracted_buffer);
      }
    }
  }

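  /** Perform a full update of every texture slot that is both dirty and visible. */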
  void do_full_update_for_dirty_textures(IMAGE_InstanceData &instance_data,
                                         const ImageUser *image_user) const
  {
    for (int i = 0; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
      TextureInfo &info = instance_data.texture_infos[i];
      if (!info.dirty) {
        continue;
      }
      if (!info.visible) {
        continue;
      }
      do_full_update_gpu_texture(info, instance_data, image_user);
    }
  }

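  /**
   * Rebuild the full content of a GPU texture: composite all image tiles into a float buffer
   * that matches the texture size and upload it in a single update.
   */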
  void do_full_update_gpu_texture(TextureInfo &info,
                                  IMAGE_InstanceData &instance_data,
                                  const ImageUser *image_user) const
  {
    ImBuf texture_buffer;
    const int texture_width = GPU_texture_width(info.texture);
    const int texture_height = GPU_texture_height(info.texture);
    IMB_initImBuf(&texture_buffer, texture_width, texture_height, 0, IB_rectfloat);
    ImageUser tile_user = {0};
    if (image_user) {
      tile_user = *image_user;
    }

    void *lock;

    Image *image = instance_data.image;
    LISTBASE_FOREACH (ImageTile *, image_tile_ptr, &image->tiles) {
      const ImageTileWrapper image_tile(image_tile_ptr);
      tile_user.tile = image_tile.get_tile_number();

      ImBuf *tile_buffer = BKE_image_acquire_ibuf(image, &tile_user, &lock);
      if (tile_buffer != nullptr) {
        do_full_update_texture_slot(instance_data, info, texture_buffer, *tile_buffer, image_tile);
      }
      BKE_image_release_ibuf(image, tile_buffer, lock);
    }
    IMB_gpu_clamp_half_float(&texture_buffer);
    GPU_texture_update(info.texture, GPU_DATA_FLOAT, texture_buffer.rect_float);
    imb_freerectImbuf_all(&texture_buffer);
  }

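  /**
   * Ensure that a float representation of the given image buffer is available. Byte-only
   * buffers are converted to a cached float copy owned by `instance_data.float_buffers`;
   * buffers that already contain float data are returned unchanged.
   */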
  ImBuf *ensure_float_buffer(IMAGE_InstanceData &instance_data, ImBuf *image_buffer) const
  {
    return instance_data.float_buffers.ensure_float_buffer(image_buffer);
  }

  void do_full_update_texture_slot(IMAGE_InstanceData &instance_data,
                                   const TextureInfo &texture_info,
                                   ImBuf &texture_buffer,
                                   ImBuf &tile_buffer,
                                   const ImageTileWrapper &image_tile) const
  {
    const int texture_width = texture_buffer.x;
    const int texture_height = texture_buffer.y;
    ImBuf *float_tile_buffer = ensure_float_buffer(instance_data, &tile_buffer);

    /* IMB_transform works in a non-consistent space. This should be documented or fixed!
     * Construct a variant of the info_uv_to_texture that adds the texel space
     * transformation. */
    float uv_to_texel[4][4];
    copy_m4_m4(uv_to_texel, instance_data.ss_to_texture);
    float scale[3] = {static_cast<float>(texture_width) / static_cast<float>(tile_buffer.x),
                      static_cast<float>(texture_height) / static_cast<float>(tile_buffer.y),
                      1.0f};
    rescale_m4(uv_to_texel, scale);
    uv_to_texel[3][0] += image_tile.get_tile_x_offset() /
                         BLI_rctf_size_x(&texture_info.clipping_uv_bounds);
    uv_to_texel[3][1] += image_tile.get_tile_y_offset() /
                         BLI_rctf_size_y(&texture_info.clipping_uv_bounds);
    uv_to_texel[3][0] *= texture_width;
    uv_to_texel[3][1] *= texture_height;
    invert_m4(uv_to_texel);

    rctf crop_rect;
    rctf *crop_rect_ptr = nullptr;
    eIMBTransformMode transform_mode;
    if (instance_data.flags.do_tile_drawing) {
      transform_mode = IMB_TRANSFORM_MODE_WRAP_REPEAT;
    }
    else {
      BLI_rctf_init(&crop_rect, 0.0, tile_buffer.x, 0.0, tile_buffer.y);
      crop_rect_ptr = &crop_rect;
      transform_mode = IMB_TRANSFORM_MODE_CROP_SRC;
    }

    IMB_transform(float_tile_buffer,
                  &texture_buffer,
                  transform_mode,
                  IMB_FILTER_NEAREST,
                  uv_to_texel,
                  crop_rect_ptr);
  }

 public:
  void cache_init(IMAGE_Data *vedata) const override
  {
    IMAGE_InstanceData *instance_data = vedata->instance_data;
    instance_data->passes.image_pass = create_image_pass();
    instance_data->passes.depth_pass = create_depth_pass();
  }

  void cache_image(IMAGE_Data *vedata, Image *image, ImageUser *iuser) const override
  {
    const DRWContextState *draw_ctx = DRW_context_state_get();
    IMAGE_InstanceData *instance_data = vedata->instance_data;
    TextureMethod method(instance_data);

    instance_data->partial_update.ensure_image(image);
    instance_data->clear_dirty_flag();
    instance_data->float_buffers.reset_usage_flags();

    /* Step: Find out which screen space textures are needed to draw on the screen. Remove the
     * screen space textures that aren't needed. */
    const ARegion *region = draw_ctx->region;
    method.update_screen_space_bounds(region);
    method.update_screen_uv_bounds();

    /* Check for changes in the image user compared to the last time. */
    instance_data->update_image_usage(iuser);

    /* Step: Update the GPU textures based on the changes in the image. */
    instance_data->update_gpu_texture_allocations();
    update_textures(*instance_data, image, iuser);

    /* Step: Add the GPU textures to the shgroup. */
    instance_data->update_batches();
    if (!instance_data->flags.do_tile_drawing) {
      add_depth_shgroups(*instance_data, image, iuser);
    }
    add_shgroups(instance_data);
  }

  void draw_finish(IMAGE_Data *vedata) const override
  {
    IMAGE_InstanceData *instance_data = vedata->instance_data;
    instance_data->float_buffers.remove_unused_buffers();
  }

  void draw_scene(IMAGE_Data *vedata) const override
  {
    IMAGE_InstanceData *instance_data = vedata->instance_data;

    DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
    GPU_framebuffer_bind(dfbl->default_fb);

    static float clear_col[4] = {0.0f, 0.0f, 0.0f, 0.0f};
    float clear_depth = instance_data->flags.do_tile_drawing ? 0.75 : 1.0f;
    GPU_framebuffer_clear_color_depth(dfbl->default_fb, clear_col, clear_depth);

    DRW_view_set_active(instance_data->view);
    DRW_draw_pass(instance_data->passes.depth_pass);
    GPU_framebuffer_bind(dfbl->color_only_fb);
    DRW_draw_pass(instance_data->passes.image_pass);
    DRW_view_set_active(nullptr);
    GPU_framebuffer_bind(dfbl->default_fb);
  }
};

}  // namespace blender::draw::image_engine