/* COM_ExecutionGroup.cc — Blender 3.3 tiled-compositor execution group.
 * (HTML-export navigation text removed.) */
1 /* SPDX-License-Identifier: GPL-2.0-or-later
2  * Copyright 2011 Blender Foundation. */
3 
#include "COM_ExecutionGroup.h"

#include "COM_ChunkOrder.h"
#include "COM_Debug.h"
#include "COM_ExecutionSystem.h"
#include "COM_ReadBufferOperation.h"
#include "COM_ViewerOperation.h"
#include "COM_WorkScheduler.h"
#include "COM_WriteBufferOperation.h"
#include "COM_defines.h"

#include "BLI_rand.hh"

#include "BLT_translation.h"

#include "PIL_time.h"
18 
19 namespace blender::compositor {
20 
21 std::ostream &operator<<(std::ostream &os, const ExecutionGroupFlags &flags)
22 {
23  if (flags.initialized) {
24  os << "init,";
25  }
26  if (flags.is_output) {
27  os << "output,";
28  }
29  if (flags.complex) {
30  os << "complex,";
31  }
32  if (flags.open_cl) {
33  os << "open_cl,";
34  }
35  if (flags.single_threaded) {
36  os << "single_threaded,";
37  }
38  return os;
39 }
40 
42 {
43  id_ = id;
44  bTree_ = nullptr;
45  height_ = 0;
46  width_ = 0;
47  max_read_buffer_offset_ = 0;
48  x_chunks_len_ = 0;
49  y_chunks_len_ = 0;
50  chunks_len_ = 0;
51  chunks_finished_ = 0;
52  BLI_rcti_init(&viewer_border_, 0, 0, 0, 0);
53  execution_start_time_ = 0;
54 }
55 
56 std::ostream &operator<<(std::ostream &os, const ExecutionGroup &execution_group)
57 {
58  os << "ExecutionGroup(id=" << execution_group.get_id();
59  os << ",flags={" << execution_group.get_flags() << "}";
60  os << ",operation=" << *execution_group.get_output_operation() << "";
61  os << ")";
62  return os;
63 }
64 
66 {
67  return this->get_output_operation()->get_render_priority();
68 }
69 
70 bool ExecutionGroup::can_contain(NodeOperation &operation)
71 {
72  if (!flags_.initialized) {
73  return true;
74  }
75 
76  if (operation.get_flags().is_read_buffer_operation) {
77  return true;
78  }
79  if (operation.get_flags().is_write_buffer_operation) {
80  return false;
81  }
82  if (operation.get_flags().is_set_operation) {
83  return true;
84  }
85 
86  /* complex groups don't allow further ops (except read buffer and values, see above) */
87  if (flags_.complex) {
88  return false;
89  }
90  /* complex ops can't be added to other groups (except their own, which they initialize, see
91  * above) */
92  if (operation.get_flags().complex) {
93  return false;
94  }
95 
96  return true;
97 }
98 
100 {
101  if (!can_contain(*operation)) {
102  return false;
103  }
104 
105  if (!operation->get_flags().is_read_buffer_operation &&
106  !operation->get_flags().is_write_buffer_operation) {
107  flags_.complex = operation->get_flags().complex;
108  flags_.open_cl = operation->get_flags().open_cl;
109  flags_.single_threaded = operation->get_flags().single_threaded;
110  flags_.initialized = true;
111  }
112 
113  operations_.append(operation);
114 
115  return true;
116 }
117 
119 {
120  return this
121  ->operations_[0]; /* the first operation of the group is always the output operation. */
122 }
123 
124 void ExecutionGroup::init_work_packages()
125 {
126  work_packages_.clear();
127  if (chunks_len_ != 0) {
128  work_packages_.resize(chunks_len_);
129  for (unsigned int index = 0; index < chunks_len_; index++) {
130  work_packages_[index].type = eWorkPackageType::Tile;
131  work_packages_[index].state = eWorkPackageState::NotScheduled;
132  work_packages_[index].execution_group = this;
133  work_packages_[index].chunk_number = index;
134  determine_chunk_rect(&work_packages_[index].rect, index);
135  }
136  }
137 }
138 
139 void ExecutionGroup::init_read_buffer_operations()
140 {
141  unsigned int max_offset = 0;
142  for (NodeOperation *operation : operations_) {
143  if (operation->get_flags().is_read_buffer_operation) {
144  ReadBufferOperation *read_operation = static_cast<ReadBufferOperation *>(operation);
145  read_operations_.append(read_operation);
146  max_offset = MAX2(max_offset, read_operation->get_offset());
147  }
148  }
149  max_offset++;
150  max_read_buffer_offset_ = max_offset;
151 }
152 
154 {
155  init_number_of_chunks();
156  init_work_packages();
157  init_read_buffer_operations();
158 }
159 
161 {
162  work_packages_.clear();
163  chunks_len_ = 0;
164  x_chunks_len_ = 0;
165  y_chunks_len_ = 0;
166  read_operations_.clear();
167  bTree_ = nullptr;
168 }
169 
/* The group's resolution is defined by its output operation; also resets the
 * viewer border to cover the whole output area. */
void ExecutionGroup::determine_resolution(unsigned int resolution[2])
{
  NodeOperation *operation = this->get_output_operation();
  resolution[0] = operation->get_width();
  resolution[1] = operation->get_height();
  this->set_resolution(resolution);
  /* Default border = full output; set_viewer_border()/set_render_border()
   * may shrink it afterwards. */
  BLI_rcti_init(&viewer_border_, 0, width_, 0, height_);
}
178 
179 void ExecutionGroup::init_number_of_chunks()
180 {
181  if (flags_.single_threaded) {
182  x_chunks_len_ = 1;
183  y_chunks_len_ = 1;
184  chunks_len_ = 1;
185  }
186  else {
187  const float chunk_sizef = chunk_size_;
188  const int border_width = BLI_rcti_size_x(&viewer_border_);
189  const int border_height = BLI_rcti_size_y(&viewer_border_);
190  x_chunks_len_ = ceil(border_width / chunk_sizef);
191  y_chunks_len_ = ceil(border_height / chunk_sizef);
192  chunks_len_ = x_chunks_len_ * y_chunks_len_;
193  }
194 }
195 
196 blender::Array<unsigned int> ExecutionGroup::get_execution_order() const
197 {
198  blender::Array<unsigned int> chunk_order(chunks_len_);
199  for (int chunk_index = 0; chunk_index < chunks_len_; chunk_index++) {
200  chunk_order[chunk_index] = chunk_index;
201  }
202 
203  NodeOperation *operation = this->get_output_operation();
204  float centerX = 0.5f;
205  float centerY = 0.5f;
207 
208  if (operation->get_flags().is_viewer_operation) {
209  ViewerOperation *viewer = (ViewerOperation *)operation;
210  centerX = viewer->getCenterX();
211  centerY = viewer->getCenterY();
212  order_type = viewer->get_chunk_order();
213  }
214 
215  const int border_width = BLI_rcti_size_x(&viewer_border_);
216  const int border_height = BLI_rcti_size_y(&viewer_border_);
217  int index;
218  switch (order_type) {
219  case ChunkOrdering::Random: {
221  blender::MutableSpan<unsigned int> span = chunk_order.as_mutable_span();
222  /* Shuffle twice to make it more random. */
223  rng.shuffle(span);
224  rng.shuffle(span);
225  break;
226  }
228  ChunkOrderHotspot hotspot(border_width * centerX, border_height * centerY, 0.0f);
229  blender::Array<ChunkOrder> chunk_orders(chunks_len_);
230  for (index = 0; index < chunks_len_; index++) {
231  const WorkPackage &work_package = work_packages_[index];
232  chunk_orders[index].index = index;
233  chunk_orders[index].x = work_package.rect.xmin - viewer_border_.xmin;
234  chunk_orders[index].y = work_package.rect.ymin - viewer_border_.ymin;
235  chunk_orders[index].update_distance(&hotspot, 1);
236  }
237 
238  std::sort(&chunk_orders[0], &chunk_orders[chunks_len_ - 1]);
239  for (index = 0; index < chunks_len_; index++) {
240  chunk_order[index] = chunk_orders[index].index;
241  }
242 
243  break;
244  }
246  unsigned int tx = border_width / 6;
247  unsigned int ty = border_height / 6;
248  unsigned int mx = border_width / 2;
249  unsigned int my = border_height / 2;
250  unsigned int bx = mx + 2 * tx;
251  unsigned int by = my + 2 * ty;
252  float addition = chunks_len_ / COM_RULE_OF_THIRDS_DIVIDER;
253 
254  ChunkOrderHotspot hotspots[9]{
255  ChunkOrderHotspot(mx, my, addition * 0),
256  ChunkOrderHotspot(tx, my, addition * 1),
257  ChunkOrderHotspot(bx, my, addition * 2),
258  ChunkOrderHotspot(bx, by, addition * 3),
259  ChunkOrderHotspot(tx, ty, addition * 4),
260  ChunkOrderHotspot(bx, ty, addition * 5),
261  ChunkOrderHotspot(tx, by, addition * 6),
262  ChunkOrderHotspot(mx, ty, addition * 7),
263  ChunkOrderHotspot(mx, by, addition * 8),
264  };
265 
266  blender::Array<ChunkOrder> chunk_orders(chunks_len_);
267  for (index = 0; index < chunks_len_; index++) {
268  const WorkPackage &work_package = work_packages_[index];
269  chunk_orders[index].index = index;
270  chunk_orders[index].x = work_package.rect.xmin - viewer_border_.xmin;
271  chunk_orders[index].y = work_package.rect.ymin - viewer_border_.ymin;
272  chunk_orders[index].update_distance(hotspots, 9);
273  }
274 
275  std::sort(&chunk_orders[0], &chunk_orders[chunks_len_]);
276 
277  for (index = 0; index < chunks_len_; index++) {
278  chunk_order[index] = chunk_orders[index].index;
279  }
280 
281  break;
282  }
284  default:
285  break;
286  }
287  return chunk_order;
288 }
289 
291 {
292  const CompositorContext &context = graph->get_context();
293  const bNodeTree *bTree = context.get_bnodetree();
294  if (width_ == 0 || height_ == 0) {
295  return;
296  }
297  if (bTree->test_break && bTree->test_break(bTree->tbh)) {
298  return;
299  }
300  if (chunks_len_ == 0) {
301  return;
302  }
303  unsigned int chunk_index;
304 
305  execution_start_time_ = PIL_check_seconds_timer();
306 
307  chunks_finished_ = 0;
308  bTree_ = bTree;
309 
310  blender::Array<unsigned int> chunk_order = get_execution_order();
311 
314 
315  bool breaked = false;
316  bool finished = false;
317  unsigned int start_index = 0;
318  const int max_number_evaluated = BLI_system_thread_count() * 2;
319 
320  while (!finished && !breaked) {
321  bool start_evaluated = false;
322  finished = true;
323  int number_evaluated = 0;
324 
325  for (int index = start_index; index < chunks_len_ && number_evaluated < max_number_evaluated;
326  index++) {
327  chunk_index = chunk_order[index];
328  int y_chunk = chunk_index / x_chunks_len_;
329  int x_chunk = chunk_index - (y_chunk * x_chunks_len_);
330  const WorkPackage &work_package = work_packages_[chunk_index];
331  switch (work_package.state) {
333  schedule_chunk_when_possible(graph, x_chunk, y_chunk);
334  finished = false;
335  start_evaluated = true;
336  number_evaluated++;
337 
338  if (bTree->update_draw) {
339  bTree->update_draw(bTree->udh);
340  }
341  break;
342  }
344  finished = false;
345  start_evaluated = true;
346  number_evaluated++;
347  break;
348  }
350  if (!start_evaluated) {
351  start_index = index + 1;
352  }
353  }
354  };
355  }
356 
358 
359  if (bTree->test_break && bTree->test_break(bTree->tbh)) {
360  breaked = true;
361  }
362  }
365 }
366 
368 {
369  WorkPackage &work_package = work_packages_[chunk_number];
370 
371  MemoryBuffer **memory_buffers = (MemoryBuffer **)MEM_callocN(
372  sizeof(MemoryBuffer *) * max_read_buffer_offset_, __func__);
373  rcti output;
374  for (ReadBufferOperation *read_operation : read_operations_) {
375  MemoryProxy *memory_proxy = read_operation->get_memory_proxy();
376  this->determine_depending_area_of_interest(&work_package.rect, read_operation, &output);
377  MemoryBuffer *memory_buffer =
378  memory_proxy->get_executor()->construct_consolidated_memory_buffer(*memory_proxy, output);
379  memory_buffers[read_operation->get_offset()] = memory_buffer;
380  }
381  return memory_buffers;
382 }
383 
385  rcti &rect)
386 {
387  MemoryBuffer *image_buffer = memory_proxy.get_buffer();
388  MemoryBuffer *result = new MemoryBuffer(&memory_proxy, rect, MemoryBufferState::Temporary);
389  result->fill_from(*image_buffer);
390  return result;
391 }
392 
/* Called after a chunk finished: mark it executed, release its temporary
 * input buffers and report progress to the node tree. May be called from
 * worker threads (hence the atomic counter increment). */
void ExecutionGroup::finalize_chunk_execution(int chunk_number, MemoryBuffer **memory_buffers)
{
  WorkPackage &work_package = work_packages_[chunk_number];
  if (work_package.state == eWorkPackageState::Scheduled) {
    work_package.state = eWorkPackageState::Executed;
  }

  atomic_add_and_fetch_u(&chunks_finished_, 1);
  if (memory_buffers) {
    /* Only temporary (consolidated) buffers are owned by this chunk; other
     * entries belong to their memory proxies. */
    for (unsigned int index = 0; index < max_read_buffer_offset_; index++) {
      MemoryBuffer *buffer = memory_buffers[index];
      if (buffer) {
        if (buffer->is_temporarily()) {
          memory_buffers[index] = nullptr;
          delete buffer;
        }
      }
    }
    MEM_freeN(memory_buffers);
  }
  if (bTree_) {
    /* Status report is only performed for top level Execution Groups. */
    float progress = chunks_finished_;
    progress /= chunks_len_;
    bTree_->progress(bTree_->prh, progress);

    char buf[128];
    BLI_snprintf(
        buf, sizeof(buf), TIP_("Compositing | Tile %u-%u"), chunks_finished_, chunks_len_);
    bTree_->stats_draw(bTree_->sdh, buf);
  }
}
425 
/* Compute the pixel rect of the chunk at grid position (x_chunk, y_chunk),
 * clamped against both the viewer border and the group resolution. */
inline void ExecutionGroup::determine_chunk_rect(rcti *r_rect,
                                                 const unsigned int x_chunk,
                                                 const unsigned int y_chunk) const
{
  const int border_width = BLI_rcti_size_x(&viewer_border_);
  const int border_height = BLI_rcti_size_y(&viewer_border_);

  if (flags_.single_threaded) {
    /* One chunk covering the whole viewer border. */
    BLI_rcti_init(r_rect, viewer_border_.xmin, border_width, viewer_border_.ymin, border_height);
  }
  else {
    const unsigned int minx = x_chunk * chunk_size_ + viewer_border_.xmin;
    const unsigned int miny = y_chunk * chunk_size_ + viewer_border_.ymin;
    /* Clamp the chunk's far edges to both border max and group resolution. */
    const unsigned int width = MIN2((unsigned int)viewer_border_.xmax, width_);
    const unsigned int height = MIN2((unsigned int)viewer_border_.ymax, height_);
    BLI_rcti_init(r_rect,
                  MIN2(minx, width_),
                  MIN2(minx + chunk_size_, width),
                  MIN2(miny, height_),
                  MIN2(miny + chunk_size_, height));
  }
}
448 
449 void ExecutionGroup::determine_chunk_rect(rcti *r_rect, const unsigned int chunk_number) const
450 {
451  const unsigned int y_chunk = chunk_number / x_chunks_len_;
452  const unsigned int x_chunk = chunk_number - (y_chunk * x_chunks_len_);
453  determine_chunk_rect(r_rect, x_chunk, y_chunk);
454 }
455 
457 {
458  /* We assume that this method is only called from complex execution groups. */
459  NodeOperation *operation = this->get_output_operation();
460  if (operation->get_flags().is_write_buffer_operation) {
461  WriteBufferOperation *write_operation = (WriteBufferOperation *)operation;
463  write_operation->get_memory_proxy(), rect, MemoryBufferState::Temporary);
464  return buffer;
465  }
466  return nullptr;
467 }
468 
/* Schedule every chunk that intersects `area` (in group pixel coordinates).
 * Returns false when at least one of those chunks could not be scheduled yet
 * (its own dependencies are still pending). */
bool ExecutionGroup::schedule_area_when_possible(ExecutionSystem *graph, rcti *area)
{
  if (flags_.single_threaded) {
    return schedule_chunk_when_possible(graph, 0, 0);
  }
  /* Find all chunks inside the rect
   * determine `minxchunk`, `minychunk`, `maxxchunk`, `maxychunk`
   * where x and y are chunk-numbers. */

  int indexx, indexy;
  /* Translate to border-relative coordinates and clamp to the border. */
  int minx = max_ii(area->xmin - viewer_border_.xmin, 0);
  int maxx = min_ii(area->xmax - viewer_border_.xmin, viewer_border_.xmax - viewer_border_.xmin);
  int miny = max_ii(area->ymin - viewer_border_.ymin, 0);
  int maxy = min_ii(area->ymax - viewer_border_.ymin, viewer_border_.ymax - viewer_border_.ymin);
  /* Max bounds round up so partially covered chunks are included. */
  int minxchunk = minx / (int)chunk_size_;
  int maxxchunk = (maxx + (int)chunk_size_ - 1) / (int)chunk_size_;
  int minychunk = miny / (int)chunk_size_;
  int maxychunk = (maxy + (int)chunk_size_ - 1) / (int)chunk_size_;
  minxchunk = max_ii(minxchunk, 0);
  minychunk = max_ii(minychunk, 0);
  maxxchunk = min_ii(maxxchunk, (int)x_chunks_len_);
  maxychunk = min_ii(maxychunk, (int)y_chunks_len_);

  bool result = true;
  for (indexx = minxchunk; indexx < maxxchunk; indexx++) {
    for (indexy = minychunk; indexy < maxychunk; indexy++) {
      if (!schedule_chunk_when_possible(graph, indexx, indexy)) {
        result = false;
      }
    }
  }

  return result;
}
503 
504 bool ExecutionGroup::schedule_chunk(unsigned int chunk_number)
505 {
506  WorkPackage &work_package = work_packages_[chunk_number];
507  if (work_package.state == eWorkPackageState::NotScheduled) {
508  work_package.state = eWorkPackageState::Scheduled;
509  WorkScheduler::schedule(&work_package);
510  return true;
511  }
512  return false;
513 }
514 
/* Try to schedule a single chunk, scheduling its upstream dependencies first.
 * Returns true when the chunk is already executed or lies outside the grid;
 * returns false when it is (now or already) in flight. */
bool ExecutionGroup::schedule_chunk_when_possible(ExecutionSystem *graph,
                                                  const int chunk_x,
                                                  const int chunk_y)
{
  /* Out-of-range chunks count as "done" so callers don't wait on them. */
  if (chunk_x < 0 || chunk_x >= (int)x_chunks_len_) {
    return true;
  }
  if (chunk_y < 0 || chunk_y >= (int)y_chunks_len_) {
    return true;
  }

  /* Check if chunk is already executed or scheduled and not yet executed. */
  const int chunk_index = chunk_y * x_chunks_len_ + chunk_x;
  WorkPackage &work_package = work_packages_[chunk_index];
  if (work_package.state == eWorkPackageState::Executed) {
    return true;
  }
  if (work_package.state == eWorkPackageState::Scheduled) {
    return false;
  }

  bool can_be_executed = true;
  rcti area;

  /* Recursively schedule the areas this chunk reads from; the chunk itself
   * can only run once all of those are available. */
  for (ReadBufferOperation *read_operation : read_operations_) {
    BLI_rcti_init(&area, 0, 0, 0, 0);
    MemoryProxy *memory_proxy = read_operation->get_memory_proxy();
    determine_depending_area_of_interest(&work_package.rect, read_operation, &area);
    ExecutionGroup *group = memory_proxy->get_executor();

    if (!group->schedule_area_when_possible(graph, &area)) {
      can_be_executed = false;
    }
  }

  if (can_be_executed) {
    schedule_chunk(chunk_index);
  }

  return false;
}
556 
557 void ExecutionGroup::determine_depending_area_of_interest(rcti *input,
558  ReadBufferOperation *read_operation,
559  rcti *output)
560 {
562  input, read_operation, output);
563 }
564 
565 void ExecutionGroup::set_viewer_border(float xmin, float xmax, float ymin, float ymax)
566 {
567  const NodeOperation &operation = *this->get_output_operation();
568  if (operation.get_flags().use_viewer_border) {
569  BLI_rcti_init(&viewer_border_, xmin * width_, xmax * width_, ymin * height_, ymax * height_);
570  }
571 }
572 
573 void ExecutionGroup::set_render_border(float xmin, float xmax, float ymin, float ymax)
574 {
575  const NodeOperation &operation = *this->get_output_operation();
576  if (operation.is_output_operation(true) && operation.get_flags().use_render_border) {
577  BLI_rcti_init(&viewer_border_, xmin * width_, xmax * width_, ymin * height_, ymax * height_);
578  }
579 }
580 
581 } // namespace blender::compositor
/* NOTE(review): the text that followed the closing namespace brace was
 * Doxygen cross-reference residue from the HTML export (declarations copied
 * from BLI_rect.h, BLI_threads.h, COM_Debug.h, COM_defines.h, etc.) and was
 * never part of this translation unit; it has been removed. */