/* NOTE(review): Doxygen page-header residue from the generated documentation
 * ("Blender V3.3 / deg_eval.cc / Go to the documentation of this file").
 * Not part of the original translation unit; kept here only as a comment. */
1 /* SPDX-License-Identifier: GPL-2.0-or-later
2  * Copyright 2013 Blender Foundation. All rights reserved. */
3 
10 #include "intern/eval/deg_eval.h"
11 
12 #include "PIL_time.h"
13 
14 #include "BLI_compiler_attrs.h"
15 #include "BLI_gsqueue.h"
16 #include "BLI_task.h"
17 #include "BLI_utildefines.h"
18 
19 #include "BKE_global.h"
20 
21 #include "DNA_node_types.h"
22 #include "DNA_object_types.h"
23 #include "DNA_scene_types.h"
24 
25 #include "DEG_depsgraph.h"
26 #include "DEG_depsgraph_query.h"
27 
28 #ifdef WITH_PYTHON
29 # include "BPY_extern.h"
30 #endif
31 
32 #include "atomic_ops.h"
33 
34 #include "intern/depsgraph.h"
36 #include "intern/depsgraph_tag.h"
41 #include "intern/node/deg_node.h"
46 
47 namespace blender::deg {
48 
49 namespace {
50 
51 struct DepsgraphEvalState;
52 
53 void deg_task_run_func(TaskPool *pool, void *taskdata);
54 
55 template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
56 void schedule_children(DepsgraphEvalState *state,
57  OperationNode *node,
58  ScheduleFunction *schedule_function,
59  ScheduleFunctionArgs... schedule_function_args);
60 
/* Scheduling callback for the threaded evaluation stages: push the operation node
 * into the task pool, to be evaluated by deg_task_run_func().
 * `taskdata` ownership is not transferred (free_taskdata=false): the node is owned
 * by the depsgraph. */
void schedule_node_to_pool(OperationNode *node, const int UNUSED(thread_id), TaskPool *pool)
{
  BLI_task_pool_push(pool, deg_task_run_func, node, false, nullptr);
}
65 
66 /* Denotes which part of dependency graph is being evaluated. */
/* Denotes which part of dependency graph is being evaluated.
 * The stages are executed in declaration order by deg_evaluate_on_refresh(). */
enum class EvaluationStage {
  /* Stage 1: Only Copy-on-Write operations are to be evaluated, prior to anything else.
   * This allows other operations to access its dependencies when there is a dependency cycle
   * involved. */
  COPY_ON_WRITE,

  /* Evaluate actual ID nodes visibility based on the current state of animation and drivers. */
  DYNAMIC_VISIBILITY,

  /* Threaded evaluation of all possible operations. */
  THREADED_EVALUATION,

  /* Workaround for areas which can not be evaluated in threads.
   *
   * For example, meta-balls, which are iterating over all bases and are requesting dupli-lists
   * to see whether there are meta-balls inside. */
  SINGLE_THREADED_WORKAROUND,
};
85 
86 struct DepsgraphEvalState {
88  bool do_stats;
89  EvaluationStage stage;
92 };
93 
94 void evaluate_node(const DepsgraphEvalState *state, OperationNode *operation_node)
95 {
96  ::Depsgraph *depsgraph = reinterpret_cast<::Depsgraph *>(state->graph);
97 
98  /* Sanity checks. */
99  BLI_assert_msg(!operation_node->is_noop(), "NOOP nodes should not actually be scheduled");
100  /* Perform operation. */
101  if (state->do_stats) {
102  const double start_time = PIL_check_seconds_timer();
103  operation_node->evaluate(depsgraph);
104  operation_node->stats.current_time += PIL_check_seconds_timer() - start_time;
105  }
106  else {
107  operation_node->evaluate(depsgraph);
108  }
109 
110  /* Clear the flag early on, allowing partial updates without re-evaluating the same node multiple
111  * times.
112  * This is a thread-safe modification as the node's flags are only read for a non-scheduled nodes
113  * and this node has been scheduled. */
114  operation_node->flag &= ~(DEPSOP_FLAG_DIRECTLY_MODIFIED | DEPSOP_FLAG_NEEDS_UPDATE |
116 }
117 
118 void deg_task_run_func(TaskPool *pool, void *taskdata)
119 {
120  void *userdata_v = BLI_task_pool_user_data(pool);
121  DepsgraphEvalState *state = (DepsgraphEvalState *)userdata_v;
122 
123  /* Evaluate node. */
124  OperationNode *operation_node = reinterpret_cast<OperationNode *>(taskdata);
125  evaluate_node(state, operation_node);
126 
127  /* Schedule children. */
128  schedule_children(state, operation_node, schedule_node_to_pool, pool);
129 }
130 
131 bool check_operation_node_visible(const DepsgraphEvalState *state, OperationNode *op_node)
132 {
133  const ComponentNode *comp_node = op_node->owner;
134  /* Special case for copy on write component: it is to be always evaluated, to keep copied
135  * "database" in a consistent state. */
136  if (comp_node->type == NodeType::COPY_ON_WRITE) {
137  return true;
138  }
139 
140  /* Special case for dynamic visibility pass: the actual visibility is not yet known, so limit to
141  * only operations which affects visibility. */
142  if (state->stage == EvaluationStage::DYNAMIC_VISIBILITY) {
143  return op_node->flag & OperationFlag::DEPSOP_FLAG_AFFECTS_VISIBILITY;
144  }
145 
146  return comp_node->affects_visible_id;
147 }
148 
149 void calculate_pending_parents_for_node(const DepsgraphEvalState *state, OperationNode *node)
150 {
151  /* Update counters, applies for both visible and invisible IDs. */
152  node->num_links_pending = 0;
153  node->scheduled = false;
154  /* Invisible IDs requires no pending operations. */
155  if (!check_operation_node_visible(state, node)) {
156  return;
157  }
158  /* No need to bother with anything if node is not tagged for update. */
159  if ((node->flag & DEPSOP_FLAG_NEEDS_UPDATE) == 0) {
160  return;
161  }
162  for (Relation *rel : node->inlinks) {
163  if (rel->from->type == NodeType::OPERATION && (rel->flag & RELATION_FLAG_CYCLIC) == 0) {
164  OperationNode *from = (OperationNode *)rel->from;
165  /* TODO(sergey): This is how old layer system was checking for the
166  * calculation, but how is it possible that visible object depends
167  * on an invisible? This is something what is prohibited after
168  * deg_graph_build_flush_layers(). */
169  if (!check_operation_node_visible(state, from)) {
170  continue;
171  }
172  /* No need to wait for operation which is up to date. */
173  if ((from->flag & DEPSOP_FLAG_NEEDS_UPDATE) == 0) {
174  continue;
175  }
177  }
178  }
179 }
180 
181 void calculate_pending_parents_if_needed(DepsgraphEvalState *state)
182 {
183  if (!state->need_update_pending_parents) {
184  return;
185  }
186 
187  for (OperationNode *node : state->graph->operations) {
188  calculate_pending_parents_for_node(state, node);
189  }
190 
191  state->need_update_pending_parents = false;
192 }
193 
194 void initialize_execution(DepsgraphEvalState *state, Depsgraph *graph)
195 {
196  /* Clear tags and other things which needs to be clear. */
197  if (state->do_stats) {
198  for (OperationNode *node : graph->operations) {
200  }
201  }
202 }
203 
204 bool is_metaball_object_operation(const OperationNode *operation_node)
205 {
206  const ComponentNode *component_node = operation_node->owner;
207  const IDNode *id_node = component_node->owner;
208  if (GS(id_node->id_cow->name) != ID_OB) {
209  return false;
210  }
211  const Object *object = reinterpret_cast<const Object *>(id_node->id_cow);
212  return object->type == OB_MBALL;
213 }
214 
215 bool need_evaluate_operation_at_stage(DepsgraphEvalState *state,
216  const OperationNode *operation_node)
217 {
218  const ComponentNode *component_node = operation_node->owner;
219  switch (state->stage) {
220  case EvaluationStage::COPY_ON_WRITE:
221  return (component_node->type == NodeType::COPY_ON_WRITE);
222 
223  case EvaluationStage::DYNAMIC_VISIBILITY:
224  return operation_node->flag & OperationFlag::DEPSOP_FLAG_AFFECTS_VISIBILITY;
225 
226  case EvaluationStage::THREADED_EVALUATION:
227  if (is_metaball_object_operation(operation_node)) {
228  state->need_single_thread_pass = true;
229  return false;
230  }
231  return true;
232 
233  case EvaluationStage::SINGLE_THREADED_WORKAROUND:
234  return true;
235  }
236  BLI_assert_msg(0, "Unhandled evaluation stage, should never happen.");
237  return false;
238 }
239 
240 /* Schedule a node if it needs evaluation.
241  * dec_parents: Decrement pending parents count, true when child nodes are
242  * scheduled after a task has been completed.
243  */
244 template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
245 void schedule_node(DepsgraphEvalState *state,
246  OperationNode *node,
247  bool dec_parents,
248  ScheduleFunction *schedule_function,
249  ScheduleFunctionArgs... schedule_function_args)
250 {
251  /* No need to schedule nodes of invisible ID. */
252  if (!check_operation_node_visible(state, node)) {
253  return;
254  }
255  /* No need to schedule operations which are not tagged for update, they are
256  * considered to be up to date. */
257  if ((node->flag & DEPSOP_FLAG_NEEDS_UPDATE) == 0) {
258  return;
259  }
260  /* TODO(sergey): This is not strictly speaking safe to read
261  * num_links_pending. */
262  if (dec_parents) {
265  }
266  /* Cal not schedule operation while its dependencies are not yet
267  * evaluated. */
268  if (node->num_links_pending != 0) {
269  return;
270  }
271  /* During the COW stage only schedule COW nodes. */
272  if (!need_evaluate_operation_at_stage(state, node)) {
273  return;
274  }
275  /* Actually schedule the node. */
276  bool is_scheduled = atomic_fetch_and_or_uint8((uint8_t *)&node->scheduled, (uint8_t) true);
277  if (!is_scheduled) {
278  if (node->is_noop()) {
279  /* skip NOOP node, schedule children right away */
280  schedule_children(state, node, schedule_function, schedule_function_args...);
281  }
282  else {
283  /* children are scheduled once this task is completed */
284  schedule_function(node, 0, schedule_function_args...);
285  }
286  }
287 }
288 
289 template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
290 void schedule_graph(DepsgraphEvalState *state,
291  ScheduleFunction *schedule_function,
292  ScheduleFunctionArgs... schedule_function_args)
293 {
294  for (OperationNode *node : state->graph->operations) {
295  schedule_node(state, node, false, schedule_function, schedule_function_args...);
296  }
297 }
298 
299 template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
300 void schedule_children(DepsgraphEvalState *state,
301  OperationNode *node,
302  ScheduleFunction *schedule_function,
303  ScheduleFunctionArgs... schedule_function_args)
304 {
305  for (Relation *rel : node->outlinks) {
306  OperationNode *child = (OperationNode *)rel->to;
307  BLI_assert(child->type == NodeType::OPERATION);
308  if (child->scheduled) {
309  /* Happens when having cyclic dependencies. */
310  continue;
311  }
312  schedule_node(state,
313  child,
314  (rel->flag & RELATION_FLAG_CYCLIC) == 0,
315  schedule_function,
316  schedule_function_args...);
317  }
318 }
319 
/* Scheduling callback for the single-threaded workaround pass: instead of pushing the
 * node to a task pool, append it to the evaluation queue.
 * The queue stores OperationNode pointers, hence pushing the address of the pointer. */
void schedule_node_to_queue(OperationNode *node,
                            const int /*thread_id*/,
                            GSQueue *evaluation_queue)
{
  BLI_gsqueue_push(evaluation_queue, &node);
}
326 
327 /* Evaluate given stage of the dependency graph evaluation using multiple threads.
328  *
329  * NOTE: Will assign the `state->stage` to the given stage. */
330 void evaluate_graph_threaded_stage(DepsgraphEvalState *state,
332  const EvaluationStage stage)
333 {
334  state->stage = stage;
335 
336  calculate_pending_parents_if_needed(state);
337 
338  schedule_graph(state, schedule_node_to_pool, task_pool);
340 }
341 
342 /* Evaluate remaining operations of the dependency graph in a single threaded manner. */
343 void evaluate_graph_single_threaded_if_needed(DepsgraphEvalState *state)
344 {
345  if (!state->need_single_thread_pass) {
346  return;
347  }
348 
349  BLI_assert(!state->need_update_pending_parents);
350 
351  state->stage = EvaluationStage::SINGLE_THREADED_WORKAROUND;
352 
353  GSQueue *evaluation_queue = BLI_gsqueue_new(sizeof(OperationNode *));
354  schedule_graph(state, schedule_node_to_queue, evaluation_queue);
355 
356  while (!BLI_gsqueue_is_empty(evaluation_queue)) {
357  OperationNode *operation_node;
358  BLI_gsqueue_pop(evaluation_queue, &operation_node);
359 
360  evaluate_node(state, operation_node);
361  schedule_children(state, operation_node, schedule_node_to_queue, evaluation_queue);
362  }
363 
364  BLI_gsqueue_free(evaluation_queue);
365 }
366 
367 void depsgraph_ensure_view_layer(Depsgraph *graph)
368 {
369  /* We update copy-on-write scene in the following cases:
370  * - It was not expanded yet.
371  * - It was tagged for update of CoW component.
372  * This allows us to have proper view layer pointer. */
373  Scene *scene_cow = graph->scene_cow;
374  if (deg_copy_on_write_is_expanded(&scene_cow->id) &&
375  (scene_cow->id.recalc & ID_RECALC_COPY_ON_WRITE) == 0) {
376  return;
377  }
378 
379  const IDNode *scene_id_node = graph->find_id_node(&graph->scene->id);
381 }
382 
383 TaskPool *deg_evaluate_task_pool_create(DepsgraphEvalState *state)
384 {
385  if (G.debug & G_DEBUG_DEPSGRAPH_NO_THREADS) {
387  }
388 
390 }
391 
392 } // namespace
393 
395 {
396  /* Nothing to update, early out. */
397  if (graph->entry_tags.is_empty()) {
398  return;
399  }
400 
402 
403 #ifdef WITH_PYTHON
404  /* Release the GIL so that Python drivers can be evaluated. See T91046. */
406 #endif
407 
408  graph->is_evaluating = true;
409  depsgraph_ensure_view_layer(graph);
410 
411  /* Set up evaluation state. */
412  DepsgraphEvalState state;
413  state.graph = graph;
414  state.do_stats = graph->debug.do_time_debug();
415 
416  /* Prepare all nodes for evaluation. */
417  initialize_execution(&state, graph);
418 
419  /* Evaluation happens in several incremental steps:
420  *
421  * - Start with the copy-on-write operations which never form dependency cycles. This will ensure
422  * that if a dependency graph has a cycle evaluation functions will always "see" valid expanded
423  * datablock. It might not be evaluated yet, but at least the datablock will be valid.
424  *
425  * - If there is potentially dynamically changing visibility in the graph update the actual
426  * nodes visibilities, so that actual heavy data evaluation can benefit from knowledge that
427  * something heavy is not currently visible.
428  *
429  * - Multi-threaded evaluation of all possible nodes.
430  * Certain operations (and their subtrees) could be ignored. For example, meta-balls are not
431  * safe from threading point of view, so the threaded evaluation will stop at the metaball
432  * operation node.
433  *
434  * - Single-threaded pass of all remaining operations. */
435 
436  TaskPool *task_pool = deg_evaluate_task_pool_create(&state);
437 
438  evaluate_graph_threaded_stage(&state, task_pool, EvaluationStage::COPY_ON_WRITE);
439 
441  /* Update pending parents including only the ones which are affecting operations which are
442  * affecting visibility. */
443  state.need_update_pending_parents = true;
444 
445  evaluate_graph_threaded_stage(&state, task_pool, EvaluationStage::DYNAMIC_VISIBILITY);
446 
448 
449  /* Update parents to an updated visibility and evaluation stage.
450  *
451  * Need to do it regardless of whether visibility is actually changed or not: current state of
452  * the pending parents are all zeroes because it was previously calculated for only visibility
453  * related nodes and those are fully evaluated by now. */
454  state.need_update_pending_parents = true;
455  }
456 
457  evaluate_graph_threaded_stage(&state, task_pool, EvaluationStage::THREADED_EVALUATION);
458 
460 
461  evaluate_graph_single_threaded_if_needed(&state);
462 
463  /* Finalize statistics gathering. This is because we only gather single
464  * operation timing here, without aggregating anything to avoid any extra
465  * synchronization. */
466  if (state.do_stats) {
468  }
469 
470  /* Clear any uncleared tags. */
472  graph->is_evaluating = false;
473 
474 #ifdef WITH_PYTHON
476 #endif
477 
479 }
480 
481 } // namespace blender::deg
@ G_DEBUG_DEPSGRAPH_NO_THREADS
Definition: BKE_global.h:186
#define BLI_assert(a)
Definition: BLI_assert.h:46
#define BLI_assert_msg(a, msg)
Definition: BLI_assert.h:53
void BLI_gsqueue_free(GSQueue *queue)
Definition: gsqueue.c:90
GSQueue * BLI_gsqueue_new(size_t elem_size)
Definition: gsqueue.c:69
void BLI_gsqueue_push(GSQueue *queue, const void *item)
Definition: gsqueue.c:97
void BLI_gsqueue_pop(GSQueue *queue, void *r_item)
Definition: gsqueue.c:131
bool BLI_gsqueue_is_empty(const GSQueue *queue)
Definition: gsqueue.c:159
@ TASK_PRIORITY_HIGH
Definition: BLI_task.h:57
void * BLI_task_pool_user_data(TaskPool *pool)
Definition: task_pool.cc:525
void BLI_task_pool_work_and_wait(TaskPool *pool)
Definition: task_pool.cc:480
TaskPool * BLI_task_pool_create_suspended(void *userdata, eTaskPriority priority)
Definition: task_pool.cc:417
TaskPool * BLI_task_pool_create_no_threads(void *userdata)
Definition: task_pool.cc:426
void BLI_task_pool_free(TaskPool *pool)
Definition: task_pool.cc:440
void BLI_task_pool_push(TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskFreeFunction freedata)
Definition: task_pool.cc:459
#define UNUSED(x)
#define BPy_BEGIN_ALLOW_THREADS
Definition: BPY_extern.h:54
#define BPy_END_ALLOW_THREADS
Definition: BPY_extern.h:58
struct Depsgraph Depsgraph
Definition: DEG_depsgraph.h:35
@ ID_RECALC_COPY_ON_WRITE
Definition: DNA_ID.h:834
@ ID_OB
Definition: DNA_ID_enums.h:47
Object is a sort of wrapper for general info.
@ OB_MBALL
Platform independent time functions.
Provides wrapper around system-specific atomic primitives, and some extensions (faked-atomic operatio...
ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b)
ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
OperationNode * node
StackEntry * from
const IDNode * id_node
bool need_update_pending_parents
Definition: deg_eval.cc:90
bool do_stats
Definition: deg_eval.cc:88
bool need_single_thread_pass
Definition: deg_eval.cc:91
EvaluationStage stage
Definition: deg_eval.cc:89
Depsgraph * graph
Definition: deg_eval.cc:87
const Depsgraph * depsgraph
TaskPool * task_pool
#define GS(x)
Definition: iris.c:225
const int state
#define G(x, y, z)
void deg_eval_stats_aggregate(Depsgraph *graph)
ID * deg_update_copy_on_write_datablock(const Depsgraph *depsgraph, const IDNode *id_node)
void deg_graph_clear_tags(Depsgraph *graph)
void deg_evaluate_on_refresh(Depsgraph *graph)
Definition: deg_eval.cc:394
bool deg_copy_on_write_is_expanded(const ID *id_cow)
void deg_graph_flush_visibility_flags_if_needed(Depsgraph *graph)
unsigned char uint8_t
Definition: stdint.h:78
int recalc
Definition: DNA_ID.h:390
char name[66]
Definition: DNA_ID.h:378
IDNode * find_id_node(const ID *id) const
Definition: depsgraph.cc:101
OperationNodes operations
Definition: depsgraph.h:120
DepsgraphDebug debug
Definition: depsgraph.h:150
Set< OperationNode * > entry_tags
Definition: depsgraph.h:114
Relations inlinks
Definition: deg_node.h:173
Relations outlinks
Definition: deg_node.h:174
double PIL_check_seconds_timer(void)
Definition: time.c:64