// Blender v3.3 — node_tree_update.cc (Doxygen source listing).
// NOTE: the leading integers on each line below are the original file's line
// numbers embedded by the documentation extractor; gaps in that numbering mean
// the extraction dropped lines (signatures, returns, case labels, macros).
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 
3 #include "BLI_map.hh"
4 #include "BLI_multi_value_map.hh"
5 #include "BLI_noise.hh"
6 #include "BLI_set.hh"
7 #include "BLI_stack.hh"
8 #include "BLI_vector_set.hh"
9 
10 #include "DNA_anim_types.h"
11 #include "DNA_modifier_types.h"
12 #include "DNA_node_types.h"
13 
14 #include "BKE_anim_data.h"
15 #include "BKE_image.h"
16 #include "BKE_main.h"
17 #include "BKE_node.h"
18 #include "BKE_node_runtime.hh"
19 #include "BKE_node_tree_update.h"
20 
21 #include "MOD_nodes.h"
22 
23 #include "NOD_node_declaration.hh"
24 #include "NOD_node_tree_ref.hh"
25 #include "NOD_texture.h"
26 
27 #include "DEG_depsgraph_query.h"
28 
29 using namespace blender::nodes;
30 
/* Fragment of the changed-flag enum: bitflags recorded on bNodeTreeRuntime to
 * describe what changed since the last update. NOTE(review): the enum head and
 * most enumerators (original lines ~30-37, 39-41, 43-47, including
 * NTREE_CHANGED_NOTHING / NTREE_CHANGED_INTERFACE / NTREE_CHANGED_NODE_PROPERTY /
 * NTREE_CHANGED_NODE_OUTPUT referenced later in this file) were dropped by the
 * extraction. */
38  NTREE_CHANGED_ANY = (1 << 1),
42  NTREE_CHANGED_LINK = (1 << 5),
48 };
49 
/* Tag the whole tree as changed by OR-ing `flag` into its runtime changed-flag.
 * NOTE(review): the signature (original line 50, presumably
 * `static void add_tree_tag(bNodeTree *ntree, const eNodeTreeChangedFlag flag)`)
 * was dropped by the extraction. */
51 {
52  ntree->runtime->changed_flag |= flag;
53 }
54 
/* Tag a single node as changed and propagate the tag to its owning tree.
 * NOTE(review): the signature (original line 55) was dropped; the body shows
 * parameters `ntree`, `node` and `flag`. */
56 {
57  add_tree_tag(ntree, flag);
58  node->runtime->changed_flag |= flag;
59 }
60 
/* Tag a single socket as changed and propagate the tag to its owning tree.
 * NOTE(review): the signature (original line 61) was dropped; the body shows
 * parameters `ntree`, `socket` and `flag`. */
62 {
63  add_tree_tag(ntree, flag);
64  socket->runtime->changed_flag |= flag;
65 }
66 
67 namespace blender::bke {
68 
/* Field inferencing: decides per socket whether it carries a single value or a
 * field, for geometry node trees. */
69 namespace node_field_inferencing {
70 
/* NOTE(review): first overload — signature (original line 71, presumably taking
 * an eNodeSocketDatatype) and its return statement (line 73) were dropped. */
72 {
74 }
75 
/* Overload that checks a socket reference; the actual return expression
 * (original line 78) was dropped by the extraction. */
76 static bool is_field_socket_type(const SocketRef &socket)
77 {
79 }
80 
/* Determine the InputSocketFieldType of a node's input socket from the node
 * kind and its declaration. NOTE(review): the first signature line (original
 * line 81) and the return statements of the early-out branches (lines 85, 88,
 * 92, 95, 98, 114) were dropped by the extraction. */
82  const InputSocketRef &socket)
83 {
84  if (!is_field_socket_type(socket)) {
86  }
87  if (node.is_reroute_node()) {
89  }
90  if (node.is_group_output_node()) {
91  /* Outputs always support fields when the data type is correct. */
93  }
94  if (node.is_undefined()) {
96  }
97  if (node.bnode()->type == NODE_CUSTOM) {
99  }
100 
101  const NodeDeclaration *node_decl = node.declaration();
102 
103  /* Node declarations should be implemented for nodes involved here. */
104  BLI_assert(node_decl != nullptr);
105 
106  /* Get the field type from the declaration. */
107  const SocketDeclaration &socket_decl = *node_decl->inputs()[socket.index()];
108  const InputSocketFieldType field_type = socket_decl.input_field_type();
109  if (field_type == InputSocketFieldType::Implicit) {
110  return field_type;
111  }
112  if (node_decl->is_function_node()) {
113  /* In a function node, every socket supports fields. */
115  }
116  return field_type;
117 }
118 
/* Determine the OutputFieldDependency of a node's output socket from the node
 * kind and its declaration. NOTE(review): the first signature line (original
 * line 119) and the return statements of the early-out branches (lines 124,
 * 128, 132, 135, 138, 148) were dropped by the extraction. */
120  const OutputSocketRef &socket)
121 {
122  if (!is_field_socket_type(socket)) {
123  /* Non-field sockets always output data. */
125  }
126  if (node.is_reroute_node()) {
127  /* The reroute just forwards what is passed in. */
129  }
130  if (node.is_group_input_node()) {
131  /* Input nodes get special treatment in #determine_group_input_states. */
133  }
134  if (node.is_undefined()) {
136  }
137  if (node.bnode()->type == NODE_CUSTOM) {
139  }
140 
141  const NodeDeclaration *node_decl = node.declaration();
142 
143  /* Node declarations should be implemented for nodes involved here. */
144  BLI_assert(node_decl != nullptr);
145 
146  if (node_decl->is_function_node()) {
147  /* In a generic function node, all outputs depend on all inputs. */
149  }
150 
151  /* Use the socket declaration. */
152  const SocketDeclaration &socket_decl = *node_decl->outputs()[socket.index()];
153  return socket_decl.output_field_dependency();
154 }
155 
/* Build a fallback interface for a node: all inputs take single values (None)
 * and all outputs are plain data sources. Used when the real interface is not
 * available. NOTE(review): the signature (original line 156) was dropped. */
157 {
158  FieldInferencingInterface inferencing_interface;
159  inferencing_interface.inputs.append_n_times(InputSocketFieldType::None, node.inputs().size());
160  inferencing_interface.outputs.append_n_times(OutputFieldDependency::ForDataSource(),
161  node.outputs().size());
162  return inferencing_interface;
163 }
164 
/* Get the field inferencing interface of a node: for group nodes it is read
 * from the referenced tree, otherwise it is assembled from the per-socket
 * queries above. NOTE(review): the signature (original lines ~165-170) and the
 * fallback-return lines inside the group branch (lines 180, 184 — presumably
 * returning the dummy interface) plus part of the output-append expression
 * (line 196) were dropped by the extraction. */
171 {
172  /* Node groups already reference all required information, so just return that. */
173  if (node.is_group_node()) {
174  bNodeTree *group = (bNodeTree *)node.bnode()->id;
175  if (group == nullptr) {
176  return FieldInferencingInterface();
177  }
178  if (!ntreeIsRegistered(group)) {
179  /* This can happen when there is a linked node group that was not found (see T92799). */
181  }
182  if (!group->runtime->field_inferencing_interface) {
183  /* This shouldn't happen because referenced node groups should always be updated first. */
185  }
186  return *group->runtime->field_inferencing_interface;
187  }
188 
189  FieldInferencingInterface inferencing_interface;
190  for (const InputSocketRef *input_socket : node.inputs()) {
191  inferencing_interface.inputs.append(get_interface_input_field_type(node, *input_socket));
192  }
193 
194  for (const OutputSocketRef *output_socket : node.outputs()) {
195  inferencing_interface.outputs.append(
197  }
198  return inferencing_interface;
199 }
200 
/* Per-socket state used while inferencing; indexed by #SocketRef::id().
 * NOTE(review): the struct head (original lines ~201-205, presumably
 * `struct SocketFieldState {`) was dropped by the extraction. */
206  /* This socket starts a new field. */
207  bool is_field_source = false;
208  /* This socket can never become a field, because the node itself does not support it. */
209  bool is_always_single = false;
210  /* This socket is currently a single value. It could become a field though. */
211  bool is_single = true;
212  /* This socket is required to be a single value. This can be because the node itself only
213  * supports this socket to be a single value, or because a node afterwards requires this to be a
214  * single value. */
215  bool requires_single = false;
216 };
217 
/* Collect the input sockets an output's field dependency refers to: none,
 * all inputs, or a declared subset. NOTE(review): the first signature line
 * (original line 218) and the `case OutputSocketFieldType::...` labels
 * (lines 224-225, 228, 233) were dropped by the extraction. */
219  const OutputFieldDependency &field_dependency, const NodeRef &node)
220 {
221  const OutputSocketFieldType type = field_dependency.field_type();
222  Vector<const InputSocketRef *> input_sockets;
223  switch (type) {
226  break;
227  }
229  /* This output depends on all inputs. */
230  input_sockets.extend(node.inputs());
231  break;
232  }
234  /* This output depends only on a few inputs. */
235  for (const int i : field_dependency.linked_input_indices()) {
236  input_sockets.append(&node.input(i));
237  }
238  break;
239  }
240  }
241  return input_sockets;
242 }
243 
/* Walk the graph leftwards from a group output socket to find which group
 * inputs it (indirectly) depends on, producing an OutputFieldDependency.
 * NOTE(review): the first signature line (original lines ~244-248) and several
 * early-return lines (253, 273, 287 — presumably returning ForDataSource /
 * ForFieldSource) plus the call expression on line 292 were dropped by the
 * extraction. */
249  const InputSocketRef &group_output_socket,
250  const Span<SocketFieldState> field_state_by_socket_id)
251 {
252  if (!is_field_socket_type(group_output_socket)) {
254  }
255 
256  /* Use a Set here instead of an array indexed by socket id, because we my only need to look at
257  * very few sockets. */
258  Set<const InputSocketRef *> handled_sockets;
259  Stack<const InputSocketRef *> sockets_to_check;
260 
261  handled_sockets.add(&group_output_socket);
262  sockets_to_check.push(&group_output_socket);
263 
264  /* Keeps track of group input indices that are (indirectly) connected to the output. */
265  Vector<int> linked_input_indices;
266 
267  while (!sockets_to_check.is_empty()) {
268  const InputSocketRef *input_socket = sockets_to_check.pop();
269 
270  if (!input_socket->is_directly_linked() &&
271  !field_state_by_socket_id[input_socket->id()].is_single) {
272  /* This socket uses a field as input by default. */
274  }
275 
276  for (const OutputSocketRef *origin_socket : input_socket->directly_linked_sockets()) {
277  const NodeRef &origin_node = origin_socket->node();
278  const SocketFieldState &origin_state = field_state_by_socket_id[origin_socket->id()];
279 
280  if (origin_state.is_field_source) {
281  if (origin_node.is_group_input_node()) {
282  /* Found a group input that the group output depends on. */
283  linked_input_indices.append_non_duplicates(origin_socket->index());
284  }
285  else {
286  /* Found a field source that is not the group input. So the output is always a field. */
288  }
289  }
290  else if (!origin_state.is_single) {
291  const FieldInferencingInterface inferencing_interface =
293  const OutputFieldDependency &field_dependency =
294  inferencing_interface.outputs[origin_socket->index()];
295 
296  /* Propagate search further to the left. */
297  for (const InputSocketRef *origin_input_socket :
298  gather_input_socket_dependencies(field_dependency, origin_node)) {
299  if (!origin_input_socket->is_available()) {
300  continue;
301  }
302  if (!field_state_by_socket_id[origin_input_socket->id()].is_single) {
303  if (handled_sockets.add(origin_input_socket)) {
304  sockets_to_check.push(origin_input_socket);
305  }
306  }
307  }
308  }
309  }
310  }
311  return OutputFieldDependency::ForPartiallyDependentField(std::move(linked_input_indices));
312 }
313 
/* Backward pass: walk nodes right-to-left and mark sockets that are required
 * to hold single values because something downstream cannot accept a field.
 * NOTE(review): the first signature line (original line 314), the toposort
 * direction argument (line 318), the start of the `inferencing_interface`
 * initialization (line 321), the gather call start (line 349) and the
 * `InputSocketFieldType::Implicit` comparison line (line 356) were dropped by
 * the extraction. */
315  const NodeTreeRef &tree, const MutableSpan<SocketFieldState> field_state_by_socket_id)
316 {
317  const NodeTreeRef::ToposortResult toposort_result = tree.toposort(
319 
320  for (const NodeRef *node : toposort_result.sorted_nodes) {
322  *node);
323 
324  for (const OutputSocketRef *output_socket : node->outputs()) {
325  SocketFieldState &state = field_state_by_socket_id[output_socket->id()];
326 
327  const OutputFieldDependency &field_dependency =
328  inferencing_interface.outputs[output_socket->index()];
329 
330  if (field_dependency.field_type() == OutputSocketFieldType::FieldSource) {
331  continue;
332  }
333  if (field_dependency.field_type() == OutputSocketFieldType::None) {
334  state.requires_single = true;
335  state.is_always_single = true;
336  continue;
337  }
338 
339  /* The output is required to be a single value when it is connected to any input that does
340  * not support fields. */
341  for (const InputSocketRef *target_socket : output_socket->directly_linked_sockets()) {
342  if (target_socket->is_available()) {
343  state.requires_single |= field_state_by_socket_id[target_socket->id()].requires_single;
344  }
345  }
346 
347  if (state.requires_single) {
348  bool any_input_is_field_implicitly = false;
350  field_dependency, *node);
351  for (const InputSocketRef *input_socket : connected_inputs) {
352  if (!input_socket->is_available()) {
353  continue;
354  }
355  if (inferencing_interface.inputs[input_socket->index()] ==
357  if (!input_socket->is_logically_linked()) {
358  any_input_is_field_implicitly = true;
359  break;
360  }
361  }
362  }
363  if (any_input_is_field_implicitly) {
364  /* This output isn't a single value actually. */
365  state.requires_single = false;
366  }
367  else {
368  /* If the output is required to be a single value, the connected inputs in the same node
369  * must not be fields as well. */
370  for (const InputSocketRef *input_socket : connected_inputs) {
371  field_state_by_socket_id[input_socket->id()].requires_single = true;
372  }
373  }
374  }
375  }
376 
377  /* Some inputs do not require fields independent of what the outputs are connected to. */
378  for (const InputSocketRef *input_socket : node->inputs()) {
379  SocketFieldState &state = field_state_by_socket_id[input_socket->id()];
380  if (inferencing_interface.inputs[input_socket->index()] == InputSocketFieldType::None) {
381  state.requires_single = true;
382  state.is_always_single = true;
383  }
384  }
385  }
386 }
387 
/* Decide, per group input, whether it supports fields, and mirror that state
 * onto every Group Input node's output sockets. NOTE(review): the first
 * signature line (original line 388) and the right-hand side of the
 * `supports_field` comparison (line 417, presumably
 * `InputSocketFieldType::None`) were dropped by the extraction. */
389  const NodeTreeRef &tree,
390  FieldInferencingInterface &new_inferencing_interface,
391  const MutableSpan<SocketFieldState> field_state_by_socket_id)
392 {
393  {
394  /* Non-field inputs never support fields. */
395  int index;
396  LISTBASE_FOREACH_INDEX (bNodeSocket *, group_input, &tree.btree()->inputs, index) {
397  if (!is_field_socket_type((eNodeSocketDatatype)group_input->type)) {
398  new_inferencing_interface.inputs[index] = InputSocketFieldType::None;
399  }
400  }
401  }
402  /* Check if group inputs are required to be single values, because they are (indirectly)
403  * connected to some socket that does not support fields. */
404  for (const NodeRef *node : tree.nodes_by_type("NodeGroupInput")) {
405  for (const OutputSocketRef *output_socket : node->outputs().drop_back(1)) {
406  SocketFieldState &state = field_state_by_socket_id[output_socket->id()];
407  if (state.requires_single) {
408  new_inferencing_interface.inputs[output_socket->index()] = InputSocketFieldType::None;
409  }
410  }
411  }
412  /* If an input does not support fields, this should be reflected in all Group Input nodes. */
413  for (const NodeRef *node : tree.nodes_by_type("NodeGroupInput")) {
414  for (const OutputSocketRef *output_socket : node->outputs().drop_back(1)) {
415  SocketFieldState &state = field_state_by_socket_id[output_socket->id()];
416  const bool supports_field = new_inferencing_interface.inputs[output_socket->index()] !=
418  if (supports_field) {
419  state.is_single = false;
420  state.is_field_source = true;
421  }
422  else {
423  state.requires_single = true;
424  }
425  }
426  SocketFieldState &dummy_socket_state = field_state_by_socket_id[node->outputs().last()->id()];
427  dummy_socket_state.requires_single = true;
428  }
429 }
430 
/* Forward pass: walk nodes left-to-right and compute the actual single/field
 * status of every socket from its origins. NOTE(review): the first signature
 * line (original line 431), the toposort direction argument (line 435), the
 * start of the `inferencing_interface` initialization (line 442), the
 * `InputSocketFieldType::Implicit` comparison line (line 455) and the
 * `case OutputSocketFieldType::...` labels (lines 476, 480, 485-486) were
 * dropped by the extraction. */
432  const NodeTreeRef &tree, const MutableSpan<SocketFieldState> field_state_by_socket_id)
433 {
434  const NodeTreeRef::ToposortResult toposort_result = tree.toposort(
436 
437  for (const NodeRef *node : toposort_result.sorted_nodes) {
438  if (node->is_group_input_node()) {
439  continue;
440  }
441 
443  *node);
444 
445  /* Update field state of input sockets, also taking into account linked origin sockets. */
446  for (const InputSocketRef *input_socket : node->inputs()) {
447  SocketFieldState &state = field_state_by_socket_id[input_socket->id()];
448  if (state.is_always_single) {
449  state.is_single = true;
450  continue;
451  }
452  state.is_single = true;
453  if (input_socket->directly_linked_sockets().is_empty()) {
454  if (inferencing_interface.inputs[input_socket->index()] ==
456  state.is_single = false;
457  }
458  }
459  else {
460  for (const OutputSocketRef *origin_socket : input_socket->directly_linked_sockets()) {
461  if (!field_state_by_socket_id[origin_socket->id()].is_single) {
462  state.is_single = false;
463  break;
464  }
465  }
466  }
467  }
468 
469  /* Update field state of output sockets, also taking into account input sockets. */
470  for (const OutputSocketRef *output_socket : node->outputs()) {
471  SocketFieldState &state = field_state_by_socket_id[output_socket->id()];
472  const OutputFieldDependency &field_dependency =
473  inferencing_interface.outputs[output_socket->index()];
474 
475  switch (field_dependency.field_type()) {
477  state.is_single = true;
478  break;
479  }
481  state.is_single = false;
482  state.is_field_source = true;
483  break;
484  }
487  for (const InputSocketRef *input_socket :
488  gather_input_socket_dependencies(field_dependency, *node)) {
489  if (!input_socket->is_available()) {
490  continue;
491  }
492  if (!field_state_by_socket_id[input_socket->id()].is_single) {
493  state.is_single = false;
494  break;
495  }
496  }
497  break;
498  }
499  }
500  }
501  }
502 }
503 
/* Fill in the output half of the group interface from the active Group Output
 * node's dependencies. NOTE(review): the first signature line (original line
 * 504) and the start of the `field_dependency` initialization (line 515,
 * presumably calling #find_group_output_dependencies) were dropped by the
 * extraction. */
505  FieldInferencingInterface &new_inferencing_interface,
506  const Span<SocketFieldState> field_state_by_socket_id)
507 {
508  for (const NodeRef *group_output_node : tree.nodes_by_type("NodeGroupOutput")) {
509  /* Ignore inactive group output nodes. */
510  if (!(group_output_node->bnode()->flag & NODE_DO_OUTPUT)) {
511  continue;
512  }
513  /* Determine dependencies of all group outputs. */
514  for (const InputSocketRef *group_output_socket : group_output_node->inputs().drop_back(1)) {
516  *group_output_socket, field_state_by_socket_id);
517  new_inferencing_interface.outputs[group_output_socket->index()] = std::move(
518  field_dependency);
519  }
520  break;
521  }
522 }
523 
/* Map each socket's inferred state onto its UI display shape (circle = data
 * only, diamond = field, diamond-dot = data that could be a field).
 * NOTE(review): the first signature line (original line 524) and the
 * `is_field_shape` constant definition (line 529) were dropped by the
 * extraction. */
525  const Span<SocketFieldState> field_state_by_socket_id)
526 {
527  const eNodeSocketDisplayShape requires_data_shape = SOCK_DISPLAY_SHAPE_CIRCLE;
528  const eNodeSocketDisplayShape data_but_can_be_field_shape = SOCK_DISPLAY_SHAPE_DIAMOND_DOT;
530 
531  auto get_shape_for_state = [&](const SocketFieldState &state) {
532  if (state.is_always_single) {
533  return requires_data_shape;
534  }
535  if (!state.is_single) {
536  return is_field_shape;
537  }
538  if (state.requires_single) {
539  return requires_data_shape;
540  }
541  return data_but_can_be_field_shape;
542  };
543 
544  for (const InputSocketRef *socket : tree.input_sockets()) {
545  bNodeSocket *bsocket = socket->bsocket();
546  const SocketFieldState &state = field_state_by_socket_id[socket->id()];
547  bsocket->display_shape = get_shape_for_state(state);
548  }
549  for (const OutputSocketRef *socket : tree.output_sockets()) {
550  bNodeSocket *bsocket = socket->bsocket();
551  const SocketFieldState &state = field_state_by_socket_id[socket->id()];
552  bsocket->display_shape = get_shape_for_state(state);
553  }
554 }
555 
/* Entry point of field inferencing: run the two propagation passes, store the
 * new group interface on the tree and report whether it changed.
 * NOTE(review): the signature (original line 556) and the resize default-value
 * arguments (lines 564, 566) were dropped by the extraction. */
557 {
558  bNodeTree &btree = *tree.btree();
559 
560  /* Create new inferencing interface for this node group. */
561  std::unique_ptr<FieldInferencingInterface> new_inferencing_interface =
562  std::make_unique<FieldInferencingInterface>();
563  new_inferencing_interface->inputs.resize(BLI_listbase_count(&btree.inputs),
565  new_inferencing_interface->outputs.resize(BLI_listbase_count(&btree.outputs),
567 
568  /* Keep track of the state of all sockets. The index into this array is #SocketRef::id(). */
569  Array<SocketFieldState> field_state_by_socket_id(tree.sockets().size());
570 
571  propagate_data_requirements_from_right_to_left(tree, field_state_by_socket_id);
572  determine_group_input_states(tree, *new_inferencing_interface, field_state_by_socket_id);
573  propagate_field_status_from_left_to_right(tree, field_state_by_socket_id);
574  determine_group_output_states(tree, *new_inferencing_interface, field_state_by_socket_id);
575  update_socket_shapes(tree, field_state_by_socket_id);
576 
577  /* Update the previous group interface. */
578  const bool group_interface_changed = !btree.runtime->field_inferencing_interface ||
579  *btree.runtime->field_inferencing_interface !=
580  *new_inferencing_interface;
581  btree.runtime->field_inferencing_interface = std::move(new_inferencing_interface);
582 
583  return group_interface_changed;
584 }
585 
586 } // namespace node_field_inferencing
587 
595 {
596  switch (to->type) {
597  case SOCK_RGBA:
598  switch (from->type) {
599  case SOCK_RGBA:
600  return 4;
601  case SOCK_FLOAT:
602  return 3;
603  case SOCK_INT:
604  return 2;
605  case SOCK_BOOLEAN:
606  return 1;
607  }
608  return -1;
609  case SOCK_VECTOR:
610  switch (from->type) {
611  case SOCK_VECTOR:
612  return 4;
613  case SOCK_FLOAT:
614  return 3;
615  case SOCK_INT:
616  return 2;
617  case SOCK_BOOLEAN:
618  return 1;
619  }
620  return -1;
621  case SOCK_FLOAT:
622  switch (from->type) {
623  case SOCK_FLOAT:
624  return 5;
625  case SOCK_INT:
626  return 4;
627  case SOCK_BOOLEAN:
628  return 3;
629  case SOCK_RGBA:
630  return 2;
631  case SOCK_VECTOR:
632  return 1;
633  }
634  return -1;
635  case SOCK_INT:
636  switch (from->type) {
637  case SOCK_INT:
638  return 5;
639  case SOCK_FLOAT:
640  return 4;
641  case SOCK_BOOLEAN:
642  return 3;
643  case SOCK_RGBA:
644  return 2;
645  case SOCK_VECTOR:
646  return 1;
647  }
648  return -1;
649  case SOCK_BOOLEAN:
650  switch (from->type) {
651  case SOCK_BOOLEAN:
652  return 5;
653  case SOCK_INT:
654  return 4;
655  case SOCK_FLOAT:
656  return 3;
657  case SOCK_RGBA:
658  return 2;
659  case SOCK_VECTOR:
660  return 1;
661  }
662  return -1;
663  }
664 
665  /* The rest of the socket types only allow an internal link if both the input and output socket
666  * have the same type. If the sockets are custom, we check the idname instead. */
667  if (to->type == from->type && (to->type != SOCK_CUSTOM || STREQ(to->idname, from->idname))) {
668  return 1;
669  }
670 
671  return -1;
672 }
673 
/* Convenience aliases for "who references this tree": a (owning tree, group
 * node) pair, an (object, modifier) pair, and a (node, socket) pair. */
674 using TreeNodePair = std::pair<bNodeTree *, bNode *>;
675 using ObjectModifierPair = std::pair<Object *, ModifierData *>;
676 using NodeSocketPair = std::pair<bNode *, bNodeSocket *>;
677 
/* Lazily-built caches of relations between node trees and their users: which
 * trees exist, which ID owns each embedded tree, which group nodes reference
 * a tree and which nodes-modifiers reference it. NOTE(review): the class head
 * (original lines ~678-682) and several method signature lines (695
 * ensure_all_trees, 712 FOREACH_NODETREE_END, 715 ensure_owner_ids, 720
 * ensure_group_node_users, 733 node loop, 746 ensure_modifier_users, 759 nmd
 * cast, 768 get_modifier_users, 774 get_group_node_users, 780 get_owner_id)
 * were dropped by the extraction. */
683  private:
684  Main *bmain_;
685  std::optional<Vector<bNodeTree *>> all_trees_;
686  std::optional<Map<bNodeTree *, ID *>> owner_ids_;
687  std::optional<MultiValueMap<bNodeTree *, TreeNodePair>> group_node_users_;
688  std::optional<MultiValueMap<bNodeTree *, ObjectModifierPair>> modifiers_users_;
689 
690  public:
691  NodeTreeRelations(Main *bmain) : bmain_(bmain)
692  {
693  }
694 
/* ensure_all_trees: collect every tree in the Main database plus its owner ID. */
696  {
697  if (all_trees_.has_value()) {
698  return;
699  }
700  all_trees_.emplace();
701  owner_ids_.emplace();
702  if (bmain_ == nullptr) {
703  return;
704  }
705 
706  FOREACH_NODETREE_BEGIN (bmain_, ntree, id) {
707  all_trees_->append(ntree);
708  if (&ntree->id != id) {
709  owner_ids_->add_new(ntree, id);
710  }
711  }
713  }
714 
/* ensure_owner_ids: owner map is filled as a side effect of ensure_all_trees. */
716  {
717  this->ensure_all_trees();
718  }
719 
/* ensure_group_node_users: map each group tree to the (tree, node) pairs using it. */
721  {
722  if (group_node_users_.has_value()) {
723  return;
724  }
725  group_node_users_.emplace();
726  if (bmain_ == nullptr) {
727  return;
728  }
729 
730  this->ensure_all_trees();
731 
732  for (bNodeTree *ntree : *all_trees_) {
734  if (node->id == nullptr) {
735  continue;
736  }
737  ID *id = node->id;
738  if (GS(id->name) == ID_NT) {
739  bNodeTree *group = (bNodeTree *)id;
740  group_node_users_->add(group, {ntree, node});
741  }
742  }
743  }
744  }
745 
/* ensure_modifier_users: map each geometry-nodes group to its (object, modifier) users. */
747  {
748  if (modifiers_users_.has_value()) {
749  return;
750  }
751  modifiers_users_.emplace();
752  if (bmain_ == nullptr) {
753  return;
754  }
755 
756  LISTBASE_FOREACH (Object *, object, &bmain_->objects) {
757  LISTBASE_FOREACH (ModifierData *, md, &object->modifiers) {
758  if (md->type == eModifierType_Nodes) {
760  if (nmd->node_group != nullptr) {
761  modifiers_users_->add(nmd->node_group, {object, md});
762  }
763  }
764  }
765  }
766  }
767 
/* get_modifier_users: lookup, valid only after ensure_modifier_users(). */
769  {
770  BLI_assert(modifiers_users_.has_value());
771  return modifiers_users_->lookup(ntree);
772  }
773 
/* get_group_node_users: lookup, valid only after ensure_group_node_users(). */
775  {
776  BLI_assert(group_node_users_.has_value());
777  return group_node_users_->lookup(ntree);
778  }
779 
/* get_owner_id: owning ID of an embedded tree, or the tree itself. */
781  {
782  BLI_assert(owner_ids_.has_value());
783  return owner_ids_->lookup_default(ntree, &ntree->id);
784  }
785 };
786 
/* Result of updating a single tree; drives tagging of dependent trees and
 * user callbacks. NOTE(review): the struct head (original line ~787,
 * presumably `struct TreeUpdateResult {`) was dropped by the extraction. */
788  bool interface_changed = false;
789  bool output_changed = false;
790 };
791 
/* Members and constructor of the main updater class. NOTE(review): the class
 * head (original line ~792) and the constructor signature (line 800, taking
 * `bmain` and `params`) were dropped by the extraction. */
793  private:
794  Main *bmain_;
795  NodeTreeUpdateExtraParams *params_;
796  Map<bNodeTree *, TreeUpdateResult> update_result_by_tree_;
797  NodeTreeRelations relations_;
798 
799  public:
801  : bmain_(bmain), params_(params), relations_(bmain)
802  {
803  }
804 
/* Update every tree in the Main database that carries a changed-flag.
 * NOTE(review): the FOREACH_NODETREE_END line (original line 813) was dropped
 * by the extraction. */
805  void update()
806  {
807  Vector<bNodeTree *> changed_ntrees;
808  FOREACH_NODETREE_BEGIN (bmain_, ntree, id) {
809  if (ntree->runtime->changed_flag != NTREE_CHANGED_NOTHING) {
810  changed_ntrees.append(ntree);
811  }
812  }
814  this->update_rooted(changed_ntrees);
815  }
816 
818  {
819  if (root_ntrees.is_empty()) {
820  return;
821  }
822 
823  bool is_single_tree_update = false;
824 
825  if (root_ntrees.size() == 1) {
826  bNodeTree *ntree = root_ntrees[0];
827  if (ntree->runtime->changed_flag == NTREE_CHANGED_NOTHING) {
828  return;
829  }
830  const TreeUpdateResult result = this->update_tree(*ntree);
831  update_result_by_tree_.add_new(ntree, result);
832  if (!result.interface_changed && !result.output_changed) {
833  is_single_tree_update = true;
834  }
835  }
836 
837  if (!is_single_tree_update) {
838  Vector<bNodeTree *> ntrees_in_order = this->get_tree_update_order(root_ntrees);
839  for (bNodeTree *ntree : ntrees_in_order) {
840  if (ntree->runtime->changed_flag == NTREE_CHANGED_NOTHING) {
841  continue;
842  }
843  if (!update_result_by_tree_.contains(ntree)) {
844  const TreeUpdateResult result = this->update_tree(*ntree);
845  update_result_by_tree_.add_new(ntree, result);
846  }
847  const TreeUpdateResult result = update_result_by_tree_.lookup(ntree);
848  Span<TreeNodePair> dependent_trees = relations_.get_group_node_users(ntree);
849  if (result.output_changed) {
850  for (const TreeNodePair &pair : dependent_trees) {
851  add_node_tag(pair.first, pair.second, NTREE_CHANGED_NODE_OUTPUT);
852  }
853  }
854  if (result.interface_changed) {
855  for (const TreeNodePair &pair : dependent_trees) {
856  add_node_tag(pair.first, pair.second, NTREE_CHANGED_NODE_PROPERTY);
857  }
858  }
859  }
860  }
861 
862  for (const auto item : update_result_by_tree_.items()) {
863  bNodeTree *ntree = item.key;
864  const TreeUpdateResult &result = item.value;
865 
866  this->reset_changed_flags(*ntree);
867 
868  if (result.interface_changed) {
869  if (ntree->type == NTREE_GEOMETRY) {
870  relations_.ensure_modifier_users();
871  for (const ObjectModifierPair &pair : relations_.get_modifier_users(ntree)) {
872  Object *object = pair.first;
873  ModifierData *md = pair.second;
874 
875  if (md->type == eModifierType_Nodes) {
877  }
878  }
879  }
880  }
881 
882  if (params_) {
883  relations_.ensure_owner_ids();
884  ID *id = relations_.get_owner_id(ntree);
885  if (params_->tree_changed_fn) {
886  params_->tree_changed_fn(id, ntree, params_->user_data);
887  }
888  if (params_->tree_output_changed_fn && result.output_changed) {
889  params_->tree_output_changed_fn(id, ntree, params_->user_data);
890  }
891  }
892  }
893  }
894 
895  private:
/* Standard three-color DFS marking used by the tree-level topological sort
 * below: None = unvisited, Temporary = on the current DFS path (a revisit
 * means a cycle), Permanent = finished. */
896  enum class ToposortMark {
897  None,
898  Temporary,
899  Permanent,
900  };
901 
902  using ToposortMarkMap = Map<bNodeTree *, ToposortMark>;
903 
908  Vector<bNodeTree *> get_tree_update_order(Span<bNodeTree *> root_ntrees)
909  {
910  relations_.ensure_group_node_users();
911 
912  Set<bNodeTree *> trees_to_update = get_trees_to_update(root_ntrees);
913 
914  Vector<bNodeTree *> sorted_ntrees;
915 
916  ToposortMarkMap marks;
917  for (bNodeTree *ntree : trees_to_update) {
918  marks.add_new(ntree, ToposortMark::None);
919  }
920  for (bNodeTree *ntree : trees_to_update) {
921  if (marks.lookup(ntree) == ToposortMark::None) {
922  const bool cycle_detected = !this->get_tree_update_order__visit_recursive(
923  ntree, marks, sorted_ntrees);
924  /* This should be prevented by higher level operators. */
925  BLI_assert(!cycle_detected);
926  UNUSED_VARS_NDEBUG(cycle_detected);
927  }
928  }
929 
930  std::reverse(sorted_ntrees.begin(), sorted_ntrees.end());
931 
932  return sorted_ntrees;
933  }
934 
  /* Depth-first visit for the tree-level toposort. Returns false when a
   * dependency cycle is found. NOTE(review): the Temporary mark must be set
   * before recursing and only promoted to Permanent afterwards — that is what
   * makes cycle detection work. Also note the post-order append: users are
   * appended after their dependencies are visited, and the caller reverses the
   * final list. */
935  bool get_tree_update_order__visit_recursive(bNodeTree *ntree,
936  ToposortMarkMap &marks,
937  Vector<bNodeTree *> &sorted_ntrees)
938  {
939  ToposortMark &mark = marks.lookup(ntree);
940  if (mark == ToposortMark::Permanent) {
941  return true;
942  }
943  if (mark == ToposortMark::Temporary) {
944  /* There is a dependency cycle. */
945  return false;
946  }
947 
948  mark = ToposortMark::Temporary;
949 
950  for (const TreeNodePair &pair : relations_.get_group_node_users(ntree)) {
951  this->get_tree_update_order__visit_recursive(pair.first, marks, sorted_ntrees);
952  }
953  sorted_ntrees.append(ntree);
954 
955  mark = ToposortMark::Permanent;
956  return true;
957  }
958 
959  Set<bNodeTree *> get_trees_to_update(Span<bNodeTree *> root_ntrees)
960  {
961  relations_.ensure_group_node_users();
962 
963  Set<bNodeTree *> reachable_trees;
964  VectorSet<bNodeTree *> trees_to_check = root_ntrees;
965 
966  while (!trees_to_check.is_empty()) {
967  bNodeTree *ntree = trees_to_check.pop();
968  if (reachable_trees.add(ntree)) {
969  for (const TreeNodePair &pair : relations_.get_group_node_users(ntree)) {
970  trees_to_check.add(pair.first);
971  }
972  }
973  }
974 
975  return reachable_trees;
976  }
977 
  /* Run the full update pipeline on one tree and report whether its interface
   * or output changed. NOTE(review): a few lines were dropped by the
   * extraction — the field-inferencing call inside the NTREE_GEOMETRY branch
   * (original line 997), the texture-tree update call (line 1009) and the body
   * of the interface-changed branch (line 1018). */
978  TreeUpdateResult update_tree(bNodeTree &ntree)
979  {
980  TreeUpdateResult result;
981 
982  /* Use a #NodeTreeRef to speedup certain queries. It is rebuilt whenever the node tree topology
983  * changes, which typically happens zero or one times during the entire update of the node
984  * tree. */
985  std::unique_ptr<NodeTreeRef> tree_ref;
986  this->ensure_tree_ref(ntree, tree_ref);
987 
988  this->update_socket_link_and_use(*tree_ref);
989  this->update_individual_nodes(ntree, tree_ref);
990  this->update_internal_links(ntree, tree_ref);
991  this->update_generic_callback(ntree, tree_ref);
992  this->remove_unused_previews_when_necessary(ntree);
993 
/* The steps above may have invalidated the tree ref; rebuild before reuse. */
994  this->ensure_tree_ref(ntree, tree_ref);
995  this->propagate_runtime_flags(*tree_ref);
996  if (ntree.type == NTREE_GEOMETRY) {
998  result.interface_changed = true;
999  }
1000  }
1001 
1002  result.output_changed = this->check_if_output_changed(*tree_ref);
1003 
1004  this->update_socket_link_and_use(*tree_ref);
1005  this->update_node_levels(ntree);
1006  this->update_link_validation(ntree);
1007 
1008  if (ntree.type == NTREE_TEXTURE) {
1010  }
1011 
1012  if (ntree.runtime->changed_flag & NTREE_CHANGED_INTERFACE ||
1013  ntree.runtime->changed_flag & NTREE_CHANGED_ANY) {
1014  result.interface_changed = true;
1015  }
1016 
1017  if (result.interface_changed) {
1019  }
1020 
1021  return result;
1022  }
1023 
1024  void ensure_tree_ref(bNodeTree &ntree, std::unique_ptr<NodeTreeRef> &tree_ref)
1025  {
1026  if (!tree_ref) {
1027  tree_ref = std::make_unique<NodeTreeRef>(&ntree);
1028  }
1029  }
1030 
1031  void update_socket_link_and_use(const NodeTreeRef &tree)
1032  {
1033  for (const InputSocketRef *socket : tree.input_sockets()) {
1034  bNodeSocket *bsocket = socket->bsocket();
1035  if (socket->directly_linked_links().is_empty()) {
1036  bsocket->link = nullptr;
1037  }
1038  else {
1039  bsocket->link = socket->directly_linked_links()[0]->blink();
1040  }
1041  }
1042 
1043  this->update_socket_used_tags(tree);
1044  }
1045 
1046  void update_socket_used_tags(const NodeTreeRef &tree)
1047  {
1048  for (const SocketRef *socket : tree.sockets()) {
1049  bNodeSocket *bsocket = socket->bsocket();
1050  bsocket->flag &= ~SOCK_IN_USE;
1051  for (const LinkRef *link : socket->directly_linked_links()) {
1052  if (!link->is_muted()) {
1053  bsocket->flag |= SOCK_IN_USE;
1054  break;
1055  }
1056  }
1057  }
1058  }
1059 
  /* Run the per-node update callbacks on every node that needs it. The tree's
   * changed-flag is temporarily cleared so that any flag set by a node update
   * can be detected (it signals that the #NodeTreeRef became outdated); the
   * old flags are OR-ed back in afterwards so nothing is lost. */
1060  void update_individual_nodes(bNodeTree &ntree, std::unique_ptr<NodeTreeRef> &tree_ref)
1061  {
1062  /* Iterate over nodes instead of #NodeTreeRef, because the #tree_ref might be outdated after
1063  * some update functions. */
1064  LISTBASE_FOREACH (bNode *, bnode, &ntree.nodes) {
1065  this->ensure_tree_ref(ntree, tree_ref);
1066  const NodeRef &node = *tree_ref->find_node(*bnode);
1067  if (this->should_update_individual_node(node)) {
1068  const uint32_t old_changed_flag = ntree.runtime->changed_flag;
1069  ntree.runtime->changed_flag = NTREE_CHANGED_NOTHING;
1070 
1071  /* This may set #ntree.runtime->changed_flag which is detected below. */
1072  this->update_individual_node(node);
1073 
1074  if (ntree.runtime->changed_flag != NTREE_CHANGED_NOTHING) {
1075  /* The tree ref is outdated and needs to be rebuilt. Generally, only very few update
1076  * functions change the node. Typically zero or one nodes change after an update. */
1077  tree_ref.reset();
1078  }
1079  ntree.runtime->changed_flag |= old_changed_flag;
1080  }
1081  }
1082  }
1083 
  /* Decide whether the per-node update callbacks must run for this node, based
   * on the changed-flags of the tree and the node. NOTE(review): the branches
   * intentionally fall through — e.g. a group-input node whose extension
   * socket is unlinked still reaches the NTREE_CHANGED_INTERFACE check below,
   * so the order of these checks matters. */
1084  bool should_update_individual_node(const NodeRef &node)
1085  {
1086  bNodeTree &ntree = *node.btree();
1087  bNode &bnode = *node.bnode();
1088  if (ntree.runtime->changed_flag & NTREE_CHANGED_ANY) {
1089  return true;
1090  }
1091  if (bnode.runtime->changed_flag & NTREE_CHANGED_NODE_PROPERTY) {
1092  return true;
1093  }
1094  if (ntree.runtime->changed_flag & NTREE_CHANGED_LINK) {
1095  /* Node groups currently always rebuilt their sockets when they are updated.
1096  * So avoid calling the update method when no new link was added to it. */
1097  if (node.is_group_input_node()) {
/* The last socket is the extension ("virtual") socket; a link there means a new
 * socket has to be created. */
1098  if (node.outputs().last()->is_directly_linked()) {
1099  return true;
1100  }
1101  }
1102  else if (node.is_group_output_node()) {
1103  if (node.inputs().last()->is_directly_linked()) {
1104  return true;
1105  }
1106  }
1107  else {
1108  /* Currently we have no way to tell if a node needs to be updated when a link changed. */
1109  return true;
1110  }
1111  }
1112  if (ntree.runtime->changed_flag & NTREE_CHANGED_INTERFACE) {
1113  if (node.is_group_input_node() || node.is_group_output_node()) {
1114  return true;
1115  }
1116  }
1117  return false;
1118  }
1119 
1120  void update_individual_node(const NodeRef &node)
1121  {
1122  bNodeTree &ntree = *node.btree();
1123  bNode &bnode = *node.bnode();
1124  bNodeType &ntype = *bnode.typeinfo;
1125  if (ntype.group_update_func) {
1126  ntype.group_update_func(&ntree, &bnode);
1127  }
1128  if (ntype.updatefunc) {
1129  ntype.updatefunc(&ntree, &bnode);
1130  }
1131  }
1132 
  /* Recompute each updated node's internal (muted pass-through) links: build
   * the expected set from the linked, internally-linkable output sockets, and
   * rebuild the stored links only when they differ (either in count or in
   * membership). Rebuilding invalidates the tree ref, so it is reset at the
   * end when anything changed. */
1133  void update_internal_links(bNodeTree &ntree, std::unique_ptr<NodeTreeRef> &tree_ref)
1134  {
1135  bool any_internal_links_updated = false;
1136  this->ensure_tree_ref(ntree, tree_ref);
1137  for (const NodeRef *node : tree_ref->nodes()) {
1138  if (!this->should_update_individual_node(*node)) {
1139  continue;
1140  }
1141  /* Find all expected internal links. */
1142  Vector<std::pair<bNodeSocket *, bNodeSocket *>> expected_internal_links;
1143  for (const OutputSocketRef *output_socket : node->outputs()) {
1144  if (!output_socket->is_available()) {
1145  continue;
1146  }
1147  if (!output_socket->is_directly_linked()) {
1148  continue;
1149  }
1150  if (output_socket->bsocket()->flag & SOCK_NO_INTERNAL_LINK) {
1151  continue;
1152  }
1153  const InputSocketRef *input_socket = this->find_internally_linked_input(output_socket);
1154  if (input_socket != nullptr) {
1155  expected_internal_links.append({input_socket->bsocket(), output_socket->bsocket()});
1156  }
1157  }
1158  /* rebuilt internal links if they have changed. */
1159  if (node->internal_links().size() != expected_internal_links.size()) {
1160  this->update_internal_links_in_node(ntree, *node->bnode(), expected_internal_links);
1161  any_internal_links_updated = true;
1162  }
1163  else {
/* Same count: compare element-wise; any missing expected pair triggers a rebuild. */
1164  for (auto &item : expected_internal_links) {
1165  const bNodeSocket *from_socket = item.first;
1166  const bNodeSocket *to_socket = item.second;
1167  bool found = false;
1168  for (const InternalLinkRef *internal_link : node->internal_links()) {
1169  if (from_socket == internal_link->from().bsocket() &&
1170  to_socket == internal_link->to().bsocket()) {
1171  found = true;
1172  }
1173  }
1174  if (!found) {
1175  this->update_internal_links_in_node(ntree, *node->bnode(), expected_internal_links);
1176  any_internal_links_updated = true;
1177  break;
1178  }
1179  }
1180  }
1181  }
1182 
1183  if (any_internal_links_updated) {
1184  tree_ref.reset();
1185  }
1186  }
1187 
1188  const InputSocketRef *find_internally_linked_input(const OutputSocketRef *output_socket)
1189  {
1190  const InputSocketRef *selected_socket = nullptr;
1191  int selected_priority = -1;
1192  bool selected_is_linked = false;
1193  for (const InputSocketRef *input_socket : output_socket->node().inputs()) {
1194  if (!input_socket->is_available()) {
1195  continue;
1196  }
1197  if (input_socket->bsocket()->flag & SOCK_NO_INTERNAL_LINK) {
1198  continue;
1199  }
1200  const int priority = get_internal_link_type_priority(input_socket->bsocket()->typeinfo,
1201  output_socket->bsocket()->typeinfo);
1202  if (priority < 0) {
1203  continue;
1204  }
1205  const bool is_linked = input_socket->is_directly_linked();
1206  const bool is_preferred = priority > selected_priority || (is_linked && !selected_is_linked);
1207  if (!is_preferred) {
1208  continue;
1209  }
1210  selected_socket = input_socket;
1211  selected_priority = priority;
1212  selected_is_linked = is_linked;
1213  }
1214  return selected_socket;
1215  }
1216 
  /**
   * Replace the internal links stored on \a node with the given (input, output) bsocket pairs.
   * NOTE(review): the extracted source appears to have dropped one line at the end of this
   * function (presumably a call tagging the node's internal links as changed, which would
   * also be the only use of the \a ntree parameter) -- confirm against the upstream file.
   */
  void update_internal_links_in_node(bNodeTree &ntree,
                                     bNode &node,
                                     Span<std::pair<bNodeSocket *, bNodeSocket *>> links)
  {
    /* Throw away the old internal links; they are rebuilt from scratch below. */
    BLI_freelistN(&node.internal_links);
    for (const auto &item : links) {
      bNodeSocket *from_socket = item.first;
      bNodeSocket *to_socket = item.second;
      bNodeLink *link = MEM_cnew<bNodeLink>(__func__);
      /* Internal links connect an input to an output of the same node. */
      link->fromnode = &node;
      link->fromsock = from_socket;
      link->tonode = &node;
      link->tosock = to_socket;
      link->flag |= NODE_LINK_VALID;
      BLI_addtail(&node.internal_links, link);
    }
  }
1235 
  /**
   * Run the tree-type specific update callback, if any, and detect whether it modified the
   * tree (in which case the cached tree ref must be rebuilt).
   * NOTE(review): the extracted source is missing the line that actually invokes
   * `ntree.typeinfo->update(&ntree)` between the flag reset and the flag check below --
   * confirm against the upstream file.
   */
  void update_generic_callback(bNodeTree &ntree, std::unique_ptr<NodeTreeRef> &tree_ref)
  {
    if (ntree.typeinfo->update == nullptr) {
      return;
    }

    /* Reset the changed_flag to allow detecting when the update callback changed the node tree. */
    const uint32_t old_changed_flag = ntree.runtime->changed_flag;
    ntree.runtime->changed_flag = NTREE_CHANGED_NOTHING;

    if (ntree.runtime->changed_flag != NTREE_CHANGED_NOTHING) {
      /* The tree ref is outdated and needs to be rebuilt. */
      tree_ref.reset();
    }
    /* Re-apply the flags that were set before the callback ran. */
    ntree.runtime->changed_flag |= old_changed_flag;
  }
1254 
  /**
   * Free cached node previews unless only "harmless" kinds of changes happened.
   * NOTE(review): the extraction dropped the definition of `allowed_flags` (a mask of
   * changed-flags that do not warrant preview removal) and the trailing call that actually
   * removes the unused previews -- confirm against the upstream file.
   */
  void remove_unused_previews_when_necessary(bNodeTree &ntree)
  {
    /* Don't trigger preview removal when only those flags are set. */
    if ((ntree.runtime->changed_flag & allowed_flags) == ntree.runtime->changed_flag) {
      return;
    }
  }
1266 
  /**
   * Recompute `runtime_flag` for shader trees: whether the tree (directly or via a used
   * node group) contains an animated image, and whether it contains a material output.
   * NOTE(review): the extraction dropped the two lines that set
   * NTREE_RUNTIME_FLAG_HAS_IMAGE_ANIMATION and NTREE_RUNTIME_FLAG_HAS_MATERIAL_OUTPUT
   * inside the last two loops (their `if` bodies contain only `break`) -- confirm against
   * the upstream file.
   */
  void propagate_runtime_flags(const NodeTreeRef &tree_ref)
  {
    bNodeTree &ntree = *tree_ref.btree();
    ntree.runtime->runtime_flag = 0;
    /* Only shader trees track these flags. */
    if (ntree.type != NTREE_SHADER) {
      return;
    }

    /* Check if a used node group has an animated image. */
    for (const NodeRef *group_node : tree_ref.nodes_by_type("ShaderNodeGroup")) {
      const bNodeTree *group = reinterpret_cast<bNodeTree *>(group_node->bnode()->id);
      if (group != nullptr) {
        /* Inherit whatever flags were already computed for the group. */
        ntree.runtime->runtime_flag |= group->runtime->runtime_flag;
      }
    }
    /* Check if the tree itself has an animated image. */
    for (const StringRefNull idname : {"ShaderNodeTexImage", "ShaderNodeTexEnvironment"}) {
      for (const NodeRef *node : tree_ref.nodes_by_type(idname)) {
        Image *image = reinterpret_cast<Image *>(node->bnode()->id);
        if (image != nullptr && BKE_image_is_animated(image)) {
          break;
        }
      }
    }
    /* Check if the tree has a material output. */
    for (const StringRefNull idname : {"ShaderNodeOutputMaterial",
                                       "ShaderNodeOutputLight",
                                       "ShaderNodeOutputWorld",
                                       "ShaderNodeOutputAOV"}) {
      const Span<const NodeRef *> nodes = tree_ref.nodes_by_type(idname);
      if (!nodes.is_empty()) {
        break;
      }
    }
  }
1304 
1305  void update_node_levels(bNodeTree &ntree)
1306  {
1308  }
1309 
1310  void update_link_validation(bNodeTree &ntree)
1311  {
1312  LISTBASE_FOREACH (bNodeLink *, link, &ntree.links) {
1313  link->flag |= NODE_LINK_VALID;
1314  if (link->fromnode && link->tonode && link->fromnode->level <= link->tonode->level) {
1315  link->flag &= ~NODE_LINK_VALID;
1316  }
1317  else if (ntree.typeinfo->validate_link) {
1318  const eNodeSocketDatatype from_type = static_cast<eNodeSocketDatatype>(
1319  link->fromsock->type);
1320  const eNodeSocketDatatype to_type = static_cast<eNodeSocketDatatype>(link->tosock->type);
1321  if (!ntree.typeinfo->validate_link(from_type, to_type)) {
1322  link->flag &= ~NODE_LINK_VALID;
1323  }
1324  }
1325  }
1326  }
1327 
  /**
   * Decide whether the observable output of the tree may have changed during this update.
   * Combines a topology hash of everything connected to the output sockets with the
   * changed-flags stored on the tree, its nodes, and sockets. Returning true is always
   * safe; returning false suppresses downstream re-evaluation.
   */
  bool check_if_output_changed(const NodeTreeRef &tree)
  {
    bNodeTree &btree = *tree.btree();

    /* Compute a hash that represents the node topology connected to the output. This always has to
     * be updated even if it is not used to detect changes right now. Otherwise
     * #btree.runtime.output_topology_hash will go out of date. */
    const Vector<const SocketRef *> tree_output_sockets = this->find_output_sockets(tree);
    const uint32_t old_topology_hash = btree.runtime->output_topology_hash;
    const uint32_t new_topology_hash = this->get_combined_socket_topology_hash(
        tree, tree_output_sockets);
    btree.runtime->output_topology_hash = new_topology_hash;

    if (const AnimData *adt = BKE_animdata_from_id(&btree.id)) {
      /* Drivers may copy values in the node tree around arbitrarily and may cause the output to
       * change even if it wouldn't without drivers. Only some special drivers like `frame/5` can
       * be used without causing updates all the time currently. In the future we could try to
       * handle other drivers better as well.
       * Note that this optimization only works in practice when the depsgraph didn't also get a
       * copy-on-write tag for the node tree (which happens when changing node properties). It does
       * work in a few situations like adding reroutes and duplicating nodes though. */
      LISTBASE_FOREACH (const FCurve *, fcurve, &adt->drivers) {
        const ChannelDriver *driver = fcurve->driver;
        const StringRef expression = driver->expression;
        if (expression.startswith("frame")) {
          const StringRef remaining_expression = expression.drop_known_prefix("frame");
          /* Accept only simple arithmetic on the frame number ("frame" followed by digits
           * and + - * / . and spaces). */
          if (remaining_expression.find_first_not_of(" */+-0123456789.") == StringRef::not_found) {
            continue;
          }
        }
        /* Unrecognized driver, assume that the output always changes. */
        return true;
      }
    }

    if (btree.runtime->changed_flag & NTREE_CHANGED_ANY) {
      return true;
    }

    if (old_topology_hash != new_topology_hash) {
      return true;
    }

    /* The topology hash can only be used when only topology-changing operations have been done. */
    if (btree.runtime->changed_flag ==
        (btree.runtime->changed_flag & (NTREE_CHANGED_LINK | NTREE_CHANGED_REMOVED_NODE))) {
      if (old_topology_hash == new_topology_hash) {
        return false;
      }
    }

    /* Fall back to the per-socket changed-flag propagation check. */
    if (!this->check_if_socket_outputs_changed_based_on_flags(tree, tree_output_sockets)) {
      return false;
    }

    return true;
  }
1385 
1386  Vector<const SocketRef *> find_output_sockets(const NodeTreeRef &tree)
1387  {
1388  Vector<const SocketRef *> sockets;
1389  for (const NodeRef *node : tree.nodes()) {
1390  if (!this->is_output_node(*node)) {
1391  continue;
1392  }
1393  for (const InputSocketRef *socket : node->inputs()) {
1394  if (socket->idname() != "NodeSocketVirtual") {
1395  sockets.append(socket);
1396  }
1397  }
1398  }
1399  return sockets;
1400  }
1401 
1402  bool is_output_node(const NodeRef &node) const
1403  {
1404  const bNode &bnode = *node.bnode();
1405  if (bnode.typeinfo->nclass == NODE_CLASS_OUTPUT) {
1406  return true;
1407  }
1408  if (bnode.type == NODE_GROUP_OUTPUT) {
1409  return true;
1410  }
1411  /* Assume node groups without output sockets are outputs. */
1412  if (bnode.type == NODE_GROUP) {
1413  const bNodeTree *node_group = reinterpret_cast<const bNodeTree *>(bnode.id);
1414  if (node_group != nullptr &&
1415  node_group->runtime->runtime_flag & NTREE_RUNTIME_FLAG_HAS_MATERIAL_OUTPUT) {
1416  return true;
1417  }
1418  }
1419  return false;
1420  }
1421 
1426  uint32_t get_combined_socket_topology_hash(const NodeTreeRef &tree,
1427  Span<const SocketRef *> sockets)
1428  {
1429  if (tree.has_link_cycles()) {
1430  /* Return dummy value when the link has any cycles. The algorithm below could be improved to
1431  * handle cycles more gracefully. */
1432  return 0;
1433  }
1434  Array<uint32_t> hashes = this->get_socket_topology_hashes(tree, sockets);
1435  uint32_t combined_hash = 0;
1436  for (uint32_t hash : hashes) {
1437  combined_hash = noise::hash(combined_hash, hash);
1438  }
1439  return combined_hash;
1440  }
1441 
  /**
   * Compute a topology hash for each of the given sockets: a value that changes whenever
   * the network of sockets feeding into it changes. Works by iteratively resolving socket
   * hashes with an explicit stack until all dependencies of the requested sockets are
   * available. Requires an acyclic tree (asserted below).
   */
  Array<uint32_t> get_socket_topology_hashes(const NodeTreeRef &tree,
                                             Span<const SocketRef *> sockets)
  {
    BLI_assert(!tree.has_link_cycles());
    /* Memoized hash per socket id; empty optional means "not computed yet". */
    Array<std::optional<uint32_t>> hash_by_socket_id(tree.sockets().size());
    Stack<const SocketRef *> sockets_to_check = sockets;

    while (!sockets_to_check.is_empty()) {
      const SocketRef &in_out_socket = *sockets_to_check.peek();
      const NodeRef &node = in_out_socket.node();

      if (hash_by_socket_id[in_out_socket.id()].has_value()) {
        sockets_to_check.pop();
        /* Socket is handled already. */
        continue;
      }

      if (in_out_socket.is_input()) {
        /* For input sockets, first compute the hashes of all linked sockets. */
        const InputSocketRef &socket = in_out_socket.as_input();
        bool all_origins_computed = true;
        for (const OutputSocketRef *origin_socket : socket.logically_linked_sockets()) {
          if (!hash_by_socket_id[origin_socket->id()].has_value()) {
            sockets_to_check.push(origin_socket);
            all_origins_computed = false;
          }
        }
        if (!all_origins_computed) {
          /* Leave this socket on the stack; it will be revisited once origins are done. */
          continue;
        }
        /* When the hashes for the linked sockets are ready, combine them into a hash for the input
         * socket. */
        const uint64_t socket_ptr = (uintptr_t)socket.bsocket();
        uint32_t socket_hash = noise::hash(socket_ptr, socket_ptr >> 32);
        for (const OutputSocketRef *origin_socket : socket.logically_linked_sockets()) {
          const uint32_t origin_socket_hash = *hash_by_socket_id[origin_socket->id()];
          socket_hash = noise::hash(socket_hash, origin_socket_hash);
        }
        hash_by_socket_id[socket.id()] = socket_hash;
        sockets_to_check.pop();
      }
      else {
        /* For output sockets, first compute the hashes of all available input sockets. */
        const OutputSocketRef &socket = in_out_socket.as_output();
        bool all_available_inputs_computed = true;
        for (const InputSocketRef *input_socket : node.inputs()) {
          if (input_socket->is_available()) {
            if (!hash_by_socket_id[input_socket->id()].has_value()) {
              sockets_to_check.push(input_socket);
              all_available_inputs_computed = false;
            }
          }
        }
        if (!all_available_inputs_computed) {
          continue;
        }
        /* When all input socket hashes have been computed, combine them into a hash for the output
         * socket. */
        const uint64_t socket_ptr = (uintptr_t)socket.bsocket();
        uint32_t socket_hash = noise::hash(socket_ptr, socket_ptr >> 32);
        for (const InputSocketRef *input_socket : node.inputs()) {
          if (input_socket->is_available()) {
            const uint32_t input_socket_hash = *hash_by_socket_id[input_socket->id()];
            socket_hash = noise::hash(socket_hash, input_socket_hash);
          }
        }
        /* The Image Texture node has a special case. The behavior of the color output changes
         * depending on whether the Alpha output is linked. */
        if (node.bnode()->type == SH_NODE_TEX_IMAGE && socket.index() == 0) {
          BLI_assert(socket.name() == "Color");
          const OutputSocketRef &alpha_socket = node.output(1);
          BLI_assert(alpha_socket.name() == "Alpha");
          if (alpha_socket.is_directly_linked()) {
            /* Perturb the hash so linking/unlinking Alpha changes the Color hash too. */
            socket_hash = noise::hash(socket_hash);
          }
        }
        hash_by_socket_id[socket.id()] = socket_hash;
        sockets_to_check.pop();
      }
    }

    /* Create output array. */
    Array<uint32_t> hashes(sockets.size());
    for (const int i : sockets.index_range()) {
      hashes[i] = *hash_by_socket_id[sockets[i]->id()];
    }
    return hashes;
  }
1530 
  /**
   * Walk backwards from the given output sockets and return true when any socket or node
   * on the way carries a changed-flag, meaning the output may have changed.
   * NOTE(review): the extraction dropped the right-hand side of the
   * `only_unused_internal_link_changed` expression below (the line after
   * `bnode.runtime->changed_flag ==` is missing, leaving the fragment incomplete) --
   * confirm the missing constant against the upstream file.
   */
  bool check_if_socket_outputs_changed_based_on_flags(const NodeTreeRef &tree,
                                                      Span<const SocketRef *> sockets)
  {
    /* Avoid visiting the same socket twice when multiple links point to the same socket. */
    Array<bool> pushed_by_socket_id(tree.sockets().size(), false);
    Stack<const SocketRef *> sockets_to_check = sockets;

    for (const SocketRef *socket : sockets) {
      pushed_by_socket_id[socket->id()] = true;
    }

    while (!sockets_to_check.is_empty()) {
      const SocketRef &in_out_socket = *sockets_to_check.pop();
      const NodeRef &node = in_out_socket.node();
      const bNode &bnode = *node.bnode();
      const bNodeSocket &bsocket = *in_out_socket.bsocket();
      if (bsocket.runtime->changed_flag != NTREE_CHANGED_NOTHING) {
        return true;
      }
      if (bnode.runtime->changed_flag != NTREE_CHANGED_NOTHING) {
        /* Internal-link changes on a non-muted node do not affect its outputs. */
        const bool only_unused_internal_link_changed = (bnode.flag & NODE_MUTED) == 0 &&
                                                       bnode.runtime->changed_flag ==
        if (!only_unused_internal_link_changed) {
          return true;
        }
      }
      if (in_out_socket.is_input()) {
        /* Continue the walk through all sockets that feed this input. */
        const InputSocketRef &socket = in_out_socket.as_input();
        for (const OutputSocketRef *origin_socket : socket.logically_linked_sockets()) {
          bool &pushed = pushed_by_socket_id[origin_socket->id()];
          if (!pushed) {
            sockets_to_check.push(origin_socket);
            pushed = true;
          }
        }
      }
      else {
        /* An output depends on all available inputs of its node. */
        const OutputSocketRef &socket = in_out_socket.as_output();
        for (const InputSocketRef *input_socket : node.inputs()) {
          if (input_socket->is_available()) {
            bool &pushed = pushed_by_socket_id[input_socket->id()];
            if (!pushed) {
              sockets_to_check.push(input_socket);
              pushed = true;
            }
          }
        }
        /* The Normal node has a special case, because the value stored in the first output socket
         * is used as input in the node. */
        if (bnode.type == SH_NODE_NORMAL && socket.index() == 1) {
          BLI_assert(socket.name() == "Dot");
          const OutputSocketRef &normal_output = node.output(0);
          BLI_assert(normal_output.name() == "Normal");
          bool &pushed = pushed_by_socket_id[normal_output.id()];
          if (!pushed) {
            sockets_to_check.push(&normal_output);
            pushed = true;
          }
        }
      }
    }
    return false;
  }
1599 
1600  void reset_changed_flags(bNodeTree &ntree)
1601  {
1602  ntree.runtime->changed_flag = NTREE_CHANGED_NOTHING;
1604  node->runtime->changed_flag = NTREE_CHANGED_NOTHING;
1605  node->update = 0;
1606  LISTBASE_FOREACH (bNodeSocket *, socket, &node->inputs) {
1607  socket->runtime->changed_flag = NTREE_CHANGED_NOTHING;
1608  }
1609  LISTBASE_FOREACH (bNodeSocket *, socket, &node->outputs) {
1610  socket->runtime->changed_flag = NTREE_CHANGED_NOTHING;
1611  }
1612  }
1613  }
1614 };
1615 
1616 } // namespace blender::bke
1617 
1619 {
1621 }
1622 
1624 {
1626 }
1627 
1629 {
1631 }
1632 
1634 {
1636 }
1637 
1639 {
1641 }
1642 
1644 {
1646 }
1647 
1649 {
1651 }
1652 
1654 {
1656 }
1657 
1659 {
1661 }
1662 
1664 {
1666 }
1667 
1669 {
1671 }
1672 
1674 {
1676 }
1677 
1679 {
1681 }
1682 
1684 {
1686 }
1687 
1689 {
1691 }
1692 
1694 {
1696 }
1697 
1699 {
1701 }
1702 
1704 {
1706 }
1707 
1709 {
1710  FOREACH_NODETREE_BEGIN (bmain, ntree, ntree_id) {
1712  if (node->id == id) {
1713  node->update |= NODE_UPDATE_ID;
1715  }
1716  }
1717  }
1719 }
1720 
1722 {
1723  /* Would have to search for the node that uses the image user for a more detailed tag. */
1725 }
1726 
1734 static bool is_updating = false;
1735 
1737 {
1738  if (is_updating) {
1739  return;
1740  }
1741 
1742  is_updating = true;
1743  blender::bke::NodeTreeMainUpdater updater{bmain, params};
1744  updater.update();
1745  is_updating = false;
1746 }
1747 
1749 {
1750  if (ntree == nullptr) {
1751  BKE_ntree_update_main(bmain, params);
1752  return;
1753  }
1754 
1755  if (is_updating) {
1756  return;
1757  }
1758 
1759  is_updating = true;
1760  blender::bke::NodeTreeMainUpdater updater{bmain, params};
1761  updater.update_rooted({ntree});
1762  is_updating = false;
1763 }
struct AnimData * BKE_animdata_from_id(const struct ID *id)
bool BKE_image_is_animated(struct Image *image)
#define NODE_CLASS_OUTPUT
Definition: BKE_node.h:346
void BKE_node_preview_remove_unused(struct bNodeTree *ntree)
Definition: node.cc:2831
void ntreeUpdateNodeLevels(struct bNodeTree *ntree)
Definition: node.cc:4122
#define NODE_CUSTOM
Definition: BKE_node.h:981
void ntreeInterfaceTypeUpdate(struct bNodeTree *ntree)
Definition: node.cc:3537
#define FOREACH_NODETREE_END
Definition: BKE_node.h:1058
bool ntreeIsRegistered(struct bNodeTree *ntree)
Definition: node.cc:1317
#define FOREACH_NODETREE_BEGIN(bmain, _nodetree, _id)
Definition: BKE_node.h:1048
#define BLI_assert_unreachable()
Definition: BLI_assert.h:93
#define BLI_assert(a)
Definition: BLI_assert.h:46
#define LISTBASE_FOREACH(type, var, list)
Definition: BLI_listbase.h:336
#define LISTBASE_FOREACH_INDEX(type, var, list, index_var)
Definition: BLI_listbase.h:344
void void BLI_freelistN(struct ListBase *listbase) ATTR_NONNULL(1)
Definition: listbase.c:466
void BLI_addtail(struct ListBase *listbase, void *vlink) ATTR_NONNULL(1)
Definition: listbase.c:80
int BLI_listbase_count(const struct ListBase *listbase) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
#define UNUSED_VARS_NDEBUG(...)
#define UNUSED(x)
#define ELEM(...)
#define STREQ(a, b)
@ ID_NT
Definition: DNA_ID_enums.h:68
@ eModifierType_Nodes
#define NODE_LINK_VALID
#define NTREE_TEXTURE
#define NODE_DO_OUTPUT
#define NTREE_GEOMETRY
#define NODE_MUTED
#define NODE_UPDATE_ID
@ NTREE_RUNTIME_FLAG_HAS_MATERIAL_OUTPUT
@ NTREE_RUNTIME_FLAG_HAS_IMAGE_ANIMATION
@ SOCK_IN_USE
@ SOCK_NO_INTERNAL_LINK
eNodeSocketDisplayShape
@ SOCK_DISPLAY_SHAPE_CIRCLE
@ SOCK_DISPLAY_SHAPE_DIAMOND
@ SOCK_DISPLAY_SHAPE_DIAMOND_DOT
eNodeSocketDatatype
@ SOCK_INT
@ SOCK_VECTOR
@ SOCK_BOOLEAN
@ SOCK_FLOAT
@ SOCK_CUSTOM
@ SOCK_RGBA
#define NTREE_SHADER
_GL_VOID GLfloat value _GL_VOID_RET _GL_VOID const GLuint GLboolean *residences _GL_BOOL_RET _GL_VOID GLsizei GLfloat GLfloat GLfloat GLfloat const GLubyte *bitmap _GL_VOID_RET _GL_VOID GLenum type
void MOD_nodes_update_interface(Object *object, NodesModifierData *nmd)
Definition: MOD_nodes.cc:643
NODE_GROUP_OUTPUT
NODE_GROUP
Group Output data from inside of a node group A color picker Mix two input colors RGB to Convert a color s luminance to a grayscale value SH_NODE_NORMAL
in reality light always falls off quadratically Particle Retrieve the data of the particle that spawned the object for example to give variation to multiple instances of an object Point Retrieve information about points in a point cloud Retrieve the edges of an object as it appears to Cycles topology will always appear triangulated Convert a blackbody temperature to an RGB value Normal Generate a perturbed normal from an RGB normal map image Typically used for faking highly detailed surfaces Generate an OSL shader from a file or text data block SH_NODE_TEX_IMAGE
void ntreeTexCheckCyclics(struct bNodeTree *ntree)
const Value & lookup(const Key &key) const
Definition: BLI_map.hh:485
void add_new(const Key &key, const Value &value)
Definition: BLI_map.hh:220
ItemIterator items() const
Definition: BLI_map.hh:859
bool contains(const Key &key) const
Definition: BLI_map.hh:308
bool add(const Key &key)
Definition: BLI_set.hh:253
constexpr int64_t size() const
Definition: BLI_span.hh:240
constexpr bool is_empty() const
Definition: BLI_span.hh:248
bool is_empty() const
Definition: BLI_stack.hh:308
void push(const T &value)
Definition: BLI_stack.hh:213
static constexpr int64_t not_found
void append(const T &value)
Definition: BLI_vector.hh:433
void extend(Span< T > array)
Definition: BLI_vector.hh:530
void append_non_duplicates(const T &value)
Definition: BLI_vector.hh:472
void append_n_times(const T &value, const int64_t n)
Definition: BLI_vector.hh:504
void update_rooted(Span< bNodeTree * > root_ntrees)
NodeTreeMainUpdater(Main *bmain, NodeTreeUpdateExtraParams *params)
Span< const OutputSocketRef * > directly_linked_sockets() const
Span< const OutputSocketRef * > logically_linked_sockets() const
Span< SocketDeclarationPtr > outputs() const
Span< SocketDeclarationPtr > inputs() const
Span< const InputSocketRef * > inputs() const
Span< const NodeRef * > nodes_by_type(StringRefNull idname) const
static OutputFieldDependency ForPartiallyDependentField(Vector< int > indices)
static OutputFieldDependency ForDataSource()
static OutputFieldDependency ForDependentField()
static OutputFieldDependency ForFieldSource()
OutputSocketFieldType field_type() const
Span< const InputSocketRef * > directly_linked_sockets() const
const OutputFieldDependency & output_field_dependency() const
InputSocketFieldType input_field_type() const
const NodeRef & node() const
const OutputSocketRef & as_output() const
bNodeSocketType * typeinfo() const
bNodeSocket * bsocket() const
const InputSocketRef & as_input() const
StringRefNull name() const
OperationNode * node
StackEntry * from
void * tree
depth_tx normal_tx diffuse_light_tx specular_light_tx volume_light_tx environment_tx ambient_occlusion_tx aov_value_tx in_weight_img image(1, GPU_R32F, Qualifier::WRITE, ImageType::FLOAT_2D_ARRAY, "out_weight_img") .image(3
bNodeTree * ntree
uiWidgetBaseParameters params[MAX_WIDGET_BASE_BATCH]
#define GS(x)
Definition: iris.c:225
const int state
static void determine_group_input_states(const NodeTreeRef &tree, FieldInferencingInterface &new_inferencing_interface, const MutableSpan< SocketFieldState > field_state_by_socket_id)
static Vector< const InputSocketRef * > gather_input_socket_dependencies(const OutputFieldDependency &field_dependency, const NodeRef &node)
static OutputFieldDependency get_interface_output_field_dependency(const NodeRef &node, const OutputSocketRef &socket)
static void update_socket_shapes(const NodeTreeRef &tree, const Span< SocketFieldState > field_state_by_socket_id)
static bool update_field_inferencing(const NodeTreeRef &tree)
static void determine_group_output_states(const NodeTreeRef &tree, FieldInferencingInterface &new_inferencing_interface, const Span< SocketFieldState > field_state_by_socket_id)
static bool is_field_socket_type(const SocketRef &socket)
static FieldInferencingInterface get_node_field_inferencing_interface(const NodeRef &node)
static void propagate_data_requirements_from_right_to_left(const NodeTreeRef &tree, const MutableSpan< SocketFieldState > field_state_by_socket_id)
static void propagate_field_status_from_left_to_right(const NodeTreeRef &tree, const MutableSpan< SocketFieldState > field_state_by_socket_id)
static InputSocketFieldType get_interface_input_field_type(const NodeRef &node, const InputSocketRef &socket)
static FieldInferencingInterface get_dummy_field_inferencing_interface(const NodeRef &node)
static OutputFieldDependency find_group_output_dependencies(const InputSocketRef &group_output_socket, const Span< SocketFieldState > field_state_by_socket_id)
std::pair< Object *, ModifierData * > ObjectModifierPair
std::pair< bNode *, bNodeSocket * > NodeSocketPair
static int get_internal_link_type_priority(const bNodeSocketType *from, const bNodeSocketType *to)
std::pair< bNodeTree *, bNode * > TreeNodePair
static Type to_type(const eGPUType type)
uint32_t hash(uint32_t kx)
Definition: noise.cc:67
void BKE_ntree_update_tag_node_new(bNodeTree *ntree, bNode *node)
static void add_node_tag(bNodeTree *ntree, bNode *node, const eNodeTreeChangedFlag flag)
void BKE_ntree_update_tag_active_output_changed(bNodeTree *ntree)
void BKE_ntree_update_tag_socket_type(bNodeTree *ntree, bNodeSocket *socket)
void BKE_ntree_update_tag_node_internal_link(bNodeTree *ntree, bNode *node)
void BKE_ntree_update_tag_link_mute(bNodeTree *ntree, bNodeLink *UNUSED(link))
void BKE_ntree_update_tag_node_mute(bNodeTree *ntree, bNode *node)
void BKE_ntree_update_tag_all(bNodeTree *ntree)
void BKE_ntree_update_tag_interface(bNodeTree *ntree)
void BKE_ntree_update_tag_link_changed(bNodeTree *ntree)
void BKE_ntree_update_tag_socket_new(bNodeTree *ntree, bNodeSocket *socket)
void BKE_ntree_update_tag_socket_removed(bNodeTree *ntree)
void BKE_ntree_update_tag_socket_property(bNodeTree *ntree, bNodeSocket *socket)
static void add_socket_tag(bNodeTree *ntree, bNodeSocket *socket, const eNodeTreeChangedFlag flag)
static void add_tree_tag(bNodeTree *ntree, const eNodeTreeChangedFlag flag)
void BKE_ntree_update_main(Main *bmain, NodeTreeUpdateExtraParams *params)
void BKE_ntree_update_tag_missing_runtime_data(bNodeTree *ntree)
void BKE_ntree_update_main_tree(Main *bmain, bNodeTree *ntree, NodeTreeUpdateExtraParams *params)
static bool is_updating
void BKE_ntree_update_tag_link_removed(bNodeTree *ntree)
void BKE_ntree_update_tag_socket_availability(bNodeTree *ntree, bNodeSocket *socket)
void BKE_ntree_update_tag_image_user_changed(bNodeTree *ntree, ImageUser *UNUSED(iuser))
void BKE_ntree_update_tag_node_removed(bNodeTree *ntree)
void BKE_ntree_update_tag_id_changed(Main *bmain, ID *id)
eNodeTreeChangedFlag
@ NTREE_CHANGED_ANY
@ NTREE_CHANGED_REMOVED_SOCKET
@ NTREE_CHANGED_NOTHING
@ NTREE_CHANGED_INTERFACE
@ NTREE_CHANGED_NODE_OUTPUT
@ NTREE_CHANGED_NODE_PROPERTY
@ NTREE_CHANGED_LINK
@ NTREE_CHANGED_SOCKET_PROPERTY
@ NTREE_CHANGED_REMOVED_NODE
@ NTREE_CHANGED_ALL
@ NTREE_CHANGED_INTERNAL_LINK
void BKE_ntree_update_tag_node_property(bNodeTree *ntree, bNode *node)
void BKE_ntree_update_tag_link_added(bNodeTree *ntree, bNodeLink *UNUSED(link))
#define hash
Definition: noise.c:153
_W64 unsigned int uintptr_t
Definition: stdint.h:119
unsigned int uint32_t
Definition: stdint.h:80
unsigned __int64 uint64_t
Definition: stdint.h:90
char expression[256]
Definition: DNA_ID.h:368
char name[66]
Definition: DNA_ID.h:378
Definition: BKE_main.h:121
ListBase objects
Definition: BKE_main.h:170
void(* tree_output_changed_fn)(struct ID *, struct bNodeTree *, void *user_data)
void(* tree_changed_fn)(struct ID *, struct bNodeTree *, void *user_data)
struct bNodeTree * node_group
Defines a socket type.
Definition: BKE_node.h:143
char idname[64]
Definition: BKE_node.h:145
bNodeSocketRuntimeHandle * runtime
struct bNodeLink * link
struct bNodeSocketType * typeinfo
bool(* validate_link)(eNodeSocketDatatype from, eNodeSocketDatatype to)
Definition: BKE_node.h:402
void(* update)(struct bNodeTree *ntree)
Definition: BKE_node.h:400
bNodeTreeRuntimeHandle * runtime
struct bNodeTreeType * typeinfo
ListBase nodes
ListBase inputs
ListBase links
ListBase outputs
Defines a node type.
Definition: BKE_node.h:226
void(* group_update_func)(struct bNodeTree *ntree, struct bNode *node)
Definition: BKE_node.h:267
void(* updatefunc)(struct bNodeTree *ntree, struct bNode *node)
Definition: BKE_node.h:265
short nclass
Definition: BKE_node.h:236
struct bNodeType * typeinfo
struct ID * id
short type
bNodeRuntimeHandle * runtime
Span< ObjectModifierPair > get_modifier_users(bNodeTree *ntree)
ID * get_owner_id(bNodeTree *ntree)
Span< TreeNodePair > get_group_node_users(bNodeTree *ntree)
Vector< OutputFieldDependency > outputs