#ifdef WITH_CXX_GUARDEDALLOC
      active_buffers_(shared_buffers),
      num_operations_finished_(0)
  if (!context.is_fast_calculation()) {
  determine_areas_to_render_and_reads();
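/* For every output operation matching the current render priority, the output render area is
 * computed and then propagated through the operation's input tree (see the helpers below). */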
void FullFrameExecutionModel::determine_areas_to_render_and_reads()
      if (op->is_output_operation(is_rendering) && op->get_render_priority() == priority) {
        get_output_render_area(op, area);
        determine_areas_to_render(op, area);
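/* Builds MemoryBuffer views over the already rendered input buffers of `op`. Each view's rect is
 * expressed relative to the operation output origin (`output_x`/`output_y`); the offsets computed
 * below are presumably applied to the rect (e.g. via BLI_rcti_translate) before wrapping the
 * buffer. */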
Vector<MemoryBuffer *> FullFrameExecutionModel::get_input_buffers(NodeOperation *op,
  const int num_inputs = op->get_number_of_input_sockets();
  Vector<MemoryBuffer *> inputs_buffers(num_inputs);
  for (int i = 0; i < num_inputs; i++) {
    NodeOperation *input = op->get_input_operation(i);
    const int offset_x = (input->get_canvas().xmin - op->get_canvas().xmin) + output_x;
    const int offset_y = (input->get_canvas().ymin - op->get_canvas().ymin) + output_y;
    rcti rect = buf->get_rect();
    inputs_buffers[i] = new MemoryBuffer(
        buf->get_buffer(), buf->get_num_channels(), rect, buf->is_a_single_elem());
  return inputs_buffers;
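/* Allocates the buffer an operation renders into: a rect of the operation's width/height anchored
 * at the given output origin. Constant operations get a single-element buffer. */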
MemoryBuffer *FullFrameExecutionModel::create_operation_buffer(NodeOperation *op,
      &rect, output_x, output_x + op->get_width(), output_y, output_y + op->get_height());
  const DataType data_type = op->get_output_socket(0)->get_data_type();
  const bool is_a_single_elem = op->get_flags().is_constant_operation;
  return new MemoryBuffer(data_type, rect, is_a_single_elem);
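/* Renders a single operation into a freshly created buffer. The output is rendered at origin
 * (0, 0) regardless of the operation canvas, so input buffers and render areas are offset to
 * match; the input buffer wrappers are deleted once the operation has rendered. */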
void FullFrameExecutionModel::render_operation(NodeOperation *op)
  constexpr int output_x = 0;
  constexpr int output_y = 0;
  const bool has_outputs = op->get_number_of_output_sockets() > 0;
  MemoryBuffer *op_buf = has_outputs ? create_operation_buffer(op, output_x, output_y) : nullptr;
  if (op->get_width() > 0 && op->get_height() > 0) {
    Vector<MemoryBuffer *> input_bufs = get_input_buffers(op, output_x, output_y);
    const int op_offset_x = output_x - op->get_canvas().xmin;
    const int op_offset_y = output_y - op->get_canvas().ymin;
    op->render(op_buf, areas, input_bufs);
    for (MemoryBuffer *buf : input_bufs) {
  operation_finished(op);
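/* Main render loop: for each priority level, output operations of that priority are rendered
 * after their dependencies. Zero-sized active viewers only get their display buffer cleared,
 * since there is nothing to render for them. */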
void FullFrameExecutionModel::render_operations()
      const bool has_size = op->get_width() > 0 && op->get_height() > 0;
      const bool is_priority_output = op->is_output_operation(is_rendering) &&
                                      op->get_render_priority() == priority;
      if (is_priority_output && has_size) {
        render_output_dependencies(op);
        render_operation(op);
      else if (is_priority_output && !has_size && op->is_active_viewer_output()) {
        static_cast<ViewerOperation *>(op)->clear_display_buffer();
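/* Dependency gathering (get_operation_dependencies): a breadth-first walk from the given output
 * towards its inputs, collected level by level and then reversed so the result is ordered from
 * inputs to outputs. An operation shared by several consumers may appear more than once. */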
  next_outputs.append(operation);
  while (next_outputs.size() > 0) {
    next_outputs.clear();
      for (int i = 0; i < output->get_number_of_input_sockets(); i++) {
    dependencies.extend(next_outputs);
  std::reverse(dependencies.begin(), dependencies.end());
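/* Renders every dependency of the given output operation before the output itself; operations
 * that already have a rendered buffer are presumably skipped. */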
void FullFrameExecutionModel::render_output_dependencies(NodeOperation *output_op)
  for (NodeOperation *op : dependencies) {
      render_operation(op);
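/* Determines which area of every operation in the output tree has to be rendered. An explicit
 * stack replaces recursion: for each (operation, area) pair, every input is pushed with its area
 * of interest clamped to that input's canvas. */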
void FullFrameExecutionModel::determine_areas_to_render(NodeOperation *output_op,
                                                        const rcti &output_area)
  Vector<std::pair<NodeOperation *, const rcti>> stack;
  stack.append({output_op, output_area});
  while (stack.size() > 0) {
    std::pair<NodeOperation *, rcti> pair = stack.pop_last();
    NodeOperation *operation = pair.first;
    const rcti &render_area = pair.second;
    const int num_inputs = operation->get_number_of_input_sockets();
    for (int i = 0; i < num_inputs; i++) {
      NodeOperation *input_op = operation->get_input_operation(i);
      operation->get_area_of_interest(input_op, render_area, input_area);
      BLI_rcti_isect(&input_area, &input_op->get_canvas(), &input_area);
      stack.append({input_op, input_area});
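/* Counts how many reads each operation in the output tree will receive (i.e. how many dependent
 * operations consume its buffer), so buffers can be released once the last read has finished. */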
void FullFrameExecutionModel::determine_reads(NodeOperation *output_op)
  Vector<NodeOperation *> stack;
  stack.append(output_op);
  while (stack.size() > 0) {
    NodeOperation *operation = stack.pop_last();
    const int num_inputs = operation->get_number_of_input_sockets();
    for (int i = 0; i < num_inputs; i++) {
      NodeOperation *input_op = operation->get_input_operation(i);
        stack.append(input_op);
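/* Calculates the render area of an output operation: by default the full operation canvas,
 * reduced to the viewer or render border when one applies (borders are stored normalized and
 * scaled by the operation width/height). */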
void FullFrameExecutionModel::get_output_render_area(NodeOperation *output_op, rcti &r_area)
  rcti canvas = output_op->get_canvas();
  const bool has_viewer_border = border_.use_viewer_border &&
                                 (output_op->get_flags().is_viewer_operation ||
                                  output_op->get_flags().is_preview_operation);
  const bool has_render_border = border_.use_render_border;
  if (has_viewer_border || has_render_border) {
    const rctf *norm_border = has_viewer_border ? border_.viewer_border : border_.render_border;
    const int w = output_op->get_width();
    const int h = output_op->get_height();
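    /* Presumably the normalized border is then scaled by `w`/`h` and offset by the canvas
     * minimum, e.g. `r_area.xmin = canvas.xmin + norm_border->xmin * w;`, and likewise for the
     * other edges. */

/* Reports finished reads on the operation's inputs so their buffers can be freed or reused, then
 * advances the finished-operations counter and the progress bar. */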
void FullFrameExecutionModel::operation_finished(NodeOperation *operation)
  const int num_inputs = operation->get_number_of_input_sockets();
  for (int i = 0; i < num_inputs; i++) {
    active_buffers_.read_finished(operation->get_input_operation(i));
  num_operations_finished_++;
  update_progress_bar();
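/* Reports compositing progress to the node tree: the fraction of finished operations plus a
 * "Compositing | Operation i-n" status message. */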
void FullFrameExecutionModel::update_progress_bar()
    const float progress = num_operations_finished_ / static_cast<float>(operations_.size());
    tree->progress(tree->prh, progress);
                 TIP_("Compositing | Operation %i-%li"),
                 num_operations_finished_ + 1,
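    /* The remaining arguments presumably fill a fixed-size buffer via BLI_snprintf(), and the
     * result is passed to tree->stats_draw() to display the status text. */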