Blender V3.3
gpu_py_buffer.c
/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <Python.h>

#include "BLI_utildefines.h"

#include "MEM_guardedalloc.h"

#include "GPU_texture.h"

#include "../generic/py_capi_utils.h"

#include "gpu_py.h"

#include "gpu_py_buffer.h"

#define PYGPU_BUFFER_PROTOCOL
#define MAX_DIMENSIONS 64

/* -------------------------------------------------------------------- */
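/* Return the total number of elements described by a shape
 * (the product of all its dimensions). */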
static Py_ssize_t pygpu_buffer_dimensions_tot_elem(const Py_ssize_t *shape, Py_ssize_t shape_len)
{
  Py_ssize_t tot = shape[0];
  for (int i = 1; i < shape_len; i++) {
    tot *= shape[i];
  }

  return tot;
}

static bool pygpu_buffer_dimensions_tot_len_compare(const Py_ssize_t *shape_a,
                                                    const Py_ssize_t shape_a_len,
                                                    const Py_ssize_t *shape_b,
                                                    const Py_ssize_t shape_b_len)
{
  if (pygpu_buffer_dimensions_tot_elem(shape_a, shape_a_len) !=
      pygpu_buffer_dimensions_tot_elem(shape_b, shape_b_len)) {
    PyErr_Format(PyExc_BufferError, "array size does not match");
    return false;
  }

  return true;
}

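/* Convert a Python int or a sequence of ints into a shape array.
 * On failure an exception is set and false is returned. */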
static bool pygpu_buffer_pyobj_as_shape(PyObject *shape_obj,
                                        Py_ssize_t r_shape[MAX_DIMENSIONS],
                                        Py_ssize_t *r_shape_len)
{
  Py_ssize_t shape_len = 0;
  if (PyLong_Check(shape_obj)) {
    shape_len = 1;
    if (((r_shape[0] = PyLong_AsLong(shape_obj)) < 1)) {
      PyErr_SetString(PyExc_AttributeError, "dimension must be greater than or equal to 1");
      return false;
    }
  }
  else if (PySequence_Check(shape_obj)) {
    shape_len = PySequence_Size(shape_obj);
    if (shape_len > MAX_DIMENSIONS) {
      PyErr_SetString(PyExc_AttributeError,
                      "too many dimensions, max is " STRINGIFY(MAX_DIMENSIONS));
      return false;
    }
    if (shape_len < 1) {
      PyErr_SetString(PyExc_AttributeError, "sequence must have at least one dimension");
      return false;
    }

    for (int i = 0; i < shape_len; i++) {
      PyObject *ob = PySequence_GetItem(shape_obj, i);
      if (!PyLong_Check(ob)) {
        PyErr_Format(PyExc_TypeError,
                     "invalid dimension %i, expected an int, not a %.200s",
                     i,
                     Py_TYPE(ob)->tp_name);
        Py_DECREF(ob);
        return false;
      }

      r_shape[i] = PyLong_AsLong(ob);
      Py_DECREF(ob);

      if (r_shape[i] < 1) {
        PyErr_SetString(PyExc_AttributeError, "dimension must be greater than or equal to 1");
        return false;
      }
    }
  }
  else {
    PyErr_Format(PyExc_TypeError,
                 "invalid second argument, expected a sequence or an int, not a %.200s",
                 Py_TYPE(shape_obj)->tp_name);
    return false;
  }

  *r_shape_len = shape_len;
  return true;
}

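/* Map a GPU data format to the Python struct-style format character used for
 * Py_BuildValue() and the buffer protocol. */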
static const char *pygpu_buffer_formatstr(eGPUDataFormat data_format)
{
  switch (data_format) {
    case GPU_DATA_FLOAT:
      return "f";
    case GPU_DATA_INT:
      return "i";
    case GPU_DATA_UINT:
      return "I";
    case GPU_DATA_UBYTE:
      return "B";
    case GPU_DATA_UINT_24_8:
    case GPU_DATA_10_11_11_REV:
      return "I";
    default:
      break;
  }
  return NULL;
}

/* -------------------------------------------------------------------- */
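/* Create a BPyGPUBuffer that wraps existing memory. When `parent` is given, the new
 * buffer holds a reference to it and shares its data (used for sub-buffer views). */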
static BPyGPUBuffer *pygpu_buffer_make_from_data(PyObject *parent,
                                                 const eGPUDataFormat format,
                                                 const int shape_len,
                                                 const Py_ssize_t *shape,
                                                 void *buf)
{
  BPyGPUBuffer *buffer = (BPyGPUBuffer *)_PyObject_GC_New(&BPyGPU_BufferType);

  buffer->parent = NULL;
  buffer->format = format;
  buffer->shape_len = shape_len;
  buffer->shape = MEM_mallocN(shape_len * sizeof(*buffer->shape), "BPyGPUBuffer shape");
  memcpy(buffer->shape, shape, shape_len * sizeof(*buffer->shape));
  buffer->buf.as_void = buf;

  if (parent) {
    Py_INCREF(parent);
    buffer->parent = parent;
    PyObject_GC_Track(buffer);
  }
  return buffer;
}

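/* Item access: a 1-D buffer returns a Python scalar, a multi-dimensional buffer
 * returns a sub-buffer view that shares the same memory. */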
static PyObject *pygpu_buffer__sq_item(BPyGPUBuffer *self, int i)
{
  if (i >= self->shape[0] || i < 0) {
    PyErr_SetString(PyExc_IndexError, "array index out of range");
    return NULL;
  }

  const char *formatstr = pygpu_buffer_formatstr(self->format);

  if (self->shape_len == 1) {
    switch (self->format) {
      case GPU_DATA_FLOAT:
        return Py_BuildValue(formatstr, self->buf.as_float[i]);
      case GPU_DATA_INT:
        return Py_BuildValue(formatstr, self->buf.as_int[i]);
      case GPU_DATA_UBYTE:
        return Py_BuildValue(formatstr, self->buf.as_byte[i]);
      case GPU_DATA_UINT:
      case GPU_DATA_UINT_24_8:
      case GPU_DATA_10_11_11_REV:
        return Py_BuildValue(formatstr, self->buf.as_uint[i]);
    }
  }
  else {
    int offset = i * GPU_texture_dataformat_size(self->format);
    for (int j = 1; j < self->shape_len; j++) {
      offset *= self->shape[j];
    }

    return (PyObject *)pygpu_buffer_make_from_data((PyObject *)self,
                                                   self->format,
                                                   self->shape_len - 1,
                                                   self->shape + 1,
                                                   self->buf.as_byte + offset);
  }

  return NULL;
}

static PyObject *pygpu_buffer_to_list(BPyGPUBuffer *self)
{
  int i, len = self->shape[0];
  PyObject *list = PyList_New(len);

  for (i = 0; i < len; i++) {
    PyList_SET_ITEM(list, i, pygpu_buffer__sq_item(self, i));
  }

  return list;
}

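/* Recursively convert the buffer into nested Python lists, one list per dimension. */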
static PyObject *pygpu_buffer_to_list_recursive(BPyGPUBuffer *self)
{
  PyObject *list;

  if (self->shape_len > 1) {
    int i, len = self->shape[0];
    list = PyList_New(len);

    for (i = 0; i < len; i++) {
      /* "BPyGPUBuffer *sub_tmp" is a temporary object created just to be read for nested lists.
       * That is why it is decremented/freed soon after.
       * TODO: For efficiency, avoid creating #BPyGPUBuffer when creating nested lists. */
      BPyGPUBuffer *sub_tmp = (BPyGPUBuffer *)pygpu_buffer__sq_item(self, i);
      PyList_SET_ITEM(list, i, pygpu_buffer_to_list_recursive(sub_tmp));
      Py_DECREF(sub_tmp);
    }
  }
  else {
    list = pygpu_buffer_to_list(self);
  }

  return list;
}

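/* `dimensions` property: the getter reports the current shape, the setter allows
 * reshaping as long as the total number of elements stays the same. */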
static PyObject *pygpu_buffer_dimensions_get(BPyGPUBuffer *self, void *UNUSED(arg))
{
  PyObject *list = PyList_New(self->shape_len);
  int i;

  for (i = 0; i < self->shape_len; i++) {
    PyList_SET_ITEM(list, i, PyLong_FromLong(self->shape[i]));
  }

  return list;
}

static int pygpu_buffer_dimensions_set(BPyGPUBuffer *self, PyObject *value, void *UNUSED(type))
{
  Py_ssize_t shape[MAX_DIMENSIONS];
  Py_ssize_t shape_len = 0;

  if (!pygpu_buffer_pyobj_as_shape(value, shape, &shape_len)) {
    return -1;
  }

  if (!pygpu_buffer_dimensions_tot_len_compare(shape, shape_len, self->shape, self->shape_len)) {
    return -1;
  }

  size_t size = shape_len * sizeof(*self->shape);
  if (shape_len != self->shape_len) {
    MEM_freeN(self->shape);
    self->shape = MEM_mallocN(size, __func__);
  }

  self->shape_len = shape_len;
  memcpy(self->shape, shape, size);
  return 0;
}

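/* Cyclic garbage collection support and destruction: only the `parent` reference can
 * form a cycle; the data itself is only freed when this buffer owns it. */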
static int pygpu_buffer__tp_traverse(BPyGPUBuffer *self, visitproc visit, void *arg)
{
  Py_VISIT(self->parent);
  return 0;
}

static int pygpu_buffer__tp_clear(BPyGPUBuffer *self)
{
  if (self->parent) {
    Py_CLEAR(self->parent);
    self->buf.as_void = NULL;
  }
  return 0;
}

static void pygpu_buffer__tp_dealloc(BPyGPUBuffer *self)
{
  if (self->parent) {
    PyObject_GC_UnTrack(self);
    Py_CLEAR(self->parent);
  }
  else if (self->buf.as_void) {
    MEM_freeN(self->buf.as_void);
  }

  MEM_freeN(self->shape);

  PyObject_GC_Del(self);
}

static PyObject *pygpu_buffer__tp_repr(BPyGPUBuffer *self)
{
  PyObject *repr;

  PyObject *list = pygpu_buffer_to_list_recursive(self);
  const char *typestr = PyC_StringEnum_FindIDFromValue(bpygpu_dataformat_items, self->format);

  repr = PyUnicode_FromFormat("Buffer(%s, %R)", typestr, list);
  Py_DECREF(list);

  return repr;
}

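/* Assign a Python sequence to the range [begin, end) of the first dimension,
 * writing each item through pygpu_buffer__sq_ass_item(). */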
static int pygpu_buffer__sq_ass_item(BPyGPUBuffer *self, int i, PyObject *v);

static int pygpu_buffer_ass_slice(BPyGPUBuffer *self,
                                  Py_ssize_t begin,
                                  Py_ssize_t end,
                                  PyObject *seq)
{
  PyObject *item;
  int count, err = 0;

  if (begin < 0) {
    begin = 0;
  }
  if (end > self->shape[0]) {
    end = self->shape[0];
  }
  if (begin > end) {
    begin = end;
  }

  if (!PySequence_Check(seq)) {
    PyErr_Format(PyExc_TypeError,
                 "buffer[:] = value, invalid assignment. "
                 "Expected a sequence, not an %.200s type",
                 Py_TYPE(seq)->tp_name);
    return -1;
  }

  /* re-use count var */
  if ((count = PySequence_Size(seq)) != (end - begin)) {
    PyErr_Format(PyExc_TypeError,
                 "buffer[:] = value, size mismatch in assignment. "
                 "Expected: %d (given: %d)",
                 (int)(end - begin),
                 count);
    return -1;
  }

  for (count = begin; count < end; count++) {
    item = PySequence_GetItem(seq, count - begin);
    if (item) {
      err = pygpu_buffer__sq_ass_item(self, count, item);
      Py_DECREF(item);
    }
    else {
      err = -1;
    }
    if (err) {
      break;
    }
  }
  return err;
}

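/* Buffer.__new__: Buffer(format, dimensions, data). When `data` supports the buffer
 * protocol its memory is wrapped directly, otherwise new memory is allocated and the
 * optional initializer sequence is copied in. */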
static PyObject *pygpu_buffer__tp_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
  PyObject *length_ob, *init = NULL;
  BPyGPUBuffer *buffer = NULL;
  Py_ssize_t shape[MAX_DIMENSIONS];

  Py_ssize_t shape_len = 0;

  if (kwds && PyDict_Size(kwds)) {
    PyErr_SetString(PyExc_TypeError, "Buffer(): takes no keyword args");
    return NULL;
  }

  const struct PyC_StringEnum pygpu_dataformat = {bpygpu_dataformat_items, GPU_DATA_FLOAT};
  if (!PyArg_ParseTuple(
          args, "O&O|O: Buffer", PyC_ParseStringEnum, &pygpu_dataformat, &length_ob, &init)) {
    return NULL;
  }

  if (!pygpu_buffer_pyobj_as_shape(length_ob, shape, &shape_len)) {
    return NULL;
  }

  if (init && PyObject_CheckBuffer(init)) {
    Py_buffer pybuffer;

    if (PyObject_GetBuffer(init, &pybuffer, PyBUF_ND | PyBUF_FORMAT) == -1) {
      /* PyObject_GetBuffer raises a PyExc_BufferError. */
      return NULL;
    }

    Py_ssize_t *pybuffer_shape = pybuffer.shape;
    Py_ssize_t pybuffer_ndim = pybuffer.ndim;
    if (!pybuffer_shape) {
      pybuffer_shape = &pybuffer.len;
      pybuffer_ndim = 1;
    }

    if (pygpu_buffer_dimensions_tot_len_compare(shape, shape_len, pybuffer_shape, pybuffer_ndim)) {
      buffer = pygpu_buffer_make_from_data(
          init, pygpu_dataformat.value_found, shape_len, shape, pybuffer.buf);
    }

    PyBuffer_Release(&pybuffer);
  }
  else {
    buffer = BPyGPU_Buffer_CreatePyObject(pygpu_dataformat.value_found, shape, shape_len, NULL);
    if (init && pygpu_buffer_ass_slice(buffer, 0, shape[0], init)) {
      Py_DECREF(buffer);
      return NULL;
    }
  }

  return (PyObject *)buffer;
}

/* BPyGPUBuffer sequence methods */

static int pygpu_buffer__sq_length(BPyGPUBuffer *self)
{
  return self->shape[0];
}

static PyObject *pygpu_buffer_slice(BPyGPUBuffer *self, Py_ssize_t begin, Py_ssize_t end)
{
  PyObject *list;
  Py_ssize_t count;

  if (begin < 0) {
    begin = 0;
  }
  if (end > self->shape[0]) {
    end = self->shape[0];
  }
  if (begin > end) {
    begin = end;
  }

  list = PyList_New(end - begin);

  for (count = begin; count < end; count++) {
    PyList_SET_ITEM(list, count - begin, pygpu_buffer__sq_item(self, count));
  }
  return list;
}

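/* Item assignment: for multi-dimensional buffers the value is assigned to the whole
 * sub-buffer row, for 1-D buffers the scalar is parsed according to the data format. */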
static int pygpu_buffer__sq_ass_item(BPyGPUBuffer *self, int i, PyObject *v)
{
  if (i >= self->shape[0] || i < 0) {
    PyErr_SetString(PyExc_IndexError, "array assignment index out of range");
    return -1;
  }

  if (self->shape_len != 1) {
    BPyGPUBuffer *row = (BPyGPUBuffer *)pygpu_buffer__sq_item(self, i);

    if (row) {
      const int ret = pygpu_buffer_ass_slice(row, 0, self->shape[1], v);
      Py_DECREF(row);
      return ret;
    }

    return -1;
  }

  switch (self->format) {
    case GPU_DATA_FLOAT:
      return PyArg_Parse(v, "f:Expected floats", &self->buf.as_float[i]) ? 0 : -1;
    case GPU_DATA_INT:
      return PyArg_Parse(v, "i:Expected ints", &self->buf.as_int[i]) ? 0 : -1;
    case GPU_DATA_UBYTE:
      return PyArg_Parse(v, "b:Expected ints", &self->buf.as_byte[i]) ? 0 : -1;
    case GPU_DATA_UINT:
    case GPU_DATA_UINT_24_8:
    case GPU_DATA_10_11_11_REV:
      return PyArg_Parse(v, "I:Expected unsigned ints", &self->buf.as_uint[i]) ? 0 : -1;
    default:
      return 0; /* Should never happen. */
  }
}

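/* Mapping protocol: support integer indexing (including negative indices) and
 * contiguous slices; slice steps other than 1 are rejected. */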
static PyObject *pygpu_buffer__mp_subscript(BPyGPUBuffer *self, PyObject *item)
{
  if (PyIndex_Check(item)) {
    Py_ssize_t i;
    i = PyNumber_AsSsize_t(item, PyExc_IndexError);
    if (i == -1 && PyErr_Occurred()) {
      return NULL;
    }
    if (i < 0) {
      i += self->shape[0];
    }
    return pygpu_buffer__sq_item(self, i);
  }
  if (PySlice_Check(item)) {
    Py_ssize_t start, stop, step, slicelength;

    if (PySlice_GetIndicesEx(item, self->shape[0], &start, &stop, &step, &slicelength) < 0) {
      return NULL;
    }

    if (slicelength <= 0) {
      return PyTuple_New(0);
    }
    if (step == 1) {
      return pygpu_buffer_slice(self, start, stop);
    }

    PyErr_SetString(PyExc_IndexError, "slice steps not supported with vectors");
    return NULL;
  }

  PyErr_Format(
      PyExc_TypeError, "buffer indices must be integers, not %.200s", Py_TYPE(item)->tp_name);
  return NULL;
}

static int pygpu_buffer__mp_ass_subscript(BPyGPUBuffer *self, PyObject *item, PyObject *value)
{
  if (PyIndex_Check(item)) {
    Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError);
    if (i == -1 && PyErr_Occurred()) {
      return -1;
    }
    if (i < 0) {
      i += self->shape[0];
    }
    return pygpu_buffer__sq_ass_item(self, i, value);
  }
  if (PySlice_Check(item)) {
    Py_ssize_t start, stop, step, slicelength;

    if (PySlice_GetIndicesEx(item, self->shape[0], &start, &stop, &step, &slicelength) < 0) {
      return -1;
    }

    if (step == 1) {
      return pygpu_buffer_ass_slice(self, start, stop, value);
    }

    PyErr_SetString(PyExc_IndexError, "slice steps not supported with vectors");
    return -1;
  }

  PyErr_Format(
      PyExc_TypeError, "buffer indices must be integers, not %.200s", Py_TYPE(item)->tp_name);
  return -1;
}

static PyMethodDef pygpu_buffer__tp_methods[] = {
    {"to_list",
     (PyCFunction)pygpu_buffer_to_list_recursive,
     METH_NOARGS,
     "return the buffer as a list"},
    {NULL, NULL, 0, NULL},
};

static PyGetSetDef pygpu_buffer_getseters[] = {
    {"dimensions",
     (getter)pygpu_buffer_dimensions_get,
     (setter)pygpu_buffer_dimensions_set,
     NULL,
     NULL},
    {NULL, NULL, NULL, NULL, NULL},
};

static PySequenceMethods pygpu_buffer__tp_as_sequence = {
    (lenfunc)pygpu_buffer__sq_length,           /* sq_length */
    (binaryfunc)NULL,                           /* sq_concat */
    (ssizeargfunc)NULL,                         /* sq_repeat */
    (ssizeargfunc)pygpu_buffer__sq_item,        /* sq_item */
    (ssizessizeargfunc)NULL,                    /* sq_slice, deprecated, handled in pygpu_buffer__sq_item */
    (ssizeobjargproc)pygpu_buffer__sq_ass_item, /* sq_ass_item */
    (ssizessizeobjargproc)NULL,                 /* sq_ass_slice, deprecated, handled in pygpu_buffer__sq_ass_item */
    (objobjproc)NULL,                           /* sq_contains */
    (binaryfunc)NULL,                           /* sq_inplace_concat */
    (ssizeargfunc)NULL,                         /* sq_inplace_repeat */
};

static PyMappingMethods pygpu_buffer__tp_as_mapping = {
    (lenfunc)pygpu_buffer__sq_length,
    (binaryfunc)pygpu_buffer__mp_subscript,
    (objobjargproc)pygpu_buffer__mp_ass_subscript,
};

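/* Python buffer-protocol support, so a Buffer can be consumed by APIs such as
 * memoryview() without copying. */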
#ifdef PYGPU_BUFFER_PROTOCOL
static void pygpu_buffer_strides_calc(const eGPUDataFormat format,
                                      const int shape_len,
                                      const Py_ssize_t *shape,
                                      Py_ssize_t *r_strides)
{
  r_strides[0] = GPU_texture_dataformat_size(format);
  for (int i = 1; i < shape_len; i++) {
    r_strides[i] = r_strides[i - 1] * shape[i - 1];
  }
}

/* The buffer-protocol `getbuffer` function. */
static int pygpu_buffer__bf_getbuffer(BPyGPUBuffer *self, Py_buffer *view, int flags)
{
  if (view == NULL) {
    PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer");
    return -1;
  }

  memset(view, 0, sizeof(*view));

  view->obj = (PyObject *)self;
  view->buf = (void *)self->buf.as_void;
  view->len = bpygpu_Buffer_size(self);
  view->readonly = 0;
  view->itemsize = GPU_texture_dataformat_size(self->format);
  if (flags & PyBUF_FORMAT) {
    view->format = (char *)pygpu_buffer_formatstr(self->format);
  }
  if (flags & PyBUF_ND) {
    view->ndim = self->shape_len;
    view->shape = self->shape;
  }
  if (flags & PyBUF_STRIDES) {
    view->strides = MEM_mallocN(view->ndim * sizeof(*view->strides), "BPyGPUBuffer strides");
    pygpu_buffer_strides_calc(self->format, view->ndim, view->shape, view->strides);
  }
  view->suboffsets = NULL;
  view->internal = NULL;

  Py_INCREF(self);
  return 0;
}

static void pygpu_buffer__bf_releasebuffer(PyObject *UNUSED(exporter), Py_buffer *view)
{
  MEM_SAFE_FREE(view->strides);
}

static PyBufferProcs pygpu_buffer__tp_as_buffer = {
    (getbufferproc)pygpu_buffer__bf_getbuffer,
    (releasebufferproc)pygpu_buffer__bf_releasebuffer,
};
#endif

PyDoc_STRVAR(
    pygpu_buffer__tp_doc,
    ".. class:: Buffer(format, dimensions, data)\n"
    "\n"
    "   For Python access to GPU functions requiring a pointer.\n"
    "\n"
    "   :arg format: Format type to interpret the buffer.\n"
    "      Possible values are `FLOAT`, `INT`, `UINT`, `UBYTE`, `UINT_24_8` and `10_11_11_REV`.\n"
    "   :type format: str\n"
    "   :arg dimensions: Array describing the dimensions.\n"
    "   :type dimensions: int\n"
    "   :arg data: Optional data array.\n"
    "   :type data: sequence\n");
PyTypeObject BPyGPU_BufferType = {
    PyVarObject_HEAD_INIT(NULL, 0).tp_name = "Buffer",
    .tp_basicsize = sizeof(BPyGPUBuffer),
    .tp_dealloc = (destructor)pygpu_buffer__tp_dealloc,
    .tp_repr = (reprfunc)pygpu_buffer__tp_repr,
    .tp_as_sequence = &pygpu_buffer__tp_as_sequence,
    .tp_as_mapping = &pygpu_buffer__tp_as_mapping,
#ifdef PYGPU_BUFFER_PROTOCOL
    .tp_as_buffer = &pygpu_buffer__tp_as_buffer,
#endif
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
    .tp_doc = pygpu_buffer__tp_doc,
    .tp_traverse = (traverseproc)pygpu_buffer__tp_traverse,
    .tp_clear = (inquiry)pygpu_buffer__tp_clear,
    .tp_methods = pygpu_buffer__tp_methods,
    .tp_getset = pygpu_buffer_getseters,
    .tp_new = pygpu_buffer__tp_new,
};

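/* Illustrative Python usage of the `Buffer` type defined above (a sketch only; the
 * `gpu.types` module path is assumed from the documented Python API and may differ
 * between Blender versions):
 *
 *   import gpu
 *
 *   buf = gpu.types.Buffer('FLOAT', (2, 3), [[0.0, 0.1, 0.2], [1.0, 1.1, 1.2]])
 *   buf[0][1] = 0.5          # Write through the sequence/mapping protocol.
 *   print(buf.dimensions)    # -> [2, 3]
 *   print(buf.to_list())     # Nested lists mirroring the buffer contents.
 */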
static size_t pygpu_buffer_calc_size(const int format,
                                     const int shape_len,
                                     const Py_ssize_t *shape)
{
  return pygpu_buffer_dimensions_tot_elem(shape, shape_len) * GPU_texture_dataformat_size(format);
}

size_t bpygpu_Buffer_size(BPyGPUBuffer *buffer)
{
  return pygpu_buffer_calc_size(buffer->format, buffer->shape_len, buffer->shape);
}

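/* Exported constructor (declared in gpu_py_buffer.h): allocates zeroed memory of the
 * required size when no buffer is passed in. */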
BPyGPUBuffer *BPyGPU_Buffer_CreatePyObject(const int format,
                                           const Py_ssize_t *shape,
                                           const int shape_len,
                                           void *buffer)
{
  if (buffer == NULL) {
    size_t size = pygpu_buffer_calc_size(format, shape_len, shape);
    buffer = MEM_callocN(size, "BPyGPUBuffer buffer");
  }

  return pygpu_buffer_make_from_data(NULL, format, shape_len, shape, buffer);
}