/* Fragments from Blender's lattice draw-cache implementation
 * (draw_cache_impl_lattice.c). Vertex/edge counts for a lattice of
 * resolution u * v * w: */
static int vert_len_calc(int u, int v, int w)
{
  if (u <= 0 || v <= 0 || w <= 0) {
    return 0;
  }
  return u * v * w;
}

static int edge_len_calc(int u, int v, int w)
{
  if (u <= 0 || v <= 0 || w <= 0) {
    return 0;
  }
  return (((((u - 1) * v) + ((v - 1) * u)) * w) + ((w - 1) * (u * v)));
}
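/* Sanity check (hypothetical standalone driver, not part of the Blender
 * file): the closed form above expands to (u-1)*v*w + (v-1)*u*w + (w-1)*u*v,
 * i.e. the axis-aligned edges of a grid graph. Compile together with the two
 * helpers above. */
#include <stdio.h>

int main(void)
{
  /* A default Blender lattice is 3 x 3 x 3. */
  printf("verts: %d\n", vert_len_calc(3, 3, 3)); /* 27 */
  printf("edges: %d\n", edge_len_calc(3, 3, 3)); /* 18 + 18 + 18 = 54 */
  return 0;
}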
/* Both lattice_render_verts_len_get() and lattice_render_edges_len_get()
 * begin by fetching the lattice resolution for the helpers above: */
const int u = lt->pntsu;
const int v = lt->pntsv;
const int w = lt->pntsw;
/* In lattice_render_data_free(): */
if (rdata->loose_verts) {
  MEM_freeN(rdata->loose_verts);
}
/* In lattice_render_data_vert_bpoint(): */
return &rdata->bp[vert_idx];
/* lattice_batch_cache_init() zeroes the freshly allocated cache: */
memset(cache, 0, sizeof(*cache));
/* lattice_batch_cache_get_pos() writes one vertex per control point; points
 * with no weight in the active vertex group get a sentinel value: */
for (int i = 0; i < vert_len; i++) {
  const float no_active_weight = 666.0f;
  /* ... fetch the BPoint and fill the position/weight attributes. */
}
/* lattice_batch_cache_get_edges() tallies edges as it emits them: */
int edge_len_real = 0;

/* Map a (u, v, w) grid coordinate to a flat index into the BPoint array: */
#define LATT_INDEX(u, v, w) ((((w)*rdata->dims.v_len + (v)) * rdata->dims.u_len) + (u))
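/* Standalone illustration of the LATT_INDEX flattening (hypothetical driver;
 * the dims values are made up): u varies fastest, then v, then w, matching a
 * row-major layout of the control-point array. */
#include <stdio.h>

int main(void)
{
  const int u_len = 2, v_len = 3, w_len = 4;
  int flat = 0;
  for (int w = 0; w < w_len; w++) {
    for (int v = 0; v < v_len; v++) {
      for (int u = 0; u < u_len; u++) {
        const int idx = ((w * v_len + v) * u_len) + u;
        if (idx != flat++) {
          printf("mismatch at (%d, %d, %d)\n", u, v, w);
          return 1;
        }
      }
    }
  }
  printf("all %d indices are contiguous\n", flat); /* 24 */
  return 0;
}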
/* The edge pass sweeps the whole grid; this fragment is the innermost (u)
 * loop, nested inside matching v and w loops: */
for (int u = 0; u < rdata->dims.u_len; u++) {
  /* ... emit the edges leaving point (u, v, w). */
}
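/* A minimal sketch of the emission step, assuming the u/v/w sweep above and
 * ignoring the "show only outside" filtering the real file performs: connect
 * each point to its +u / +v / +w neighbor, counting via edge_len_real. The
 * dims.w_len field name is inferred from u_len/v_len. */
GPUIndexBufBuilder elb;
GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, vert_len);
for (int w = 0; w < rdata->dims.w_len; w++) {
  for (int v = 0; v < rdata->dims.v_len; v++) {
    for (int u = 0; u < rdata->dims.u_len; u++) {
      if (u + 1 < rdata->dims.u_len) {
        GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v, w), LATT_INDEX(u + 1, v, w));
        edge_len_real++;
      }
      if (v + 1 < rdata->dims.v_len) {
        GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v, w), LATT_INDEX(u, v + 1, w));
        edge_len_real++;
      }
      if (w + 1 < rdata->dims.w_len) {
        GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v, w), LATT_INDEX(u, v, w + 1));
        edge_len_real++;
      }
    }
  }
}
cache->edges = GPU_indexbuf_build(&elb);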
/* In lattice_batch_cache_create_overlay_batches(): the static vertex format
 * is configured only on first use, then every point is written, with the
 * active control point flagged for highlighting: */
if (format.attr_len == 0) {
  /* ... add the vertex attributes to the static format (first call only). */
}
for (int i = 0; i < vert_len; i++) {
  if (i == rdata->actbp) {
    /* ... mark this vertex as the active BPoint. */
  }
}
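/* The attr_len == 0 test is Blender's usual lazy-init idiom: the format and
 * attribute ids live in function-local statics, so they are built exactly
 * once and reused across redraws. A minimal sketch, assuming a single "pos"
 * attribute (the attribute set in the real function is richer): */
static GPUVertFormat format = {0};
static struct { uint pos; } attr_id;
if (format.attr_len == 0) {
  attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
}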
/* Dirty-tag modes accepted by DRW_lattice_batch_cache_dirty_tag():
 *   BKE_LATTICE_BATCH_DIRTY_SELECT
 *   BKE_LATTICE_BATCH_DIRTY_ALL */
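/* A sketch of how those modes are typically handled, modeled on Blender's
 * sibling draw caches; the cache fields (is_dirty, overlay_verts) and the
 * lt->batch_cache slot are assumptions, not taken verbatim from this file. */
void DRW_lattice_batch_cache_dirty_tag(Lattice *lt, int mode)
{
  LatticeBatchCache *cache = lt->batch_cache;
  if (cache == NULL) {
    return;
  }
  switch (mode) {
    case BKE_LATTICE_BATCH_DIRTY_ALL:
      cache->is_dirty = true; /* Everything is rebuilt on the next validate. */
      break;
    case BKE_LATTICE_BATCH_DIRTY_SELECT:
      /* A selection change only invalidates the edit-mode overlay. */
      GPU_BATCH_DISCARD_SAFE(cache->overlay_verts);
      break;
  }
}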
/* GPU buffer/batch API referenced by the implementation
 * (GPU_batch.h, GPU_index_buffer.h, GPU_vertex_buffer.h): */
typedef struct GPUVertBuf GPUVertBuf;
typedef struct GPUIndexBuf GPUIndexBuf;

GPUBatch *GPU_batch_create_ex(GPUPrimType prim, GPUVertBuf *vert, GPUIndexBuf *elem, eGPUBatchFlag owns_flag);
void GPU_indexbuf_init(GPUIndexBufBuilder *, GPUPrimType, uint prim_len, uint vertex_len);
void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder *, uint v1, uint v2);
GPUIndexBuf *GPU_indexbuf_build(GPUIndexBufBuilder *);
void GPU_vertbuf_data_alloc(GPUVertBuf *, uint v_len);
void GPU_vertbuf_attr_set(GPUVertBuf *, uint a_idx, uint v_idx, const void *data);

/* Convenience macros: GPU_batch_create(prim, verts, elem),
 * GPU_vertbuf_create_with_format(format), GPU_BATCH_DISCARD_SAFE(batch),
 * GPU_INDEXBUF_DISCARD_SAFE(elem), GPU_VERTBUF_DISCARD_SAFE(verts). */
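/* How the pieces fit together: a hedged sketch of the usual build flow,
 * reusing the format/attr_id idiom sketched earlier. The GPU_BATCH_OWNS_*
 * flags (letting the batch free its buffers) are an assumption here. */
GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
GPU_vertbuf_data_alloc(vbo, vert_len);
for (uint i = 0; i < vert_len; i++) {
  const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);
  GPU_vertbuf_attr_set(vbo, attr_id.pos, i, bp->vec);
}

GPUIndexBufBuilder elb;
GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, vert_len);
/* ... one GPU_indexbuf_add_line_verts() call per edge, as sketched above. */
GPUIndexBuf *ibo = GPU_indexbuf_build(&elb);

GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, ibo,
                                      GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);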
/* Symbol outline of the file. */

/* Public draw API: */
void DRW_lattice_batch_cache_dirty_tag(Lattice *lt, int mode);
void DRW_lattice_batch_cache_validate(Lattice *lt);
void DRW_lattice_batch_cache_free(Lattice *lt);
GPUBatch *DRW_lattice_batch_cache_get_all_verts(Lattice *lt);
GPUBatch *DRW_lattice_batch_cache_get_all_edges(Lattice *lt, bool use_weight, const int actdef);
GPUBatch *DRW_lattice_batch_cache_get_edit_verts(Lattice *lt);

/* Render data, extracted from the Lattice once per rebuild: */
typedef struct LatticeRenderData LatticeRenderData;
static LatticeRenderData *lattice_render_data_create(Lattice *lt, const int types);
static void lattice_render_data_free(LatticeRenderData *rdata);
static int lattice_render_data_verts_len_get(const LatticeRenderData *rdata);
static int lattice_render_data_edges_len_get(const LatticeRenderData *rdata);
static const BPoint *lattice_render_data_vert_bpoint(const LatticeRenderData *rdata, const int vert_idx);

/* Counting and indexing helpers: */
static int vert_len_calc(int u, int v, int w);
static int edge_len_calc(int u, int v, int w);
static int lattice_render_verts_len_get(Lattice *lt);
static int lattice_render_edges_len_get(Lattice *lt);
#define LATT_INDEX(u, v, w)

/* Batch cache lifecycle and builders: */
typedef struct LatticeBatchCache LatticeBatchCache;
static LatticeBatchCache *lattice_batch_cache_get(Lattice *lt);
static void lattice_batch_cache_init(Lattice *lt);
static bool lattice_batch_cache_valid(Lattice *lt);
static void lattice_batch_cache_clear(Lattice *lt);
static GPUVertBuf *lattice_batch_cache_get_pos(LatticeRenderData *rdata, LatticeBatchCache *cache, bool use_weight, const int actdef);
static GPUIndexBuf *lattice_batch_cache_get_edges(LatticeRenderData *rdata, LatticeBatchCache *cache);
static void lattice_batch_cache_create_overlay_batches(Lattice *lt);
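/* A hedged sketch of the lazy validate-then-get pattern these symbols imply;
 * the exact body is not in this extract, and lt->batch_cache as the storage
 * slot is an assumption: */
static LatticeBatchCache *lattice_batch_cache_get(Lattice *lt)
{
  if (!lattice_batch_cache_valid(lt)) {
    lattice_batch_cache_clear(lt);
    lattice_batch_cache_init(lt);
  }
  return lt->batch_cache;
}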
/* Remaining symbols; the @NNN tags were the indexer's placeholders for
 * anonymous structs: */
void (*MEM_freeN)(void *vmemh);                     /* guardedalloc free hook */
void *(*MEM_callocN)(size_t len, const char *str);  /* guardedalloc calloc hook */

/* attr_id: file-local anonymous struct holding vertex-attribute indices. */
/* dims: anonymous struct (presumably { int u_len, v_len, w_len; }) embedded
 * in both LatticeRenderData and LatticeBatchCache. */
const struct MDeformVert *dvert;  /* LatticeRenderData */
struct EditLatt *edit_latt;       /* LatticeRenderData */
struct MDeformVert *dvert;        /* Lattice (DNA) */
struct EditLatt *editlatt;        /* Lattice (DNA) */