From bb76759c5e8dd660018413da3a9629a9104e7c10 Mon Sep 17 00:00:00 2001
From: SAM-tak
Date: Wed, 15 Nov 2023 17:30:40 +0900
Subject: [PATCH 1/3] fix

---
 README.md | 41 ++++++++++++++++++++++++++++-------------
 1 file changed, 28 insertions(+), 13 deletions(-)

diff --git a/README.md b/README.md
index f9129d56..ed3be8cd 100644
--- a/README.md
+++ b/README.md
@@ -1,28 +1,37 @@
 # Blender For Unreal Engine
+
 This Add-on allows you to export content created with Blender to Unreal Engine: Static Meshes, Skeletal Meshes, Animations (NLA and Actions), Collisions and Sockets, Alembic animations, Cameras and sequencer data [...]
 It works with Blender 2.8 through 3.5 and supports both UE4 and UE5!
 
 Discord:
 If you need help or want to see my side projects, you can join the Discord!
--> https://discord.gg/XuYeGCFtxa
+-> [Discord](https://discord.gg/XuYeGCFtxa)
+
+## How it works
 
-# How it works
 Working on object packs for Unreal Engine can be tedious with Blender. That's why I created the add-on "Blender for UnrealEngine". It simplifies the export process from Blender to Unreal Engine by letting you export all the assets of a scene at the same time, and it even distributes them automatically in a proper tree structure in correlation with the Unreal Engine pipeline! Sockets and collision shapes are created directly in Blender, and you can choose precisely which animations need to be exported. Blender For Unreal Engine also includes an error checker to catch potential problems: it explains each problem and how to solve it, and in certain cases it can correct the problem automatically. By using Blender For Unreal Engine you can generate Python scripts to import all the exported assets directly into Unreal Engine. This also works for cameras in a Level Sequence, with all the cuts and the camera ratio.
-# Wiki
+## Wiki
+
 - [Home](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki)
-### Quick Start
+
+## Quick Start
+
 - [Download And Installation](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki/Download-And-Installation)
 - [Blender For Unreal Engine - Quick Start](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki/Blender-For-Unreal-Engine-Quick-Start)
-### Videos
-- How Import Blender assets to Unreal Engine -> https://youtu.be/2ehb2Ih3Nbg
-- How Import Blender camera to Unreal Sequencer -> https://youtu.be/Xx_9MQu2EkM
-- Old Teaser -> https://youtu.be/YLOZZIlhgaM
-### Overview Pages
+
+## Videos
+
+- [How to Import Blender Assets to Unreal Engine](https://youtu.be/2ehb2Ih3Nbg)
+- [How to Import a Blender Camera to the Unreal Sequencer](https://youtu.be/Xx_9MQu2EkM)
+- [Old Teaser](https://youtu.be/YLOZZIlhgaM)
+
+## Overview Pages
+
 - [Transform And Pivot Point](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki/Transform-And-Pivot-Point)
 - [Level of details](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki/Level-of-details)
 - [Collisions](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki/Collisions)
@@ -37,13 +46,19 @@
 - [Custom Properties Animation to Curves UE](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki/Custom-Properties-Animation-to-Curves-UE)
 - [Export Collection like a StaticMesh](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki/Export-collection-like-a-StaticMesh)
 - [Import Exported Assets to UE](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki/How-import-assets)
-### Skeleton and Skeletal Meshs
+
+## Skeleton and Skeletal Meshes
+
 - [Modular Skeletal Meshs](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki/Modular-skeletal-mesh)
 - [Skeleton & Root bone](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki/Skeleton-&-Root-bone)
 - [UE Bone Structure and UE Mannequin](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki/UE-Bone-Structure-and-UE-Mannequin)
-### Community videos
-- TRACK THIS! - Camera Tracking From Blender To Unreal Engine -> https://youtu.be/Ta0am2wC-SI?si=2TNyqYHmnDpf20u8
-### Additional Information
+
+## Community Videos
+
+- [TRACK THIS! - Camera Tracking From Blender To Unreal Engine](https://youtu.be/Ta0am2wC-SI?si=2TNyqYHmnDpf20u8)
+
+## Additional Information
+
 - [API](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki/API)
 - [Nomenclature](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki/Nomenclature)
 - [Avoid potential errors](https://github.com/xavier150/Blender-For-UnrealEngine-Addons/wiki/How-avoid-potential-errors)

From 2913693896c527a0b2f8a29d3a0d1347b5516b2c Mon Sep 17 00:00:00 2001
From: SAM-tak
Date: Thu, 16 Nov 2023 20:57:29 +0900
Subject: [PATCH 2/3] update fbxio with blender 4.0

---
 README.md                                     |   9 +-
 .../bfu_object_ui_and_props.py                |  20 +-
 .../export/bfu_export_single_fbx_action.py   |   2 +-
 blender-for-unrealengine/fbxio/data_types.py  |   9 +-
 blender-for-unrealengine/fbxio/encode_bin.py  |  55 +-
 .../fbxio/export_fbx_bin.py                   | 623 ++++++++++--------
 blender-for-unrealengine/fbxio/fbx_utils.py   | 451 +++++++++++--
 7 files changed, 825 insertions(+), 344 deletions(-)

diff --git a/README.md b/README.md
index ed3be8cd..67578c13 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,14 @@
 # Blender For Unreal Engine
 
-This Add-on allows you to export content created with Blender to Unreal Engine: Static Meshes, Skeletal Meshes, Animations (NLA and Actions), Collisions and Sockets, Alembic animations, Cameras and sequencer data [...]
+This Add-on allows you to export content created with Blender to Unreal Engine: Static Meshes, Skeletal Meshes, Animations (NLA and Actions), Collisions and Sockets, Alembic animations, Cameras and sequencer data...
+
 It works with Blender 2.8 through 3.5 and supports both UE4 and UE5!
 
-Discord:
+## Discord
+
 If you need help or want to see my side projects, you can join the Discord!
--> [Discord](https://discord.gg/XuYeGCFtxa)
+
+- [BleuRaven side projects](https://discord.gg/XuYeGCFtxa)
 
 ## How it works
 
diff --git a/blender-for-unrealengine/bfu_addon_parts/bfu_object_ui_and_props.py b/blender-for-unrealengine/bfu_addon_parts/bfu_object_ui_and_props.py
index 97540e6d..c7e3bcc1 100644
--- a/blender-for-unrealengine/bfu_addon_parts/bfu_object_ui_and_props.py
+++ b/blender-for-unrealengine/bfu_addon_parts/bfu_object_ui_and_props.py
@@ -856,6 +856,15 @@ class BFU_PT_BlenderForUnrealObject(bpy.types.Panel):
         default='X',
     )
 
+    bpy.types.Object.bfu_export_animation_without_mesh = BoolProperty(
+        name="Export animation without mesh",
+        description=(
+            "If checked, mesh data is not included in the FBX file when exporting animations."
+ ), + override={'LIBRARY_OVERRIDABLE'}, + default=True + ) + bpy.types.Object.bfu_mirror_symmetry_right_side_bones = BoolProperty( name="Revert direction of symmetry right side bones", description=( @@ -1524,10 +1533,13 @@ def draw(self, context): unreal_skeleton.prop(obj, "bfu_target_skeleton_custom_name") if obj.bfu_skeleton_search_mode == "custom_reference": unreal_skeleton.prop(obj, "bfu_target_skeleton_custom_ref") - unreal_skeleton.prop(obj, "bfu_mirror_symmetry_right_side_bones") - MirrorSymmetryRightSideBonesRow = unreal_skeleton.row() - MirrorSymmetryRightSideBonesRow.enabled = obj.bfu_mirror_symmetry_right_side_bones - MirrorSymmetryRightSideBonesRow.prop(obj, "bfu_use_ue_mannequin_bone_alignment") + ue_standard_skeleton = layout.column() + ue_standard_skeleton.enabled = obj.bfu_export_procedure == "ue-standard" + ue_standard_skeleton.prop(obj, "bfu_export_animation_without_mesh") + ue_standard_skeleton.prop(obj, "bfu_mirror_symmetry_right_side_bones") + mirror_symmetry_right_side_bones = ue_standard_skeleton.row() + mirror_symmetry_right_side_bones.enabled = obj.bfu_mirror_symmetry_right_side_bones + mirror_symmetry_right_side_bones.prop(obj, "bfu_use_ue_mannequin_bone_alignment") scene.bfu_modular_skeletal_mesh_properties_expanded.draw(layout) if scene.bfu_modular_skeletal_mesh_properties_expanded.is_expend(): diff --git a/blender-for-unrealengine/export/bfu_export_single_fbx_action.py b/blender-for-unrealengine/export/bfu_export_single_fbx_action.py index ff0455c8..95349dbe 100644 --- a/blender-for-unrealengine/export/bfu_export_single_fbx_action.py +++ b/blender-for-unrealengine/export/bfu_export_single_fbx_action.py @@ -167,7 +167,7 @@ def ExportSingleFbxAction( filepath=bfu_export_utils.GetExportFullpath(dirpath, filename), check_existing=False, use_selection=True, - animation_only=True, + animation_only=active.bfu_export_animation_without_mesh, global_matrix=axis_conversion(to_forward=active.bfu_export_axis_forward, to_up=active.bfu_export_axis_up).to_4x4(), apply_unit_scale=True, global_scale=bfu_utils.GetObjExportScale(active), diff --git a/blender-for-unrealengine/fbxio/data_types.py b/blender-for-unrealengine/fbxio/data_types.py index cf612b8e..328ba3a9 100644 --- a/blender-for-unrealengine/fbxio/data_types.py +++ b/blender-for-unrealengine/fbxio/data_types.py @@ -1,9 +1,10 @@ +# SPDX-FileCopyrightText: 2006-2012 assimp team +# SPDX-FileCopyrightText: 2013 Blender Foundation +# # SPDX-License-Identifier: GPL-2.0-or-later -# Script copyright (C) 2006-2012, assimp team -# Script copyright (C) 2013 Blender Foundation - -BOOL = b'C'[0] +BOOL = b'B'[0] +CHAR = b'C'[0] INT8 = b'Z'[0] INT16 = b'Y'[0] INT32 = b'I'[0] diff --git a/blender-for-unrealengine/fbxio/encode_bin.py b/blender-for-unrealengine/fbxio/encode_bin.py index 9014d2bc..a7e8071e 100644 --- a/blender-for-unrealengine/fbxio/encode_bin.py +++ b/blender-for-unrealengine/fbxio/encode_bin.py @@ -1,7 +1,7 @@ +# SPDX-FileCopyrightText: 2013 Campbell Barton +# # SPDX-License-Identifier: GPL-2.0-or-later -# Script copyright (C) 2013 Campbell Barton - try: from . import data_types except: @@ -12,8 +12,10 @@ import numpy as np import zlib -_BLOCK_SENTINEL_LENGTH = 13 -_BLOCK_SENTINEL_DATA = (b'\0' * _BLOCK_SENTINEL_LENGTH) +_BLOCK_SENTINEL_LENGTH = ... +_BLOCK_SENTINEL_DATA = ... +_ELEM_META_FORMAT = ... +_ELEM_META_SIZE = ... 
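Editor's note: the data_types.py hunk above is subtle. FBX binary property records are tagged with a one-byte type code, and the patch stops using `'C'` for booleans: `'B'` becomes the boolean code and `'C'` becomes a genuine single-character code (which the later "Shading" hunks rely on). A minimal standalone sketch of the two payloads, assuming only the type codes shown above and simplifying away the surrounding record framing; the exact byte convention for the bool payload (0/1) is an assumption based on the `add_bool`/`add_char` code in this patch:

```python
# Minimal sketch of the BOOL vs CHAR property payloads, not part of the patch.
from struct import pack

BOOL = b'B'  # boolean property: payload assumed to be a single 0/1 byte
CHAR = b'C'  # single-character property: payload is one raw byte

def pack_bool_prop(value: bool) -> bytes:
    # Type code followed by one byte, 0 or 1.
    return BOOL + pack('<?', bool(value))

def pack_char_prop(value: bytes) -> bytes:
    # Type code followed by exactly one raw byte, e.g. b'\x01' for "Shading".
    assert len(value) == 1
    return CHAR + pack('<c', value)

if __name__ == "__main__":
    assert pack_bool_prop(True) == b'B\x01'
    assert pack_char_prop(b'\x01') == b'C\x01'
```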
_IS_BIG_ENDIAN = (__import__("sys").byteorder != 'little')
_HEAD_MAGIC = b'Kaydara FBX Binary\x20\x20\x00\x1a\x00'
@@ -56,6 +58,14 @@ def add_bool(self, data):
         self.props_type.append(data_types.BOOL)
         self.props.append(data)
 
+    def add_char(self, data):
+        assert(isinstance(data, bytes))
+        assert(len(data) == 1)
+        data = pack('<c', data)
+        self.props_type.append(data_types.CHAR)
+        self.props.append(data)
+
diff --git a/blender-for-unrealengine/fbxio/export_fbx_bin.py b/blender-for-unrealengine/fbxio/export_fbx_bin.py
-                # - Combine with edges that are sharp because they're in more than two faces
-                e_use_sharp_mask = np.logical_or(e_use_sharp_mask, e_more_than_two_faces_mask, out=e_use_sharp_mask)
+                # - Get sharp edges from the "sharp_edge" attribute. The attribute may not exist, in which case, there
+                # are no edges marked as sharp.
+                e_use_sharp_mask = MESH_ATTRIBUTE_SHARP_EDGE.get_ndarray(attributes)
+                if e_use_sharp_mask is not None:
+                    # - Combine with edges that are sharp because they're in more than two faces
+                    e_use_sharp_mask = np.logical_or(e_use_sharp_mask, e_more_than_two_faces_mask, out=e_use_sharp_mask)
+                else:
+                    e_use_sharp_mask = e_more_than_two_faces_mask
 
-                # - Combine with edges that are sharp because a polygon they're in has flat shading
-                e_use_sharp_mask[sharp_edge_indices_from_polygons] = True
+                # - Get sharp edges from flat shaded faces
+                p_flat_mask = MESH_ATTRIBUTE_SHARP_FACE.get_ndarray(attributes)
+                if p_flat_mask is not None:
+                    # Convert flat shaded polygons to flat shaded loops by repeating each element by the number of sides
+                    # of that polygon.
+                    # Polygon sides can be calculated from the element-wise difference of loop starts appended by the
+                    # number of loops. Alternatively, polygon sides can be retrieved directly from the 'loop_total'
+                    # attribute of polygons, but since we already have t_ls, it tends to be quicker to calculate from
+                    # t_ls.
+                    polygon_sides = np.diff(mesh_t_ls_view, append=mesh_loop_nbr)
+                    p_flat_loop_mask = np.repeat(p_flat_mask, polygon_sides)
+                    # Convert flat shaded loops to flat shaded (sharp) edge indices.
+                    # Note that if an edge is in multiple loops that are part of flat shaded faces, its edge index will
+                    # end up in sharp_edge_indices_from_polygons multiple times.
+                    sharp_edge_indices_from_polygons = mesh_t_lei_view[p_flat_loop_mask]
+
+                    # - Combine with edges that are sharp because a polygon they're in has flat shading
+                    e_use_sharp_mask[sharp_edge_indices_from_polygons] = True
+                    del sharp_edge_indices_from_polygons
+                    del p_flat_loop_mask
+                    del polygon_sides
+                del p_flat_mask
 
                 # - Convert sharp edges to sharp edge keys (t_pvi)
                 ek_use_sharp_mask = e_use_sharp_mask[t_pvi_edge_indices]
@@ -1086,11 +1099,6 @@ def _infinite_gen(val):
                 t_ps = np.invert(ek_use_sharp_mask, out=ek_use_sharp_mask)
                 del ek_use_sharp_mask
                 del e_use_sharp_mask
-                del sharp_edge_indices_from_polygons
-                del p_flat_loop_mask
-                del polygon_sides
-                del p_flat_mask
-                del p_use_smooth_mask
                 del mesh_t_lei_view
                 del mesh_t_ls_view
             else:
@@ -1111,19 +1119,25 @@ def _infinite_gen(val):
         ec_fbx_dtype = np.float64
         if t_pvi_edge_indices.size:
             ec_bl_dtype = np.single
-            t_ec_raw = np.empty(len(me.edges), dtype=ec_bl_dtype)
-            me.edges.foreach_get('crease', t_ec_raw)
-
-            # Convert to t_pvi edge-keys.
-            t_ec_ek_raw = t_ec_raw[t_pvi_edge_indices]
-
-            # Blender squares those values before sending them to OpenSubdiv, when other software don't,
-            # so we need to compensate that to get similar results through FBX...
-            # Use the precision of the fbx dtype for the calculation since it's usually higher precision.
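Editor's note: the rewritten sharp-edge hunk above derives per-edge sharpness from the optional `sharp_edge` and `sharp_face` attributes. A standalone numpy sketch of the face-to-edge propagation, with invented example arrays standing in for Blender's attribute data:

```python
# Standalone sketch of the flat-face -> sharp-edge propagation, example data only.
import numpy as np

loop_starts = np.array([0, 3, 7])     # first loop index of each face
num_loops = 10
loop_edge_indices = np.array([0, 1, 2, 2, 3, 4, 5, 5, 6, 0])  # edge used by each loop
sharp_edge = np.zeros(7, dtype=bool)            # per-edge "sharp_edge" attribute
sharp_face = np.array([False, True, False])     # per-face "sharp_face" attribute

# Repeat each face's flag once per loop of that face, then gather the edges of
# flat-shaded loops.
face_sides = np.diff(loop_starts, append=num_loops)   # [3, 4, 3]
flat_loop_mask = np.repeat(sharp_face, face_sides)
sharp_from_faces = loop_edge_indices[flat_loop_mask]

e_use_sharp_mask = sharp_edge.copy()
e_use_sharp_mask[sharp_from_faces] = True
print(e_use_sharp_mask)  # edges 2, 3, 4 and 5 become sharp via the flat middle face
```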
- t_ec_ek_raw = t_ec_ek_raw.astype(ec_fbx_dtype, copy=False) - t_ec = np.square(t_ec_ek_raw, out=t_ec_ek_raw) - del t_ec_ek_raw - del t_ec_raw + edge_creases = me.edge_creases + if edge_creases: + t_ec_raw = np.empty(len(me.edges), dtype=ec_bl_dtype) + edge_creases.data.foreach_get("value", t_ec_raw) + + # Convert to t_pvi edge-keys. + t_ec_ek_raw = t_ec_raw[t_pvi_edge_indices] + + # Blender squares those values before sending them to OpenSubdiv, when other software don't, + # so we need to compensate that to get similar results through FBX... + # Use the precision of the fbx dtype for the calculation since it's usually higher precision. + t_ec_ek_raw = t_ec_ek_raw.astype(ec_fbx_dtype, copy=False) + t_ec = np.square(t_ec_ek_raw, out=t_ec_ek_raw) + del t_ec_ek_raw + del t_ec_raw + else: + # todo: Blender edge creases are optional now, we may be able to avoid writing the array to FBX when + # there are no edge creases. + t_ec = np.zeros(t_pvi_edge_indices.shape, dtype=ec_fbx_dtype) else: t_ec = np.empty(0, dtype=ec_fbx_dtype) @@ -1338,7 +1352,7 @@ def _infinite_gen(val): elem_data_single_string(lay_uv, b"MappingInformationType", b"ByPolygonVertex") elem_data_single_string(lay_uv, b"ReferenceInformationType", b"IndexToDirect") - uvlayer.data.foreach_get("uv", t_luv) + uvlayer.uv.foreach_get("vector", t_luv) # t_luv_fast_pair_view is a view in a dtype that compares elements by individual bytes, but float types have # separate byte representations of positive and negative zero. For uniqueness, these should be considered @@ -1414,11 +1428,13 @@ def _infinite_gen(val): elem_data_single_int32(lay_ma, b"Version", FBX_GEOMETRY_MATERIAL_VERSION) elem_data_single_string(lay_ma, b"Name", b"") nbr_mats = len(me_fbxmaterials_idx) - if nbr_mats > 1: - bl_pm_dtype = np.uintc + multiple_fbx_mats = nbr_mats > 1 + # If a mesh does not have more than one material its material_index attribute can be ignored. + # If a mesh has multiple materials but all its polygons are assigned to the first material, its + # material_index attribute may not exist. + t_pm = None if not multiple_fbx_mats else MESH_ATTRIBUTE_MATERIAL_INDEX.get_ndarray(attributes) + if t_pm is not None: fbx_pm_dtype = np.int32 - t_pm = np.empty(len(me.polygons), dtype=bl_pm_dtype) - me.polygons.foreach_get("material_index", t_pm) # We have to validate mat indices, and map them to FBX indices. # Note a mat might not be in me_fbxmaterials_idx (e.g. node mats are ignored). @@ -1430,7 +1446,10 @@ def _infinite_gen(val): # Set material indices that are out of bounds to the default material index mat_idx_limit = len(me_blmaterials) - t_pm[t_pm >= mat_idx_limit] = def_me_blmaterial_idx + # Material indices shouldn't be negative, but they technically could be. Viewing as unsigned before + # checking for indices that are too large means that a single >= check will pick up both negative + # indices and indices that are too large. + t_pm[t_pm.view("u%i" % t_pm.itemsize) >= mat_idx_limit] = def_me_blmaterial_idx # Map to FBX indices. Materials not in me_fbxmaterials_idx will be set to the default material index. blmat_fbx_idx = np.fromiter((me_fbxmaterials_idx.get(m, def_ma) for m in me_blmaterials), @@ -1444,11 +1463,18 @@ def _infinite_gen(val): # indices??? *sigh*). 
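Editor's note: the material-index hunk above clamps out-of-range indices with a single comparison by viewing the signed index array as unsigned, so negative indices look like huge values. A standalone sketch of the trick with example data:

```python
# Standalone sketch of the unsigned-view bounds check used above.
import numpy as np

mat_idx_limit = 3   # number of material slots
default_idx = 0
t_pm = np.array([0, 2, -1, 5, 1], dtype=np.int32)

# Viewing int32 as uint32 turns -1 into 4294967295, so one >= test catches both
# negative indices and indices that are too large.
t_pm[t_pm.view("u%i" % t_pm.itemsize) >= mat_idx_limit] = default_idx
print(t_pm)  # [0 2 0 0 1] - both -1 and 5 were replaced
```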
elem_data_single_string(lay_ma, b"ReferenceInformationType", b"IndexToDirect") elem_data_single_int32_array(lay_ma, b"Materials", t_pm) - del t_pm else: elem_data_single_string(lay_ma, b"MappingInformationType", b"AllSame") elem_data_single_string(lay_ma, b"ReferenceInformationType", b"IndexToDirect") - elem_data_single_int32_array(lay_ma, b"Materials", [0]) + if multiple_fbx_mats: + # There's no material_index attribute, so every material index is effectively zero. + # In the order of the mesh's materials, get the FBX index of the first material that is exported. + all_same_idx = next(me_fbxmaterials_idx[m] for m in me_blmaterials if m in me_fbxmaterials_idx) + else: + # There's only one fbx material, so the index will always be zero. + all_same_idx = 0 + elem_data_single_int32_array(lay_ma, b"Materials", [all_same_idx]) + del t_pm # And the "layer TOC"... @@ -1784,18 +1810,16 @@ def fbx_data_armature_elements(root, arm_obj, scene_data): elem_data_single_int32(fbx_skin, b"Version", FBX_DEFORMER_SKIN_VERSION) elem_data_single_float64(fbx_skin, b"Link_DeformAcuracy", 50.0) # Only vague idea what it is... - # Pre-process vertex weights (also to check vertices assigned to more than four bones). + # Pre-process vertex weights so that the vertices only need to be iterated once. ob = ob_obj.bdata bo_vg_idx = {bo_obj.bdata.name: ob.vertex_groups[bo_obj.bdata.name].index for bo_obj in clusters.keys() if bo_obj.bdata.name in ob.vertex_groups} valid_idxs = set(bo_vg_idx.values()) vgroups = {vg.index: {} for vg in ob.vertex_groups} - verts_vgroups = (sorted(((vg.group, vg.weight) for vg in v.groups if vg.weight and vg.group in valid_idxs), - key=lambda e: e[1], reverse=True) - for v in me.vertices) - for idx, vgs in enumerate(verts_vgroups): - for vg_idx, w in vgs: - vgroups[vg_idx][idx] = w + for idx, v in enumerate(me.vertices): + for vg in v.groups: + if (w := vg.weight) and (vg_idx := vg.group) in valid_idxs: + vgroups[vg_idx][idx] = w for bo_obj, clstr_key in clusters.items(): bo = bo_obj.bdata @@ -1874,7 +1898,8 @@ def fbx_data_leaf_bone_elements(root, scene_data): # object type, etc. elem_data_single_int32(model, b"MultiLayer", 0) elem_data_single_int32(model, b"MultiTake", 0) - elem_data_single_bool(model, b"Shading", True) + # Probably the FbxNode.EShadingMode enum. Full description in fbx_data_object_elements. + elem_data_single_char(model, b"Shading", b"\x01") elem_data_single_string(model, b"Culling", b"CullingOff") elem_props_template_finalize(tmpl, props) @@ -1938,7 +1963,12 @@ def fbx_data_object_elements(root, ob_obj, scene_data): # object type, etc. elem_data_single_int32(model, b"MultiLayer", 0) elem_data_single_int32(model, b"MultiTake", 0) - elem_data_single_bool(model, b"Shading", True) + # This is probably the FbxNode.EShadingMode enum. Not directly used by the FBX SDK, but the SDK guarantees that the + # value will be passed through from an imported file to an exported one. Common values are 'Y' and 'T'. 'U' and 'W' + # have also been seen in older FBX files. It's not clear which enum member each of these values corresponds to or if + # these values are actually application specific. Blender had been exporting this as a `True` bool for a long time + # seemingly without issue. The '\x01' char is the same value as `True` in raw bytes. 
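Editor's note: the vertex-weight hunk above replaces the old sort-based per-vertex generator with a single pass over the vertices. A standalone sketch of the same gathering loop, with plain `(group_index, weight)` pairs standing in for Blender's vertex-group data:

```python
# Standalone sketch of the single-pass weight gathering, example data only.
valid_idxs = {0, 2}                 # vertex-group indices that map to exported bones
vgroups = {0: {}, 1: {}, 2: {}}     # weights per group, keyed by vertex index
vertices = [
    [(0, 0.5), (1, 0.5)],
    [(2, 1.0)],
    [(0, 0.0), (2, 0.25)],          # zero weights are skipped
]

for idx, groups in enumerate(vertices):
    for vg_idx, w in groups:
        if w and vg_idx in valid_idxs:
            vgroups[vg_idx][idx] = w

print(vgroups)  # {0: {0: 0.5}, 1: {}, 2: {1: 1.0, 2: 0.25}}
```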
+ elem_data_single_char(model, b"Shading", b"\x01") elem_data_single_string(model, b"Culling", b"CullingOff") if obj_type == b"Camera": @@ -1964,12 +1994,6 @@ def fbx_data_animation_elements(root, scene_data): animations = scene_data.animations if not animations: return - scene = scene_data.scene - - fps = scene.render.fps / scene.render.fps_base - - def keys_to_ktimes(keys): - return (int(v) for v in convert_sec_to_ktime_iter((f / fps for f, _v in keys))) # Animation stacks. for astack_key, alayers, alayer_key, name, f_start, f_end in animations: @@ -2009,18 +2033,18 @@ def keys_to_ktimes(keys): acn_tmpl = elem_props_template_init(scene_data.templates, b"AnimationCurveNode") acn_props = elem_properties(acurvenode) - for fbx_item, (acurve_key, def_value, keys, _acurve_valid) in acurves.items(): + for fbx_item, (acurve_key, def_value, (keys, values), _acurve_valid) in acurves.items(): elem_props_template_set(acn_tmpl, acn_props, "p_number", fbx_item.encode(), def_value, animatable=True) # Only create Animation curve if needed! - if keys: + nbr_keys = len(keys) + if nbr_keys: acurve = elem_data_single_int64(root, b"AnimationCurve", get_fbx_uuid_from_key(acurve_key)) acurve.add_string(fbx_name_class(b"", b"AnimCurve")) acurve.add_string(b"") # key attributes... - nbr_keys = len(keys) # flags... keyattr_flags = ( 1 << 2 | # interpolation mode, 1 = constant, 2 = linear, 3 = cubic. @@ -2035,8 +2059,8 @@ def keys_to_ktimes(keys): # And now, the *real* data! elem_data_single_float64(acurve, b"Default", def_value) elem_data_single_int32(acurve, b"KeyVer", FBX_ANIM_KEY_VERSION) - elem_data_single_int64_array(acurve, b"KeyTime", keys_to_ktimes(keys)) - elem_data_single_float32_array(acurve, b"KeyValueFloat", (v for _f, v in keys)) + elem_data_single_int64_array(acurve, b"KeyTime", astype_view_signedness(keys, np.int64)) + elem_data_single_float32_array(acurve, b"KeyValueFloat", values.astype(np.float32, copy=False)) elem_data_single_int32_array(acurve, b"KeyAttrFlags", keyattr_flags) elem_data_single_float32_array(acurve, b"KeyAttrDataFloat", keyattr_datafloat) elem_data_single_int32_array(acurve, b"KeyAttrRefCount", (nbr_keys,)) @@ -2258,81 +2282,131 @@ def fbx_animations_do(scene_data, ref_id, f_start, f_end, start_zero, objects=No force_sek, (cam.dof.focus_distance,)) animdata_cameras[cam_key] = (acnode_lens, acnode_focus_distance, cam) - currframe = f_start - while currframe <= f_end: - real_currframe = currframe - f_start if start_zero else currframe - scene.frame_set(int(currframe), subframe=currframe - int(currframe)) + # Get all parent bdata of animated dupli instances, so that we can quickly identify which instances in + # `depsgraph.object_instances` are animated and need their ObjectWrappers' matrices updated each frame. + dupli_parent_bdata = {dup.get_parent().bdata for dup in animdata_ob if dup.is_dupli} + has_animated_duplis = bool(dupli_parent_bdata) - for dp_obj in ob_obj.dupli_list_gen(depsgraph): - pass # Merely updating dupli matrix of ObjectWrapper... - for ob_obj, (anim_loc, anim_rot, anim_scale) in animdata_ob.items(): - # We compute baked loc/rot/scale for all objects (rot being euler-compat with previous value!). 
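Editor's note: the animation hunks above now write `b"KeyTime"` straight from an int64 array instead of converting key by key. A standalone sketch of the frame-to-KTime conversion the surrounding code performs; `FBX_KTIME` is the FBX SDK's 46186158000 time units per second, while `fps` and the frame array are invented examples:

```python
# Standalone sketch of the vectorized frame -> FBX KTime conversion.
import numpy as np

FBX_KTIME = 46186158000   # FBX time units per second
fps = 24.0
frames = np.arange(1.0, 5.0)   # baked frame numbers

ktimes = (frames / fps * FBX_KTIME).astype(np.int64)
print(ktimes)  # one int64 KTime per keyframe, ready for the b"KeyTime" array
```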
- p_rot = p_rots.get(ob_obj, None) - loc, rot, scale, _m, _mr = ob_obj.fbx_object_tx(scene_data, rot_euler_compat=p_rot) - p_rots[ob_obj] = rot - anim_loc.add_keyframe(real_currframe, loc) - anim_rot.add_keyframe(real_currframe, tuple(convert_rad_to_deg_iter(rot))) - anim_scale.add_keyframe(real_currframe, scale) - for anim_shape, me, shape in animdata_shapes.values(): - anim_shape.add_keyframe(real_currframe, (shape.value * 100.0,)) - for anim_camera_lens, anim_camera_focus_distance, camera in animdata_cameras.values(): - anim_camera_lens.add_keyframe(real_currframe, (camera.lens,)) - anim_camera_focus_distance.add_keyframe(real_currframe, (camera.dof.focus_distance * 1000 * gscale,)) - for custom_curve_name, (custom_curve, custom_curve_holder) in custom_curves.items(): - # add custom animation curve for UnrealEngine - custom_curve.add_keyframe(real_currframe, (custom_curve_holder[custom_curve_name],)) - currframe += bake_step + # Initialize keyframe times array. Each AnimationCurveNodeWrapper will share the same instance. + # `np.arange` excludes the `stop` argument like when using `range`, so we use np.nextafter to get the next + # representable value after f_end and use that as the `stop` argument instead. + currframes = np.arange(f_start, np.nextafter(f_end, np.inf), step=bake_step) + # Convert from Blender time to FBX time. + fps = scene.render.fps / scene.render.fps_base + real_currframes = currframes - f_start if start_zero else currframes + real_currframes = (real_currframes / fps * FBX_KTIME).astype(np.int64) + + # Generator that yields the animated values of each frame in order. + def frame_values_gen(): + # Precalculate integer frames and subframes. + int_currframes = currframes.astype(int) + subframes = currframes - int_currframes + + # Create simpler iterables that return only the values we care about. + animdata_shapes_only = [shape for _anim_shape, _me, shape in animdata_shapes.values()] + animdata_cameras_only = [camera for _anim_camera_lens, _anim_camera_focus_distance, camera + in animdata_cameras.values()] + # Previous frame's rotation for each object in animdata_ob, this will be updated each frame. + animdata_ob_p_rots = p_rots.values() + + # Iterate through each frame and yield the values for that frame. + # Iterating .data, the memoryview of an array, is faster than iterating the array directly. + for int_currframe, subframe in zip(int_currframes.data, subframes.data): + scene.frame_set(int_currframe, subframe=subframe) + + if has_animated_duplis: + # Changing the scene's frame invalidates existing dupli instances. To get the updated matrices of duplis + # for this frame, we must get the duplis from the depsgraph again. + for dup in depsgraph.object_instances: + if (parent := dup.parent) and parent.original in dupli_parent_bdata: + # ObjectWrapper caches its instances. Attempting to create a new instance updates the existing + # ObjectWrapper instance with the current frame's matrix and then returns the existing instance. + ObjectWrapper(dup) + next_p_rots = [] + for ob_obj, p_rot in zip(animdata_ob, animdata_ob_p_rots): + # We compute baked loc/rot/scale for all objects (rot being euler-compat with previous value!). 
+ loc, rot, scale, _m, _mr = ob_obj.fbx_object_tx(scene_data, rot_euler_compat=p_rot) + next_p_rots.append(rot) + yield from loc + yield from rot + yield from scale + animdata_ob_p_rots = next_p_rots + for shape in animdata_shapes_only: + yield shape.value + for camera in animdata_cameras_only: + yield camera.lens + yield camera.dof.focus_distance + + # Providing `count` to np.fromiter pre-allocates the array, avoiding extra memory allocations while iterating. + num_ob_values = len(animdata_ob) * 9 # Location, rotation and scale, each of which have x, y, and z components + num_shape_values = len(animdata_shapes) # Only 1 value per shape key + num_camera_values = len(animdata_cameras) * 2 # Focal length (`.lens`) and focus distance + num_values_per_frame = num_ob_values + num_shape_values + num_camera_values + num_frames = len(real_currframes) + all_values_flat = np.fromiter(frame_values_gen(), dtype=float, count=num_frames * num_values_per_frame) + + # Restore the scene's current frame. scene.frame_set(back_currframe, subframe=0.0) + # View such that each column is all values for a single frame and each row is all values for a single curve. + all_values = all_values_flat.reshape(num_frames, num_values_per_frame).T + # Split into views of the arrays for each curve type. + split_at = [num_ob_values, num_shape_values, num_camera_values] + # For unequal sized splits, np.split takes indices to split at, which can be acquired through a cumulative sum + # across the list. + # The last value isn't needed, because the last split is assumed to go to the end of the array. + split_at = np.cumsum(split_at[:-1]) + all_ob_values, all_shape_key_values, all_camera_values = np.split(all_values, split_at) + + all_anims = [] + + # Set location/rotation/scale curves. + # Split into equal sized views of the arrays for each object. + split_into = len(animdata_ob) + per_ob_values = np.split(all_ob_values, split_into) if split_into > 0 else () + for anims, ob_values in zip(animdata_ob.values(), per_ob_values): + # Split again into equal sized views of the location, rotation and scaling arrays. + loc_xyz, rot_xyz, sca_xyz = np.split(ob_values, 3) + # In-place convert from Blender rotation to FBX rotation. + np.rad2deg(rot_xyz, out=rot_xyz) + + anim_loc, anim_rot, anim_scale = anims + anim_loc.set_keyframes(real_currframes, loc_xyz) + anim_rot.set_keyframes(real_currframes, rot_xyz) + anim_scale.set_keyframes(real_currframes, sca_xyz) + all_anims.extend(anims) + + # Set shape key curves. + # There's only one array per shape key, so there's no need to split `all_shape_key_values`. + for (anim_shape, _me, _shape), shape_key_values in zip(animdata_shapes.values(), all_shape_key_values): + # In-place convert from Blender Shape Key Value to FBX Deform Percent. + shape_key_values *= 100.0 + anim_shape.set_keyframes(real_currframes, shape_key_values) + all_anims.append(anim_shape) + + # Set camera curves. + # Split into equal sized views of the arrays for each camera. + split_into = len(animdata_cameras) + per_camera_values = np.split(all_camera_values, split_into) if split_into > 0 else () + zipped = zip(animdata_cameras.values(), per_camera_values) + for (anim_camera_lens, anim_camera_focus_distance, _camera), (lens_values, focus_distance_values) in zipped: + # In-place convert from Blender focus distance to FBX. 
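Editor's note: the baking rewrite above samples every animated value of every frame through one generator, pre-allocates with `np.fromiter(..., count=...)`, and recovers per-curve rows with reshape/transpose/split. A standalone sketch of that pattern with invented counts and values:

```python
# Standalone sketch of the flatten-then-split pattern used above.
import numpy as np

num_frames = 4
curves_per_object = 9   # loc/rot/scale, x/y/z each
num_objects = 2
values_per_frame = curves_per_object * num_objects

def frame_values_gen():
    for frame in range(num_frames):
        for value_idx in range(values_per_frame):
            yield float(frame * 100 + value_idx)  # stand-in for sampled values

flat = np.fromiter(frame_values_gen(), dtype=float,
                   count=num_frames * values_per_frame)
# Rows become curves, columns become frames.
all_values = flat.reshape(num_frames, values_per_frame).T
per_object = np.split(all_values, num_objects)
loc_xyz, rot_xyz, sca_xyz = np.split(per_object[0], 3)
print(loc_xyz.shape)  # (3, 4): x/y/z rows, one column per frame
```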
+ focus_distance_values *= (1000 * gscale) + anim_camera_lens.set_keyframes(real_currframes, lens_values) + anim_camera_focus_distance.set_keyframes(real_currframes, focus_distance_values) + all_anims.append(anim_camera_lens) + all_anims.append(anim_camera_focus_distance) + animations = {} # And now, produce final data (usable by FBX export code) - # Objects-like loc/rot/scale... - for ob_obj, anims in animdata_ob.items(): - for anim in anims: - anim.simplify(simplify_fac, bake_step, force_keep) - if not anim: - continue - for obj_key, group_key, group, fbx_group, fbx_gname in anim.get_final_data(scene, ref_id, force_keep): - anim_data = animations.setdefault(obj_key, ("dummy_unused_key", {})) - anim_data[1][fbx_group] = (group_key, group, fbx_gname) - - # And meshes' shape keys. - for channel_key, (anim_shape, me, shape) in animdata_shapes.items(): - final_keys = {} - anim_shape.simplify(simplify_fac, bake_step, force_keep) - if not anim_shape: + for anim in all_anims: + anim.simplify(simplify_fac, bake_step, force_keep) + if not anim: continue - for elem_key, group_key, group, fbx_group, fbx_gname in anim_shape.get_final_data(scene, ref_id, force_keep): - anim_data = animations.setdefault(elem_key, ("dummy_unused_key", {})) - anim_data[1][fbx_group] = (group_key, group, fbx_gname) - - # And cameras' lens and focus distance keys. - for cam_key, (anim_camera_lens, anim_camera_focus_distance, camera) in animdata_cameras.items(): - final_keys = {} - anim_camera_lens.simplify(simplify_fac, bake_step, force_keep) - anim_camera_focus_distance.simplify(simplify_fac, bake_step, force_keep) - if anim_camera_lens: - for elem_key, group_key, group, fbx_group, fbx_gname in \ - anim_camera_lens.get_final_data(scene, ref_id, force_keep): - anim_data = animations.setdefault(elem_key, ("dummy_unused_key", {})) - anim_data[1][fbx_group] = (group_key, group, fbx_gname) - if anim_camera_focus_distance: - for elem_key, group_key, group, fbx_group, fbx_gname in \ - anim_camera_focus_distance.get_final_data(scene, ref_id, force_keep): - anim_data = animations.setdefault(elem_key, ("dummy_unused_key", {})) - anim_data[1][fbx_group] = (group_key, group, fbx_gname) - - # And UE4 Custom Values - for custom_curve_name, (custom_curve, custom_curve_key) in custom_curves.items(): - final_keys = {} - custom_curve.simplify(simplify_fac, bake_step, force_keep) - if not custom_curve: - continue - finaldata = custom_curve.get_final_data(scene, ref_id, force_keep) - for elem_key, group_key, group, fbx_group, fbx_gname in finaldata: - anim_data = animations.setdefault(elem_key, ("dummy_unused_key", {})) + for obj_key, group_key, group, fbx_group, fbx_gname in anim.get_final_data(scene, ref_id, force_keep): + anim_data = animations.setdefault(obj_key, ("dummy_unused_key", {})) anim_data[1][fbx_group] = (group_key, group, fbx_gname) astack_key = get_blender_anim_stack_key(scene, ref_id) @@ -2548,7 +2622,6 @@ def fbx_data_from_scene(scene, depsgraph, settings): if ob_obj.type not in BLENDER_OBJECT_TYPES_MESHLIKE: continue ob = ob_obj.bdata - use_org_data = True org_ob_obj = None # Do not want to systematically recreate a new mesh for dupliobject instances, kind of break purpose of those. 
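Editor's note: the camera block in the animation hunk above unpacks each camera's two rows (lens, focus distance) and converts focus distance from Blender units to FBX millimetres via the global scale. A standalone sketch with example values:

```python
# Standalone sketch of the per-camera rows and the focus-distance unit conversion.
import numpy as np

gscale = 0.01   # example global scale
all_camera_values = np.array([
    [50.0, 50.0, 55.0],   # camera 1: lens per frame
    [10.0, 10.0, 12.0],   # camera 1: focus distance per frame (Blender units)
])

per_camera = np.split(all_camera_values, 1)   # one camera in this example
lens_values, focus_distance_values = per_camera[0]
focus_distance_values = focus_distance_values * (1000 * gscale)
print(lens_values, focus_distance_values)  # [50. 50. 55.] [100. 100. 120.]
```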
@@ -2558,90 +2631,114 @@ def fbx_data_from_scene(scene, depsgraph, settings): data_meshes[ob_obj] = data_meshes[org_ob_obj] continue - is_ob_material = any(ms.link == 'OBJECT' for ms in ob.material_slots) - - if settings.use_mesh_modifiers or settings.use_triangles or ob.type in BLENDER_OTHER_OBJECT_TYPES or is_ob_material: - # We cannot use default mesh in that case, or material would not be the right ones... - use_org_data = not (is_ob_material or ob.type in BLENDER_OTHER_OBJECT_TYPES) - backup_pose_positions = [] - tmp_mods = [] - if use_org_data and ob.type == 'MESH': - if settings.use_triangles: - use_org_data = False - # No need to create a new mesh in this case, if no modifier is active! - last_subsurf = None - for mod in ob.modifiers: - # For meshes, when armature export is enabled, disable Armature modifiers here! - # XXX Temp hacks here since currently we only have access to a viewport depsgraph... - # - # NOTE: We put armature to the rest pose instead of disabling it so we still - # have vertex groups in the evaluated mesh. - if mod.type == 'ARMATURE' and 'ARMATURE' in settings.object_types: - object = mod.object - if object and object.type == 'ARMATURE': - armature = object.data - # If armature is already in REST position, there's nothing to back-up - # This cuts down on export time dramatically, if all armatures are already in REST position - # by not triggering dependency graph update - if armature.pose_position != 'REST': - backup_pose_positions.append((armature, armature.pose_position)) - armature.pose_position = 'REST' - elif mod.show_render or mod.show_viewport: - # If exporting with subsurf collect the last Catmull-Clark subsurf modifier - # and disable it. We can use the original data as long as this is the first - # found applicable subsurf modifier. - if settings.use_subsurf and mod.type == 'SUBSURF' and mod.subdivision_type == 'CATMULL_CLARK': - if last_subsurf: - use_org_data = False - last_subsurf = mod - else: - use_org_data = False - if settings.use_subsurf and last_subsurf: - # XXX: When exporting with subsurf information temporarily disable - # the last subsurf modifier. - tmp_mods.append((last_subsurf, last_subsurf.show_render, last_subsurf.show_viewport)) - last_subsurf.show_render = False - last_subsurf.show_viewport = False - if not use_org_data: - # If modifiers has been altered need to update dependency graph. - if backup_pose_positions or tmp_mods: - depsgraph.update() - ob_to_convert = ob.evaluated_get(depsgraph) if settings.use_mesh_modifiers else ob - # NOTE: The dependency graph might be re-evaluating multiple times, which could - # potentially free the mesh created early on. So we put those meshes to bmain and - # free them afterwards. Not ideal but ensures correct ownerwhip. - tmp_me = bpy.data.meshes.new_from_object( - ob_to_convert, preserve_all_data_layers=True, depsgraph=depsgraph) - # Triangulate the mesh if requested - if settings.use_triangles: - import bmesh - bm = bmesh.new() - bm.from_mesh(tmp_me) - bmesh.ops.triangulate(bm, faces=bm.faces) - bm.to_mesh(tmp_me) - bm.free() - # Usually the materials of the evaluated object will be the same, but modifiers, such as Geometry Nodes, - # can change the materials. - orig_mats = tuple(slot.material for slot in ob.material_slots) - eval_mats = tuple(slot.material.original if slot.material else None - for slot in ob_to_convert.material_slots) - if orig_mats != eval_mats: - # Override the default behaviour of getting materials from ob_obj.bdata.material_slots. 
- ob_obj.override_materials = eval_mats - data_meshes[ob_obj] = (get_blenderID_key(tmp_me), tmp_me, True) - # Change armatures back. - for armature, pose_position in backup_pose_positions: - # print((armature, pose_position)) - armature.pose_position = pose_position - # Update now, so we don't leave modified state after last object was exported. - # Re-enable temporary disabled modifiers. - for mod, show_render, show_viewport in tmp_mods: - mod.show_render = show_render - mod.show_viewport = show_viewport + # There are 4 different cases for what we need to do with the original data of each Object: + # 1) The original data can be used without changes. + # 2) A copy of the original data needs to be made. + # - If an export option modifies the data, e.g. Triangulate Faces is enabled. + # - If the Object has Object-linked materials. This is because our current mapping of materials to FBX requires + # that multiple Objects sharing a single mesh must have the same materials. + # 3) The Object needs to be converted to a mesh. + # - All mesh-like Objects that are not meshes need to be converted to a mesh in order to be exported. + # 4) The Object needs to be evaluated and then converted to a mesh. + # - Whenever use_mesh_modifiers is enabled and either there are modifiers to apply or the Object needs to be + # converted to a mesh. + # If multiple cases apply to an Object, then only the last applicable case is relevant. + do_copy = any(ms.link == 'OBJECT' for ms in ob.material_slots) or settings.use_triangles + do_convert = ob.type in BLENDER_OTHER_OBJECT_TYPES + do_evaluate = do_convert and settings.use_mesh_modifiers + + # If the Object is a mesh, and we're applying modifiers, check if there are actually any modifiers to apply. + # If there are then the mesh will need to be evaluated, and we may need to make some temporary changes to the + # modifiers or scene before the mesh is evaluated. + backup_pose_positions = [] + tmp_mods = [] + if ob.type == 'MESH' and settings.use_mesh_modifiers: + # No need to create a new mesh in this case, if no modifier is active! + last_subsurf = None + for mod in ob.modifiers: + # For meshes, when armature export is enabled, disable Armature modifiers here! + # XXX Temp hacks here since currently we only have access to a viewport depsgraph... + # + # NOTE: We put armature to the rest pose instead of disabling it so we still + # have vertex groups in the evaluated mesh. + if mod.type == 'ARMATURE' and 'ARMATURE' in settings.object_types: + object = mod.object + if object and object.type == 'ARMATURE': + armature = object.data + # If armature is already in REST position, there's nothing to back-up + # This cuts down on export time dramatically, if all armatures are already in REST position + # by not triggering dependency graph update + if armature.pose_position != 'REST': + backup_pose_positions.append((armature, armature.pose_position)) + armature.pose_position = 'REST' + elif mod.show_render or mod.show_viewport: + # If exporting with subsurf collect the last Catmull-Clark subsurf modifier + # and disable it. We can use the original data as long as this is the first + # found applicable subsurf modifier. + if settings.use_subsurf and mod.type == 'SUBSURF' and mod.subdivision_type == 'CATMULL_CLARK': + if last_subsurf: + do_evaluate = True + last_subsurf = mod + else: + do_evaluate = True + if settings.use_subsurf and last_subsurf: + # XXX: When exporting with subsurf information temporarily disable + # the last subsurf modifier. 
+            tmp_mods.append((last_subsurf, last_subsurf.show_render, last_subsurf.show_viewport))
+            last_subsurf.show_render = False
+            last_subsurf.show_viewport = False
+
+        if do_evaluate:
+            # If modifiers have been altered, the dependency graph needs to be updated.
             if backup_pose_positions or tmp_mods:
                 depsgraph.update()
-        if use_org_data:
+            ob_to_convert = ob.evaluated_get(depsgraph)
+            # NOTE: The dependency graph might be re-evaluating multiple times, which could
+            # potentially free the mesh created early on. So we put those meshes to bmain and
+            # free them afterwards. Not ideal but ensures correct ownership.
+            tmp_me = bpy.data.meshes.new_from_object(
+                ob_to_convert, preserve_all_data_layers=True, depsgraph=depsgraph)
+
+            # Usually the materials of the evaluated object will be the same, but modifiers, such as Geometry Nodes,
+            # can change the materials.
+            orig_mats = tuple(slot.material for slot in ob.material_slots)
+            eval_mats = tuple(slot.material.original if slot.material else None
+                              for slot in ob_to_convert.material_slots)
+            if orig_mats != eval_mats:
+                # Override the default behaviour of getting materials from ob_obj.bdata.material_slots.
+                ob_obj.override_materials = eval_mats
+        elif do_convert:
+            tmp_me = bpy.data.meshes.new_from_object(ob, preserve_all_data_layers=True, depsgraph=depsgraph)
+        elif do_copy:
+            # bpy.data.meshes.new_from_object removes shape keys (see #104714), so create a copy of the mesh instead.
+            tmp_me = ob.data.copy()
+        else:
+            tmp_me = None
+
+        if tmp_me is None:
+            # Use the original data of this Object.
             data_meshes[ob_obj] = (get_blenderID_key(ob.data), ob.data, False)
+        else:
+            # Triangulate the mesh if requested
+            if settings.use_triangles:
+                import bmesh
+                bm = bmesh.new()
+                bm.from_mesh(tmp_me)
+                bmesh.ops.triangulate(bm, faces=bm.faces)
+                bm.to_mesh(tmp_me)
+                bm.free()
+            # A temporary mesh was created for this Object, which should be deleted once the export is complete.
+            data_meshes[ob_obj] = (get_blenderID_key(tmp_me), tmp_me, True)
+
+        # Change armatures back.
+        for armature, pose_position in backup_pose_positions:
+            # print((armature, pose_position))
+            armature.pose_position = pose_position
+        # Update now, so we don't leave modified state after last object was exported.
+        # Re-enable temporary disabled modifiers.
+        for mod, show_render, show_viewport in tmp_mods:
+            mod.show_render = show_render
+            mod.show_viewport = show_viewport
+        if backup_pose_positions or tmp_mods:
+            depsgraph.update()
 
         # In case "real" source object of that dupli did not yet still existed in data_meshes, create it now!
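Editor's note: the rewritten block above replaces the old `use_org_data` flag with the four cases listed in its leading comment. A standalone sketch of that decision, using hypothetical boolean parameters (the real code derives them from the Object and the export settings, and also special-cases Subdivision Surface modifiers):

```python
# Standalone sketch of the original/copy/convert/evaluate decision described above.
MESH = 'MESH'
OTHER_MESHLIKE = {'CURVE', 'SURFACE', 'FONT', 'META'}

def choose_mesh_source(ob_type, has_object_linked_mats, use_triangles,
                       use_mesh_modifiers, has_modifiers_to_apply):
    do_copy = has_object_linked_mats or use_triangles
    do_convert = ob_type in OTHER_MESHLIKE
    do_evaluate = (do_convert and use_mesh_modifiers) or (
        ob_type == MESH and use_mesh_modifiers and has_modifiers_to_apply)
    # Only the last applicable case is relevant.
    if do_evaluate:
        return 'evaluate'
    if do_convert:
        return 'convert'
    if do_copy:
        return 'copy'
    return 'original'

print(choose_mesh_source('CURVE', False, False, True, False))  # evaluate
print(choose_mesh_source('MESH', False, True, False, False))   # copy
```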
if org_ob_obj is not None: @@ -2677,10 +2774,10 @@ def empty_verts_fallbacks(): # Get and cache only the cos that we need @cache def sk_cos(shape_key): - _cos = np.empty(len(me.vertices) * 3, dtype=co_bl_dtype) if shape_key == sk_base: - me.vertices.foreach_get("co", _cos) + _cos = MESH_ATTRIBUTE_POSITION.to_ndarray(me.attributes) else: + _cos = np.empty(len(me.vertices) * 3, dtype=co_bl_dtype) shape_key.data.foreach_get("co", _cos) return vcos_transformed(_cos, geom_mat_co, co_fbx_dtype) @@ -2860,8 +2957,8 @@ def sk_cos(shape_key): for _alayer_key, alayer in astack.values(): for _acnode_key, acnode, _acnode_name in alayer.values(): nbr_acnodes += 1 - for _acurve_key, _dval, acurve, acurve_valid in acnode.values(): - if acurve: + for _acurve_key, _dval, (keys, _values), acurve_valid in acnode.values(): + if len(keys): nbr_acurves += 1 templates[b"AnimationStack"] = fbx_template_def_animstack(scene, settings, nbr_users=nbr_astacks) @@ -2995,8 +3092,8 @@ def sk_cos(shape_key): connections.append((b"OO", acurvenode_id, alayer_id, None)) # Animcurvenode -> object property. connections.append((b"OP", acurvenode_id, elem_id, fbx_prop.encode())) - for fbx_item, (acurve_key, default_value, acurve, acurve_valid) in acurves.items(): - if acurve: + for fbx_item, (acurve_key, default_value, (keys, values), acurve_valid) in acurves.items(): + if len(keys): # Animcurve -> Animcurvenode. connections.append((b"OP", get_fbx_uuid_from_key(acurve_key), acurvenode_id, fbx_item.encode())) diff --git a/blender-for-unrealengine/fbxio/fbx_utils.py b/blender-for-unrealengine/fbxio/fbx_utils.py index fc02c2ec..97d6116b 100644 --- a/blender-for-unrealengine/fbxio/fbx_utils.py +++ b/blender-for-unrealengine/fbxio/fbx_utils.py @@ -1,8 +1,8 @@ +# SPDX-FileCopyrightText: 2013 Campbell Barton +# SPDX-FileCopyrightText: 2014 Bastien Montagne +# # SPDX-License-Identifier: GPL-2.0-or-later -# Script copyright (C) Campbell Barton, Bastien Montagne - - import math import time import re @@ -10,6 +10,8 @@ from collections import namedtuple from collections.abc import Iterable from itertools import zip_longest, chain +from dataclasses import dataclass, field +from typing import Callable import numpy as np import bpy @@ -66,6 +68,9 @@ BLENDER_OTHER_OBJECT_TYPES = {'CURVE', 'SURFACE', 'FONT', 'META'} BLENDER_OBJECT_TYPES_MESHLIKE = {'MESH'} | BLENDER_OTHER_OBJECT_TYPES +SHAPE_KEY_SLIDER_HARD_MIN = bpy.types.ShapeKey.bl_rna.properties["slider_min"].hard_min +SHAPE_KEY_SLIDER_HARD_MAX = bpy.types.ShapeKey.bl_rna.properties["slider_max"].hard_max + # Lamps. FBX_LIGHT_TYPES = { @@ -413,8 +418,13 @@ def nors_transformed(raw_nors, m=None, dtype=None): def astype_view_signedness(arr, new_dtype): - """Unsafely views arr as new_dtype if the itemsize and byteorder of arr matches but the signedness does not, - otherwise calls np.ndarray.astype with copy=False. + """Unsafely views arr as new_dtype if the itemsize and byteorder of arr matches but the signedness does not. + + Safely views arr as new_dtype if both arr and new_dtype have the same itemsize, byteorder and signedness, but could + have a different character code, e.g. 'i' and 'l'. np.ndarray.astype with copy=False does not normally create this + view, but Blender can be picky about the character code used, so this function will create the view. + + Otherwise, calls np.ndarray.astype with copy=False. The benefit of copy=False is that if the array can be safely viewed as the new type, then a view is made, instead of a copy with the new type. 
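Editor's note: the docstring hunk above (and the body hunk that follows) is about `astype_view_signedness` returning a zero-copy view whenever only the signedness or character code differs. A standalone sketch of what that buys:

```python
# Standalone sketch: opposite-signedness integer arrays can be reinterpreted
# as a view instead of copied.
import numpy as np

arr = np.array([0, 1, 2**31 - 1, -1], dtype=np.int32)
viewed = arr.view(np.uint32)       # no copy: -1 reinterprets as 4294967295
copied = arr.astype(np.float64)    # different itemsize/kind: a real copy is made
print(viewed)              # [0 1 2147483647 4294967295]
print(viewed.base is arr)  # True: it shares the original memory
```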
@@ -435,13 +445,14 @@ def astype_view_signedness(arr, new_dtype): # else is left to .astype. arr_kind = arr_dtype.kind new_kind = new_dtype.kind + # Signed and unsigned int are opposite in terms of signedness. Other types don't have signedness. + integer_kinds = {'i', 'u'} if ( - # Signed and unsigned int are opposite in terms of signedness. Other types don't have signedness. - ((arr_kind == 'i' and new_kind == 'u') or (arr_kind == 'u' and new_kind == 'i')) + arr_kind in integer_kinds and new_kind in integer_kinds and arr_dtype.itemsize == new_dtype.itemsize and arr_dtype.byteorder == new_dtype.byteorder ): - # new_dtype has opposite signedness and matching itemsize and byteorder, so return a view of the new type. + # arr and new_dtype have signedness and matching itemsize and byteorder, so return a view of the new type. return arr.view(new_dtype) else: return arr.astype(new_dtype, copy=False) @@ -593,6 +604,190 @@ def ensure_object_not_in_edit_mode(context, obj): return True +def expand_shape_key_range(shape_key, value_to_fit): + """Attempt to expand the slider_min/slider_max of a shape key to fit `value_to_fit` within the slider range, + expanding slightly beyond `value_to_fit` if possible, so that the new slider_min/slider_max is not the same as + `value_to_fit`. Blender has a hard minimum and maximum for slider values, so it may not be possible to fit the value + within the slider range. + + If `value_to_fit` is already within the slider range, no changes are made. + + First tries setting slider_min/slider_max to double `value_to_fit`, otherwise, expands the range in the direction of + `value_to_fit` by double the distance to `value_to_fit`. + + The new slider_min/slider_max is rounded down/up to the nearest whole number for a more visually pleasing result. + + Returns whether it was possible to expand the slider range to fit `value_to_fit`.""" + if value_to_fit < (slider_min := shape_key.slider_min): + if value_to_fit < 0.0: + # For the most common case, set slider_min to double value_to_fit. + target_slider_min = value_to_fit * 2.0 + else: + # Doubling value_to_fit would make it larger, so instead decrease slider_min by double the distance between + # slider_min and value_to_fit. + target_slider_min = slider_min - (slider_min - value_to_fit) * 2.0 + # Set slider_min to the first whole number less than or equal to target_slider_min. + shape_key.slider_min = math.floor(target_slider_min) + + return value_to_fit >= SHAPE_KEY_SLIDER_HARD_MIN + elif value_to_fit > (slider_max := shape_key.slider_max): + if value_to_fit > 0.0: + # For the most common case, set slider_max to double value_to_fit. + target_slider_max = value_to_fit * 2.0 + else: + # Doubling value_to_fit would make it smaller, so instead increase slider_max by double the distance between + # slider_max and value_to_fit. + target_slider_max = slider_max + (value_to_fit - slider_max) * 2.0 + # Set slider_max to the first whole number greater than or equal to target_slider_max. + shape_key.slider_max = math.ceil(target_slider_max) + + return value_to_fit <= SHAPE_KEY_SLIDER_HARD_MAX + else: + # Value is already within the range. + return True + + +# ##### Attribute utils. 
##### +AttributeDataTypeInfo = namedtuple("AttributeDataTypeInfo", ["dtype", "foreach_attribute", "item_size"]) +_attribute_data_type_info_lookup = { + 'FLOAT': AttributeDataTypeInfo(np.single, "value", 1), + 'INT': AttributeDataTypeInfo(np.intc, "value", 1), + 'FLOAT_VECTOR': AttributeDataTypeInfo(np.single, "vector", 3), + 'FLOAT_COLOR': AttributeDataTypeInfo(np.single, "color", 4), # color_srgb is an alternative + 'BYTE_COLOR': AttributeDataTypeInfo(np.single, "color", 4), # color_srgb is an alternative + 'STRING': AttributeDataTypeInfo(None, "value", 1), # Not usable with foreach_get/set + 'BOOLEAN': AttributeDataTypeInfo(bool, "value", 1), + 'FLOAT2': AttributeDataTypeInfo(np.single, "vector", 2), + 'INT8': AttributeDataTypeInfo(np.intc, "value", 1), + 'INT32_2D': AttributeDataTypeInfo(np.intc, "value", 2), +} + + +def attribute_get(attributes, name, data_type, domain): + """Get an attribute by its name, data_type and domain. + + Returns None if no attribute with this name, data_type and domain exists.""" + attr = attributes.get(name) + if not attr: + return None + if attr.data_type == data_type and attr.domain == domain: + return attr + # It shouldn't normally happen, but it's possible there are multiple attributes with the same name, but different + # data_types or domains. + for attr in attributes: + if attr.name == name and attr.data_type == data_type and attr.domain == domain: + return attr + return None + + +def attribute_foreach_set(attribute, array_or_list, foreach_attribute=None): + """Set every value of an attribute with foreach_set.""" + if foreach_attribute is None: + foreach_attribute = _attribute_data_type_info_lookup[attribute.data_type].foreach_attribute + attribute.data.foreach_set(foreach_attribute, array_or_list) + + +def attribute_to_ndarray(attribute, foreach_attribute=None): + """Create a NumPy ndarray from an attribute.""" + data = attribute.data + data_type_info = _attribute_data_type_info_lookup[attribute.data_type] + ndarray = np.empty(len(data) * data_type_info.item_size, dtype=data_type_info.dtype) + if foreach_attribute is None: + foreach_attribute = data_type_info.foreach_attribute + data.foreach_get(foreach_attribute, ndarray) + return ndarray + + +@dataclass +class AttributeDescription: + """Helper class to reduce duplicate code for handling built-in Blender attributes.""" + name: str + # Valid identifiers can be found in bpy.types.Attribute.bl_rna.properties["data_type"].enum_items + data_type: str + # Valid identifiers can be found in bpy.types.Attribute.bl_rna.properties["domain"].enum_items + domain: str + # Some attributes are required to exist if certain conditions are met. If a required attribute does not exist when + # attempting to get it, an AssertionError is raised. + is_required_check: Callable[[bpy.types.AttributeGroup], bool] = None + # NumPy dtype that matches the internal C data of this attribute. + dtype: np.dtype = field(init=False) + # The default attribute name to use with foreach_get and foreach_set. + foreach_attribute: str = field(init=False) + # The number of elements per value of the attribute when flattened into a 1-dimensional list/array. 
+ item_size: int = field(init=False) + + def __post_init__(self): + data_type_info = _attribute_data_type_info_lookup[self.data_type] + self.dtype = data_type_info.dtype + self.foreach_attribute = data_type_info.foreach_attribute + self.item_size = data_type_info.item_size + + def is_required(self, attributes): + """Check if the attribute is required to exist in the provided attributes.""" + is_required_check = self.is_required_check + return is_required_check and is_required_check(attributes) + + def get(self, attributes): + """Get the attribute. + + If the attribute is required, but does not exist, an AssertionError is raised, otherwise None is returned.""" + attr = attribute_get(attributes, self.name, self.data_type, self.domain) + if not attr and self.is_required(attributes): + raise AssertionError("Required attribute '%s' with type '%s' and domain '%s' not found in %r" + % (self.name, self.data_type, self.domain, attributes)) + return attr + + def ensure(self, attributes): + """Get the attribute, creating it if it does not exist. + + Raises a RuntimeError if the attribute could not be created, which should only happen when attempting to create + an attribute with a reserved name, but with the wrong data_type or domain. See usage of + BuiltinCustomDataLayerProvider in Blender source for most reserved names. + + There is no guarantee that the returned attribute has the desired name because the name could already be in use + by another attribute with a different data_type and/or domain.""" + attr = self.get(attributes) + if attr: + return attr + + attr = attributes.new(self.name, self.data_type, self.domain) + if not attr: + raise RuntimeError("Could not create attribute '%s' with type '%s' and domain '%s' in %r" + % (self.name, self.data_type, self.domain, attributes)) + return attr + + def foreach_set(self, attributes, array_or_list, foreach_attribute=None): + """Get the attribute, creating it if it does not exist, and then set every value in the attribute.""" + attribute_foreach_set(self.ensure(attributes), array_or_list, foreach_attribute) + + def get_ndarray(self, attributes, foreach_attribute=None): + """Get the attribute and if it exists, return a NumPy ndarray containing its data, otherwise return None.""" + attr = self.get(attributes) + return attribute_to_ndarray(attr, foreach_attribute) if attr else None + + def to_ndarray(self, attributes, foreach_attribute=None): + """Get the attribute and if it exists, return a NumPy ndarray containing its data, otherwise return a + zero-length ndarray.""" + ndarray = self.get_ndarray(attributes, foreach_attribute) + return ndarray if ndarray is not None else np.empty(0, dtype=self.dtype) + + +# Built-in Blender attributes +# Only attributes used by the importer/exporter are included here. +# See usage of BuiltinCustomDataLayerProvider in Blender source to find most built-in attributes. 
+MESH_ATTRIBUTE_MATERIAL_INDEX = AttributeDescription("material_index", 'INT', 'FACE') +MESH_ATTRIBUTE_POSITION = AttributeDescription("position", 'FLOAT_VECTOR', 'POINT', + is_required_check=lambda attributes: bool(attributes.id_data.vertices)) +MESH_ATTRIBUTE_SHARP_EDGE = AttributeDescription("sharp_edge", 'BOOLEAN', 'EDGE') +MESH_ATTRIBUTE_EDGE_VERTS = AttributeDescription(".edge_verts", 'INT32_2D', 'EDGE', + is_required_check=lambda attributes: bool(attributes.id_data.edges)) +MESH_ATTRIBUTE_CORNER_VERT = AttributeDescription(".corner_vert", 'INT', 'CORNER', + is_required_check=lambda attributes: bool(attributes.id_data.loops)) +MESH_ATTRIBUTE_CORNER_EDGE = AttributeDescription(".corner_edge", 'INT', 'CORNER', + is_required_check=lambda attributes: bool(attributes.id_data.loops)) +MESH_ATTRIBUTE_SHARP_FACE = AttributeDescription("sharp_face", 'BOOLEAN', 'FACE') + + # ##### UIDs code. ##### # ID class (mere int). @@ -775,6 +970,10 @@ def elem_data_single_bool(elem, name, value): return _elem_data_single(elem, name, value, "add_bool") +def elem_data_single_char(elem, name, value): + return _elem_data_single(elem, name, value, "add_char") + + def elem_data_single_int8(elem, name, value): return _elem_data_single(elem, name, value, "add_int8") @@ -1040,8 +1239,10 @@ class AnimationCurveNodeWrapper: and easy API to handle those. """ __slots__ = ( - 'elem_keys', '_keys', 'default_values', 'fbx_group', 'fbx_gname', 'fbx_props', - 'force_keying', 'force_startend_keying') + 'elem_keys', 'default_values', 'fbx_group', 'fbx_gname', 'fbx_props', + 'force_keying', 'force_startend_keying', + '_frame_times_array', '_frame_values_array', '_frame_write_mask_array', + ) kinds = { 'LCL_TRANSLATION': ("Lcl Translation", "T", ("X", "Y", "Z")), @@ -1067,7 +1268,9 @@ def __init__(self, elem_key, kind, force_keying, force_startend_keying, default_ self.fbx_props = [self.kinds[kind][2]] self.force_keying = force_keying self.force_startend_keying = force_startend_keying - self._keys = [] # (frame, values, write_flags) + self._frame_times_array = None + self._frame_values_array = None + self._frame_write_mask_array = None if default_values is not ...: assert(len(default_values) == len(self.fbx_props[0])) self.default_values = default_values @@ -1076,7 +1279,7 @@ def __init__(self, elem_key, kind, force_keying, force_startend_keying, default_ def __bool__(self): # We are 'True' if we do have some validated keyframes... - return bool(self._keys) and (True in ((True in k[2]) for k in self._keys)) + return self._frame_write_mask_array is not None and bool(np.any(self._frame_write_mask_array)) def add_group(self, elem_key, fbx_group, fbx_gname, fbx_props): """ @@ -1089,19 +1292,31 @@ def add_group(self, elem_key, fbx_group, fbx_gname, fbx_props): self.fbx_gname.append(fbx_gname) self.fbx_props.append(fbx_props) - def add_keyframe(self, frame, values): + def set_keyframes(self, keyframe_times, keyframe_values): """ - Add a new keyframe to all curves of the group. + Set all keyframe times and values of the group. + Values can be a 2D array where each row is the values for a separate curve. """ - assert(len(values) == len(self.fbx_props[0])) - self._keys.append((frame, values, [True] * len(values))) # write everything by default. + # View 1D keyframe_values as 2D with a single row, so that the same code can be used for both 1D and + # 2D inputs. + if len(keyframe_values.shape) == 1: + keyframe_values = keyframe_values[np.newaxis] + # There must be a time for each column of values. 
+ assert(len(keyframe_times) == keyframe_values.shape[1]) + # There must be as many rows of values as there are properties. + assert(len(self.fbx_props[0]) == len(keyframe_values)) + write_mask = np.full_like(keyframe_values, True, dtype=bool) # write everything by default + self._frame_times_array = keyframe_times + self._frame_values_array = keyframe_values + self._frame_write_mask_array = write_mask def simplify(self, fac, step, force_keep=False): """ Simplifies sampled curves by only enabling samples when: * their values relatively differ from the previous sample ones. """ - if not self._keys: + if self._frame_times_array is None: + # Keyframes have not been added yet. return if fac == 0.0: @@ -1110,36 +1325,155 @@ def simplify(self, fac, step, force_keep=False): # So that, with default factor and step values (1), we get: min_reldiff_fac = fac * 1.0e-3 # min relative value evolution: 0.1% of current 'order of magnitude'. min_absdiff_fac = 0.1 # A tenth of reldiff... - keys = self._keys - - p_currframe, p_key, p_key_write = keys[0] - p_keyed = list(p_key) - are_keyed = [False] * len(p_key) - for currframe, key, key_write in keys: - for idx, (val, p_val) in enumerate(zip(key, p_key)): - key_write[idx] = False - p_keyedval = p_keyed[idx] - if val == p_val: - # Never write keyframe when value is exactly the same as prev one! - continue - # This is contracted form of relative + absolute-near-zero difference: - # absdiff = abs(a - b) - # if absdiff < min_reldiff_fac * min_absdiff_fac: - # return False - # return (absdiff / ((abs(a) + abs(b)) / 2)) > min_reldiff_fac - # Note that we ignore the '/ 2' part here, since it's not much significant for us. - if abs(val - p_val) > (min_reldiff_fac * max(abs(val) + abs(p_val), min_absdiff_fac)): - # If enough difference from previous sampled value, key this value *and* the previous one! - key_write[idx] = True - p_key_write[idx] = True - p_keyed[idx] = val - are_keyed[idx] = True - elif abs(val - p_keyedval) > (min_reldiff_fac * max((abs(val) + abs(p_keyedval)), min_absdiff_fac)): - # Else, if enough difference from previous keyed value, key this value only! - key_write[idx] = True - p_keyed[idx] = val - are_keyed[idx] = True - p_currframe, p_key, p_key_write = currframe, key, key_write + + # Initialise to no values enabled for writing. + self._frame_write_mask_array[:] = False + + # Values are enabled for writing if they differ enough from either of their adjacent values or if they differ + # enough from the closest previous value that is enabled due to either of these conditions. + for sampled_values, enabled_mask in zip(self._frame_values_array, self._frame_write_mask_array): + # Create overlapping views of the 'previous' (all but the last) and 'current' (all but the first) + # `sampled_values` and `enabled_mask`. + # Calculate absolute values from `sampled_values` so that the 'previous' and 'current' absolute arrays can + # be views into the same array instead of separately calculated arrays. + abs_sampled_values = np.abs(sampled_values) + # 'previous' views. + p_val_view = sampled_values[:-1] + p_abs_val_view = abs_sampled_values[:-1] + p_enabled_mask_view = enabled_mask[:-1] + # 'current' views. + c_val_view = sampled_values[1:] + c_abs_val_view = abs_sampled_values[1:] + c_enabled_mask_view = enabled_mask[1:] + + # If enough difference from previous sampled value, enable the current value *and* the previous one! + # The difference check is symmetrical, so this will compare each value to both of its adjacent values. 
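+ # As an illustration, with fac=1 (min_reldiff_fac=1e-3), samples [1.0, 1.0005, 2.0] treat 1.0 and
+ # 1.0005 as equal, but the 1.0005 -> 2.0 comparison enables both 1.0005 and 2.0, leaving the
+ # first 1.0 disabled.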
+ # Unless it is forcefully enabled later, this is the only way that the first value can be enabled.
+ # This is a contracted form of relative + absolute-near-zero difference:
+ # def is_different(a, b):
+ # abs_diff = abs(a - b)
+ # if abs_diff < min_reldiff_fac * min_absdiff_fac:
+ # return False
+ # return (abs_diff / ((abs(a) + abs(b)) / 2)) > min_reldiff_fac
+ # Note that we ignore the '/ 2' part here, since it's not very significant for us.
+ # Contracted form using only builtin Python functions:
+ # return abs(a - b) > (min_reldiff_fac * max(abs(a) + abs(b), min_absdiff_fac))
+ abs_diff = np.abs(c_val_view - p_val_view)
+ different_if_greater_than = min_reldiff_fac * np.maximum(c_abs_val_view + p_abs_val_view, min_absdiff_fac)
+ enough_diff_p_val_mask = abs_diff > different_if_greater_than
+ # Enable both the current values *and* the previous values where `enough_diff_p_val_mask` is True. Some
+ # values may get set to True twice because the views overlap, but this is not a problem.
+ p_enabled_mask_view[enough_diff_p_val_mask] = True
+ c_enabled_mask_view[enough_diff_p_val_mask] = True
+
+ # Else, if enough difference from previous enabled value, enable the current value only!
+ # For each 'current' value, get the index of the nearest previous enabled value in `sampled_values` (or
+ # itself if the value is enabled).
+ # Start with an array holding the index of each 'current' value in `sampled_values`. The 'current' values are
+ # all but the first value, so the indices will be from 1 to `len(sampled_values)` exclusive.
+ # Let len(sampled_values) == 9:
+ # [1, 2, 3, 4, 5, 6, 7, 8]
+ p_enabled_idx_in_sampled_values = np.arange(1, len(sampled_values))
+ # Replace the indices of all disabled values with 0 in preparation for filling them in with the index of the
+ # nearest previous enabled value. We choose to replace with 0 so that if there is no nearest previous
+ # enabled value, we instead default to `sampled_values[0]`.
+ c_val_disabled_mask = ~c_enabled_mask_view
+ # Let `c_val_disabled_mask` be:
+ # [F, F, T, F, F, T, T, T]
+ # Set indices to 0 where `c_val_disabled_mask` is True:
+ # [1, 2, 3, 4, 5, 6, 7, 8]
+ # v v v v
+ # [1, 2, 0, 4, 5, 0, 0, 0]
+ p_enabled_idx_in_sampled_values[c_val_disabled_mask] = 0
+ # The accumulative maximum travels across the array from left to right, filling in the zeroed indices with the
+ # maximum value so far, which will be the closest previous enabled index because the non-zero indices are
+ # strictly increasing.
+ # [1, 2, 0, 4, 5, 0, 0, 0]
+ # v v v v
+ # [1, 2, 2, 4, 5, 5, 5, 5]
+ p_enabled_idx_in_sampled_values = np.maximum.accumulate(p_enabled_idx_in_sampled_values)
+ # Only disabled values need to be checked against their nearest previous enabled values.
+ # We can additionally ignore all values which equal their immediately previous value because those values
+ # will never be enabled if they were not enabled by the earlier difference check against immediately
+ # previous values.
+ p_enabled_diff_to_check_mask = np.logical_and(c_val_disabled_mask, p_val_view != c_val_view)
+ # Convert from a mask to indices because we need the indices later and because the array of indices will
+ # usually be smaller than the mask array, making it faster to index other arrays with.
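+ # np.flatnonzero returns the indices of the True elements of the mask, e.g.
+ # [F, F, T, F, T] -> [2, 4]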
+ p_enabled_diff_to_check_idx = np.flatnonzero(p_enabled_diff_to_check_mask) + # `p_enabled_idx_in_sampled_values` from earlier: + # [1, 2, 2, 4, 5, 5, 5, 5] + # `p_enabled_diff_to_check_mask` assuming no values equal their immediately previous value: + # [F, F, T, F, F, T, T, T] + # `p_enabled_diff_to_check_idx`: + # [ 2, 5, 6, 7] + # `p_enabled_idx_in_sampled_values_to_check`: + # [ 2, 5, 5, 5] + p_enabled_idx_in_sampled_values_to_check = p_enabled_idx_in_sampled_values[p_enabled_diff_to_check_idx] + # Get the 'current' disabled values that need to be checked. + c_val_to_check = c_val_view[p_enabled_diff_to_check_idx] + c_abs_val_to_check = c_abs_val_view[p_enabled_diff_to_check_idx] + # Get the nearest previous enabled value for each value to be checked. + nearest_p_enabled_val = sampled_values[p_enabled_idx_in_sampled_values_to_check] + abs_nearest_p_enabled_val = np.abs(nearest_p_enabled_val) + # Check the relative + absolute-near-zero difference again, but against the nearest previous enabled value + # this time. + abs_diff = np.abs(c_val_to_check - nearest_p_enabled_val) + different_if_greater_than = (min_reldiff_fac + * np.maximum(c_abs_val_to_check + abs_nearest_p_enabled_val, min_absdiff_fac)) + enough_diff_p_enabled_val_mask = abs_diff > different_if_greater_than + # If there are any that are different enough from the previous enabled value, then we have to check them all + # iteratively because enabling a new value can change the nearest previous enabled value of some elements, + # which changes their relative + absolute-near-zero difference: + # `p_enabled_diff_to_check_idx`: + # [2, 5, 6, 7] + # `p_enabled_idx_in_sampled_values_to_check`: + # [2, 5, 5, 5] + # Let `enough_diff_p_enabled_val_mask` be: + # [F, F, T, T] + # The first index that is newly enabled is 6: + # [2, 5,>6<,5] + # But 6 > 5, so the next value's nearest previous enabled index is also affected: + # [2, 5, 6,>6<] + # We had calculated a newly enabled index of 7 too, but that was calculated against the old nearest previous + # enabled index of 5, which has now been updated to 6, so whether 7 is enabled or not needs to be + # recalculated: + # [F, F, T, ?] + if np.any(enough_diff_p_enabled_val_mask): + # Accessing .data, the memoryview of the array, iteratively or by individual index is faster than doing + # the same with the array itself. + zipped = zip(p_enabled_diff_to_check_idx.data, + c_val_to_check.data, + c_abs_val_to_check.data, + p_enabled_idx_in_sampled_values_to_check.data, + enough_diff_p_enabled_val_mask.data) + # While iterating, we could set updated values into `enough_diff_p_enabled_val_mask` as we go and then + # update `enabled_mask` in bulk after the iteration, but if we're going to update an array while + # iterating, we may as well update `enabled_mask` directly instead and skip the bulk update. + # Additionally, the number of `True` writes to `enabled_mask` is usually much less than the number of + # updates that would be required to `enough_diff_p_enabled_val_mask`. + c_enabled_mask_view_mv = c_enabled_mask_view.data + + # While iterating, keep track of the most recent newly enabled index, so we can tell when we need to + # recalculate whether the current value needs to be enabled. + new_p_enabled_idx = -1 + # Keep track of its value too for performance. 
+ new_p_enabled_val = -1
+ new_abs_p_enabled_val = -1
+ for cur_idx, c_val, c_abs_val, old_p_enabled_idx, enough_diff in zipped:
+ if new_p_enabled_idx > old_p_enabled_idx:
+ # The nearest previous enabled value is newly enabled and was not included when
+ # `enough_diff_p_enabled_val_mask` was calculated, so whether the current value is different
+ # enough needs to be recalculated using the newly enabled value.
+ # Check if the relative + absolute-near-zero difference is enough to enable this value.
+ enough_diff = (abs(c_val - new_p_enabled_val)
+ > (min_reldiff_fac * max(c_abs_val + new_abs_p_enabled_val, min_absdiff_fac)))
+ if enough_diff:
+ # The current value needs to be enabled.
+ c_enabled_mask_view_mv[cur_idx] = True
+ # Update the index and values for this newly enabled value.
+ new_p_enabled_idx = cur_idx
+ new_p_enabled_val = c_val
+ new_abs_p_enabled_val = c_abs_val

 # If we write nothing (action doing nothing) and are in 'force_keep' mode, we key everything! :P
 # See T41766.
@@ -1148,24 +1482,26 @@ def simplify(self, fac, step, force_keep=False):
 # one key in this case.
 # See T41719, T41605, T41254...
 if self.force_keying or (force_keep and not self):
- are_keyed[:] = [True] * len(are_keyed)
+ are_keyed = [True] * len(self._frame_write_mask_array)
+ else:
+ are_keyed = np.any(self._frame_write_mask_array, axis=1)

 # If we did key something, ensure first and last sampled values are keyed as well.
 if self.force_startend_keying:
- for idx, is_keyed in enumerate(are_keyed):
+ for is_keyed, frame_write_mask in zip(are_keyed, self._frame_write_mask_array):
 if is_keyed:
- keys[0][2][idx] = keys[-1][2][idx] = True
+ frame_write_mask[:1] = True
+ frame_write_mask[-1:] = True

 def get_final_data(self, scene, ref_id, force_keep=False):
 """
 Yield final anim data for this 'curvenode' (for all curvenodes defined).
 force_keep is to force to keep a curve even if it only has one valid keyframe.
 """
- curves = [[] for k in self._keys[0][1]]
- for currframe, key, key_write in self._keys:
- for curve, val, wrt in zip(curves, key, key_write):
- if wrt:
- curve.append((currframe, val))
+ curves = [
+ (self._frame_times_array[write_mask], values[write_mask])
+ for values, write_mask in zip(self._frame_values_array, self._frame_write_mask_array)
+ ]

 force_keep = force_keep or self.force_keying
 for elem_key, fbx_group, fbx_gname, fbx_props in \
@@ -1176,8 +1512,9 @@ def get_final_data(self, scene, ref_id, force_keep=False):
 fbx_item = FBX_ANIM_PROPSGROUP_NAME + "|" + fbx_item
 curve_key = get_blender_anim_curve_key(scene, ref_id, elem_key, fbx_group, fbx_item)
 # (curve key, default value, keyframes, write flag).
- group[fbx_item] = (curve_key, def_val, c,
- True if (len(c) > 1 or (len(c) > 0 and force_keep)) else False)
+ times = c[0]
+ write_flag = len(times) > (0 if force_keep else 1)
+ group[fbx_item] = (curve_key, def_val, c, write_flag)

 yield elem_key, group_key, group, fbx_group, fbx_gname


From 6d3b3c59ed340466085d189f0cf55bd313d5e8db Mon Sep 17 00:00:00 2001
From: SAM-tak
Date: Thu, 16 Nov 2023 22:17:04 +0900
Subject: [PATCH 3/3] fix merge miss

---
 blender-for-unrealengine/fbxio/export_fbx_bin.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/blender-for-unrealengine/fbxio/export_fbx_bin.py b/blender-for-unrealengine/fbxio/export_fbx_bin.py
index ae02e4d2..f79eabc9 100644
--- a/blender-for-unrealengine/fbxio/export_fbx_bin.py
+++ b/blender-for-unrealengine/fbxio/export_fbx_bin.py
@@ -12,6 +12,8 @@
 from itertools import zip_longest
 from functools import cache

+from .. import __package__ as parent_package
+
 if "bpy" in locals():
     import importlib
     if "encode_bin" in locals():
@@ -25,7 +27,7 @@
 import bpy_extras
 from bpy_extras import node_shader_utils
 from bpy.app.translations import pgettext_tip as tip_
-from mathutils import Vector, Matrix
+from mathutils import Vector, Matrix, Quaternion
 from . import encode_bin, data_types, fbx_utils
 from .fbx_utils import (