#!/usr/bin/env python3
# Updated to Blender 2.93
bl_info = {
"name": "3DMigoto",
"blender": (2, 93, 0),
"author": "Ian Munsie ([email protected])",
"location": "File > Import-Export",
"description": "Imports meshes dumped with 3DMigoto's frame analysis and exports meshes suitable for re-injection",
"category": "Import-Export",
"tracker_url": "https://github.com/DarkStarSword/3d-fixes/issues",
}
# TODO:
# - Option to reduce vertices on import to simplify mesh (can be noticeably lossy)
# - Option to untessellate triangles on import?
# - Operator to generate vertex group map
# - Generate bones, using vertex groups to approximate position
# - And maybe orientation & magnitude, but I'll have to figure out some funky
# maths to have it follow the mesh like a cylinder
# - Test in a wider variety of games
# - Handle TANGENT better on both import & export?
import io
import re
from array import array
import struct
import numpy
import itertools
import collections
import os
from glob import glob
import json
import copy
import textwrap
import bpy
from bpy_extras.io_utils import unpack_list, ImportHelper, ExportHelper, axis_conversion
from bpy.props import BoolProperty, StringProperty, CollectionProperty
from bpy_extras.image_utils import load_image
from mathutils import Matrix, Vector
try:
from bl_ui.generic_ui_list import draw_ui_list
except ImportError:
# Blender older than 3.5. Just disable the semantic remap feature
draw_ui_list = None
############## Begin (deprecated) Blender 2.7/2.8 compatibility wrappers (2.7 options removed) ##############
from bpy_extras.io_utils import orientation_helper
IOOBJOrientationHelper = type('DummyIOOBJOrientationHelper', (object,), {})
import_menu = bpy.types.TOPBAR_MT_file_import
export_menu = bpy.types.TOPBAR_MT_file_export
vertex_color_layer_channels = 4
# https://theduckcow.com/2019/update-addons-both-blender-28-and-27-support/
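# Blender 2.8+ expects bpy.props definitions to be declared as class
# annotations rather than plain assignments, so this converts the old-style
# tuple attributes into __annotations__ entries: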
def make_annotations(cls):
"""Converts class fields to annotations"""
bl_props = {k: v for k, v in cls.__dict__.items() if isinstance(v, tuple)}
if bl_props:
if '__annotations__' not in cls.__dict__:
setattr(cls, '__annotations__', {})
annotations = cls.__dict__['__annotations__']
for k, v in bl_props.items():
annotations[k] = v
delattr(cls, k)
return cls
def select_get(object):
return object.select_get()
def select_set(object, state):
object.select_set(state)
def hide_get(object):
return object.hide_get()
def hide_set(object, state):
object.hide_set(state)
def set_active_object(context, obj):
context.view_layer.objects.active = obj # the 2.8 way
def get_active_object(context):
return context.view_layer.objects.active
def link_object_to_scene(context, obj):
context.scene.collection.objects.link(obj)
def unlink_object(context, obj):
context.scene.collection.objects.unlink(obj)
def matmul(a, b):
import operator # to get function names for operators like @, +, -
return operator.matmul(a, b) # the same as writing a @ b
############## End (deprecated) Blender 2.7/2.8 compatibility wrappers (2.7 options removed) ##############
supported_topologies = ('trianglelist', 'pointlist', 'trianglestrip')
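# JSON object keys are always strings, so these helpers convert numeric keys
# back to ints (and vice versa) when round-tripping data such as vertex group
# maps through JSON: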
def keys_to_ints(d):
return {int(k) if k.isdecimal() else k: v for k, v in d.items()}
def keys_to_strings(d):
return {str(k):v for k,v in d.items()}
class Fatal(Exception): pass
ImportPaths = collections.namedtuple('ImportPaths', ('vb_paths', 'ib_paths', 'use_bin', 'pose_path'))
# TODO: Support more DXGI formats:
f32_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]32)+_FLOAT''')
f16_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]16)+_FLOAT''')
u32_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]32)+_UINT''')
u16_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]16)+_UINT''')
u8_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]8)+_UINT''')
s32_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]32)+_SINT''')
s16_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]16)+_SINT''')
s8_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]8)+_SINT''')
unorm16_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]16)+_UNORM''')
unorm8_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]8)+_UNORM''')
snorm16_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]16)+_SNORM''')
snorm8_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD]8)+_SNORM''')
misc_float_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD][0-9]+)+_(?:FLOAT|UNORM|SNORM)''')
misc_int_pattern = re.compile(r'''(?:DXGI_FORMAT_)?(?:[RGBAD][0-9]+)+_[SU]INT''')
def EncoderDecoder(fmt):
if f32_pattern.match(fmt):
return (lambda data: b''.join(struct.pack('<f', x) for x in data),
lambda data: numpy.frombuffer(data, numpy.float32).tolist())
if f16_pattern.match(fmt):
return (lambda data: numpy.fromiter(data, numpy.float16).tobytes(),
lambda data: numpy.frombuffer(data, numpy.float16).tolist())
if u32_pattern.match(fmt):
return (lambda data: numpy.fromiter(data, numpy.uint32).tobytes(),
lambda data: numpy.frombuffer(data, numpy.uint32).tolist())
if u16_pattern.match(fmt):
return (lambda data: numpy.fromiter(data, numpy.uint16).tobytes(),
lambda data: numpy.frombuffer(data, numpy.uint16).tolist())
if u8_pattern.match(fmt):
return (lambda data: numpy.fromiter(data, numpy.uint8).tobytes(),
lambda data: numpy.frombuffer(data, numpy.uint8).tolist())
if s32_pattern.match(fmt):
return (lambda data: numpy.fromiter(data, numpy.int32).tobytes(),
lambda data: numpy.frombuffer(data, numpy.int32).tolist())
if s16_pattern.match(fmt):
return (lambda data: numpy.fromiter(data, numpy.int16).tobytes(),
lambda data: numpy.frombuffer(data, numpy.int16).tolist())
if s8_pattern.match(fmt):
return (lambda data: numpy.fromiter(data, numpy.int8).tobytes(),
lambda data: numpy.frombuffer(data, numpy.int8).tolist())
if unorm16_pattern.match(fmt):
return (lambda data: numpy.around((numpy.fromiter(data, numpy.float32) * 65535.0)).astype(numpy.uint16).tobytes(),
lambda data: (numpy.frombuffer(data, numpy.uint16) / 65535.0).tolist())
if unorm8_pattern.match(fmt):
return (lambda data: numpy.around((numpy.fromiter(data, numpy.float32) * 255.0)).astype(numpy.uint8).tobytes(),
lambda data: (numpy.frombuffer(data, numpy.uint8) / 255.0).tolist())
if snorm16_pattern.match(fmt):
return (lambda data: numpy.around((numpy.fromiter(data, numpy.float32) * 32767.0)).astype(numpy.int16).tobytes(),
lambda data: (numpy.frombuffer(data, numpy.int16) / 32767.0).tolist())
if snorm8_pattern.match(fmt):
return (lambda data: numpy.around((numpy.fromiter(data, numpy.float32) * 127.0)).astype(numpy.int8).tobytes(),
lambda data: (numpy.frombuffer(data, numpy.int8) / 127.0).tolist())
raise Fatal('File uses an unsupported DXGI Format: %s' % fmt)
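# For example, EncoderDecoder('DXGI_FORMAT_R16G16_FLOAT') returns an encoder
# that packs an iterable of Python floats into 16-bit half floats and a
# decoder that unpacks such bytes back into a list of floats.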
components_pattern = re.compile(r'''(?<![0-9])[0-9]+(?![0-9])''')
def format_components(fmt):
return len(components_pattern.findall(fmt))
def format_size(fmt):
matches = components_pattern.findall(fmt)
return sum(map(int, matches)) // 8
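# e.g. for DXGI_FORMAT_R16G16B16A16_FLOAT, format_components() counts four
# component widths and format_size() returns 64 bits / 8 = 8 bytes.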
class InputLayoutElement(object):
def __init__(self, arg):
self.RemappedSemanticName = None
self.RemappedSemanticIndex = None
if isinstance(arg, io.IOBase):
self.from_file(arg)
else:
self.from_dict(arg)
self.encoder, self.decoder = EncoderDecoder(self.Format)
def from_file(self, f):
self.SemanticName = self.next_validate(f, 'SemanticName')
self.SemanticIndex = int(self.next_validate(f, 'SemanticIndex'))
(self.RemappedSemanticName, line) = self.next_optional(f, 'RemappedSemanticName')
if line is None:
self.RemappedSemanticIndex = int(self.next_validate(f, 'RemappedSemanticIndex'))
self.Format = self.next_validate(f, 'Format', line)
self.InputSlot = int(self.next_validate(f, 'InputSlot'))
self.AlignedByteOffset = self.next_validate(f, 'AlignedByteOffset')
if self.AlignedByteOffset == 'append':
raise Fatal('Input layouts using "AlignedByteOffset=append" are not yet supported')
self.AlignedByteOffset = int(self.AlignedByteOffset)
self.InputSlotClass = self.next_validate(f, 'InputSlotClass')
self.InstanceDataStepRate = int(self.next_validate(f, 'InstanceDataStepRate'))
self.format_len = format_components(self.Format)
def to_dict(self):
d = {}
d['SemanticName'] = self.SemanticName
d['SemanticIndex'] = self.SemanticIndex
if self.RemappedSemanticName is not None:
d['RemappedSemanticName'] = self.RemappedSemanticName
d['RemappedSemanticIndex'] = self.RemappedSemanticIndex
d['Format'] = self.Format
d['InputSlot'] = self.InputSlot
d['AlignedByteOffset'] = self.AlignedByteOffset
d['InputSlotClass'] = self.InputSlotClass
d['InstanceDataStepRate'] = self.InstanceDataStepRate
return d
def to_string(self, indent=2):
ret = textwrap.dedent('''
SemanticName: %s
SemanticIndex: %i
''').lstrip() % (
self.SemanticName,
self.SemanticIndex,
)
if self.RemappedSemanticName is not None:
ret += textwrap.dedent('''
RemappedSemanticName: %s
RemappedSemanticIndex: %i
''').lstrip() % (
self.RemappedSemanticName,
self.RemappedSemanticIndex,
)
ret += textwrap.dedent('''
Format: %s
InputSlot: %i
AlignedByteOffset: %i
InputSlotClass: %s
InstanceDataStepRate: %i
''').lstrip() % (
self.Format,
self.InputSlot,
self.AlignedByteOffset,
self.InputSlotClass,
self.InstanceDataStepRate,
)
return textwrap.indent(ret, ' '*indent)
def from_dict(self, d):
self.SemanticName = d['SemanticName']
self.SemanticIndex = d['SemanticIndex']
try:
self.RemappedSemanticName = d['RemappedSemanticName']
self.RemappedSemanticIndex = d['RemappedSemanticIndex']
except KeyError: pass
self.Format = d['Format']
self.InputSlot = d['InputSlot']
self.AlignedByteOffset = d['AlignedByteOffset']
self.InputSlotClass = d['InputSlotClass']
self.InstanceDataStepRate = d['InstanceDataStepRate']
self.format_len = format_components(self.Format)
@staticmethod
def next_validate(f, field, line=None):
if line is None:
line = next(f).strip()
assert(line.startswith(field + ': '))
return line[len(field) + 2:]
@staticmethod
def next_optional(f, field, line=None):
if line is None:
line = next(f).strip()
if line.startswith(field + ': '):
return (line[len(field) + 2:], None)
return (None, line)
@property
def name(self):
if self.SemanticIndex:
return '%s%i' % (self.SemanticName, self.SemanticIndex)
return self.SemanticName
@property
def remapped_name(self):
if self.RemappedSemanticName is None:
return self.name
if self.RemappedSemanticIndex:
return '%s%i' % (self.RemappedSemanticName, self.RemappedSemanticIndex)
return self.RemappedSemanticName
def pad(self, data, val):
padding = self.format_len - len(data)
assert(padding >= 0)
data.extend([val]*padding)
return data
def clip(self, data):
return data[:format_components(self.Format)]
def size(self):
return format_size(self.Format)
def is_float(self):
return misc_float_pattern.match(self.Format)
def is_int(self):
return misc_int_pattern.match(self.Format)
def encode(self, data):
# print(self.Format, data)
return self.encoder(data)
def decode(self, data):
return self.decoder(data)
def __eq__(self, other):
return \
self.SemanticName == other.SemanticName and \
self.SemanticIndex == other.SemanticIndex and \
self.Format == other.Format and \
self.InputSlot == other.InputSlot and \
self.AlignedByteOffset == other.AlignedByteOffset and \
self.InputSlotClass == other.InputSlotClass and \
self.InstanceDataStepRate == other.InstanceDataStepRate
class InputLayout(object):
def __init__(self, custom_prop=[]):
self.semantic_translations_cache = None
self.elems = collections.OrderedDict()
for item in custom_prop:
elem = InputLayoutElement(item)
self.elems[elem.name] = elem
def serialise(self):
return [x.to_dict() for x in self.elems.values()]
def to_string(self):
ret = ''
for i, elem in enumerate(self.elems.values()):
ret += 'element[%i]:\n' % i
ret += elem.to_string()
return ret
def parse_element(self, f):
elem = InputLayoutElement(f)
self.elems[elem.name] = elem
def __iter__(self):
return iter(self.elems.values())
def __getitem__(self, semantic):
return self.elems[semantic]
def untranslate_semantic(self, translated_semantic_name, translated_semantic_index=0):
semantic_translations = self.get_semantic_remap()
reverse_semantic_translations = {v: k for k,v in semantic_translations.items()}
semantic = reverse_semantic_translations[(translated_semantic_name, translated_semantic_index)]
return self[semantic]
def encode(self, vertex, vbuf_idx, stride):
buf = bytearray(stride)
for semantic, data in vertex.items():
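# Semantics prefixed with '~' are stashed backups (see
# remap_blendindices() below) and are never written to the buffer: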
if semantic.startswith('~'):
continue
elem = self.elems[semantic]
if vbuf_idx.isnumeric() and elem.InputSlot != int(vbuf_idx):
# Belongs to a different vertex buffer
continue
data = elem.encode(data)
buf[elem.AlignedByteOffset:elem.AlignedByteOffset + len(data)] = data
assert(len(buf) == stride)
return buf
def decode(self, buf, vbuf_idx):
vertex = {}
for elem in self.elems.values():
if elem.InputSlot != vbuf_idx:
# Belongs to a different vertex buffer
continue
data = buf[elem.AlignedByteOffset:elem.AlignedByteOffset + elem.size()]
vertex[elem.name] = elem.decode(data)
return vertex
def __eq__(self, other):
return self.elems == other.elems
def apply_semantic_remap(self, operator):
semantic_translations = {}
semantic_highest_indices = {}
for elem in self.elems.values():
semantic_highest_indices[elem.SemanticName.upper()] = max(semantic_highest_indices.get(elem.SemanticName.upper(), 0), elem.SemanticIndex)
def find_free_elem_index(semantic):
idx = semantic_highest_indices.get(semantic, -1) + 1
semantic_highest_indices[semantic] = idx
return idx
for remap in operator.properties.semantic_remap:
if remap.semantic_to == 'None':
continue
if remap.semantic_from in semantic_translations:
operator.report({'ERROR'}, 'semantic remap for {} specified multiple times, only the first will be used'.format(remap.semantic_from))
continue
if remap.semantic_from not in self.elems:
operator.report({'WARNING'}, 'semantic "{}" not found in imported file, double check your semantic remaps'.format(remap.semantic_from))
continue
remapped_semantic_idx = find_free_elem_index(remap.semantic_to)
operator.report({'INFO'}, 'Remapping semantic {} -> {}{}'.format(remap.semantic_from, remap.semantic_to,
remapped_semantic_idx or ''))
self.elems[remap.semantic_from].RemappedSemanticName = remap.semantic_to
self.elems[remap.semantic_from].RemappedSemanticIndex = remapped_semantic_idx
semantic_translations[remap.semantic_from] = (remap.semantic_to, remapped_semantic_idx)
self.semantic_translations_cache = semantic_translations
return semantic_translations
def get_semantic_remap(self):
if self.semantic_translations_cache:
return self.semantic_translations_cache
semantic_translations = {}
for elem in self.elems.values():
if elem.RemappedSemanticName is not None:
semantic_translations[elem.name] = \
(elem.RemappedSemanticName, elem.RemappedSemanticIndex)
self.semantic_translations_cache = semantic_translations
return semantic_translations
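# Wrapping a vertex dict in this class makes it hashable, so vertices can be
# placed in sets/dicts - e.g. to detect duplicate vertices on export: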
class HashableVertex(dict):
def __hash__(self):
# Convert keys and values into immutable types that can be hashed
immutable = tuple((k, tuple(v)) for k,v in sorted(self.items()))
return hash(immutable)
class IndividualVertexBuffer(object):
'''
One individual vertex buffer. Multiple vertex buffers may contain
individual semantics which when combined together make up a vertex buffer
group.
'''
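# Matches the per-vertex data lines in a frame analysis dump, which look
# something like "vb0[0]+000 POSITION: -0.5, 0.5, 0.0":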
vb_elem_pattern = re.compile(r'''vb\d+\[\d*\]\+\d+ (?P<semantic>[^:]+): (?P<data>.*)$''')
def __init__(self, idx, f=None, layout=None, load_vertices=True):
self.vertices = []
self.layout = layout or InputLayout()
self.first = 0
self.vertex_count = 0
self.offset = 0
self.topology = 'trianglelist'
self.stride = 0
self.idx = idx
if f is not None:
self.parse_vb_txt(f, load_vertices)
def parse_vb_txt(self, f, load_vertices):
split_vb_stride = 'vb%i stride:' % self.idx
for line in map(str.strip, f):
# print(line)
if line.startswith('byte offset:'):
self.offset = int(line[13:])
if line.startswith('first vertex:'):
self.first = int(line[14:])
if line.startswith('vertex count:'):
self.vertex_count = int(line[14:])
if line.startswith('stride:'):
self.stride = int(line[7:])
if line.startswith(split_vb_stride):
self.stride = int(line[len(split_vb_stride):])
if line.startswith('element['):
self.layout.parse_element(f)
if line.startswith('topology:'):
self.topology = line[10:]
if self.topology not in supported_topologies:
raise Fatal('"%s" is not yet supported' % line)
if line.startswith('vertex-data:'):
if not load_vertices:
return
self.parse_vertex_data(f)
# If the buffer is only per-instance elements there won't be any
# vertices. If the buffer has any per-vertex elements then we should
# have the number of vertices declared in the header.
if self.vertices:
assert(len(self.vertices) == self.vertex_count)
def parse_vb_bin(self, f, use_drawcall_range=False):
f.seek(self.offset)
if use_drawcall_range:
f.seek(self.first * self.stride, 1)
else:
self.first = 0
for i in itertools.count():
if use_drawcall_range and i == self.vertex_count:
break
vertex = f.read(self.stride)
if not vertex:
break
self.vertices.append(self.layout.decode(vertex, self.idx))
# We intentionally disregard the vertex count when loading from a
# binary file, as we assume frame analysis might have only dumped a
# partial buffer to the .txt files (e.g. if this was from a dump where
# the draw call index count was overridden it may be cut short, or
# where the .txt files contain only sub-meshes from each draw call and
# we are loading the .buf file because it contains the entire mesh):
self.vertex_count = len(self.vertices)
def append(self, vertex):
self.vertices.append(vertex)
self.vertex_count += 1
def parse_vertex_data(self, f):
vertex = {}
for line in map(str.strip, f):
#print(line)
if line.startswith('instance-data:'):
break
match = self.vb_elem_pattern.match(line)
if match:
vertex[match.group('semantic')] = self.parse_vertex_element(match)
elif line == '' and vertex:
self.vertices.append(vertex)
vertex = {}
if vertex:
self.vertices.append(vertex)
@staticmethod
def ms_float(val):
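# Parse floats as formatted by MSVC, which prints special values as
# e.g. "1.#INF", "-1.#QNAN" or "-1.#IND" instead of "inf"/"nan":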
x = val.split('.#')
s = float(x[0])
if len(x) == 1:
return s
if x[1].startswith('INF'):
return s * numpy.inf # Will preserve sign
# TODO: Differentiate between SNAN / QNAN / IND
if s == -1: # Multiplying -1 * nan doesn't preserve sign
return -numpy.nan # so must use unary - operator
return numpy.nan
def parse_vertex_element(self, match):
fields = match.group('data').split(',')
if self.layout[match.group('semantic')].Format.endswith('INT'):
return tuple(map(int, fields))
return tuple(map(self.ms_float, fields))
class VertexBufferGroup(object):
'''
All the per-vertex data, which may be loaded/saved from potentially
multiple individual vertex buffers with different semantics in each.
'''
vb_idx_pattern = re.compile(r'''[-\.]vb([0-9]+)''')
# Python gotcha - do not set layout=InputLayout() in the default function
# parameters, as they would all share the *same* InputLayout since the
# default values are only evaluated once on file load
def __init__(self, files=None, layout=None, load_vertices=True, topology=None):
self.vertices = []
self.layout = layout or InputLayout()
self.first = 0
self.vertex_count = 0
self.topology = topology or 'trianglelist'
self.vbs = []
self.slots = {}
if files is not None:
self.parse_vb_txt(files, load_vertices)
def parse_vb_txt(self, files, load_vertices):
for f in files:
match = self.vb_idx_pattern.search(f)
if match is None:
raise Fatal('Cannot determine vertex buffer index from filename %s' % f)
idx = int(match.group(1))
vb = IndividualVertexBuffer(idx, open(f, 'r'), self.layout, load_vertices)
if vb.vertices:
self.vbs.append(vb)
self.slots[idx] = vb
self.flag_invalid_semantics()
# Non buffer specific info:
self.first = self.vbs[0].first
self.vertex_count = self.vbs[0].vertex_count
self.topology = self.vbs[0].topology
if load_vertices:
self.merge_vbs(self.vbs)
assert(len(self.vertices) == self.vertex_count)
def parse_vb_bin(self, files, use_drawcall_range=False):
for (bin_f, fmt_f) in files:
match = self.vb_idx_pattern.search(bin_f)
if match is not None:
idx = int(match.group(1))
else:
print('Cannot determine vertex buffer index from filename %s, assuming 0 for backwards compatibility' % bin_f)
idx = 0
vb = IndividualVertexBuffer(idx, open(fmt_f, 'r'), self.layout, False)
vb.parse_vb_bin(open(bin_f, 'rb'), use_drawcall_range)
if vb.vertices:
self.vbs.append(vb)
self.slots[idx] = vb
self.flag_invalid_semantics()
# Non buffer specific info:
self.first = self.vbs[0].first
self.vertex_count = self.vbs[0].vertex_count
self.topology = self.vbs[0].topology
self.merge_vbs(self.vbs)
assert(len(self.vertices) == self.vertex_count)
def append(self, vertex):
self.vertices.append(vertex)
self.vertex_count += 1
def remap_blendindices(self, obj, mapping):
def lookup_vgmap(x):
vgname = obj.vertex_groups[x].name
return mapping.get(vgname, mapping.get(x, x))
for vertex in self.vertices:
for semantic in list(vertex):
if semantic.startswith('BLENDINDICES'):
vertex['~' + semantic] = vertex[semantic]
vertex[semantic] = tuple(lookup_vgmap(x) for x in vertex[semantic])
def revert_blendindices_remap(self):
# Significantly faster than doing a deep copy
for vertex in self.vertices:
for semantic in list(vertex):
if semantic.startswith('BLENDINDICES'):
vertex[semantic] = vertex['~' + semantic]
del vertex['~' + semantic]
def disable_blendweights(self):
for vertex in self.vertices:
for semantic in list(vertex):
if semantic.startswith('BLENDINDICES'):
vertex[semantic] = (0, 0, 0, 0)
def write(self, output_prefix, strides, operator=None):
if os.path.exists(output_prefix):
os.remove(output_prefix) # Remove old .vb if it exists before writing segmented .vb_ files
for vbuf_idx, stride in strides.items():
with open(output_prefix + vbuf_idx, 'wb') as output:
for vertex in self.vertices:
output.write(self.layout.encode(vertex, vbuf_idx, stride))
msg = 'Wrote %i vertices to %s' % (len(self), output.name)
if operator:
operator.report({'INFO'}, msg)
else:
print(msg)
def __len__(self):
return len(self.vertices)
def merge_vbs(self, vbs):
self.vertices = self.vbs[0].vertices
del self.vbs[0].vertices
assert(len(self.vertices) == self.vertex_count)
for vb in self.vbs[1:]:
assert(len(vb.vertices) == self.vertex_count)
for i, vertex in enumerate(vb.vertices): self.vertices[i].update(vertex)
del vb.vertices
def merge(self, other):
if self.layout != other.layout:
raise Fatal('Vertex buffers have different input layouts - ensure you are only trying to merge the same vertex buffer split across multiple draw calls')
if self.first != other.first:
# FIXME: Future 3DMigoto might automatically set first from the
# index buffer and chop off unreferenced vertices to save space
raise Fatal('Cannot merge multiple vertex buffers - please check for updates of the 3DMigoto import script, or import each buffer separately')
self.vertices.extend(other.vertices[self.vertex_count:])
self.vertex_count = max(self.vertex_count, other.vertex_count)
assert(len(self.vertices) == self.vertex_count)
def wipe_semantic_for_testing(self, semantic, val=0):
print('WARNING: WIPING %s FOR TESTING PURPOSES!!!' % semantic)
semantic, _, components = semantic.partition('.')
if components:
components = [{'x':0, 'y':1, 'z':2, 'w':3}[c] for c in components]
else:
components = range(4)
for vertex in self.vertices:
for s in list(vertex):
if s == semantic:
v = list(vertex[semantic])
for component in components:
if component < len(v):
v[component] = val
vertex[semantic] = v
def flag_invalid_semantics(self):
# This refactors some of the logic that used to be in import_vertices()
# and get_valid_semantics() - Any semantics that re-use the same offset
# of an earlier semantic are considered invalid and will be ignored when
# importing the vertices. These are usually a quirk of how certain
# engines handle unused semantics and at best will be repeating data we
# already imported in another semantic and at worst may be
# misinterpreting the data as a completely different type.
#
# It is theoretically possible for the earlier semantic to be the
# invalid one - if we ever encounter that we might want to allow the
# user to choose which of the semantics sharing the same offset should
# be considered the valid one.
#
# This also makes sure the corresponding vertex buffer is present and
# can fit the semantic.
seen_offsets = set()
for elem in self.layout:
if elem.InputSlotClass != 'per-vertex':
# Instance data isn't invalid, we just don't import it yet
continue
if (elem.InputSlot, elem.AlignedByteOffset) in seen_offsets:
# Setting two flags to avoid changing behaviour in the refactor
# - might be able to simplify this to one flag, but want to
# test semantics that [partially] overflow the stride first,
# and make sure that export flow (stride won't be set) works.
elem.reused_offset = True
elem.invalid_semantic = True
continue
seen_offsets.add((elem.InputSlot, elem.AlignedByteOffset))
elem.reused_offset = False
try:
stride = self.slots[elem.InputSlot].stride
except KeyError:
# UE4 claims to use vertex buffers that it doesn't bind.
elem.invalid_semantic = True
continue
if elem.AlignedByteOffset + format_size(elem.Format) > stride:
elem.invalid_semantic = True
continue
elem.invalid_semantic = False
def get_valid_semantics(self):
self.flag_invalid_semantics()
return set([elem.name for elem in self.layout
if elem.InputSlotClass == 'per-vertex' and not elem.invalid_semantic])
class IndexBuffer(object):
def __init__(self, *args, load_indices=True):
self.faces = []
self.first = 0
self.index_count = 0
self.format = 'DXGI_FORMAT_UNKNOWN'
self.offset = 0
self.topology = 'trianglelist'
self.used_in_drawcall = None
if isinstance(args[0], io.IOBase):
assert(len(args) == 1)
self.parse_ib_txt(args[0], load_indices)
else:
self.format, = args
self.encoder, self.decoder = EncoderDecoder(self.format)
def append(self, face):
self.faces.append(face)
self.index_count += len(face)
def parse_ib_txt(self, f, load_indices):
for line in map(str.strip, f):
if line.startswith('byte offset:'):
self.offset = int(line[13:])
# If we see this line we are looking at a 3DMigoto frame
# analysis dump, not a .fmt file exported by this script.
# If it was an indexed draw call it will be followed by "first
# index" and "index count", while if it was not an indexed draw
# call they will be absent. So by the end of parsing:
# used_in_drawcall = None signifies loading a .fmt file from a previous export
# used_in_drawcall = False signifies draw call did not use the bound IB
# used_in_drawcall = True signifies an indexed draw call
self.used_in_drawcall = False
if line.startswith('first index:'):
self.first = int(line[13:])
self.used_in_drawcall = True
elif line.startswith('index count:'):
self.index_count = int(line[13:])
self.used_in_drawcall = True
elif line.startswith('topology:'):
self.topology = line[10:]
if self.topology not in supported_topologies:
raise Fatal('"%s" is not yet supported' % line)
elif line.startswith('format:'):
self.format = line[8:]
elif line == '':
if not load_indices:
return
self.parse_index_data(f)
if self.used_in_drawcall is not False:
assert(len(self.faces) * self.indices_per_face + self.extra_indices == self.index_count)
def parse_ib_bin(self, f, use_drawcall_range=False):
f.seek(self.offset)
stride = format_size(self.format)
if use_drawcall_range:
f.seek(self.first * stride, 1)
else:
self.first = 0
face = []
for i in itertools.count():
if use_drawcall_range and i == self.index_count:
break
index = f.read(stride)
if not index:
break
face.append(*self.decoder(index))
if len(face) == self.indices_per_face:
self.faces.append(tuple(face))
face = []
assert(len(face) == 0)
self.expand_strips()
if use_drawcall_range:
assert(len(self.faces) * self.indices_per_face + self.extra_indices == self.index_count)
else:
# We intentionally disregard the index count when loading from a
# binary file, as we assume frame analysis might have only dumped a
# partial buffer to the .txt files (e.g. if this was from a dump where
# the draw call index count was overridden it may be cut short, or
# where the .txt files contain only sub-meshes from each draw call and
# we are loading the .buf file because it contains the entire mesh):
self.index_count = len(self.faces) * self.indices_per_face + self.extra_indices
def parse_index_data(self, f):
for line in map(str.strip, f):
face = tuple(map(int, line.split()))
assert(len(face) == self.indices_per_face)
self.faces.append(face)
self.expand_strips()
def expand_strips(self):
if self.topology == 'trianglestrip':
# Every 2nd face has the vertices out of order to keep all faces in the same orientation:
# https://learn.microsoft.com/en-us/windows/win32/direct3d9/triangle-strips
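# e.g. the strip 0 1 2 3 4 expands to faces (0,1,2) (1,3,2) (2,3,4):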
self.faces = [(self.faces[i-2][0],
self.faces[i%2 and i or i-1][0],
self.faces[i%2 and i-1 or i][0],
) for i in range(2, len(self.faces))]
elif self.topology == 'linestrip':
raise Fatal('linestrip topology conversion is untested')
self.faces = [(self.faces[i-1][0], self.faces[i][0])
for i in range(1, len(self.faces)) ]
def merge(self, other):
if self.format != other.format:
raise Fatal('Index buffers have different formats - ensure you are only trying to merge the same index buffer split across multiple draw calls')
self.first = min(self.first, other.first)
self.index_count += other.index_count
self.faces.extend(other.faces)
def write(self, output, operator=None):
for face in self.faces:
output.write(self.encoder(face))
msg = 'Wrote %i indices to %s' % (len(self), output.name)
if operator:
operator.report({'INFO'}, msg)
else:
print(msg)
@property
def indices_per_face(self):
return {
'trianglelist': 3,
'pointlist': 1,
'trianglestrip': 1, # + self.extra_indices for 1st tri
'linelist': 2,
'linestrip': 1, # + self.extra_indices for 1st line
}[self.topology]
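# A strip encodes N primitives in N + extra_indices indices: the first
# triangle/line costs 3/2 indices and each subsequent one costs just 1.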
@property
def extra_indices(self):
if len(self.faces) >= 1:
if self.topology == 'trianglestrip':
return 2
if self.topology == 'linestrip':
return 1
return 0
def __len__(self):
return len(self.faces) * self.indices_per_face + self.extra_indices
def load_3dmigoto_mesh_bin(operator, vb_paths, ib_paths, pose_path):
if len(vb_paths) != 1 or len(ib_paths) > 1:
raise Fatal('Cannot merge meshes loaded from binary files')
# Loading from binary files, but still need to use the .txt files as a
# reference for the format:
ib_bin_path, ib_txt_path = ib_paths[0]
use_drawcall_range = False
if hasattr(operator, 'load_buf_limit_range'): # Frame analysis import only
use_drawcall_range = operator.load_buf_limit_range
vb = VertexBufferGroup()
vb.parse_vb_bin(vb_paths[0], use_drawcall_range)
ib = None
if ib_bin_path:
ib = IndexBuffer(open(ib_txt_path, 'r'), load_indices=False)
if ib.used_in_drawcall is False:
operator.report({'WARNING'}, '{}: Discarding index buffer not used in draw call'.format(os.path.basename(ib_bin_path)))
ib = None
else:
ib.parse_ib_bin(open(ib_bin_path, 'rb'), use_drawcall_range)
return vb, ib, os.path.basename(vb_paths[0][0][0]), pose_path
def load_3dmigoto_mesh(operator, paths):
vb_paths, ib_paths, use_bin, pose_path = zip(*paths)
pose_path = pose_path[0]
if use_bin[0]:
return load_3dmigoto_mesh_bin(operator, vb_paths, ib_paths, pose_path)
vb = VertexBufferGroup(vb_paths[0])
# Merge additional vertex buffers for meshes split over multiple draw calls:
for vb_path in vb_paths[1:]:
tmp = VertexBufferGroup(vb_path)
vb.merge(tmp)
# For quickly testing how important any unsupported semantics may be:
#vb.wipe_semantic_for_testing('POSITION.w', 1.0)
#vb.wipe_semantic_for_testing('TEXCOORD.w', 0.0)
#vb.wipe_semantic_for_testing('TEXCOORD5', 0)
#vb.wipe_semantic_for_testing('BINORMAL')
#vb.wipe_semantic_for_testing('TANGENT')
#vb.write(open(os.path.join(os.path.dirname(vb_paths[0]), 'TEST.vb'), 'wb'), operator=operator)
ib = None
if ib_paths and ib_paths != (None,):
ib = IndexBuffer(open(ib_paths[0], 'r'))
# Merge additional index buffers for meshes split over multiple draw calls:
for ib_path in ib_paths[1:]:
tmp = IndexBuffer(open(ib_path, 'r'))
ib.merge(tmp)
if ib.used_in_drawcall is False:
operator.report({'WARNING'}, '{}: Discarding index buffer not used in draw call'.format(os.path.basename(ib_paths[0])))
ib = None
return vb, ib, os.path.basename(vb_paths[0][0]), pose_path
def normal_import_translation(elem, flip):
unorm = elem.Format.endswith('_UNORM')
if unorm:
# Scale UNORM range 0:+1 to normal range -1:+1
if flip:
return lambda x: -(x*2.0 - 1.0)
else:
return lambda x: x*2.0 - 1.0
if flip:
return lambda x: -x
else:
return lambda x: x
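# The inverse mapping for export: e.g. a UNORM-encoded normal component that
# was imported as n*2-1 (0.5 -> 0.0) is written back out as n/2+0.5.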
def normal_export_translation(layout, semantic, flip):
try:
unorm = layout.untranslate_semantic(semantic).Format.endswith('_UNORM')
except KeyError:
unorm = False
if unorm:
# Scale normal range -1:+1 to UNORM range 0:+1
if flip:
return lambda x: -x/2.0 + 0.5
else:
return lambda x: x/2.0 + 0.5
if flip:
return lambda x: -x
else:
return lambda x: x
def import_normals_step1(mesh, data, vertex_layers, operator, translate_normal):
# Ensure normals are 3-dimensional:
# XXX: Assertion triggers in DOA6
if len(data[0]) == 4:
if [x[3] for x in data] != [0.0]*len(data):
#raise Fatal('Normals are 4D')
operator.report({'WARNING'}, 'Normals are 4D, storing W coordinate in NORMAL.w vertex layer. Beware that some types of edits on this mesh may be problematic.')
vertex_layers['NORMAL.w'] = [[x[3]] for x in data]
normals = [tuple(map(translate_normal, (x[0], x[1], x[2]))) for x in data]