diff --git a/3d-converter-0.8.0/3d_converter.egg-info/PKG-INFO b/3d-converter-0.8.0/3d_converter.egg-info/PKG-INFO
new file mode 100644
index 0000000..1cd65d1
--- /dev/null
+++ b/3d-converter-0.8.0/3d_converter.egg-info/PKG-INFO
@@ -0,0 +1,25 @@
+Metadata-Version: 2.1
+Name: 3d-converter
+Version: 0.8.0
+Summary: Python 3D Models Converter
+Home-page: https://github.com/vorono4ka/3d-converter
+Author: Vorono4ka
+Author-email: crowo4ka@gmail.com
+License: GPLv3
+Description: ## `Python 3D Models Converter`
+
+ **Version**: 0.8.0
+
+        ### Thanks a lot to [AMIRMISTIK] for the motivation!
+
+        ### **THIS IS NOT A RELEASE VERSION!**
+
+
+ [AMIRMISTIK]: https://www.youtube.com/channel/UCksd1LeoySP5St6dKlv6mvQ
+
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.7
+Description-Content-Type: text/markdown
diff --git a/3d-converter-0.8.0/3d_converter.egg-info/SOURCES.txt b/3d-converter-0.8.0/3d_converter.egg-info/SOURCES.txt
new file mode 100644
index 0000000..922e012
--- /dev/null
+++ b/3d-converter-0.8.0/3d_converter.egg-info/SOURCES.txt
@@ -0,0 +1,29 @@
+README.md
+setup.py
+3d_converter.egg-info/PKG-INFO
+3d_converter.egg-info/SOURCES.txt
+3d_converter.egg-info/dependency_links.txt
+3d_converter.egg-info/top_level.txt
+models_converter/__init__.py
+models_converter/formats/__init__.py
+models_converter/formats/dae.py
+models_converter/formats/gltf.py
+models_converter/formats/obj.py
+models_converter/formats/scw/__init__.py
+models_converter/formats/scw/parser.py
+models_converter/formats/scw/writer.py
+models_converter/formats/scw/chunks/__init__.py
+models_converter/formats/scw/chunks/came.py
+models_converter/formats/scw/chunks/chunk.py
+models_converter/formats/scw/chunks/geom.py
+models_converter/formats/scw/chunks/head.py
+models_converter/formats/scw/chunks/mate.py
+models_converter/formats/scw/chunks/node.py
+models_converter/formats/scw/chunks/wend.py
+models_converter/utils/__init__.py
+models_converter/utils/reader.py
+models_converter/utils/writer.py
+models_converter/utils/matrix/__init__.py
+models_converter/utils/matrix/matrix2x2.py
+models_converter/utils/matrix/matrix3x3.py
+models_converter/utils/matrix/matrix4x4.py
\ No newline at end of file
diff --git a/3d-converter-0.8.0/3d_converter.egg-info/dependency_links.txt b/3d-converter-0.8.0/3d_converter.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/3d-converter-0.8.0/3d_converter.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/3d-converter-0.8.0/3d_converter.egg-info/top_level.txt b/3d-converter-0.8.0/3d_converter.egg-info/top_level.txt
new file mode 100644
index 0000000..da3db61
--- /dev/null
+++ b/3d-converter-0.8.0/3d_converter.egg-info/top_level.txt
@@ -0,0 +1 @@
+models_converter
diff --git a/3d-converter-0.8.0/README.md b/3d-converter-0.8.0/README.md
new file mode 100644
index 0000000..7ec19cc
--- /dev/null
+++ b/3d-converter-0.8.0/README.md
@@ -0,0 +1,10 @@
+## `Python 3D Models Converter`
+
+**Version**: 0.8.0
+
+### Thanks a lot to [AMIRMISTIK] for the motivation!
+
+### **THIS IS NOT A RELEASE VERSION!**
+
+
+[AMIRMISTIK]: https://www.youtube.com/channel/UCksd1LeoySP5St6dKlv6mvQ
diff --git a/3d-converter-0.8.0/models_converter/__init__.py b/3d-converter-0.8.0/models_converter/__init__.py
new file mode 100644
index 0000000..f5dd35a
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/__init__.py
@@ -0,0 +1,5 @@
+__all__ = [
+ 'formats',
+ 'chunks',
+ 'utils'
+]
diff --git a/3d-converter-0.8.0/models_converter/formats/__init__.py b/3d-converter-0.8.0/models_converter/formats/__init__.py
new file mode 100644
index 0000000..3e518f9
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/__init__.py
@@ -0,0 +1,6 @@
+__all__ = [
+ 'scw',
+ 'dae',
+ 'obj',
+ 'gltf'
+]
diff --git a/3d-converter-0.8.0/models_converter/formats/dae.py b/3d-converter-0.8.0/models_converter/formats/dae.py
new file mode 100644
index 0000000..2c2dbe1
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/dae.py
@@ -0,0 +1,786 @@
+from xml.etree.ElementTree import Element, SubElement, fromstring, tostring
+
+from ..utils.matrix.matrix4x4 import Matrix4x4
+
+
+def _(*args):
+ print('[ScwUtils]', end=' ')
+ for arg in args:
+ print(arg, end=' ')
+ print()
+
+
+class Collada:
+ def __init__(self):
+ self.version = '1.4.1'
+ self.xml_namespace = 'http://www.collada.org/2005/11/COLLADASchema'
+ self.collada = Element('COLLADA', version=self.version, xmlns=self.xml_namespace)
+
+ @staticmethod
+ def write_source(parent,
+ source_id: str,
+ array_tag: str,
+ array_data: list,
+ stride: int,
+ params: list):
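+        # Build a COLLADA <source>: the raw array element plus a technique_common/accessor
+        # that describes its stride and per-component <param> entries.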
+ source = SubElement(parent, 'source', id=source_id)
+
+ array = SubElement(source, array_tag)
+ array.attrib = {'id': f'{source_id}-array',
+ 'count': f'{len(array_data) * stride}'}
+
+ technique_common = SubElement(source, 'technique_common')
+ accessor = SubElement(technique_common, 'accessor')
+ accessor.attrib = {'source': f'#{source_id}-array',
+ 'count': f'{len(array_data)}',
+ 'stride': f'{stride}'}
+
+ for param_data in params:
+ param = SubElement(accessor, 'param')
+ param.attrib = param_data
+
+ array.text = ' '.join(array_data)
+
+ @staticmethod
+ def write_input(parent,
+ semantic: str,
+ source_id: str,
+ offset: int = None):
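+        # Build an <input> element referencing '#source_id'; the offset attribute is
+        # written only when one is given.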
+ attributes = {
+ 'semantic': semantic,
+ 'source': f'#{source_id}'
+ }
+
+ if offset is not None:
+ attributes['offset'] = f'{offset}'
+
+ _input = SubElement(parent, 'input')
+ _input.attrib = attributes
+
+
+class Writer:
+ def __init__(self):
+ self.writen = ''
+
+ def write(self, data: dict):
+ dae = Collada()
+ asset = SubElement(dae.collada, 'asset')
+
+ #
+ library_materials = SubElement(dae.collada, 'library_materials')
+ library_effects = SubElement(dae.collada, 'library_effects')
+ # library_images = SubElement(dae.collada, 'library_images')
+ library_geometries = SubElement(dae.collada, 'library_geometries')
+ library_controllers = SubElement(dae.collada, 'library_controllers')
+ library_animations = SubElement(dae.collada, 'library_animations')
+ # library_cameras = SubElement(dae.collada, 'library_cameras')
+ library_visual_scenes = SubElement(dae.collada, 'library_visual_scenes')
+ #
+
+ #
+ contributor = SubElement(asset, 'contributor')
+ SubElement(contributor, 'author').text = 'Vorono4ka'
+ SubElement(contributor, 'authoring_tool').text = 'models_converter (https://github.com/vorono4ka/3d-converter)'
+
+ if 'version' in data['header']:
+ SubElement(contributor, 'comments').text = 'SCW Version: ' + str(data['header']['version'])
+ #
+
+ #
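+        # One <material>/<effect> pair per SCW material; only solid phong colors are exported here.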
+ for material_data in data['materials']:
+ material_name = material_data['name']
+
+            effect_name = f'{material_name}-effect'
+
+            material = SubElement(library_materials, 'material', id=material_name)
+ SubElement(material, 'instance_effect', url=f'#{effect_name}')
+
+ effect = SubElement(library_effects, 'effect', id=effect_name)
+ profile = SubElement(effect, 'profile_COMMON')
+ technique = SubElement(profile, 'technique', sid='common')
+
+ ambient_data = material_data['effect']['ambient']
+ diffuse_data = material_data['effect']['diffuse']
+ emission_data = material_data['effect']['emission']
+ specular_data = material_data['effect']['specular']
+
+ phong = SubElement(technique, 'phong')
+
+ if type(ambient_data) is list:
+ ambient = SubElement(phong, 'ambient')
+ ambient_data[3] /= 255
+ ambient_data = [str(item) for item in ambient_data]
+ SubElement(ambient, 'color').text = ' '.join(ambient_data)
+ # else:
+ # SubElement(ambient, 'texture', texture=ambient_data, texcoord='CHANNEL0')
+
+ if type(diffuse_data) is list:
+ diffuse = SubElement(phong, 'diffuse')
+ diffuse_data[3] /= 255
+ diffuse_data = [str(item) for item in diffuse_data]
+ SubElement(diffuse, 'color').text = ' '.join(diffuse_data)
+ # else:
+ # SubElement(diffuse, 'texture', texture=diffuse_data, texcoord='CHANNEL0')
+
+ if type(emission_data) is list:
+ emission = SubElement(phong, 'emission')
+ emission_data[3] /= 255
+ emission_data = [str(item) for item in emission_data]
+ SubElement(emission, 'color').text = ' '.join(emission_data)
+ # else:
+ # SubElement(emission, 'texture', texture=emission_data, texcoord='CHANNEL0')
+
+ if type(specular_data) is list:
+ specular = SubElement(phong, 'specular')
+ specular_data[3] /= 255
+ specular_data = [str(item) for item in specular_data]
+ SubElement(specular, 'color').text = ' '.join(specular_data)
+ # else:
+ # SubElement(specular, 'texture', texture=specular_data, texcoord='CHANNEL0')
+ #
+
+ #
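+        # Geometries: vertex sources, per-material triangle lists and, when present, skin controllers.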
+ for geometry_data in data['geometries']:
+ geometry_name = geometry_data['name']
+
+ geometry = SubElement(library_geometries, 'geometry', id=f'{geometry_name}-geom')
+ mesh = SubElement(geometry, 'mesh')
+
+ #
+ for vertex_data in geometry_data['vertices']:
+ params = []
+
+ vertex_type = vertex_data['type']
+ vertex_name = vertex_data['name']
+ vertex = vertex_data['vertex']
+ stride = len(vertex[0])
+
+ if vertex_type == 'VERTEX':
+ vertex_type = 'POSITION'
+
+ source_name = f'{geometry_name}-{vertex_name}'
+
+ if vertex_type in ['POSITION', 'NORMAL']:
+ params.append({'name': 'X', 'type': 'float'})
+ params.append({'name': 'Y', 'type': 'float'})
+ params.append({'name': 'Z', 'type': 'float'})
+ elif vertex_type in ['TEXCOORD']:
+ params.append({'name': 'S', 'type': 'float'})
+ params.append({'name': 'T', 'type': 'float'})
+
+ dae.write_source(
+ mesh,
+ source_name,
+ 'float_array',
+ [' '.join([str(sub_item * vertex_data['scale']) for sub_item in item]) for item in vertex],
+ stride,
+ params
+ )
+
+ if vertex_type == 'POSITION':
+ vertices = SubElement(mesh, 'vertices', id=f'{source_name}-vertices')
+ dae.write_input(vertices, 'POSITION', source_name)
+ #
+
+ #
+ for material in geometry_data['materials']:
+ polygons_data = material['polygons']
+ material_name = material['name']
+
+ triangles = SubElement(mesh, 'triangles',
+ count=f'{len(polygons_data)}',
+ material=material_name)
+ for _input in material['inputs']:
+ input_offset = _input['offset']
+ input_name = _input['name']
+ input_type = _input['type']
+
+ if input_type == 'POSITION':
+ input_type = 'VERTEX'
+ source_id = f'{geometry_name}-{input_name}'
+ if input_type == 'VERTEX':
+ source_id = f'{source_id}-vertices'
+
+ dae.write_input(triangles, input_type, source_id, input_offset)
+ polygons = SubElement(triangles, 'p')
+
+ formatted_polygons_data = []
+ for polygon in polygons_data:
+ for point in polygon:
+ for vertex in point:
+ formatted_polygons_data.append(str(vertex))
+
+ polygons.text = ' '.join(formatted_polygons_data)
+ #
+
+ #
+ if geometry_data['have_bind_matrix']:
+ joints_matrices = []
+ joints_names = []
+
+ controller = SubElement(library_controllers, 'controller', id=f'{geometry_name}-cont')
+ skin = SubElement(controller, 'skin', source=f'#{geometry_name}-geom')
+
+ if 'bind_matrix' in geometry_data:
+ bind_matrix_data = [str(value) for value in geometry_data['bind_matrix']]
+ SubElement(skin, 'bind_shape_matrix').text = ' '.join(bind_matrix_data)
+
+ for joint in geometry_data['joints']:
+ joints_names.append(joint['name'])
+ joint_matrix = [str(value) for value in joint['matrix']]
+ joint_matrix = ' '.join(joint_matrix)
+ joints_matrices.append(joint_matrix)
+
+ joints_names_source_id = f'{geometry_name}-joints'
+ joints_matrices_source_id = f'{geometry_name}-joints-bind-matrices'
+ weights_source_id = f'{geometry_name}-weights'
+
+ dae.write_source(
+ skin,
+ joints_names_source_id,
+ 'Name_array',
+ joints_names,
+ 1,
+ [{'name': 'JOINT', 'type': 'name'}]
+ )
+
+ dae.write_source(
+ skin,
+ joints_matrices_source_id,
+ 'float_array',
+ joints_matrices,
+ 16,
+ [{'name': 'TRANSFORM', 'type': 'float4x4'}]
+ )
+
+ dae.write_source(
+ skin,
+ weights_source_id,
+ 'float_array',
+ [str(value) for value in geometry_data['weights']['weights']],
+ 1,
+ [{'name': 'WEIGHT', 'type': 'float'}]
+ )
+
+ joints = SubElement(skin, 'joints')
+ dae.write_input(joints, 'JOINT', joints_names_source_id)
+ dae.write_input(joints, 'INV_BIND_MATRIX', joints_matrices_source_id)
+
+ vertex_weights_data = [str(value) for value in geometry_data['weights']['vertex_weights']]
+ vcount = [str(value) for value in geometry_data['weights']['vcount']]
+
+ vertex_weights = SubElement(skin, 'vertex_weights', count=f'{len(vcount)}')
+ dae.write_input(vertex_weights, 'JOINT', joints_names_source_id, 0)
+ dae.write_input(vertex_weights, 'WEIGHT', weights_source_id, 1)
+
+ SubElement(vertex_weights, 'vcount').text = ' '.join(vcount)
+ SubElement(vertex_weights, 'v').text = ' '.join(vertex_weights_data)
+ #
+ #
+
+ #
+
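+        # Scene graph: node hierarchy, geometry/controller bindings and per-node animation channels.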
+ visual_scene = SubElement(library_visual_scenes, 'visual_scene',
+ id='3dConverterScene',
+ name='3d-Converter Scene')
+
+ for node_data in data['nodes']:
+ parent_name = node_data['parent']
+ parent = visual_scene
+ if parent_name != '':
+ parent = visual_scene.find(f'.//*[@id="{parent_name}"]')
+ if parent is None:
+ parent = visual_scene
+ node_name = node_data['name']
+
+ node = SubElement(parent, 'node', id=node_data['name'])
+
+ for instance in node_data['instances']:
+ instance_type = instance['instance_type']
+ instance_name = instance['instance_name']
+ bind_material = None
+
+ if instance_type == 'CONT':
+ instance_controller = SubElement(node, 'instance_controller', url=f'#{instance_name}-cont')
+ bind_material = SubElement(instance_controller, 'bind_material')
+ elif instance_type == 'GEOM':
+ instance_controller = SubElement(node, 'instance_geometry', url=f'#{instance_name}-geom')
+ bind_material = SubElement(instance_controller, 'bind_material')
+
+ if instance_type in ['GEOM', 'CONT']:
+ technique_common = SubElement(bind_material, 'technique_common')
+ for bind in instance['binds']:
+ symbol = bind['symbol']
+ target = bind['target']
+
+ SubElement(technique_common, 'instance_material',
+ symbol=symbol,
+ target=f'#{target}')
+            # Nodes with a parent but no geometry/controller instances are exported as joints.
+            if parent_name != '' and len(node_data['instances']) == 0:
+                node.attrib['type'] = 'JOINT'
+
+ #
+ frame_rate = data['header']['frame_rate']
+ time_input = []
+ matrix_output = []
+ #
+
+ frames = node_data['frames']
+ for frame in frames:
+ frame_id = frame['frame_id']
+ matrix = Matrix4x4(size=(4, 4))
+
+ time_input.append(str(frame_id/frame_rate))
+
+ position_xyz = (frame['position']['x'], frame['position']['y'], frame['position']['z'])
+ rotation_xyz = (frame['rotation']['x'], frame['rotation']['y'], frame['rotation']['z'])
+ scale_xyz = (frame['scale']['x'], frame['scale']['y'], frame['scale']['z'])
+
+ matrix.put_rotation(rotation_xyz, frame['rotation']['w'])
+ matrix.put_position(position_xyz)
+ matrix.put_scale(scale_xyz)
+
+ matrix = matrix.translation_matrix @ matrix.rotation_matrix @ matrix.scale_matrix
+ matrix_values = []
+ for row in matrix.matrix:
+ for column in row:
+ matrix_values.append(str(column))
+
+                if frame is frames[0]:
+ SubElement(node, 'matrix', sid='transform').text = ' '.join(matrix_values)
+ matrix_output.append(' '.join(matrix_values))
+
+ if len(frames) > 1:
+ animation = SubElement(library_animations, 'animation', id=node_name)
+
+ dae.write_source(
+ animation,
+ f'{node_name}-time-input',
+ 'float_array',
+ time_input,
+ 1,
+ [{'name': 'TIME', 'type': 'float'}]
+ )
+ dae.write_source(
+ animation,
+ f'{node_name}-matrix-output',
+ 'float_array',
+ matrix_output,
+ 16,
+ [{'name': 'TRANSFORM', 'type': 'float4x4'}]
+ )
+ dae.write_source(
+ animation,
+ f'{node_name}-interpolation',
+ 'Name_array',
+ ['LINEAR'] * len(frames),
+ 1,
+ [{'name': 'INTERPOLATION', 'type': 'name'}]
+ )
+
+ sampler = SubElement(animation, 'sampler', id=f'{node_name}-sampler')
+
+ dae.write_input(
+ sampler,
+ 'INPUT',
+ f'{node_name}-time-input'
+ )
+
+ dae.write_input(
+ sampler,
+ 'OUTPUT',
+ f'{node_name}-matrix-output'
+ )
+
+ dae.write_input(
+ sampler,
+ 'INTERPOLATION',
+ f'{node_name}-interpolation'
+ )
+
+ SubElement(animation, 'channel',
+ source=f'#{node_name}-sampler',
+ target=f'{node_name}/transform')
+
+ scene = SubElement(dae.collada, 'scene')
+ SubElement(scene, 'instance_visual_scene',
+ url='#3dConverterScene',
+ name='3d-Converter Scene')
+
+ #
+
+ self.writen = tostring(dae.collada, xml_declaration=True).decode()
+
+
+class Parser:
+ def node(self, nodes):
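+        # Recursively collect node data: name, geometry/controller instances with their
+        # material binds, transform matrix and children.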
+ nodes_list = []
+ for node in nodes:
+ instance_geometry = node.findall('collada:instance_geometry', self.namespaces)
+ instance_controller = node.findall('collada:instance_controller', self.namespaces)
+
+ instances = [*instance_geometry, *instance_controller]
+
+ children = self.node(node.findall('collada:node', self.namespaces))
+
+ if 'name' not in node.attrib:
+ node.attrib['name'] = node.attrib['id']
+
+ node_data = {
+ 'name': node.attrib['name'],
+ 'instances': []
+ }
+
+ for instance in instances:
+ instance_data = {}
+ binds = []
+
+ bind_material = instance.find('collada:bind_material', self.namespaces)
+ technique_common = bind_material[0]
+
+ for instance_material in technique_common:
+ binds.append({
+ 'symbol': instance_material.attrib['symbol'],
+ 'target': instance_material.attrib['target'][1:]
+ })
+
+                # The element tag tells whether this is a geometry or a controller instance.
+                if instance.tag.endswith('instance_geometry'):
+                    instance_data['instance_type'] = 'GEOM'
+                elif instance.tag.endswith('instance_controller'):
+                    instance_data['instance_type'] = 'CONT'
+
+                instance_data['instance_name'] = instance.attrib['url'][1:]
+
+ instance_data['binds'] = binds
+ node_data['instances'].append(instance_data)
+
+ matrix = node.findall('collada:matrix', self.namespaces)
+ if matrix:
+ matrix_data = matrix[0].text.split()
+ matrix_data = [[float(value) for value in matrix_data[x:x + 4]] for x in range(0, len(matrix_data), 4)]
+
+ node_data['matrix'] = matrix_data
+
+ node_data['children'] = children
+
+ nodes_list.append(node_data)
+
+ return nodes_list
+
+ def fix_nodes_list(self, nodes, parent: str = ''):
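+        # Flatten the recursive node tree into self.parsed['nodes'], keeping the parent name
+        # and, for nodes without instances, turning the node matrix into a single frame
+        # holding its translation.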
+ for node in nodes:
+ node_data = {
+ 'name': node['name'],
+ 'parent': parent,
+ 'instances': node['instances']
+ }
+
+ if len(node_data['instances']) == 0:
+ node_data['frames_settings'] = [0, 0, 0, 0, 0, 0, 0, 0]
+ node_data['frames'] = []
+
+ if 'matrix' in node:
+ matrix = Matrix4x4(matrix=node['matrix'])
+
+ # scale = matrix.get_scale()
+ position = matrix.get_position()
+
+ node_data['frames'] = [
+ {
+ 'frame_id': 0,
+ 'rotation': {'x': 0, 'y': 0, 'z': 0, 'w': 0},
+ 'position': position,
+ 'scale': {'x': 1, 'y': 1, 'z': 1}
+ }
+ ]
+ else:
+ node_data['frames'] = []
+
+ # node_data['frames'] = node['frames']
+ self.parsed['nodes'].append(node_data)
+ self.fix_nodes_list(node['children'], node['name'])
+
+ def __init__(self, file_data):
+ self.parsed = {'header': {'version': 2,
+ 'frame_rate': 30,
+ 'materials_file': 'sc3d/character_materials.scw'},
+ 'materials': [],
+ 'geometries': [],
+ 'cameras': [],
+ 'nodes': []}
+
+ self.geometry_info = {}
+
+ root = fromstring(file_data)
+
+ self.namespaces = {
+ 'collada': 'http://www.collada.org/2005/11/COLLADASchema'
+ }
+
+ #
+ self.library_materials = root.find('./collada:library_materials', self.namespaces)
+ self.library_effects = root.find('./collada:library_effects', self.namespaces)
+
+ self.library_geometries = root.find('./collada:library_geometries', self.namespaces)
+ self.library_controllers = root.find('./collada:library_controllers', self.namespaces)
+
+ self.instance_scene = root.find('./collada:scene', self.namespaces).find('collada:instance_visual_scene',
+ self.namespaces)
+ self.library_scenes = root.find('./collada:library_visual_scenes', self.namespaces)
+ #
+
+ if self.library_materials is None:
+ self.library_materials = []
+
+ def parse(self):
+ for material in self.library_materials:
+ material_name = material.attrib['name']
+
+ instance_effect = material.find('collada:instance_effect', self.namespaces)
+ if instance_effect is not None:
+ effect_url = instance_effect.attrib['url'][1:]
+ effect = self.library_effects.find(f'collada:effect[@id="{effect_url}"]', self.namespaces)
+
+ if effect is not None:
+ # profile = None
+ # for item in effect:
+ # if 'profile' in item.tag:
+ # profile = item
+ # technique = profile.find('collada:technique', self.namespaces)
+ #
+ # emission_data = None
+ # ambient_data = None
+ # diffuse_data = None
+ #
+ # emission = technique[0].find('collada:emission', self.namespaces)
+ # ambient = technique[0].find('collada:ambient', self.namespaces)
+ # diffuse = technique[0].find('collada:diffuse', self.namespaces)
+ #
+ # if 'color' in emission[0].tag:
+ # emission_data = [float(item) for item in emission[0].text.split()]
+ # emission_data[3] *= 255
+ # elif 'texture' in emission[0].tag:
+ # # emission_data = emission[0].attrib['texture']
+ # emission_data = '.'
+ #
+ # if 'color' in ambient[0].tag:
+ # ambient_data = [float(item) for item in ambient[0].text.split()]
+ # ambient_data[3] *= 255
+ # elif 'texture' in ambient[0].tag:
+ # # ambient_data = ambient[0].attrib['texture']
+ # ambient_data = '.'
+ #
+ # if 'color' in diffuse[0].tag:
+ # diffuse_data = [float(item) for item in diffuse[0].text.split()]
+ # diffuse_data[3] *= 255
+ # elif 'texture' in diffuse[0].tag:
+ # # diffuse_data = diffuse[0].attrib['texture']
+ # diffuse_data = '.'
+
+ material_data = {
+ 'name': material_name,
+ 'effect': {
+ 'ambient': [0, 0, 0, 255], # ambient_data,
+ 'diffuse': '.', # diffuse_data,
+ 'specular': '.',
+ 'colorize': [255, 255, 255, 255],
+ 'emission': [0, 0, 0, 255], # emission_data,
+ 'lightmaps': {
+ 'diffuse': 'sc3d/diffuse_lightmap.png',
+ 'specular': 'sc3d/specular_lightmap.png'
+ },
+ 'shader_define_flags': 3014
+ }
+ }
+
+ self.parsed['materials'].append(material_data)
+
+ scene_url = self.instance_scene.attrib['url'][1:]
+ scene = self.library_scenes.find(f'collada:visual_scene[@id="{scene_url}"]', self.namespaces)
+
+ nodes = self.node(scene.findall('collada:node', self.namespaces))
+ self.fix_nodes_list(nodes)
+ self.parse_nodes()
+
+ def parse_nodes(self):
+ nodes = self.parsed['nodes']
+ for node_index in range(len(nodes)):
+ node = nodes[node_index]
+ for instance in node['instances']:
+ controller = None
+ geometry = None
+
+ if instance['instance_type'] == 'CONT':
+ controller = self.library_controllers \
+ .find(f'collada:controller[@id="{instance["instance_name"]}"]', self.namespaces)
+
+ geometry_url = controller[0].attrib['source'][1:]
+ geometry = self.library_geometries \
+ .find(f'collada:geometry[@id="{geometry_url}"]', self.namespaces)
+ elif instance['instance_type'] == 'GEOM':
+ geometry = self.library_geometries \
+ .find(f'collada:geometry[@id="{instance["instance_name"]}"]', self.namespaces)
+
+ if not ('name' in geometry.attrib):
+ geometry.attrib['name'] = geometry.attrib['id']
+
+ instance['instance_name'] = geometry.attrib['name']
+
+                # str.removesuffix() requires Python 3.9, but the package targets Python >= 3.7,
+                # so strip the controller and geometry suffixes manually.
+                for suffix in ['-skin', '-cont']:
+                    if instance['instance_name'].endswith(suffix):
+                        instance['instance_name'] = instance['instance_name'][:-len(suffix)]
+                for suffix in ['-mesh', '-geom']:
+                    if instance['instance_name'].endswith(suffix):
+                        instance['instance_name'] = instance['instance_name'][:-len(suffix)]
+
+ self.parsed['nodes'][node_index] = node
+
+ if geometry is not None:
+ self.geometry_info = {'name': '',
+ 'group': '', # node['parent'],
+ 'vertices': [],
+ 'have_bind_matrix': False,
+ 'materials': []}
+ if controller is not None:
+ self.parse_controller(controller)
+
+ self.parse_geometry(geometry)
+
+ def parse_controller(self, controller):
+ self.geometry_info['have_bind_matrix'] = True
+
+ skin = controller[0]
+
+ bind_shape_matrix = skin.find('collada:bind_shape_matrix', self.namespaces).text
+ bind_shape_matrix = [float(x) for x in bind_shape_matrix.split()]
+
+ self.geometry_info['bind_matrix'] = bind_shape_matrix
+
+ self.geometry_info['joints'] = []
+ joints = skin.find('collada:joints', self.namespaces)
+ joint_inputs = joints.findall('collada:input', self.namespaces)
+ for _input in joint_inputs:
+ # semantic = _input.attrib['semantic']
+ source_url = _input.attrib['source']
+ source = skin.find(f'collada:source[@id="{source_url[1:]}"]', self.namespaces)
+
+ accessor = source.find('collada:technique_common/collada:accessor', self.namespaces)
+ accessor_stride = int(accessor.attrib['stride'])
+ accessor_source_url = accessor.attrib['source']
+ accessor_source = source.find(f'collada:*[@id="{accessor_source_url[1:]}"]', self.namespaces)
+ params = accessor.findall('collada:param', self.namespaces)
+
+ for param in params:
+ param_name = param.attrib['name']
+ # param_type = param.attrib['type']
+
+ if param_name == 'JOINT':
+ for name in accessor_source.text.split():
+ self.geometry_info['joints'].append({
+ 'name': name
+ })
+
+ if param_name == 'TRANSFORM':
+ for x in range(int(accessor_source.attrib['count']) // int(accessor_stride)):
+ matrix = []
+ for y in accessor_source.text.split()[x * accessor_stride:(x + 1) * accessor_stride]:
+ matrix.append(float(y))
+ self.geometry_info['joints'][x]['matrix'] = matrix
+
+ self.geometry_info['weights'] = {}
+ vertex_weights = skin.find('collada:vertex_weights', self.namespaces)
+ vertex_weights_inputs = vertex_weights.findall('collada:input', self.namespaces)
+ for _input in vertex_weights_inputs:
+ semantic = _input.attrib['semantic']
+ source_url = _input.attrib['source']
+ source = skin.find(f'collada:source[@id="{source_url[1:]}"]', self.namespaces)
+
+ if semantic == 'WEIGHT':
+ accessor = source.find('collada:technique_common/collada:accessor', self.namespaces)
+ accessor_source_url = accessor.attrib['source']
+ accessor_source = source.find(f'collada:*[@id="{accessor_source_url[1:]}"]', self.namespaces)
+
+ params = accessor.findall('collada:param', self.namespaces)
+ for param in params:
+ param_name = param.attrib['name']
+ # param_type = param.attrib['type']
+
+ if param_name == 'WEIGHT':
+ weights = [float(x) for x in accessor_source.text.split()]
+ self.geometry_info['weights']['weights'] = weights
+
+ vcount = vertex_weights.find('collada:vcount', self.namespaces).text
+ vcount = [int(x) for x in vcount.split()]
+ self.geometry_info['weights']['vcount'] = vcount
+
+ v = vertex_weights.find('collada:v', self.namespaces).text
+ v = [int(x) for x in v.split()]
+ self.geometry_info['weights']['vertex_weights'] = v
+
+ def parse_geometry(self, geometry):
+ name = geometry.attrib['name']
+
+ if name[-5:] in ['-mesh', '-geom']:
+ name = name[:-5]
+
+ self.geometry_info['name'] = name
+ self.geometry_info['group'] = name
+
+ mesh = geometry[0]
+
+ triangles = mesh.findall('collada:triangles', self.namespaces)
+ if triangles:
+ pass
+ else:
+ triangles = mesh.findall('collada:polylist', self.namespaces)
+ inputs = triangles[0].findall('collada:input', self.namespaces)
+ for _input in inputs:
+ semantic = _input.attrib['semantic']
+ source_link = _input.attrib['source'][1:]
+ source = mesh.find(f'*[@id="{source_link}"]')
+
+ if semantic == 'VERTEX':
+ vertices_input = source[0]
+ semantic = vertices_input.attrib['semantic']
+ source_link = vertices_input.attrib['source'][1:]
+ source = mesh.find(f'*[@id="{source_link}"]')
+
+ float_array = source.find('collada:float_array', self.namespaces)
+ accessor = source.find('collada:technique_common/collada:accessor', self.namespaces)
+
+ vertex_temp = [float(floating) for floating in float_array.text.split()]
+
+ scale = max(max(vertex_temp), abs(min(vertex_temp)))
+ if scale < 1:
+ scale = 1
+ if semantic == 'TEXCOORD':
+ vertex_temp[1::2] = [1 - x for x in vertex_temp[1::2]]
+ vertex_temp = [value / scale for value in vertex_temp]
+
+ vertex = []
+ for x in range(0, len(vertex_temp), len(accessor)):
+ vertex.append(vertex_temp[x: x + len(accessor)])
+
+ self.geometry_info['vertices'].append({'type': semantic,
+ 'index': len(self.geometry_info['vertices']),
+ 'scale': scale,
+ 'vertex': vertex})
+ for triangle in triangles:
+ triangles_material = triangle.attrib['material']
+
+ p = triangle.find('collada:p', self.namespaces)
+ polygons_temp = [int(integer) for integer in p.text.split()]
+
+ polygons = [
+ [
+ polygons_temp[polygon_index + point_index:polygon_index + point_index + 3]
+ for point_index in range(0, len(inputs) * 3, 3)
+ ] for polygon_index in range(0, len(polygons_temp), len(inputs) * 3)
+ ]
+ self.geometry_info['materials'].append({'name': triangles_material,
+ 'polygons': polygons})
+ self.parsed['geometries'].append(self.geometry_info)
diff --git a/3d-converter-0.8.0/models_converter/formats/gltf.py b/3d-converter-0.8.0/models_converter/formats/gltf.py
new file mode 100644
index 0000000..281da6b
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/gltf.py
@@ -0,0 +1,779 @@
+import json
+
+from models_converter.formats.dae import Writer
+from models_converter.utils.reader import Reader
+
+
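+# Helpers that map between glTF's camelCase JSON keys and the snake_case attribute names used below.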
+def to_camelcase(property_name: str):
+ words = property_name.split('_')
+ for word_index in range(len(words)):
+ word = words[word_index]
+ if word_index > 0:
+ word = list(word)
+ word[0] = word[0].upper()
+
+ word = ''.join(word)
+
+ words[word_index] = word
+ camelcase_name = ''.join(words)
+ return camelcase_name
+
+
+def to_lowercase(property_name: str):
+ letters = list(property_name)
+
+ for letter_index in range(len(letters)):
+ letter = letters[letter_index]
+
+ if letter.isupper():
+ letter = f'_{letter.lower()}'
+
+ letters[letter_index] = letter
+
+ lowercase_name = ''.join(letters)
+ return lowercase_name
+
+
+def get_data_from_dict(dictionary, key, default=None):
+ if key in dictionary:
+ return dictionary[key]
+ return default
+
+
+from_dict = get_data_from_dict
+
+
+class GlTFProperty:
+ def __init__(self):
+ self.extensions = None
+ self.extras = None
+
+ def from_dict(self, dictionary: dict):
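+        # Fill attributes from a glTF JSON dict: keys are converted to snake_case and nested
+        # objects/lists are deserialized through the matching GlTFProperty template attribute.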
+ if dictionary:
+ for key, value in dictionary.items():
+ attribute_name = to_lowercase(key)
+ value_type = type(value)
+
+ attribute_value = getattr(self, attribute_name)
+ attribute_value_type = type(attribute_value)
+
+ if attribute_value is None or value_type in [int, str]:
+ attribute_value = value
+ elif issubclass(attribute_value_type, GlTFProperty):
+ if value_type is list:
+ value_type = attribute_value_type
+ values = []
+
+ for item in value:
+ new_value = value_type()
+ new_value.from_dict(item)
+
+ values.append(new_value)
+
+ attribute_value = values
+ else:
+ attribute_value = attribute_value_type()
+ attribute_value.from_dict(value)
+
+ setattr(self, attribute_name, attribute_value)
+
+ def to_dict(self) -> dict:
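+        # Serialize back to a JSON-ready dict, camelCasing the keys and skipping None attributes.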
+ dictionary = {}
+ for key, value in self.__dict__.items():
+ if value is not None:
+ attribute_name = to_camelcase(key)
+ value_type = type(value)
+
+ attribute_value = None
+
+ if value_type is list:
+ attribute_value = []
+ for item in value:
+ item_type = type(item)
+
+ if issubclass(item_type, GlTFProperty):
+ item = item.to_dict()
+ attribute_value.append(item)
+ elif issubclass(value_type, GlTFProperty):
+ attribute_value = value.to_dict()
+ elif attribute_value is None:
+ attribute_value = value
+
+ dictionary[attribute_name] = attribute_value
+ return dictionary
+
+ def __getitem__(self, item):
+ item = to_lowercase(item)
+ if hasattr(self, item):
+ return getattr(self, item)
+ else:
+ raise IndexError('The object has no attribute named ' + item)
+
+ def __repr__(self) -> str:
+ return f'<{self.__class__.__name__} ({self.to_dict()})>'
+
+
+class Accessor(GlTFProperty):
+ class Sparse(GlTFProperty):
+ class Indices(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.buffer_view = None
+ self.component_type = None
+
+ self.byte_offset = 0
+
+ class Values(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.buffer_view = None
+
+ self.byte_offset = 0
+
+ def __init__(self):
+ super().__init__()
+ self.count = None
+ self.indices = self.Indices()
+ self.values = self.Values()
+
+ def __init__(self):
+ super().__init__()
+ self.component_type = None
+ self.count = None
+ self.type = None
+
+ self.buffer_view = None
+ self.byte_offset = 0
+ self.normalized = False
+ self.max = None
+ self.min = None
+ self.sparse = self.Sparse()
+ self.name = None
+
+
+class Animation(GlTFProperty):
+ class AnimationSampler(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.input = None
+ self.output = None
+
+ self.interpolation = None # Default: 'LINEAR'
+
+ class Channel(GlTFProperty):
+ class Target(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.path = None
+
+ self.node = None
+
+ def __init__(self):
+ super().__init__()
+ self.sampler = None
+ self.target = self.Target()
+
+ def __init__(self):
+ super().__init__()
+ self.channels = self.Channel()
+ self.samplers = self.AnimationSampler()
+
+ self.name = None
+
+
+class Asset(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.version = None
+
+ self.copyright = None
+ self.generator = None
+ self.min_version = None
+
+
+class Buffer(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.byte_length = None
+
+ self.uri = None
+ self.name = None
+
+
+class BufferView(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.buffer = None
+ self.byte_length = None
+
+ self.byte_offset = 0
+ self.byte_stride = None
+ self.target = None
+ self.name = None
+
+
+class Camera(GlTFProperty):
+ class Orthographic(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.xmag = None
+ self.ymag = None
+ self.zfar = None
+ self.znear = None
+
+ class Perspective(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.yfov = None
+ self.znear = None
+
+ self.aspect_ratio = None
+ self.zfar = None
+
+ def __init__(self):
+ super().__init__()
+ self.type = None
+
+ self.orthographic = None
+ self.perspective = None
+ self.name = None
+
+
+class Image(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.uri = None
+ self.mime_type = None
+ self.buffer_view = None
+ self.name = None
+
+
+class Material(GlTFProperty):
+ class NormalTextureInfo(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.index = None
+
+ self.tex_coord = None # Default: 0
+ self.scale = None # Default: 1
+
+ class OcclusionTextureInfo(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.index = None
+
+ self.tex_coord = None # Default: 0
+ self.strength = None # Default: 1
+
+ class PbrMetallicRoughness(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.base_color_factor = None # Default: [1, 1, 1, 1]
+ self.base_color_texture = None
+ self.metallic_factor = None # Default: 1
+ self.roughness_factor = None # Default: 1
+ self.metallic_roughness_texture = None
+
+ def __init__(self):
+ super().__init__()
+ self.name = None
+ self.pbr_metallic_roughness = None
+ self.normal_texture = None
+ self.occlusion_texture = None
+ self.emissive_texture = None
+ self.emissive_factor = None # Default: [0, 0, 0]
+ self.alpha_mode = None # Default: 'OPAQUE'
+ self.alpha_cutoff = None # Default: 0.5
+ self.double_sided = None # Default: False
+
+
+class Mesh(GlTFProperty):
+ class Primitive(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.attributes = None
+
+ self.indices = None
+ self.material = None
+ self.mode = None # Default: 4
+ self.targets = None
+
+ def __init__(self):
+ super().__init__()
+ self.primitives = self.Primitive()
+
+ self.weights = None
+ self.name = None
+
+
+class Node(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.camera = None
+ self.children = None
+ self.skin = None
+ self.matrix = None # Default: [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
+ self.mesh = None
+ self.rotation = None # Default: [0, 0, 0, 1]
+ self.scale = None # Default: [1, 1, 1]
+ self.translation = None # Default: [0, 0, 0]
+ self.weights = None
+ self.name = None
+
+
+class Sampler(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.mag_filter = None
+ self.min_filter = None
+ self.wrap_s = None # Default: 10497
+ self.wrap_t = None # Default: 10497
+ self.name = None
+
+
+class Scene(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.nodes = None
+ self.name = None
+
+
+class Skin(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.joints = None
+
+ self.inverse_bind_matrices = None
+ self.skeleton = None
+ self.name = None
+
+
+class Texture(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.sampler = None
+ self.source = None
+ self.name = None
+
+
+class TextureInfo(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.index = None
+
+ self.tex_coord = None # Default: 0
+
+
+class GlTF(GlTFProperty):
+ def __init__(self):
+ super().__init__()
+ self.asset = Asset()
+
+ self.extensions_used = None
+ self.extensions_required = None
+
+ self.accessors = Accessor()
+ self.animations = Animation()
+ self.buffers = Buffer()
+ self.buffer_views = BufferView()
+ self.cameras = Camera()
+ self.images = Image()
+ self.materials = Material()
+ self.meshes = Mesh()
+ self.nodes = Node()
+ self.samplers = Sampler()
+ self.scene = None
+ self.scenes = Scene()
+ self.skins = Skin()
+ self.textures = Texture()
+
+
+class GlTFChunk:
+ def __init__(self):
+ self.chunk_length = 0
+ self.chunk_name = b''
+ self.data = b''
+
+
+class Parser(Reader):
+ def __init__(self, initial_bytes: bytes):
+ super().__init__(initial_bytes, 'little')
+
+ self.magic = self.read(4)
+ if self.magic != b'glTF':
+ raise TypeError('File Magic isn\'t "glTF"')
+
+ self.parsed = {
+ 'header': {
+ 'frame_rate': 30
+ },
+ 'materials': [],
+ 'geometries': [],
+ 'cameras': [],
+ 'nodes': []
+ }
+
+ self.version = None
+ self.length = None
+
+ self.json_chunk = None
+ self.bin_chunk = None
+
+ self.buffer_views = []
+ self.accessors = []
+ self.buffers = []
+
+ self.gltf = GlTF()
+
+ def parse_bin(self):
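+        # Decode the GLB binary chunk: slice it into buffers, cut buffer views out of those
+        # buffers, then read typed (optionally normalized) accessor data from the views.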
+ super().__init__(self.bin_chunk.data, 'little')
+
+ for buffer in self.gltf.buffers:
+ parsed_buffer = self.read(buffer.byte_length)
+ self.buffers.append(parsed_buffer)
+
+ for buffer_view in self.gltf.buffer_views:
+ super().__init__(self.buffers[buffer_view.buffer], 'little')
+
+ self.read(buffer_view.byte_offset)
+
+ length = buffer_view.byte_length
+ data = self.read(length)
+
+ self.buffer_views.append(data)
+
+ for accessor in self.gltf.accessors:
+            super().__init__(self.buffer_views[accessor.buffer_view], 'little')
+ temp_accessor = []
+
+ self.read(accessor.byte_offset)
+
+ types = {
+ 5120: self.readByte,
+ 5121: self.readUByte,
+ 5122: self.readShort,
+ 5123: self.readUShort,
+ 5125: self.readUInt32,
+ 5126: self.readFloat
+ }
+
+ items_count = {
+ 'SCALAR': 1,
+ 'VEC2': 2,
+ 'VEC3': 3,
+ 'VEC4': 4,
+ 'MAT2': 4,
+ 'MAT3': 9,
+ 'MAT4': 16
+ }
+
+ for x in range(accessor.count):
+ temp_list = []
+ for i in range(items_count[accessor.type]):
+ temp_list.append(types[accessor.component_type]())
+ temp_accessor.append(temp_list)
+
+ if accessor.normalized:
+ for item_index, data in enumerate(temp_accessor):
+ new_data = []
+ for item in data:
+ if accessor['component_type'] == 5120:
+ new_data.append(max(item / 127, -1.0))
+ elif accessor['component_type'] == 5121:
+ new_data.append(item / 255)
+ elif accessor['component_type'] == 5122:
+ new_data.append(max(item / 32767, -1.0))
+ elif accessor['component_type'] == 5123:
+ new_data.append(item / 65535)
+ else:
+ new_data.append(item)
+ temp_accessor[item_index] = new_data
+
+ self.accessors.append(temp_accessor)
+
+ def parse(self):
+ #
+
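+        # GLB layout: the 4-byte magic was read in __init__; next come the uint32 version and
+        # total length, then the JSON chunk and the binary chunk (each: length, name, data).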
+ self.version = self.readUInt32()
+ self.length = self.readUInt32()
+
+ self.json_chunk = GlTFChunk()
+ self.bin_chunk = GlTFChunk()
+
+ self.json_chunk.chunk_length = self.readUInt32()
+ self.json_chunk.chunk_name = self.read(4)
+ self.json_chunk.data = self.read(self.json_chunk.chunk_length)
+
+ self.bin_chunk.chunk_length = self.readUInt32()
+ self.bin_chunk.chunk_name = self.read(4)
+ self.bin_chunk.data = self.read(self.bin_chunk.chunk_length)
+
+ #
+
+ self.gltf.from_dict(json.loads(self.json_chunk.data))
+
+ self.parse_bin()
+
+ #
+
+ scene_id = self.gltf.scene
+ scene = self.gltf.scenes[scene_id]
+
+ for node_id in scene.nodes:
+ node = self.gltf.nodes[node_id]
+ self.parse_node(node)
+
+ #
+
+ def parse_node(self, node: Node, parent: str = None):
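+        # Convert one glTF node (and, recursively, its children) into the intermediate
+        # node/geometry dictionaries used by the writers.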
+ node_name = node.name
+ # node_name = node_name.split('|')
+ # if len(node_name) > 1:
+ # node_name = node_name[1]
+ # parent = node_name[0]
+ # else:
+ # node_name = node_name[0]
+
+ # node_name = node_name.split(':')
+ # if len(node_name) > 1:
+ # if node_name[1] == 'PIV':
+ # print(node.name, node.translation)
+ # # else:
+ # print(node)
+ # node_name = node_name[0]
+ # else:
+ # node_name = node_name[0]
+
+ node_data = {
+ 'name': node_name,
+ 'parent': parent,
+ 'has_target': False,
+ 'frames': []
+ }
+
+        if node.mesh is not None:
+ node_data['has_target'] = True
+ node_data['target_type'] = 'GEOM'
+
+ geometry_data = {
+ 'name': '',
+ 'group': '',
+ 'vertices': [],
+ 'have_bind_matrix': False,
+ 'weights': {
+ 'vertex_weights': [],
+ 'weights': [],
+ 'vcount': []
+ },
+ 'materials': []
+ }
+
+            if node.skin is not None:
+ geometry_data['have_bind_matrix'] = True
+ geometry_data['bind_matrix'] = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
+ geometry_data['joints'] = []
+ node_data['target_type'] = 'CONT'
+
+ skin_id = node.skin
+ skin = self.gltf.skins[skin_id]
+ bind_matrices = self.accessors[skin.inverse_bind_matrices]
+
+ for joint in skin.joints:
+ joint_index = skin['joints'].index(joint)
+ joint_node = self.gltf.nodes[joint]
+ joint_name = joint_node['name']
+ matrix = bind_matrices[joint_index]
+
+ joint_data = {
+ 'name': joint_name,
+ 'matrix': matrix
+ }
+ geometry_data['joints'].append(joint_data)
+
+ mesh_id = node.mesh
+ mesh = self.gltf.meshes[mesh_id]
+ mesh_name = mesh.name
+ mesh_name = mesh_name.split('|')
+
+ if len(mesh_name) > 1:
+ geometry_data['group'] = mesh_name[0]
+ geometry_data['name'] = mesh_name[1]
+ else:
+ geometry_data['name'] = mesh_name[0]
+
+ node_data['target'] = geometry_data['name']
+ node_data['binds'] = []
+
+ offsets = {
+ 'POSITION': 0,
+ 'NORMAL': 0,
+ 'TEXCOORD': 0
+ }
+
+ for primitive in mesh.primitives:
+ if primitive.to_dict() != {}:
+ primitive_index = mesh.primitives.index(primitive)
+ attributes = primitive.attributes
+ material_id = primitive.material
+ polygons_id = primitive.indices
+
+ inputs = []
+
+ polygons = self.accessors[polygons_id]
+ material = self.gltf.materials[material_id]
+
+ material_name = material.extensions['SC_shader']['name']
+ node_data['binds'].append({
+ 'symbol': material_name,
+ 'target': material_name
+ })
+
+ for attribute_id in attributes:
+ attribute = attributes[attribute_id]
+ if attribute_id == 'POSITION':
+ position = self.accessors[attribute]
+
+ geometry_data['vertices'].append({
+ 'type': 'POSITION',
+ 'name': f'position_{primitive_index}',
+ 'index': len(geometry_data['vertices']),
+ 'scale': 1,
+ 'vertex': position
+ })
+
+ inputs.append({
+ 'type': 'POSITION',
+ 'offset': '1',
+ 'name': f'position_{primitive_index}',
+ })
+ elif attribute_id == 'NORMAL':
+ normal = self.accessors[attribute]
+
+ geometry_data['vertices'].append({
+ 'type': 'NORMAL',
+ 'name': f'normal_{primitive_index}',
+ 'index': len(geometry_data['vertices']),
+ 'scale': 1,
+ 'vertex': normal
+ })
+
+ inputs.append({
+ 'type': 'NORMAL',
+ 'offset': '0',
+ 'name': f'normal_{primitive_index}',
+ })
+ elif attribute_id.startswith('TEXCOORD'):
+ texcoord = self.accessors[attribute]
+
+ texcoord = [[item[0], 1 - item[1]] for item in texcoord]
+
+ geometry_data['vertices'].append({
+ 'type': 'TEXCOORD',
+ 'name': f'texcoord_{primitive_index}',
+ 'index': len(geometry_data['vertices']),
+ 'scale': 1,
+ 'vertex': texcoord
+ })
+
+ inputs.append({
+ 'type': 'TEXCOORD',
+ 'offset': '2',
+ 'name': f'texcoord_{primitive_index}',
+ })
+ elif attribute_id.startswith('JOINTS'):
+ vertex_weights = self.accessors[attribute]
+ elif attribute_id.startswith('WEIGHTS'):
+ weights = self.accessors[attribute]
+
+ for x in range(len(vertex_weights)):
+ geometry_data['weights']['vcount'].append(0)
+
+ temp_list = [
+ [vertex_weights[x][0], weights[x][0]],
+ [vertex_weights[x][1], weights[x][1]],
+ [vertex_weights[x][2], weights[x][2]],
+ [vertex_weights[x][3], weights[x][3]]
+ ]
+ for pair in temp_list:
+ if pair[1] != 0:
+ geometry_data['weights']['vcount'][x] += 1
+ geometry_data['weights']['vertex_weights'].append(pair[0])
+ if pair[1] not in geometry_data['weights']['weights']:
+ geometry_data['weights']['weights'].append(pair[1])
+ geometry_data['weights']['vertex_weights'].append(
+ geometry_data['weights']['weights'].index(pair[1])
+ )
+
+ polygons = [
+ [
+ [
+ value[0] + offsets['NORMAL'],
+ value[0] + offsets['POSITION'],
+ value[0] + offsets['TEXCOORD']
+ ] for value in polygons[x:x + 3]
+ ] for x in range(0, len(polygons), 3)
+ ]
+
+ geometry_data['materials'].append({
+ 'name': material_name,
+ 'inputs': inputs,
+ 'polygons': polygons
+ })
+
+ for attribute_id in attributes:
+ if attribute_id == 'POSITION':
+ offsets['POSITION'] += len(position)
+ elif attribute_id == 'NORMAL':
+ offsets['NORMAL'] += len(normal)
+ elif attribute_id.startswith('TEXCOORD'):
+ offsets['TEXCOORD'] += len(texcoord)
+
+ self.parsed['geometries'].append(geometry_data)
+
+ self.parsed['nodes'].append(node_data)
+
+ if node.translation or node.rotation or node.scale:
+ node_data['frames'].append({
+ 'frame_id': 0,
+ 'rotation': {'x': 0, 'y': 0, 'z': 0, 'w': 0},
+ 'position': {'x': 0, 'y': 0, 'z': 0},
+ 'scale': {'x': 1, 'y': 1, 'z': 1}
+ })
+
+ if node.translation:
+ node_data['frames'][0]['position'] = {
+ 'x': node.translation[0],
+ 'y': node.translation[1],
+ 'z': node.translation[2]
+ }
+ if node.rotation:
+ node_data['frames'][0]['rotation'] = {
+ 'x': node.rotation[0],
+ 'y': node.rotation[1],
+ 'z': node.rotation[2],
+ 'w': node.rotation[3]
+ }
+ if node.scale:
+ node_data['frames'][0]['scale'] = {
+ 'x': node.scale[0],
+ 'y': node.scale[1],
+ 'z': node.scale[2]
+ }
+
+ if node.children:
+ for child_id in node.children:
+ child = self.gltf.nodes[child_id]
+ self.parse_node(child, node_name)
+
+
+if __name__ == '__main__':
+    with open('../crow_geo.glb', 'rb') as fh:
+        file_data = fh.read()
+
+    parser = Parser(file_data)
+    parser.parse()
+
+    writer = Writer()
+    writer.write(parser.parsed)
+    with open('../crow_geo.dae', 'w') as fh:
+        fh.write(writer.writen)
diff --git a/3d-converter-0.8.0/models_converter/formats/obj.py b/3d-converter-0.8.0/models_converter/formats/obj.py
new file mode 100644
index 0000000..ecdada4
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/obj.py
@@ -0,0 +1,189 @@
+def _(*args):
+ print('[ScwUtils]', end=' ')
+ for arg in args:
+ print(arg, end=' ')
+ print()
+
+
+class Writer:
+ def __init__(self):
+ self.writen = ''
+
+ self.temp_vertices_offsets = {
+ 'POSITION': 0,
+ 'TEXCOORD': 0,
+ 'NORMAL': 0
+ }
+
+ self.vertices_offsets = {
+ 'POSITION': 0,
+ 'TEXCOORD': 0,
+ 'NORMAL': 0
+ }
+
+ def write(self, data: dict):
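+        # Write v/vn/vt lines for every geometry, then one 'o' object per material with
+        # f position/texcoord/normal faces (1-based indices plus running offsets).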
+ for geom in data['geometries']:
+ for key in self.vertices_offsets.keys():
+ self.vertices_offsets[key] = self.temp_vertices_offsets[key]
+ prefix = ''
+
+ name = geom['name']
+ vertices = geom['vertices']
+ materials = geom['materials']
+ for vertex in vertices:
+ if vertex['type'] == 'POSITION':
+ prefix = 'v '
+ elif vertex['type'] == 'NORMAL':
+ prefix = 'vn '
+ elif vertex['type'] == 'TEXCOORD':
+ prefix = 'vt '
+
+ self.temp_vertices_offsets[vertex['type']] += len(vertex['vertex'])
+
+ for item in vertex['vertex']:
+ temp_string = prefix
+ for subitem in item:
+ temp_string += str(subitem * vertex['scale']) + ' '
+ self.writen += f'{temp_string}\n'
+ self.writen += '\n\n'
+ for material in materials:
+ self.writen += f'o {name}_{material["name"]}\n\n'
+ for item in material['polygons']:
+ temp_string = 'f '
+ for subitem in item:
+ temp_list = [
+ str(subitem[0] + self.vertices_offsets['POSITION'] + 1), # POSITION
+ str(subitem[2] + self.vertices_offsets['TEXCOORD'] + 1), # TEXCOORD
+ str(subitem[1] + self.vertices_offsets['NORMAL'] + 1) # NORMAL
+ ]
+
+ temp_string += '/'.join(temp_list) + ' '
+ self.writen += f'{temp_string}\n'
+ self.writen += '\n\n'
+
+
+class Parser:
+ def __init__(self, file_data: str):
+ #
+
+ self.parsed = {'header': {'version': 2,
+ 'frame_rate': 30,
+ 'materials_file': 'sc3d/character_materials.scw'},
+ 'materials': [],
+ 'geometries': [],
+ 'cameras': [],
+ 'nodes': []}
+
+ self.lines = file_data.split('\n')
+
+ #
+ self.position_temp, self.position = [], []
+ self.normals_temp, self.normals = [], []
+ self.texcoord_temp, self.texcoord = [], []
+ #
+
+ self.polygons = []
+
+ #
+
+ self.parse()
+
+ def parse(self):
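+        # Single pass over the OBJ lines: collect v/vn/vt coordinates and triangulated
+        # f indices, then emit a single geometry entry.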
+        geometry_name = "This model doesn't have a name! :( It's VERY SAD!"
+ is_first_name = True
+ for line in self.lines:
+ items = line.split()[1:]
+ if line.startswith('v '): # POSITION
+ for item in items:
+ self.position_temp.append(float(item))
+ elif line.startswith('vn '): # NORMAL
+ for item in items:
+ self.normals_temp.append(float(item))
+ elif line.startswith('vt '): # TEXCOORD
+ if len(items) > 2:
+ items = items[:-1]
+ for item in items:
+ self.texcoord_temp.append(float(item))
+ elif line.startswith('f '):
+ temp_list = []
+ if len(items) > 3:
+ _('It is necessary to triangulate the model')
+ break
+ for item in items:
+ second_temp_list = []
+                    if len(item.split('/')) == 2:
+                        _('Model is missing normals or texture coordinates')
+                        break
+                    elif len(item.split('/')) == 1:
+                        _('Model is missing both normals and texture coordinates')
+                        break
+ for x in item.split('/'):
+ second_temp_list.append(int(x) - 1)
+ temp_list.append([second_temp_list[0], second_temp_list[2], second_temp_list[1]])
+ self.polygons.append(temp_list)
+ elif line.startswith('o '):
+ # if not is_first_name:
+ # position_scale = self.get_vertex_scale(self.position_temp)
+ # normals_scale = self.get_vertex_scale(self.normals_temp)
+ # texcoord_scale = self.get_vertex_scale(self.texcoord_temp)
+ #
+ # self.parsed['geometries'].append({
+ # 'chunk_name': geometry_name,
+ # 'group': '',
+ # 'vertices': [
+ # {'type': 'POSITION', 'index': 0, 'scale': position_scale, 'vertex': self.position},
+ # {'type': 'NORMAL', 'index': 1, 'scale': normals_scale, 'vertex': self.normals},
+ # {'type': 'TEXCOORD', 'index': 2, 'scale': texcoord_scale, 'vertex': self.texcoord}
+ # ],
+ # 'have_bind_matrix': False,
+ # 'materials': [{'chunk_name': 'character_mat', 'polygons': self.polygons}]
+ # })
+ #
+ # #
+ #
+ # #
+ # self.position_temp, self.position = [], []
+ # self.normals_temp, self.normals = [], []
+ # self.texcoord_temp, self.texcoord = [], []
+ # #
+ #
+ # self.polygons = []
+ #
+ # #
+ # geometry_name = line.split('o ')[0]
+                if is_first_name:
+                    geometry_name = line[2:]
+                    is_first_name = False
+
+ position_scale = self.get_vertex_scale(self.position_temp)
+ normals_scale = self.get_vertex_scale(self.normals_temp)
+ texcoord_scale = self.get_vertex_scale(self.texcoord_temp)
+
+ for x in range(0, len(self.position_temp), 3):
+ self.position.append(self.position_temp[x: x + 3])
+
+ for x in range(0, len(self.normals_temp), 3):
+ self.normals.append(self.normals_temp[x: x + 3])
+
+ for x in range(0, len(self.texcoord_temp), 2):
+ self.texcoord.append(self.texcoord_temp[x: x + 2])
+
+ self.parsed['geometries'].append({
+            'name': geometry_name,
+ 'group': '',
+ 'vertices': [
+ {'type': 'POSITION', 'index': 0, 'scale': position_scale, 'vertex': self.position},
+ {'type': 'NORMAL', 'index': 1, 'scale': normals_scale, 'vertex': self.normals},
+ {'type': 'TEXCOORD', 'index': 2, 'scale': texcoord_scale, 'vertex': self.texcoord}
+ ],
+ 'have_bind_matrix': False,
+            'materials': [{'name': 'character_mat', 'polygons': self.polygons}]
+ })
+
+ # TODO: nodes
+
+ @staticmethod
+ def get_vertex_scale(vertex_data: list):
+ vertex_scale = max(max(vertex_data), abs(min(vertex_data)))
+ if vertex_scale < 1:
+ vertex_scale = 1
+ return vertex_scale
diff --git a/3d-converter-0.8.0/models_converter/formats/scw/__init__.py b/3d-converter-0.8.0/models_converter/formats/scw/__init__.py
new file mode 100644
index 0000000..cb85844
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/scw/__init__.py
@@ -0,0 +1,7 @@
+from .writer import Writer
+from .parser import Parser
+
+__all__ = [
+ 'Writer',
+ 'Parser'
+]
diff --git a/3d-converter-0.8.0/models_converter/formats/scw/chunks/__init__.py b/3d-converter-0.8.0/models_converter/formats/scw/chunks/__init__.py
new file mode 100644
index 0000000..b8bd048
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/scw/chunks/__init__.py
@@ -0,0 +1,17 @@
+from .chunk import Chunk
+from .head import HEAD
+from .mate import MATE
+from .geom import GEOM
+from .came import CAME
+from .node import NODE
+from .wend import WEND
+
+__all__ = [
+ 'Chunk',
+ 'HEAD',
+ 'MATE',
+ 'GEOM',
+ 'CAME',
+ 'NODE',
+ 'WEND'
+]
diff --git a/3d-converter-0.8.0/models_converter/formats/scw/chunks/came.py b/3d-converter-0.8.0/models_converter/formats/scw/chunks/came.py
new file mode 100644
index 0000000..eb76959
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/scw/chunks/came.py
@@ -0,0 +1,29 @@
+from . import Chunk
+
+
+class CAME(Chunk):
+ def __init__(self, header=None):
+ super().__init__(header)
+ self.chunk_name = 'CAME'
+
+ def parse(self, buffer: bytes):
+ super().parse(buffer)
+
+ setattr(self, 'name', self.readString())
+ setattr(self, 'v1', self.readFloat())
+ setattr(self, 'xFov', self.readFloat())
+ setattr(self, 'aspectRatio', self.readFloat())
+ setattr(self, 'zNear', self.readFloat())
+ setattr(self, 'zFar', self.readFloat())
+
+ def encode(self):
+ super().encode()
+
+ self.writeString(getattr(self, 'name'))
+ self.writeFloat(getattr(self, 'v1'))
+ self.writeFloat(getattr(self, 'xFov'))
+ self.writeFloat(getattr(self, 'aspectRatio'))
+ self.writeFloat(getattr(self, 'zNear'))
+ self.writeFloat(getattr(self, 'zFar'))
+
+ self.length = len(self.buffer)
diff --git a/3d-converter-0.8.0/models_converter/formats/scw/chunks/chunk.py b/3d-converter-0.8.0/models_converter/formats/scw/chunks/chunk.py
new file mode 100644
index 0000000..4f28584
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/scw/chunks/chunk.py
@@ -0,0 +1,70 @@
+from ....utils.reader import Reader
+from ....utils.writer import Writer
+
+
+class Chunk(Writer, Reader):
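+    # Base class for SCW chunks: combines the binary Reader/Writer helpers with
+    # attribute-based dict (de)serialization.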
+ def __init__(self, header=None):
+ super().__init__()
+
+ if header is None:
+ header = {}
+
+ self.header = header
+ self.chunk_name = ''
+
+ self.buffer = b''
+ self.length = 0
+
+ def from_dict(self, dictionary: dict):
+ if dictionary:
+ for key, value in dictionary.items():
+ setattr(self, key, value)
+
+ def to_dict(self) -> dict:
+ dictionary = {}
+ for key, value in self.__dict__.items():
+ if key in ['header', 'buffer', 'length', 'endian', 'i']:
+ continue
+ if value is not None:
+ attribute_name = key
+ value_type = type(value)
+
+ attribute_value = None
+
+ if value_type is list:
+ attribute_value = []
+ for item in value:
+ item_type = type(item)
+
+ if issubclass(item_type, Chunk):
+ item = item.to_dict()
+ attribute_value.append(item)
+ elif issubclass(value_type, Chunk):
+ attribute_value = value.to_dict()
+ elif attribute_value is None:
+ attribute_value = value
+
+ dictionary[attribute_name] = attribute_value
+ return dictionary
+
+ def __getitem__(self, key):
+ if hasattr(self, key):
+ return getattr(self, key)
+ else:
+ raise IndexError('The object has no attribute named ' + key)
+
+ def __repr__(self) -> str:
+ return f'<{self.__class__.__name__} ({self.to_dict()})>'
+
+ def set(self, key: str, value):
+ setattr(self, key, value)
+
+ def parse(self, buffer: bytes):
+ Reader.__init__(self, buffer, 'big')
+
+ def encode(self):
+ Writer.__init__(self, 'big')
+
+ self.length = len(self.buffer)
+
+ get = __getitem__
diff --git a/3d-converter-0.8.0/models_converter/formats/scw/chunks/geom.py b/3d-converter-0.8.0/models_converter/formats/scw/chunks/geom.py
new file mode 100644
index 0000000..ffa2c5d
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/scw/chunks/geom.py
@@ -0,0 +1,268 @@
+from . import Chunk
+
+
+class GEOM(Chunk):
+ def __init__(self, header: dict):
+ super().__init__(header)
+ self.chunk_name = 'GEOM'
+
+ def parse(self, buffer: bytes):
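+        # GEOM chunk layout: name, group, a 4x4 matrix when version < 2, then vertex
+        # streams, skin data and materials.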
+ super().parse(buffer)
+
+ setattr(self, 'name', self.readString())
+ setattr(self, 'group', self.readString())
+ if self.header['version'] < 2:
+ matrix = []
+ for x in range(4):
+ temp_list = []
+ for x1 in range(4):
+ temp_list.append(self.readFloat())
+ matrix.append(temp_list)
+
+ self.parse_vertices()
+ self.parse_skin()
+ self.parse_materials()
+
+ def parse_vertices(self):
+ vertices = []
+ inputs = []
+
+        vertex_streams_count = self.readUByte()  # number of vertex streams (POSITION, NORMAL, TEXCOORD, ...)
+        for x in range(vertex_streams_count):
+ vertex = []
+ vertex_type = self.readString()
+ vertex_index = self.readUByte()
+ self.readUByte() # sub_index
+ vertex_stride = self.readUByte()
+ vertex_scale = self.readFloat()
+ vertex_count = self.readUInt32()
+
+ if vertex_type == 'VERTEX':
+ vertex_type = 'POSITION'
+
+ for x1 in range(vertex_count):
+ coordinates_massive = []
+ for x2 in range(vertex_stride):
+ coordinate = self.readShort()
+ coordinates_massive.append(coordinate / 32512)
+ if vertex_type == 'TEXCOORD':
+ coordinates_massive[1::2] = [1 - x for x in coordinates_massive[1::2]]
+ vertex.append(coordinates_massive)
+
+ inputs.append({
+ 'type': vertex_type,
+ 'offset': vertex_index,
+ 'name': f'{vertex_type.lower()}_0'
+ })
+
+ vertices.append({
+ 'name': f'{vertex_type.lower()}_0',
+ 'type': vertex_type,
+ 'index': vertex_index,
+ 'scale': vertex_scale,
+ 'vertex': vertex
+ })
+ setattr(self, 'inputs', inputs)
+ setattr(self, 'vertices', vertices)
+
+ def parse_skin(self):
+ bind_matrix = []
+
+ setattr(self, 'have_bind_matrix', self.readBool())
+ if getattr(self, 'have_bind_matrix'):
+ for x in range(16):
+ bind_matrix.append(self.readFloat())
+
+ setattr(self, 'bind_matrix', bind_matrix)
+
+ self.parse_joints()
+ self.parse_weights()
+
+ def parse_joints(self):
+ joints = []
+
+ joint_counts = self.readUByte()
+ for x in range(joint_counts):
+ joint_matrix = []
+ joint_name = self.readString()
+ for x1 in range(16):
+ joint_matrix.append(self.readFloat())
+ joints.append({'name': joint_name, 'matrix': joint_matrix})
+
+ setattr(self, 'joints', joints)
+
+ def parse_weights(self):
+ vertex_weights = []
+ weights = []
+ vcount = []
+
+ vertex_weights_count = self.readUInt32()
+ for x in range(vertex_weights_count):
+ vcount.append(0)
+ joint_a = self.readUByte()
+ joint_b = self.readUByte()
+ joint_c = self.readUByte()
+ joint_d = self.readUByte()
+ weight_a = self.readUShort()
+ weight_b = self.readUShort()
+ weight_c = self.readUShort()
+ weight_d = self.readUShort()
+ temp_list = [
+ [joint_a, weight_a],
+ [joint_b, weight_b],
+ [joint_c, weight_c],
+ [joint_d, weight_d]
+ ]
+ for pair in temp_list:
+ if pair[1] != 0:
+ vcount[x] += 1
+ vertex_weights.append(pair[0])
+ if pair[1] / 65535 not in weights:
+ weights.append(pair[1] / 65535)
+ vertex_weights.append(weights.index(pair[1] / 65535))
+
+ setattr(self, 'weights',
+ {
+ 'vertex_weights': vertex_weights,
+ 'weights': weights,
+ 'vcount': vcount
+ })
+
+ def parse_materials(self):
+ materials = []
+
+ materials_count = self.readUByte()
+ for x in range(materials_count):
+ polygons = []
+ material_name = self.readString()
+ self.readString()
+ polygons_count = self.readUShort()
+ inputs_count = self.readUByte()
+ vertex_id_length = self.readUByte()
+ for x1 in range(polygons_count):
+ temp_list = []
+ for x2 in range(3):
+ second_temp_list = []
+ for x3 in range(inputs_count):
+ second_temp_list.append(self.readUInteger(vertex_id_length))
+ temp_list.append(second_temp_list)
+ polygons.append(temp_list)
+ materials.append({
+ 'name': material_name,
+ 'inputs': getattr(self, 'inputs'),
+ 'polygons': polygons
+ })
+
+ setattr(self, 'materials', materials)
+
+ def encode(self):
+ super().encode()
+
+ self.writeString(self.get('name'))
+ self.writeString(self.get('group'))
+
+ self.encode_vertices(self.get('vertices'))
+
+ self.encode_skin()
+
+ self.encode_materials()
+
+ self.length = len(self.buffer)
+
+ def encode_vertices(self, vertices: list):
+ self.writeUByte(len(vertices))
+ for vertex in vertices:
+ self.writeString(vertex['type'])
+ self.writeUByte(vertex['index'])
+ self.writeUByte(0) # sub_index
+ self.writeUByte(len(vertex['vertex'][0]))
+ self.writeFloat(vertex['scale'])
+ self.writeUInt32(len(vertex['vertex']))
+ for coordinates_massive in vertex['vertex']:
+ if vertex['type'] == 'TEXCOORD':
+ coordinates_massive[1::2] = [1 - x for x in coordinates_massive[1::2]]
+ for coordinate in coordinates_massive:
+ # coordinate /= vertex['scale']
+ coordinate *= 32512
+ self.writeShort(round(coordinate))
+
+ def encode_skin(self):
+ self.writeBool(self.get('have_bind_matrix'))
+ if self.get('have_bind_matrix'):
+ for x in self.get('bind_matrix'):
+ self.writeFloat(x)
+
+ self.encode_joints()
+
+ self.encode_weight()
+
+ def encode_joints(self):
+ if self.get('have_bind_matrix'):
+ self.writeUByte(len(self.get('joints')))
+
+ for joint in self.get('joints'):
+ self.writeString(joint['name'])
+ for x in joint['matrix']:
+ self.writeFloat(x)
+ else:
+ self.writeUByte(0)
+
+ def encode_weight(self):
+ if self.get('have_bind_matrix'):
+ self.writeUInt32(len(self.get('weights')['vcount']))
+ past_index = 0
+ for vcount in self.get('weights')['vcount']:
+ temp_list = []
+ for x in range(vcount):
+ vertex_weights_index = x * 2 + past_index * 2
+ joint_id = self.get('weights')['vertex_weights'][vertex_weights_index]
+ weight_id = self.get('weights')['vertex_weights'][vertex_weights_index + 1]
+
+ weight = self.get('weights')['weights'][weight_id]
+
+ if weight > 1:
+ weight = 1
+ elif weight < 0:
+ weight = 0
+
+ weight = int(weight * 65535)
+
+ temp_list.append([joint_id, weight])
+ past_index += vcount
+ while len(temp_list) < 4:
+ temp_list.append([0, 0])
+ for x in temp_list:
+ self.writeUByte(x[0])
+ for x in temp_list:
+ self.writeUShort(x[1])
+ else:
+ self.writeUInt32(0)
+
+ def encode_materials(self):
+ self.writeUByte(len(self.get('materials')))
+ for material in self.get('materials'):
+ self.writeString(material['name'])
+ self.writeString('')
+ self.writeUShort(len(material['polygons']))
+
+ # Calculate settings
+ inputs_count = len(material['polygons'][0][0])
+
+ maximal_value = 0
+ for points in material['polygons']:
+ for point in points:
+ for vertex in point:
+ if vertex > maximal_value:
+ maximal_value = vertex
+
+ short_length = 1 if maximal_value <= 255 else 2
+
+ # Write Settings
+ self.writeUByte(inputs_count)
+ self.writeUByte(short_length)
+
+ # Write Polygons
+ for points in material['polygons']:
+ for point in points:
+ for vertex in point:
+ self.writeUInteger(vertex, short_length)
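GEOM stores each vertex stream as signed 16-bit values that parse_vertices rescales by 32512 (TEXCOORD V components are additionally flipped as 1 - v), and encode_vertices applies the inverse. A standalone sketch of that quantisation round trip:

```python
# Quantisation used by GEOM above: coordinates are stored as signed 16-bit
# values and divided by 32512 on read, so a round trip loses at most half
# a quantisation step (0.5 / 32512).
def quantise(value: float) -> int:
    return round(value * 32512)


def dequantise(stored: int) -> float:
    return stored / 32512


original = 0.123456
stored = quantise(original)
restored = dequantise(stored)
print(stored, restored, abs(original - restored) <= 0.5 / 32512)  # 4014 0.12346... True
```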
diff --git a/3d-converter-0.8.0/models_converter/formats/scw/chunks/head.py b/3d-converter-0.8.0/models_converter/formats/scw/chunks/head.py
new file mode 100644
index 0000000..2b17bbe
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/scw/chunks/head.py
@@ -0,0 +1,30 @@
+from . import Chunk
+
+
+class HEAD(Chunk):
+ def __init__(self, header=None):
+ super().__init__(header)
+ self.chunk_name = 'HEAD'
+
+ def parse(self, buffer: bytes):
+ super().parse(buffer)
+
+ setattr(self, 'version', self.readUShort())
+ setattr(self, 'frame_rate', self.readUShort())
+ setattr(self, 'v1', self.readUShort())
+ setattr(self, 'v2', self.readUShort())
+ setattr(self, 'materials_file', self.readString())
+ if self.get('version') == 2:
+ setattr(self, 'v3', self.readUByte())
+
+ def encode(self):
+ super().encode()
+
+ self.writeUShort(2) # getattr(self, 'version')
+ self.writeUShort(getattr(self, 'frame_rate'))
+ self.writeUShort(0) # getattr(self, 'v1')
+ self.writeUShort(249) # getattr(self, 'v2')
+ self.writeString(getattr(self, 'materials_file'))
+ self.writeUByte(0) # getattr(self, 'v3')
+
+ self.length = len(self.buffer)
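The HEAD payload is four big-endian uint16 fields, a length-prefixed materials path and, for version 2, one extra byte. A sketch that builds such a payload by hand (struct is used only for illustration) and feeds it to the parser above, assuming the package from this diff is importable:

```python
import struct

from models_converter.formats.scw.chunks import HEAD

materials_file = b'sc3d/character_materials.scw'
payload = struct.pack('>HHHH', 2, 30, 0, 249)               # version, frame_rate, v1, v2
payload += struct.pack('>H', len(materials_file)) + materials_file
payload += struct.pack('>B', 0)                             # v3, present only for version 2

head = HEAD()
head.parse(payload)
print(head.to_dict())   # chunk_name, version, frame_rate, v1, v2, materials_file, v3
```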
diff --git a/3d-converter-0.8.0/models_converter/formats/scw/chunks/mate.py b/3d-converter-0.8.0/models_converter/formats/scw/chunks/mate.py
new file mode 100644
index 0000000..a508caa
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/scw/chunks/mate.py
@@ -0,0 +1,174 @@
+from . import Chunk
+
+
+class MATE(Chunk):
+ def __init__(self, header: dict):
+ super().__init__(header)
+ self.chunk_name = 'MATE'
+
+ def parse(self, buffer: bytes):
+ super().parse(buffer)
+
+ setattr(self, 'name', self.readString())
+ setattr(self, 'shader', self.readString())
+ setattr(self, 'v1', self.readUByte())
+ setattr(self, 'v2', self.readUByte())
+
+ effect = {}
+ a = self.readUByte()
+ r = self.readUByte()
+ g = self.readUByte()
+ b = self.readUByte()
+ ambient_color = (r, g, b, a)
+ effect['ambient'] = ambient_color
+
+ use_diffuse_tex = self.readBool()
+ if use_diffuse_tex:
+ diffuse_tex = self.readString()
+ effect['diffuse'] = diffuse_tex
+ else:
+ a = self.readUByte()
+ r = self.readUByte()
+ g = self.readUByte()
+ b = self.readUByte()
+ diffuse_color = (r, g, b, a)
+ effect['diffuse'] = diffuse_color
+
+ use_specular_tex = self.readBool()
+ if use_specular_tex:
+ specular_tex = self.readString()
+ effect['specular'] = specular_tex
+ else:
+ a = self.readUByte()
+ r = self.readUByte()
+ g = self.readUByte()
+ b = self.readUByte()
+ specular_color = (r, g, b, a)
+ effect['specular'] = specular_color
+
+ setattr(self, 'v3', self.readString())
+ setattr(self, 'v4', self.readString())
+
+ use_colorize_tex = self.readBool()
+ if use_colorize_tex:
+ colorize_tex = self.readString()
+ effect['colorize'] = colorize_tex
+ else:
+ a = self.readUByte()
+ r = self.readUByte()
+ g = self.readUByte()
+ b = self.readUByte()
+ colorize_color = (r, g, b, a)
+ effect['colorize'] = colorize_color
+
+ use_emission_tex = self.readBool()
+ if use_emission_tex:
+ emission_tex = self.readString()
+ effect['emission'] = emission_tex
+ else:
+ a = self.readUByte()
+ r = self.readUByte()
+ g = self.readUByte()
+ b = self.readUByte()
+ emission_color = (r, g, b, a)
+ effect['emission'] = emission_color
+
+ setattr(self, 'opacity_texture', self.readString())
+ setattr(self, 'v5', self.readFloat())
+ setattr(self, 'v6', self.readFloat())
+
+ effect['lightmaps'] = {
+ 'diffuse': self.readString(),
+ 'specular': self.readString()
+ }
+
+ if self.header['version'] == 2:
+ setattr(self, 'v7', self.readString())
+
+ shader_define_flags = self.readUInt32()
+ effect['shader_define_flags'] = shader_define_flags
+
+ if shader_define_flags & 32768:
+ self.readFloat()
+ self.readFloat()
+ self.readFloat()
+ self.readFloat()
+
+ setattr(self, 'effect', effect)
+
+ def encode(self):
+ super().encode()
+
+ self.writeString(getattr(self, 'name'))
+ self.writeString(getattr(self, 'shader'))
+ self.writeUByte(4) # getattr(self, 'v1')
+ self.writeUByte(0) # getattr(self, 'v2')
+
+ effect = getattr(self, 'effect')
+ r, g, b, a = effect['ambient']
+ self.writeUByte(a)
+ self.writeUByte(r)
+ self.writeUByte(g)
+ self.writeUByte(b)
+
+ use_diffuse_tex = type(effect['diffuse']) is str
+ self.writeBool(use_diffuse_tex)
+ if use_diffuse_tex:
+ self.writeString(effect['diffuse'])
+ else:
+ r, g, b, a = effect['diffuse']
+ self.writeUByte(a)
+ self.writeUByte(r)
+ self.writeUByte(g)
+ self.writeUByte(b)
+
+ use_specular_tex = type(effect['specular']) is str
+ self.writeBool(use_specular_tex)
+ if use_specular_tex:
+ self.writeString(effect['specular'])
+ else:
+ r, g, b, a = effect['specular']
+ self.writeUByte(a)
+ self.writeUByte(r)
+ self.writeUByte(g)
+ self.writeUByte(b)
+
+ self.writeString('.') # getattr(self, 'v3')
+ self.writeString('') # getattr(self, 'v4')
+
+ use_colorize_tex = type(effect['colorize']) is str
+ self.writeBool(use_colorize_tex)
+ if use_colorize_tex:
+ self.writeString(effect['colorize'])
+ else:
+ r, g, b, a = effect['colorize']
+ self.writeUByte(a)
+ self.writeUByte(r)
+ self.writeUByte(g)
+ self.writeUByte(b)
+
+ use_emission_tex = type(effect['emission']) is str
+ self.writeBool(use_emission_tex)
+ if use_emission_tex:
+ self.writeString(effect['emission'])
+ else:
+ r, g, b, a = effect['emission']
+ self.writeUByte(a)
+ self.writeUByte(r)
+ self.writeUByte(g)
+ self.writeUByte(b)
+
+ self.writeString(getattr(self, 'opacity_texture'))
+ self.writeFloat(1) # getattr(self, 'v5')
+ self.writeFloat(0) # getattr(self, 'v6')
+
+ self.writeString(effect['lightmaps']['diffuse'])
+ self.writeString(effect['lightmaps']['specular'])
+
+ if self.header['version'] == 2:
+ self.writeString(getattr(self, 'v7'))
+
+ self.writeUInt32(effect['shader_define_flags'])
+
+ self.length = len(self.buffer)
+
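MATE.encode expects its attributes to mirror what parse produces: name, shader, opacity_texture and (for version 2 headers) v7 strings, plus an effect dictionary whose texture slots hold either an RGBA tuple or a texture path string, as the type checks in encode() show. A sketch of such a material, assuming the package from this diff is importable; the concrete values are illustrative (shader/uber.vsh and shader_define_flags 3014 are the defaults used by the DAE parser later in this diff):

```python
from models_converter.formats.scw.chunks import MATE

material = {
    'name': 'body_material',
    'shader': 'shader/uber.vsh',
    'opacity_texture': '',
    'v7': '',                              # only written when the header version is 2
    'effect': {
        'ambient': (0, 0, 0, 255),         # stored as ARGB bytes on disk
        'diffuse': 'body_diffuse.png',     # str -> written as a texture reference
        'specular': (0, 0, 0, 255),        # tuple -> written as a colour
        'colorize': (255, 255, 255, 255),
        'emission': (0, 0, 0, 255),
        'lightmaps': {
            'diffuse': 'sc3d/diffuse_lightmap.png',
            'specular': 'sc3d/specular_lightmap.png',
        },
        'shader_define_flags': 3014,
    },
}

mate = MATE({'version': 2})
mate.from_dict(material)
mate.encode()
print(len(mate.buffer), 'bytes of MATE payload')
```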
diff --git a/3d-converter-0.8.0/models_converter/formats/scw/chunks/node.py b/3d-converter-0.8.0/models_converter/formats/scw/chunks/node.py
new file mode 100644
index 0000000..b76ae33
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/scw/chunks/node.py
@@ -0,0 +1,145 @@
+from . import Chunk
+
+
+class NODE(Chunk):
+ def __init__(self, header: dict):
+ super().__init__(header)
+ self.chunk_name = 'NODE'
+
+ def parse(self, buffer: bytes):
+ super().parse(buffer)
+ nodes = []
+
+ nodes_count = self.readUShort()
+ for node in range(nodes_count):
+ node_data = {
+ 'name': self.readString(),
+ 'parent': self.readString()
+ }
+
+ instances_count = self.readUShort()
+ node_data['instances'] = [{} for _ in range(instances_count)]
+ for x in range(instances_count):
+ instance_type = self.readChar(4)
+ instance_name = self.readString()
+
+ node_data['instances'][x] = {}
+ if instance_type in ['GEOM', 'CONT']:
+ materials_count = self.readUShort()
+ binds = []
+ for bind in range(materials_count):
+ binds.append({})
+ symbol = self.readString()
+ target = self.readString()
+ binds[bind] = {'symbol': symbol,
+ 'target': target}
+ node_data['instances'][x]['binds'] = binds
+ elif instance_type in ['CAME']:
+ target = self.readString()
+ node_data['instances'][x]['target'] = target
+ node_data['instances'][x]['instance_name'] = instance_name
+ node_data['instances'][x]['instance_type'] = instance_type
+
+ frames_count = self.readUShort()
+ node_data['frames'] = []
+ if frames_count > 0:
+ rotation = {'x': 0, 'y': 0, 'z': 0, 'w': 0}
+ scale_x, scale_y, scale_z = 0, 0, 0
+ pos_x, pos_y, pos_z = 0, 0, 0
+
+ settings = list(bin(self.readUByte())[2:].zfill(8))
+ settings = [bool(int(value)) for value in settings]
+ node_data['frames_settings'] = settings
+ for frame in range(frames_count):
+ frame_data = {
+ 'frame_id': self.readUShort()
+ }
+
+ if settings[7] or frame == 0: # Rotation
+ rotation = {
+ 'x': self.readNShort(),
+ 'y': self.readNShort(),
+ 'z': self.readNShort(),
+ 'w': self.readNShort()
+ }
+
+ if settings[4] or frame == 0: # Position X
+ pos_x = self.readFloat()
+ if settings[5] or frame == 0: # Position Y
+ pos_y = self.readFloat()
+ if settings[6] or frame == 0: # Position Z
+ pos_z = self.readFloat()
+
+ if settings[1] or frame == 0: # Scale X
+ scale_x = self.readFloat()
+ if settings[2] or frame == 0: # Scale Y
+ scale_y = self.readFloat()
+ if settings[3] or frame == 0: # Scale Z
+ scale_z = self.readFloat()
+
+ frame_data['rotation'] = rotation
+ frame_data['position'] = {
+ 'x': pos_x,
+ 'y': pos_y,
+ 'z': pos_z
+ }
+ frame_data['scale'] = {
+ 'x': scale_x,
+ 'y': scale_y,
+ 'z': scale_z
+ }
+
+ node_data['frames'].append(frame_data)
+ nodes.append(node_data)
+ setattr(self, 'nodes', nodes)
+
+ def encode(self):
+ super().encode()
+
+ self.writeUShort(len(self.get('nodes')))
+ for node in self.get('nodes'):
+ self.writeString(node['name'])
+ self.writeString(node['parent'])
+
+ self.writeUShort(len(node['instances']))
+ for instance in node['instances']:
+ self.writeChar(instance['instance_type'])
+ self.writeString(instance['instance_name'])
+ self.writeUShort(len(instance['binds']))
+ for bind in instance['binds']:
+ self.writeString(bind['symbol'])
+ self.writeString(bind['target'])
+
+ if 'frames_settings' in node:
+ frames_settings = node['frames_settings']
+ else:
+ frames_settings = None
+ self.encode_frames(node['frames'], frames_settings)
+
+ self.length = len(self.buffer)
+
+ def encode_frames(self, frames, frames_settings):
+ self.writeUShort(len(frames))
+ if len(frames) > 0:
+ self.writeUByte(int(''.join('1' if item else '0' for item in frames_settings), 2))
+ for frame in frames:
+ self.writeUShort(frame['frame_id'])
+ if frames_settings[7] or frames.index(frame) == 0: # Rotation
+ self.writeNShort(frame['rotation']['x'])
+ self.writeNShort(frame['rotation']['y'])
+ self.writeNShort(frame['rotation']['z'])
+ self.writeNShort(frame['rotation']['w'])
+
+ if frames_settings[4] or frames.index(frame) == 0: # Position X
+ self.writeFloat(frame['position']['x'])
+ if frames_settings[5] or frames.index(frame) == 0: # Position Y
+ self.writeFloat(frame['position']['y'])
+ if frames_settings[6] or frames.index(frame) == 0: # Position Z
+ self.writeFloat(frame['position']['z'])
+
+ if frames_settings[1] or frames.index(frame) == 0: # Scale X
+ self.writeFloat(frame['scale']['x'])
+ if frames_settings[2] or frames.index(frame) == 0: # Scale Y
+ self.writeFloat(frame['scale']['y'])
+ if frames_settings[3] or frames.index(frame) == 0: # Scale Z
+ self.writeFloat(frame['scale']['z'])
diff --git a/3d-converter-0.8.0/models_converter/formats/scw/chunks/wend.py b/3d-converter-0.8.0/models_converter/formats/scw/chunks/wend.py
new file mode 100644
index 0000000..0252602
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/scw/chunks/wend.py
@@ -0,0 +1,15 @@
+from . import Chunk
+
+
+class WEND(Chunk):
+ def __init__(self, header=None):
+ super().__init__(header)
+ self.chunk_name = 'WEND'
+
+ def parse(self, buffer: bytes):
+ super().parse(buffer)
+
+ def encode(self):
+ super().encode()
+
+ self.length = len(self.buffer)
diff --git a/3d-converter-0.8.0/models_converter/formats/scw/parser.py b/3d-converter-0.8.0/models_converter/formats/scw/parser.py
new file mode 100644
index 0000000..df8649f
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/scw/parser.py
@@ -0,0 +1,66 @@
+from ...utils.reader import Reader
+from .chunks import *
+
+
+class Parser(Reader):
+ def __init__(self, file_data: bytes):
+ super().__init__(file_data)
+ self.file_data = file_data
+ self.parsed = {
+ 'header': {},
+ 'materials': [],
+ 'geometries': [],
+ 'cameras': [],
+ 'nodes': []
+ }
+ self.chunks = []
+
+ file_magic = self.read(4)
+ if file_magic != b'SC3D':
+ raise TypeError('File Magic isn\'t "SC3D"')
+
+ def split_chunks(self):
+ # len(Chunk Length) + len(Chunk Name) + len(Chunk CRC)
+ while len(self.file_data[self.tell():]) >= 12:
+ chunk_length = self.readUInt32()
+ chunk_name = self.readChar(4)
+ chunk_data = self.read(chunk_length)
+ chunk_crc = self.readUInt32()
+
+ self.chunks.append({
+ 'chunk_name': chunk_name,
+ 'data': chunk_data,
+ 'crc': chunk_crc
+ })
+
+ def parse(self):
+ for chunk in self.chunks:
+ chunk_name = chunk['chunk_name']
+ chunk_data = chunk['data']
+
+ if chunk_name == 'HEAD':
+ head = HEAD()
+ head.parse(chunk_data)
+
+ self.parsed['header'] = head.to_dict()
+ elif chunk_name == 'MATE':
+ mate = MATE(self.parsed['header'])
+ mate.parse(chunk_data)
+ self.parsed['materials'].append(mate.to_dict())
+ elif chunk_name == 'GEOM':
+ geom = GEOM(self.parsed['header'])
+ geom.parse(chunk_data)
+ self.parsed['geometries'].append(geom.to_dict())
+ elif chunk_name == 'CAME':
+ came = CAME(self.parsed['header'])
+ came.parse(chunk_data)
+ self.parsed['cameras'].append(came.to_dict())
+ elif chunk_name == 'NODE':
+ node = NODE(self.parsed['header'])
+ node.parse(chunk_data)
+ self.parsed['nodes'] = node.to_dict()['nodes']
+ elif chunk_name == 'WEND':
+ wend = WEND()
+ wend.parse(chunk_data)
+ else:
+ raise TypeError(f'Unknown chunk: {chunk_name}')
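Parser checks the SC3D magic, then split_chunks walks the stream of length/name/payload/CRC frames and parse dispatches each record to the matching chunk class. A usage sketch, assuming the package from this diff is importable (the input path is hypothetical):

```python
from models_converter.formats.scw.parser import Parser

with open('model.scw', 'rb') as scw_file:      # hypothetical input file
    file_data = scw_file.read()

parser = Parser(file_data)    # raises TypeError unless the data starts with b'SC3D'
parser.split_chunks()         # slice the stream into name/data/crc records
parser.parse()                # dispatch each record to HEAD/MATE/GEOM/CAME/NODE/WEND

print(parser.parsed['header'])
print(len(parser.parsed['geometries']), 'geometries,',
      len(parser.parsed['materials']), 'materials')
```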
diff --git a/3d-converter-0.8.0/models_converter/formats/scw/writer.py b/3d-converter-0.8.0/models_converter/formats/scw/writer.py
new file mode 100644
index 0000000..b55efb2
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/formats/scw/writer.py
@@ -0,0 +1,51 @@
+import binascii
+
+from .chunks import *
+
+
+class Writer:
+ def __init__(self):
+ self.writen = b'SC3D'
+
+ def write(self, data: dict):
+
+ header = data['header']
+ head = HEAD()
+ head.from_dict(header)
+
+ self.write_chunk(head)
+
+ # TODO: materials
+ for material in data['materials']:
+ mate = MATE(header)
+ mate.from_dict(material)
+
+ self.write_chunk(mate)
+
+ for geometry in data['geometries']:
+ geom = GEOM(header)
+ geom.from_dict(geometry)
+
+ self.write_chunk(geom)
+
+ # TODO: cameras
+ for camera in data['cameras']:
+ came = CAME(header)
+ came.from_dict(camera)
+
+ self.write_chunk(came)
+
+ node = NODE(header)
+ node.from_dict({'nodes': data['nodes']})
+
+ self.write_chunk(node)
+
+ wend = WEND()
+
+ self.write_chunk(wend)
+
+ def write_chunk(self, chunk: Chunk):
+ chunk.encode()
+
+ self.writen += chunk.length.to_bytes(4, 'big') + chunk.chunk_name.encode('utf-8') + chunk.buffer
+ self.writen += binascii.crc32(chunk.chunk_name.encode('utf-8') + chunk.buffer).to_bytes(4, 'big')
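Writer performs the reverse trip: each chunk is encoded, framed as a 4-byte big-endian length plus the 4-byte chunk name and payload, and followed by a CRC32 of name and payload, which is exactly the framing split_chunks in parser.py expects. A round-trip sketch, assuming the package from this diff is importable and the dictionary comes from the Parser above (the file paths are hypothetical):

```python
from models_converter.formats.scw.parser import Parser
from models_converter.formats.scw.writer import Writer

with open('model.scw', 'rb') as scw_file:      # hypothetical input file
    parsed = Parser(scw_file.read())
parsed.split_chunks()
parsed.parse()

writer = Writer()
writer.write(parsed.parsed)                    # consumes the parsed dictionary

with open('model.repacked.scw', 'wb') as out_file:
    out_file.write(writer.writen)              # serialised bytes, b'SC3D' header first
```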
diff --git a/3d-converter-0.8.0/models_converter/utils/__init__.py b/3d-converter-0.8.0/models_converter/utils/__init__.py
new file mode 100644
index 0000000..87b30e1
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/utils/__init__.py
@@ -0,0 +1,5 @@
+__all__ = [
+ 'reader',
+ 'writer',
+ 'matrix'
+]
diff --git a/3d-converter-0.8.0/models_converter/utils/reader.py b/3d-converter-0.8.0/models_converter/utils/reader.py
new file mode 100644
index 0000000..ddf0668
--- /dev/null
+++ b/3d-converter-0.8.0/models_converter/utils/reader.py
@@ -0,0 +1,119 @@
+class Reader:
+ def __init__(self, buffer: bytes, endian: str = 'big'):
+ self.buffer = buffer
+ self.endian = endian
+ self.i = 0
+
+ def read(self, length: int = 1) -> bytes:
+ result = self.buffer[self.i:self.i + length]
+ self.i += length
+
+ return result
+
+ def readUInteger(self, length: int = 1) -> int:
+ result = 0
+ for x in range(length):
+ byte = self.buffer[self.i]
+
+ bit_padding = x * 8
+ if self.endian == 'big':
+ bit_padding = (8 * (length - 1)) - bit_padding
+
+ result |= byte << bit_padding
+ self.i += 1
+
+ return result
+
+ def readInteger(self, length: int = 1) -> int:
+ integer = self.readUInteger(length)
+ result = integer
+ if integer >= 2 ** (length * 8) // 2:
+ result -= 2 ** (length * 8)
+ return result
+
+ def readUInt64(self) -> int:
+ return self.readUInteger(8)
+
+ def readInt64(self) -> int:
+ return self.readInteger(8)
+
+ def readFloat(self) -> float:
+ as_int = self.readUInt32()
+ binary = bin(as_int)
+ binary = binary[2:].zfill(32)
+
+ sign = -1 if binary[0] == '1' else 1
+ exponent = int(binary[1:9], 2) - 127
+ mantissa_base = binary[9:]
+ mantissa_bin = '1' + mantissa_base
+ mantissa = 0
+ val = 1
+
+ if exponent == -127:
+ if '1' not in mantissa_base:
+ return 0
+ else:
+ exponent = -126
+ mantissa_bin = '0' + mantissa_base
+
+ for char in mantissa_bin:
+ mantissa += val * int(char)
+ val = val / 2
+
+ result = sign * 2 ** exponent * mantissa
+ return result
+
+ def readUInt32(self) -> int:
+ return self.readUInteger(4)
+
+ def readInt32(self) -> int:
+ return self.readInteger(4)
+
+ def readNUInt16(self) -> float:
+ return self.readUInt16() / 65535
+
+ def readUInt16(self) -> int:
+ return self.readUInteger(2)
+
+ def readNInt16(self) -> float:
+ return self.readInt16() / 32512
+
+ def readInt16(self) -> int:
+ return self.readInteger(2)
+
+ def readUInt8(self) -> int:
+ return self.readUInteger()
+
+ def readInt8(self) -> int:
+ return self.readInteger()
+
+ def readBool(self) -> bool:
+ if self.readUInt8() >= 1:
+ return True
+ else:
+ return False
+
+ readUInt = readUInteger
+ readInt = readInteger
+
+ readULong = readUInt64
+ readLong = readInt64
+
+ readNUShort = readNUInt16
+ readNShort = readNInt16
+
+ readUShort = readUInt16
+ readShort = readInt16
+
+ readUByte = readUInt8
+ readByte = readInt8
+
+ def readChar(self, length: int = 1) -> str:
+ return self.read(length).decode('utf-8')
+
+ def readString(self) -> str:
+ length = self.readUShort()
+ return self.readChar(length)
+
+ def tell(self) -> int:
+ return self.i
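Reader is a small binary cursor, big-endian by default: fixed-width integers, IEEE-754 floats, booleans, length-prefixed strings and the normalised 16-bit reads used by the SCW chunks. A quick sketch exercising it on a hand-built buffer, assuming the package from this diff is importable:

```python
import struct

from models_converter.utils.reader import Reader

buffer = struct.pack('>H', 513)                    # readUShort -> 513
buffer += struct.pack('>H', 5) + b'hello'          # readString -> 'hello'
buffer += struct.pack('>h', 16256)                 # readNShort -> 16256 / 32512 = 0.5

reader = Reader(buffer)                            # endian defaults to 'big'
print(reader.readUShort())                         # 513
print(reader.readString())                         # 'hello'
print(reader.readNShort())                         # 0.5
print(reader.tell(), 'of', len(buffer), 'bytes consumed')
```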
diff --git a/3d-converter-0.8.0/setup.py b/3d-converter-0.8.0/setup.py
new file mode 100644
index 0000000..9db5ce4
--- /dev/null
+++ b/3d-converter-0.8.0/setup.py
@@ -0,0 +1,24 @@
+import setuptools
+
+with open('README.md') as fh:
+ long_description = fh.read()
+
+
+setuptools.setup(
+ name='3d-converter',
+ version='0.8.0',
+ author='Vorono4ka',
+ author_email='crowo4ka@gmail.com',
+ description='Python 3D Models Converter',
+ long_description=long_description,
+ long_description_content_type='text/markdown',
+ url='https://github.com/vorono4ka/3d-converter',
+ license='GPLv3',
+ packages=setuptools.find_packages(),
+ classifiers=[
+ 'Programming Language :: Python :: 3',
+ 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
+ 'Operating System :: OS Independent',
+ ],
+ python_requires='>=3.7',
+)
diff --git a/3d_converter.egg-info/PKG-INFO b/3d_converter.egg-info/PKG-INFO
index e15a475..8ac1aac 100644
--- a/3d_converter.egg-info/PKG-INFO
+++ b/3d_converter.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: 3d-converter
-Version: 0.7.8
+Version: 0.8.1
Summary: Python 3D Models Converter
Home-page: https://github.com/vorono4ka/3d-converter
Author: Vorono4ka
@@ -8,7 +8,7 @@ Author-email: crowo4ka@gmail.com
License: GPLv3
Description: ## `Python 3D Models Converter`
- **Version**: 0.7.8
+ **Version**: 0.8.1
### Thanks a lot for motivating [AMIRMISTIK]!
diff --git a/README.md b/README.md
index e845f93..6f2fcc1 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
## `Python 3D Models Converter`
-**Version**: 0.7.8
+**Version**: 0.8.1
### Thanks a lot for motivating [AMIRMISTIK]!
diff --git a/models_converter/formats/dae.py b/models_converter/formats/dae.py
index 3167c23..1792769 100644
--- a/models_converter/formats/dae.py
+++ b/models_converter/formats/dae.py
@@ -458,7 +458,7 @@ def node(self, nodes):
instance_data['instance_name'] = controller_url[1:]
instance_data['binds'] = binds
- node_data['instances'][len(node_data['instances'])-1] = instance_data
+ node_data['instances'].append(instance_data)
matrix = node.findall('collada:matrix', self.namespaces)
if matrix:
@@ -509,6 +509,7 @@ def fix_nodes_list(self, nodes, parent: str = ''):
def __init__(self, file_data):
self.parsed = {'header': {'version': 2,
'frame_rate': 30,
+ 'last_frame': 0,
'materials_file': 'sc3d/character_materials.scw'},
'materials': [],
'geometries': [],
@@ -548,54 +549,55 @@ def parse(self):
effect = self.library_effects.find(f'collada:effect[@id="{effect_url}"]', self.namespaces)
if effect is not None:
- profile = None
- for item in effect:
- if 'profile' in item.tag:
- profile = item
- technique = profile.find('collada:technique', self.namespaces)
-
- emission_data = None
- ambient_data = None
- diffuse_data = None
-
- emission = technique[0].find('collada:emission', self.namespaces)
- ambient = technique[0].find('collada:ambient', self.namespaces)
- diffuse = technique[0].find('collada:diffuse', self.namespaces)
-
- if 'color' in emission[0].tag:
- emission_data = [float(item) for item in emission[0].text.split()]
- emission_data[3] *= 255
- elif 'texture' in emission[0].tag:
- # emission_data = emission[0].attrib['texture']
- emission_data = '.'
-
- if 'color' in ambient[0].tag:
- ambient_data = [float(item) for item in ambient[0].text.split()]
- ambient_data[3] *= 255
- elif 'texture' in ambient[0].tag:
- # ambient_data = ambient[0].attrib['texture']
- ambient_data = '.'
-
- if 'color' in diffuse[0].tag:
- diffuse_data = [float(item) for item in diffuse[0].text.split()]
- diffuse_data[3] *= 255
- elif 'texture' in diffuse[0].tag:
- # diffuse_data = diffuse[0].attrib['texture']
- diffuse_data = '.'
+ # profile = None
+ # for item in effect:
+ # if 'profile' in item.tag:
+ # profile = item
+ # technique = profile.find('collada:technique', self.namespaces)
+ #
+ # emission_data = None
+ # ambient_data = None
+ # diffuse_data = None
+ #
+ # emission = technique[0].find('collada:emission', self.namespaces)
+ # ambient = technique[0].find('collada:ambient', self.namespaces)
+ # diffuse = technique[0].find('collada:diffuse', self.namespaces)
+ #
+ # if 'color' in emission[0].tag:
+ # emission_data = [float(item) for item in emission[0].text.split()]
+ # emission_data[3] *= 255
+ # elif 'texture' in emission[0].tag:
+ # # emission_data = emission[0].attrib['texture']
+ # emission_data = '.'
+ #
+ # if 'color' in ambient[0].tag:
+ # ambient_data = [float(item) for item in ambient[0].text.split()]
+ # ambient_data[3] *= 255
+ # elif 'texture' in ambient[0].tag:
+ # # ambient_data = ambient[0].attrib['texture']
+ # ambient_data = '.'
+ #
+ # if 'color' in diffuse[0].tag:
+ # diffuse_data = [float(item) for item in diffuse[0].text.split()]
+ # diffuse_data[3] *= 255
+ # elif 'texture' in diffuse[0].tag:
+ # # diffuse_data = diffuse[0].attrib['texture']
+ # diffuse_data = '.'
material_data = {
'name': material_name,
+ 'shader': 'shader/uber.vsh',
'effect': {
- 'ambient': ambient_data,
- 'diffuse': diffuse_data,
+ 'ambient': [0, 0, 0, 255], # ambient_data,
+ 'diffuse': '.', # diffuse_data,
'specular': '.',
'colorize': [255, 255, 255, 255],
- 'emission': emission_data,
+ 'emission': [0, 0, 0, 255], # emission_data,
'lightmaps': {
'diffuse': 'sc3d/diffuse_lightmap.png',
'specular': 'sc3d/specular_lightmap.png'
},
- 'tint': [0, 0, 0, 0]
+ 'shader_define_flags': 3014
}
}
diff --git a/models_converter/formats/scw/chunks/head.py b/models_converter/formats/scw/chunks/head.py
index 2b17bbe..028f489 100644
--- a/models_converter/formats/scw/chunks/head.py
+++ b/models_converter/formats/scw/chunks/head.py
@@ -12,7 +12,7 @@ def parse(self, buffer: bytes):
setattr(self, 'version', self.readUShort())
setattr(self, 'frame_rate', self.readUShort())
setattr(self, 'v1', self.readUShort())
- setattr(self, 'v2', self.readUShort())
+ setattr(self, 'animation_end_frame', self.readUShort())
setattr(self, 'materials_file', self.readString())
if self.get('version') == 2:
setattr(self, 'v3', self.readUByte())
@@ -23,7 +23,7 @@ def encode(self):
self.writeUShort(2) # getattr(self, 'version')
self.writeUShort(getattr(self, 'frame_rate'))
self.writeUShort(0) # getattr(self, 'v1')
- self.writeUShort(249) # getattr(self, 'v2')
+ self.writeUShort(getattr(self, 'last_frame')) # animation end frame
self.writeString(getattr(self, 'materials_file'))
self.writeUByte(0) # getattr(self, 'v3')
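With this change the animation end frame comes from the header instead of the hard-coded 249; the DAE parser earlier in this diff seeds it as 'last_frame': 0. Note that parse() now stores the value under animation_end_frame while encode() reads last_frame, so a header dictionary fed to encode needs the latter key. A sketch of the expected header, assuming the package is importable:

```python
from models_converter.formats.scw.chunks import HEAD

header = {
    'version': 2,
    'frame_rate': 30,
    'last_frame': 0,                                  # animation end frame
    'materials_file': 'sc3d/character_materials.scw',
}

head = HEAD()
head.from_dict(header)
head.encode()           # writes last_frame where 249 used to be hard-coded
```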
diff --git a/models_converter/formats/scw/chunks/mate.py b/models_converter/formats/scw/chunks/mate.py
index fd5fa30..6dca312 100644
--- a/models_converter/formats/scw/chunks/mate.py
+++ b/models_converter/formats/scw/chunks/mate.py
@@ -12,19 +12,15 @@ def parse(self, buffer: bytes):
setattr(self, 'name', self.readString())
setattr(self, 'shader', self.readString())
setattr(self, 'v1', self.readUByte())
+ setattr(self, 'v2', self.readUByte())
effect = {}
- use_ambient_tex = self.readBool()
- if use_ambient_tex:
- ambient_tex = self.readString()
- effect['ambient'] = ambient_tex
- else:
- a = self.readUByte()
- r = self.readUByte()
- g = self.readUByte()
- b = self.readUByte()
- ambient_color = (r, g, b, a)
- effect['ambient'] = ambient_color
+ a = self.readUByte()
+ r = self.readUByte()
+ g = self.readUByte()
+ b = self.readUByte()
+ ambient_color = (r, g, b, a)
+ effect['ambient'] = ambient_color
use_diffuse_tex = self.readBool()
if use_diffuse_tex:
@@ -50,8 +46,8 @@ def parse(self, buffer: bytes):
specular_color = (r, g, b, a)
effect['specular'] = specular_color
- setattr(self, 'v2', self.readString())
setattr(self, 'v3', self.readString())
+ setattr(self, 'v4', self.readString())
use_colorize_tex = self.readBool()
if use_colorize_tex:
@@ -77,7 +73,7 @@ def parse(self, buffer: bytes):
emission_color = (r, g, b, a)
effect['emission'] = emission_color
- setattr(self, 'v4', self.readString())
+ setattr(self, 'opacity_texture', self.readString())
setattr(self, 'v5', self.readFloat())
setattr(self, 'v6', self.readFloat())
@@ -86,16 +82,93 @@ def parse(self, buffer: bytes):
'specular': self.readString()
}
- a = self.readUByte()
- r = self.readUByte()
- g = self.readUByte()
- b = self.readUByte()
- effect['tint'] = (r, g, b, a)
+ if self.header['version'] == 2:
+ setattr(self, 'v7', self.readString())
+
+ shader_define_flags = self.readUInt32()
+ effect['shader_define_flags'] = shader_define_flags
+
+ if shader_define_flags & 32768:
+ self.readFloat()
+ self.readFloat()
+ self.readFloat()
+ self.readFloat()
setattr(self, 'effect', effect)
def encode(self):
super().encode()
+ self.writeString(getattr(self, 'name'))
+ self.writeString(getattr(self, 'shader'))
+ self.writeUByte(4) # getattr(self, 'v1')
+ self.writeUByte(0) # getattr(self, 'v2')
+
+ effect = getattr(self, 'effect')
+ r, g, b, a = effect['ambient']
+ self.writeUByte(a)
+ self.writeUByte(r)
+ self.writeUByte(g)
+ self.writeUByte(b)
+
+ use_diffuse_tex = type(effect['diffuse']) is str
+ self.writeBool(use_diffuse_tex)
+ if use_diffuse_tex:
+ self.writeString(effect['diffuse'])
+ else:
+ r, g, b, a = effect['diffuse']
+ self.writeUByte(a)
+ self.writeUByte(r)
+ self.writeUByte(g)
+ self.writeUByte(b)
+
+ use_specular_tex = type(effect['specular']) is str
+ self.writeBool(use_specular_tex)
+ if use_specular_tex:
+ self.writeString(effect['specular'])
+ else:
+ r, g, b, a = effect['specular']
+ self.writeUByte(a)
+ self.writeUByte(r)
+ self.writeUByte(g)
+ self.writeUByte(b)
+
+ self.writeString('.') # getattr(self, 'v3')
+ self.writeString('') # getattr(self, 'v4')
+
+ use_colorize_tex = type(effect['colorize']) is str
+ self.writeBool(use_colorize_tex)
+ if use_colorize_tex:
+ self.writeString(effect['colorize'])
+ else:
+ r, g, b, a = effect['colorize']
+ self.writeUByte(a)
+ self.writeUByte(r)
+ self.writeUByte(g)
+ self.writeUByte(b)
+
+ use_emission_tex = type(effect['emission']) is str
+ self.writeBool(use_emission_tex)
+ if use_emission_tex:
+ self.writeString(effect['emission'])
+ else:
+ r, g, b, a = effect['emission']
+ self.writeUByte(a)
+ self.writeUByte(r)
+ self.writeUByte(g)
+ self.writeUByte(b)
+
+ self.writeString('') # getattr(self, 'opacity_texture')
+ self.writeFloat(1) # getattr(self, 'v5')
+ self.writeFloat(0) # getattr(self, 'v6')
+
+ self.writeString(effect['lightmaps']['diffuse'])
+ self.writeString(effect['lightmaps']['specular'])
+
+ if self.header['version'] == 2:
+ self.writeString('') # getattr(self, 'v7')
+
+ self.writeUInt32(effect['shader_define_flags'])
+
self.length = len(self.buffer)
diff --git a/models_converter/formats/scw/writer.py b/models_converter/formats/scw/writer.py
index fede3f4..b55efb2 100644
--- a/models_converter/formats/scw/writer.py
+++ b/models_converter/formats/scw/writer.py
@@ -16,11 +16,11 @@ def write(self, data: dict):
self.write_chunk(head)
# TODO: materials
- # for material in data['materials']:
- # mate = MATE(header)
- # mate.from_dict(material)
- #
- # self.write_chunk(mate)
+ for material in data['materials']:
+ mate = MATE(header)
+ mate.from_dict(material)
+
+ self.write_chunk(mate)
for geometry in data['geometries']:
geom = GEOM(header)
diff --git a/models_converter/utils/__pycache__/writer.cpython-39.pyc b/models_converter/utils/__pycache__/writer.cpython-39.pyc
index 6dbd69b..70b909f 100644
Binary files a/models_converter/utils/__pycache__/writer.cpython-39.pyc and b/models_converter/utils/__pycache__/writer.cpython-39.pyc differ
diff --git a/setup.py b/setup.py
index a59e158..2c275ab 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@
setuptools.setup(
name='3d-converter',
- version='0.7.8',
+ version='0.8.1',
author='Vorono4ka',
author_email='crowo4ka@gmail.com',
description='Python 3D Models Converter',