From 0b6dbf28fdd81992df67afc9b1f8933cdbd24c7d Mon Sep 17 00:00:00 2001 From: JeffreyWang <944688482@qq.com> Date: Fri, 11 Jun 2021 18:33:43 +0800 Subject: [PATCH] feat: fix some problems and update document. --- Dockerfile | 7 +-- README.md | 48 ++++++++++++++++--- README_ZH.md | 47 +++++++++++++++--- convert.sh | 2 +- environment/docker-compose.yaml | 9 ++-- server/background.py | 36 -------------- ...test_converter_client.py => rpc_client.py} | 17 ++++--- server/routes.py | 21 -------- server/rpc_server.py | 6 +-- 9 files changed, 105 insertions(+), 88 deletions(-) delete mode 100644 server/background.py rename server/examples/python/{test_converter_client.py => rpc_client.py} (67%) delete mode 100644 server/routes.py diff --git a/Dockerfile b/Dockerfile index eafa5a1..7d080a0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,7 @@ -# docker build -t wj2015/3d-model-convert-to-gltf:v1.3 +# docker build . -t wj2015/3d-model-convert-to-gltf:v1.4 +# startup docker by: docker run -d -p 8999:8999 wj2015/3d-model-convert-to-gltf:latest # you can debug by: docker run -it --rm -v `pwd`:/opt/3d-model-convert-to-gltf/ wj2015/3d-model-convert-to-gltf:latest /bin/bash -# and run `conda activate pythonocc` to enter the environment. +# you can also execute `conda activate pythonocc` to enter the environment. FROM continuumio/anaconda:2019.10 LABEL maintainer=admin@wj2015.com @@ -25,4 +26,4 @@ COPY . 
/opt/3d-model-convert-to-gltf RUN cd /opt/3d-model-convert-to-gltf && \ conda run -n pythonocc pip install -r server/requirements.txt WORKDIR /opt/3d-model-convert-to-gltf -CMD ['python', '/opt/3d-model-convert-to-gltf/server/rpc_server.py'] +CMD conda run -n pythonocc python /opt/3d-model-convert-to-gltf/server/rpc_server.py diff --git a/README.md b/README.md index 7ca73d6..3f61b88 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ English|[中文](README_ZH.md) -The main reason for this project is that I encountered a scenario where **the STEP and IGES models need to be displayed on the Web**, but the web3d class libraries on the market do not support this format, and the direct display of STL files uploaded by users will consume a lot of bandwidth or CDN Traffic, converted to compressed gltf would be more appropriate. +The main reason for this project is that I encountered a scenario where **the STEP and IGES models need to be displayed on the Web**, but the web3d libraries on the market do not support this format, and the direct display of STL files uploaded by users will consume a lot of bandwidth or CDN Traffic, converted to compressed gltf would be more appropriate. Demo assets model effect compare: @@ -23,7 +23,7 @@ I organized my thoughts into a blog: [STEP and IGES models are converted to the > PS: My blog is write by Chinese, if you are non-Chinese native speaker, you should take a Google Translate tool for well. -**Project status:** maintain +**Project status:** stable ## Mission @@ -36,7 +36,23 @@ I organized my thoughts into a blog: [STEP and IGES models are converted to the - [x] write easy to use convert.sh - [x] online convert preview - [ ] [bug] stp convert to gltf is too large -- [ ] grpc support +- [x] grpc support +- [ ] rpc should response error detail +- [ ] rpc docker server logs output problems + +## Version update + +##### v1.4 2021-06-11 17:20 + +Support GRPC, convert code refactor, fix bugs. 
+ +##### v1.3 2020-06-24 17:19 + +Add English document, fix bugs. + +##### v1.0 2020-05-21 19:08 + +Basic commit, core feature complete, support shell convert. ## Why not assmip @@ -45,11 +61,11 @@ I tried to use [assimp](https://github.com/assimp/assimp), but the result under ## Why not implement API in this project -Model conversion is a very performance-consuming and slow-speed service. The upload and download of the model will consume bandwidth. **If it is deployed directly on your own server, it will be a very bandwidth-intensive and CPU-consuming task**. For the most common method to upload and download large files is to **introduce OSS and CDN with dynamic expansion of queues and back-end services**, but the deployment cost and implementation cost will be relatively high, please contact admin@wj2015.com for business needs Commercial API support. +Model conversion is a very performance-consuming and slow-speed service. The upload and download of the model will consume bandwidth. **If it is deployed directly on your own server, it will be a very bandwidth-intensive and CPU-consuming task**. For the most common method to upload and download large files is to **introduce OSS and CDN with dynamic expansion of queues and back-end services**, but the deployment cost and implementation cost will be relatively high. ## Quick Start -Due to the trouble of environment configuration and other reasons, the command line mode **still needs to rely on docker**. **The command line mode is suitable for simple invocation on the server side.** The conversion process blocks the processes to be synchronized and cannot be deployed in a distributed manner to increase concurrency. +Due to the trouble of environment configuration and other reasons, the command line mode **still needs to rely on docker**. 
**The command line mode is suitable for simple invocation on the server side.** The conversion process blocks the processes to be synchronized and cannot be deployed in a distributed manner to increase concurrency. The most recommended way is to use **grpc with docker** deployment to make rpc, which can be synchronous or asynchronous, and will be **easy to extend**. > PS:When there are too many simultaneous conversion models in the command line mode or a single model is too large, there is a risk that the server providing the web service is stuck @@ -57,11 +73,31 @@ Due to the trouble of environment configuration and other reasons, the command l You can convert model online (<100MB) powered by [modelbox-sdk](https://github.com/wangerzi/modelbox-sdk),preview link: [https://wangerzi.gitee.io/modelbox-sdk/examples/index.html](https://wangerzi.gitee.io/modelbox-sdk/examples/index.html) +### GRPC Mode + +Based on GRPC, and it will be more convenient to **build a dynamically expanded service cluster**, we support uploading zip/model source files, for the compatibility of each model, **response files are all in zip**, you need to decompress it after receiving it. + +You should run server-side rpc service by docker, please make sure 8999 port is usable and `wj2015/3d-model-convert-to-gltf:latest` image is up to date, command: + +```shell +docker run -d -p 8999:8999 wj2015/3d-model-convert-to-gltf:latest +``` + +When using grpc in this project, please copy `server/rpc/protos/converter.proto` , and generate a code template according to the language of the caller and enjoy it. Official document: [Support Language](https://grpc.io/docs/languages/) + +#### Completed examples + +If this project is helpful to you, you can contribute more examples via PR, such as php/golang/Nodejs rpc call examples. 
+ +| name | code | comments | +| ------------------------------- | ------------------------------------ | ---------------- | +| Python rpc client usage example | server/examples/python/rpc_client.py | convert and save | + ### Command Mode Download the `convert.sh`, and grant execution authority, execute the following command, the second param should choose in `stl|stp|iges|obj|fbx`, please determine according to the file type -> The script depends on the docker environment, so you should prepare the Docker environment first. +> The script depends on the docker environment, so you should prepare the Docker environment first. **Command mode does not support zip file conversion**, because docker volume will auto sync picture or mtl assets to docker container. ```shell convert.sh stl inputpath.stl outputpath.glb # convert to glb single bin file diff --git a/README_ZH.md b/README_ZH.md index bd1f7d7..2d55c50 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -21,7 +21,7 @@ 本项目即采用了博客中总结的思路:[STEP和IGES模型转换为适用Web的glb格式](https://blog.wj2015.com/2020/03/08/step%e5%92%8ciges%e6%a8%a1%e5%9e%8b%e8%bd%ac%e6%8d%a2%e4%b8%ba%e9%80%82%e7%94%a8web%e7%9a%84glb%e6%a0%bc%e5%bc%8f/) -**项目状态:** 维护中 +**项目状态:** 稳定维护中 ## 待完成任务 @@ -34,6 +34,22 @@ - [x] 在线转换预览 - [ ] [bug] stp 转 gltf 最终文件太大 - [x] 支持以 grpc 形式调用 +- [ ] rpc 接口优化,返回具体的错误信息 +- [ ] rpc server 日志问题 + +## 版本说明 + +##### v1.4 2021-06-11 17:20 + +更新了 GRPC 的支持,清理 aiohttp 和配置代码,重构了转换部分的代码并修复了 BUG + +##### v1.3 2020-06-24 17:19 + +新增英文文档,修复 BUG,尝试 aiohttp 写接口 + +##### v1.0 2020-05-21 19:08 + +基础提交,基本功能开发完毕,支持脚本调用 ## 为什么不用 assmip @@ -45,7 +61,7 @@ ## 快速上手 -由于环境配置麻烦等原因,命令行模式依旧需要依赖docker,**命令行模式适合服务端简单调用**,转换过程阻塞进程同步进行,无法分布式部署增加并发量等 +由于环境配置麻烦等原因,命令行模式依旧需要依赖docker,**命令行模式适合服务端简单调用**,转换过程阻塞进程同步进行,无法分布式部署增加并发量;最推荐的方式就是用 **grpc 配合容器化部署做调用**,可同步可异步,可方便的做扩展。 > PS:命令行模式同步转换模型过多或者单个模型过大时,有把提供Web服务的服务器卡住的风险 @@ -53,11 +69,31 @@ 可以使用 [modelbox-sdk](https://github.com/wangerzi/modelbox-sdk) 
在线转换模型(<100MB),链接:[https://wangerzi.gitee.io/modelbox-sdk/examples/index.html](https://wangerzi.gitee.io/modelbox-sdk/examples/index.html) +### GRPC 模式 + +基于 GRPC 实现了服务内部的 RPC 通信,构建动态扩容的服务集群会更加方便,**支持上传 zip/模型源文件**,为各模型的兼容性考虑,**响应的文件都是 zip 格式的**,调用后需自行解压。 + +首先需要**通过 docker 运行 RPC 服务端**,直接运行的指令如下,注意保证 8999 端口未被占用且 `wj2015/3d-model-convert-to-gltf:latest` 镜像最新 + +```shell +docker run -d -p 8999:8999 wj2015/3d-model-convert-to-gltf:latest +``` + +使用时,请将 `server/rpc/protos/converter.proto` 复制出来,并根据调用方编程语言生成模板并使用,官方网址:[支持的语言](https://grpc.io/docs/languages/) + +#### 已实现示例 + +如果本项目对您有帮助,您可继续补充其他示例并提交 PR,比如常见的 php 、 golang 、Nodejs 的调用示例 + +| 示例名称 | 目录 | 备注 | +| --------------- | ------------------------------------ | -------------- | +| Python 调用示例 | server/examples/python/rpc_client.py | 调用封装和保存 | + ### 命令行模式 下载代码中的 `convert.sh`,赋予执行权限,执行如下指令即可,第二个参数可支持 `stl|stp|iges|obj|fbx`,根据文件类型而定。 -> 脚本依赖于docker环境,所以 Docker 环境先准备好吧,而且命令行依赖的是 docker 的 -v 映射本地目录从而将贴图和模型一起导入到容器中执行转换,所以不接受压缩包,请直接指定需要转换的模型文件。 +> 脚本依赖于docker环境,所以 Docker 环境先准备好吧,而且命令行依赖的是 docker 的 -v 映射本地目录从而将贴图和模型一起导入到容器中执行转换,所以**不接受压缩包**,请直接指定需要转换的模型文件。 ```shell convert.sh stl inputpath.stl outputpath.glb # 生成二进制glb文件 @@ -102,12 +138,9 @@ docker pull wj2015/3d-model-convert-to-gltf 在容器内执行 `conda run -n pythonocc python convert.py stl input.stl out.glb` 可同步转换文件 -### GRPC 模式 -文档完善中,基于 GRPC 实现了服务内部的 RPC 通信,构建动态扩容的服务集群会更加方便,支持上传 zip/模型源文件,为各模型的兼容性考虑,响应的文件都是 zip 格式的,调用后需自行解压。 - ### 简单负载示意图 -如果有多机负载的需求,可借助 nginx 的反向代理做一下简单的负载均衡或者辅助消息队列以及生产者消费者来做,其中 grpc 已内置实现并支持容器化部署,如需使用 HTTP 服务或队列需要自己实现逻辑。 +如果有**多机负载**的需求,可借助 nginx 的反向代理、微服务的服务注册和调用轮询来做简单的负载均衡,还可以辅助消息队列以及生产者消费者,**其中 grpc 已内置实现并支持容器化部署**,如需使用 HTTP 服务或队列需要自己实现逻辑。 ![1583754967257](assets/1583754967257.png) diff --git a/convert.sh b/convert.sh index 206ddcf..6098456 100755 --- a/convert.sh +++ b/convert.sh @@ -10,4 +10,4 @@ outPath=$( pwd ) outFile=$outPath/`basename $3` -docker run -v $inputPath:$inputPath -v $outPath:$outPath wj2015/3d-model-convert-to-gltf:latest /bin/bash 
-c "cd $inputPath && conda run -n pythonocc python /opt/3d-model-convert-to-gltf/server/convert.py $1 $inputFile $outFile" +docker run -v $inputPath:$inputPath -v $outPath:$outPath wj2015/3d-model-convert-to-gltf:v1.4 /bin/bash -c "cd $inputPath && conda run -n pythonocc python /opt/3d-model-convert-to-gltf/server/convert.py $1 $inputFile $outFile" diff --git a/environment/docker-compose.yaml b/environment/docker-compose.yaml index 245745b..74305c1 100644 --- a/environment/docker-compose.yaml +++ b/environment/docker-compose.yaml @@ -2,13 +2,12 @@ version: '3' services: app: - image: wj2015/3d-model-convert-to-gltf + image: wj2015/3d-model-convert-to-gltf:v1.4 container_name: 3d-model-convert-to-gltf-app volumes: - ../:/opt/3d-model-convert-to-gltf:cached privileged: true + ports: + - 8999:8999 command: - 'conda run -n pythonocc python server/rpc_server.py' - redis: - image: redis - container_name: 3d-model-convert-to-gltf-redis \ No newline at end of file + 'conda run --no-capture-output -n pythonocc python server/rpc_server.py > /dev/stdout' \ No newline at end of file diff --git a/server/background.py b/server/background.py deleted file mode 100644 index bf9abd9..0000000 --- a/server/background.py +++ /dev/null @@ -1,36 +0,0 @@ -import service.Convert -from exception.ConvertException import ConvertException -import threading - -def setup_background(app): - create_threads(app) - -threads = [] -def create_threads(app): - process_num = int(app['config']['app']['background_process_num']) - if process_num <= 0: - process_num = 1 - for i in range(0, process_num): - thread = threading.Thread(target=handle_background(app, convert_background)) - thread.start() - threads.append(thread) - - return threads - -# pass variable for multi threading -def handle_background(app, callback): - def func(): - callback(app) - return func - -def convert_background(app): - while True: - # get information - try: - req_id, json_dict = service.Convert.get_wait_mission(app['redis']) - except 
ConvertException as err: - print('Get information error ', err) - except Exception as err: - print('Unexcpeted error', err) - # read and convert - # notice result diff --git a/server/examples/python/test_converter_client.py b/server/examples/python/rpc_client.py similarity index 67% rename from server/examples/python/test_converter_client.py rename to server/examples/python/rpc_client.py index 5ace239..81aaf5e 100644 --- a/server/examples/python/test_converter_client.py +++ b/server/examples/python/rpc_client.py @@ -24,6 +24,8 @@ def convert_file_and_save(target, t, source, dist, is_bin=False): response = stub.convertToGltf( converter_pb2.convertReq(type=t, isBin=is_bin, file=f.read()) ) + if response.file == b'': + return False with open(dist, 'wb') as d: d.write(response.file) @@ -32,12 +34,15 @@ def convert_file_and_save(target, t, source, dist, is_bin=False): def run(): - start_time = time.time() - if convert_file_and_save("127.0.0.1:8999", 'stl', '../../../assets/test.stl', 'test.glb.zip', True): - end_time = time.time() - print("convert success", str(end_time - start_time), 's') - else: - print("convert failed") + try: + start_time = time.time() + if convert_file_and_save("127.0.0.1:8999", 'stl', '../../../assets/test.stl', 'test.glb.zip', True): + end_time = time.time() + print("convert success", str(end_time - start_time), 's') + else: + print("convert failed") + except Exception as err: + print("convert exception:", err) if __name__ == '__main__': diff --git a/server/routes.py b/server/routes.py deleted file mode 100644 index d0809de..0000000 --- a/server/routes.py +++ /dev/null @@ -1,21 +0,0 @@ -import controller.Convert as Convert -from exception.BaseException import BaseException -from aiohttp import web - - -def handle(func): - # wrapper exception handler - async def run(request): - try: - res = await func(request) - except BaseException as err: - return web.json_response({'code': 999, "message": str(err), "data": {}}) - return res - - return run - - 
-def setup_routes(app): - app.router.add_post('/convert/stp', handle(Convert.stp)) - app.router.add_post('/convert/stl', handle(Convert.stl)) - app.router.add_post('/convert/iges', handle(Convert.iges)) diff --git a/server/rpc_server.py b/server/rpc_server.py index 05f9f83..3300403 100644 --- a/server/rpc_server.py +++ b/server/rpc_server.py @@ -1,6 +1,7 @@ import logging import sys import os +import time from concurrent import futures import grpc @@ -29,7 +30,6 @@ def convertToGltf(self, request, context): model = Convert.ModelFactory.make_model(request.type) if up_service.is_zip(): model_ext = model.get_ext() - # todo:: we can more effective find_path = up_service.scan_ext_file(model_ext, True) if find_path and len(find_path) > 0: # find first file @@ -61,7 +61,7 @@ def convertToGltf(self, request, context): def serve(): server = grpc.server( - futures.ThreadPoolExecutor(max_workers=10), + futures.ThreadPoolExecutor(max_workers=1), options=( ('grpc.max_receive_message_length', -1), ('grpc.max_send_message_length', -1), @@ -69,7 +69,7 @@ def serve(): ) converter_pb2_grpc.add_ConverterServicer_to_server(ConverterService(), server) - target = "[::]:8999" + target = "0.0.0.0:8999" server.add_insecure_port(target) server.start() print("server at ", target)