-
Notifications
You must be signed in to change notification settings - Fork 2.1k
/
module.py
87 lines (80 loc) · 3.25 KB
/
module.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import paddlehub as hub
from paddleocr.ppocr.utils.logging import get_logger
from paddleocr.tools.infer.utility import base64_to_cv2
from paddlehub.module.module import moduleinfo, runnable, serving
@moduleinfo(
    name="japan_ocr_db_crnn_mobile",
    version="1.1.0",
    summary="ocr service",
    author="PaddlePaddle",
    type="cv/text_recognition")
class JapanOCRDBCRNNMobile:
    """PaddleHub serving wrapper around the generic multilingual OCR module,
    with the language fixed to Japanese. All detection/recognition work is
    delegated to the wrapped ``multi_languages_ocr_db_crnn`` module.
    """

    def __init__(self,
                 det=True,
                 rec=True,
                 use_angle_cls=False,
                 enable_mkldnn=False,
                 use_gpu=False,
                 box_thresh=0.6,
                 angle_classification_thresh=0.9):
        """
        Initialize with the necessary elements.

        Args:
            det(bool): Whether to use text detector.
            rec(bool): Whether to use text recognizer.
            use_angle_cls(bool): Whether to use text orientation classifier.
            enable_mkldnn(bool): Whether to enable mkldnn.
            use_gpu(bool): Whether to use gpu.
            box_thresh(float): The threshold of the detected text box's confidence.
            angle_classification_thresh(float): The threshold of the angle classification confidence.
        """
        self.logger = get_logger()
        # Delegate everything to the generic multilingual OCR module, pinning lang="japan".
        self.model = hub.Module(
            name="multi_languages_ocr_db_crnn",
            lang="japan",
            det=det,
            rec=rec,
            use_angle_cls=use_angle_cls,
            enable_mkldnn=enable_mkldnn,
            use_gpu=use_gpu,
            box_thresh=box_thresh,
            angle_classification_thresh=angle_classification_thresh)
        # NOTE(review): `self.name` is injected by the @moduleinfo decorator (not set in
        # this class); it is propagated so the wrapped model identifies itself under this
        # module's name — presumably used for output naming/logging, confirm against the
        # multi_languages_ocr_db_crnn implementation.
        self.model.name = self.name

    def recognize_text(self, images=None, paths=None, output_dir='ocr_result', visualization=False):
        """
        Get the text in the predicted images.

        Args:
            images (list(numpy.ndarray)): images data, shape of each is [H, W, C]. If images not paths.
            paths (list[str]): The paths of images. If paths not images.
            output_dir (str): The directory to store output images.
            visualization (bool): Whether to save image or not.

        Returns:
            res (list): The result of text detection box and save path of images.
        """
        # Fix: the original signature used mutable defaults (images=[], paths=[]),
        # which are shared across calls. Use None sentinels and normalize to fresh
        # lists so the effective behavior for omitted arguments is unchanged.
        images = [] if images is None else images
        paths = [] if paths is None else paths
        all_results = self.model.recognize_text(
            images=images, paths=paths, output_dir=output_dir, visualization=visualization)
        return all_results

    @serving
    def serving_method(self, images, **kwargs):
        """
        Run as a service: decode base64-encoded images, then recognize text.
        """
        images_decode = [base64_to_cv2(image) for image in images]
        results = self.recognize_text(images_decode, **kwargs)
        return results

    @runnable
    def run_cmd(self, argvs):
        """
        Run as a command; argument parsing is handled entirely by the wrapped model.
        """
        results = self.model.run_cmd(argvs)
        return results

    def export_onnx_model(self, dirname: str, input_shape_dict=None, opset_version=10):
        """
        Export the model to ONNX format.

        Args:
            dirname(str): The directory to save the onnx model.
            input_shape_dict: dictionary ``{ input_name: input_value }``, eg. ``{'x': [-1, 3, -1, -1]}``.
            opset_version(int): Operator set version used for the export.
        """
        self.model.export_onnx_model(dirname=dirname, input_shape_dict=input_shape_dict, opset_version=opset_version)