diff --git a/DLPredictOnline/demo/README.MD b/DLPredictOnline/demo/README.MD
index 9f8bb34..657c682 100644
--- a/DLPredictOnline/demo/README.MD
+++ b/DLPredictOnline/demo/README.MD
@@ -9,8 +9,9 @@ The TensorFlow model deployment steps are as follows:
 2. Run the container and start tensorflow-serving, bound to host port 8501. Command:
 docker run -t --rm -p 8501:8501 -p 8500:8500 -v "$TESTDATA:/models/textcnn-69" -e MODEL_NAME=textcnn-69 tensorflow/serving:1.14.0 &
 3. -e MODEL_NAME=textcnn-69 specifies the model name; the 69 is the id of this test task, chosen by the user
-4. -p 8501:8501 HTTP port: binds host port 8501 to container port 8501, so the service in the container can be reached directly through port 8501 on the physical machine
-5. -p 8500:8500 gRPC port
+4. -v /models/textcnn-69: the directory inside the container must follow the /models/$MODEL_NAME convention
+5. -p 8501:8501 HTTP port: binds host port 8501 to container port 8501, so the service in the container can be reached directly through port 8501 on the physical machine
+6. -p 8500:8500 gRPC port
 The model startup log looks like this:
 ![tensorflow-serving startup log](tensorflow-serving start log.png)
@@ -133,7 +134,7 @@ key=__label__thankyou, value=1.5074686E-5
 The PyTorch model deployment steps are as follows:
-1. Download the [PyTorch model files to be deployed](model/pytorch/watermark-centernet) to the local directory /model/deps
+1. Download the [PyTorch model files to be deployed](model/pytorch/mnist) to the local directory /model/deps
 Download [startPredict.sh](../../PyTorchPredictOnline/startPredict.sh), [gpu/predictor.py](../../PyTorchPredictOnline/gpu/predictor.py), and [localtime](../../PyTorchPredictOnline/DockerImages/deps/localtime) to the local directory /model/deps
 Download [Dockerfile_gpu](../../PyTorchPredictOnline/DockerImages/Dockerfile_gpu) to the local directory /model and rename it to Dockerfile
 2. Build the image. Command:
diff --git a/DLPredictOnline/demo/model/pytorch/mnist/README.MD b/DLPredictOnline/demo/model/pytorch/mnist/README.MD
new file mode 100644
index 0000000..e092da4
--- /dev/null
+++ b/DLPredictOnline/demo/model/pytorch/mnist/README.MD
@@ -0,0 +1,10 @@
+# MNIST model example
+
+Client program: [PyTorchClient](../../../src/main/java/com/bj58/ailab/demo/client/PyTorchClient.java)
+
+## Data
+[test_data](./test_data): sample data
+
+## Model files
+Model directories are named by version, so after downloading, unzip the archive into the corresponding version directory
+[Version 1](http://wos.58cdn.com.cn/nOlKjIhGntU/dlinference/demo_model_pytorch_mnist_2.zip)
\ No newline at end of file
diff --git a/DLPredictOnline/demo/model/pytorch/mnist/test_data/2.png b/DLPredictOnline/demo/model/pytorch/mnist/test_data/2.png
new file mode 100644
index 0000000..9dbc8f7
Binary files /dev/null and b/DLPredictOnline/demo/model/pytorch/mnist/test_data/2.png differ
diff --git a/DLPredictOnline/demo/model/pytorch/mnist/test_data/9.png b/DLPredictOnline/demo/model/pytorch/mnist/test_data/9.png
new file mode 100644
index 0000000..a94378c
Binary files /dev/null and b/DLPredictOnline/demo/model/pytorch/mnist/test_data/9.png differ
diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/README.MD b/DLPredictOnline/demo/model/pytorch/watermark-centernet/README.MD
deleted file mode 100644
index 865fd47..0000000
--- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/README.MD
+++ /dev/null
@@ -1,18 +0,0 @@
-# watermark-centernet model example
-
-Client program: [PyTorchClient](../../../src/main/java/com/bj58/ailab/demo/client/PyTorchClient.java)
-
-## Data
-[test_data](./data): sample files
-
-## Data-processing function definition file
-[processor.py](./processor.py): defines the data-processing functions; the file name is fixed as processor.py
-
-## Model file
-[model.pth](http://wos.58cdn.com.cn/nOlKjIhGntU/dlinference/demo_model_pytorch_model.pth): the model file; the name is fixed as model.pth. **After downloading, rename the model file to model.pth**
-
-## Python dependency specification file
-[requirements.txt](./requirements.txt): lists the Python packages the model depends on; the file name is fixed as requirements.txt
-
-## Model definition files
-The files under the models and myutils folders are the definition files from model training; PyTorch uses them when loading the model file
diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/data/1.jpg b/DLPredictOnline/demo/model/pytorch/watermark-centernet/data/1.jpg
deleted file mode 100644
index c874997..0000000
Binary files a/DLPredictOnline/demo/model/pytorch/watermark-centernet/data/1.jpg and /dev/null differ
diff --git
a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/data_parallel.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/data_parallel.py deleted file mode 100644 index 1a96c0d..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/data_parallel.py +++ /dev/null @@ -1,128 +0,0 @@ -import torch -from torch.nn.modules import Module -from torch.nn.parallel.scatter_gather import gather -from torch.nn.parallel.replicate import replicate -from torch.nn.parallel.parallel_apply import parallel_apply - - -from .scatter_gather import scatter_kwargs - -class _DataParallel(Module): - r"""Implements data parallelism at the module level. - - This container parallelizes the application of the given module by - splitting the input across the specified devices by chunking in the batch - dimension. In the forward pass, the module is replicated on each device, - and each replica handles a portion of the input. During the backwards - pass, gradients from each replica are summed into the original module. - - The batch size should be larger than the number of GPUs used. It should - also be an integer multiple of the number of GPUs so that each chunk is the - same size (so that each GPU processes the same number of samples). - - See also: :ref:`cuda-nn-dataparallel-instead` - - Arbitrary positional and keyword inputs are allowed to be passed into - DataParallel EXCEPT Tensors. All variables will be scattered on dim - specified (default 0). Primitive types will be broadcasted, but all - other types will be a shallow copy and can be corrupted if written to in - the model's forward pass. - - Args: - module: module to be parallelized - device_ids: CUDA devices (default: all devices) - output_device: device location of output (default: device_ids[0]) - - Example:: - - >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2]) - >>> output = net(input_var) - """ - - # TODO: update notes/cuda.rst when this class handles 8+ GPUs well - - def __init__(self, module, device_ids=None, output_device=None, dim=0, chunk_sizes=None): - super(_DataParallel, self).__init__() - - if not torch.cuda.is_available(): - self.module = module - self.device_ids = [] - return - - if device_ids is None: - device_ids = list(range(torch.cuda.device_count())) - if output_device is None: - output_device = device_ids[0] - self.dim = dim - self.module = module - self.device_ids = device_ids - self.chunk_sizes = chunk_sizes - self.output_device = output_device - if len(self.device_ids) == 1: - self.module.cuda(device_ids[0]) - - def forward(self, *inputs, **kwargs): - if not self.device_ids: - return self.module(*inputs, **kwargs) - inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes) - if len(self.device_ids) == 1: - return self.module(*inputs[0], **kwargs[0]) - replicas = self.replicate(self.module, self.device_ids[:len(inputs)]) - outputs = self.parallel_apply(replicas, inputs, kwargs) - return self.gather(outputs, self.output_device) - - def replicate(self, module, device_ids): - return replicate(module, device_ids) - - def scatter(self, inputs, kwargs, device_ids, chunk_sizes): - return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim, chunk_sizes=self.chunk_sizes) - - def parallel_apply(self, replicas, inputs, kwargs): - return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)]) - - def gather(self, outputs, output_device): - return gather(outputs, output_device, dim=self.dim) - - -def data_parallel(module, inputs, device_ids=None, 
output_device=None, dim=0, module_kwargs=None): - r"""Evaluates module(input) in parallel across the GPUs given in device_ids. - - This is the functional version of the DataParallel module. - - Args: - module: the module to evaluate in parallel - inputs: inputs to the module - device_ids: GPU ids on which to replicate module - output_device: GPU location of the output Use -1 to indicate the CPU. - (default: device_ids[0]) - Returns: - a Variable containing the result of module(input) located on - output_device - """ - if not isinstance(inputs, tuple): - inputs = (inputs,) - - if device_ids is None: - device_ids = list(range(torch.cuda.device_count())) - - if output_device is None: - output_device = device_ids[0] - - inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim) - if len(device_ids) == 1: - return module(*inputs[0], **module_kwargs[0]) - used_device_ids = device_ids[:len(inputs)] - replicas = replicate(module, used_device_ids) - outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids) - return gather(outputs, output_device, dim) - -def DataParallel(module, device_ids=None, output_device=None, dim=0, chunk_sizes=None): - if chunk_sizes is None: - return torch.nn.DataParallel(module, device_ids, output_device, dim) - standard_size = True - for i in range(1, len(chunk_sizes)): - if chunk_sizes[i] != chunk_sizes[0]: - standard_size = False - if standard_size: - return torch.nn.DataParallel(module, device_ids, output_device, dim) - return _DataParallel(module, device_ids, output_device, dim, chunk_sizes) \ No newline at end of file diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/decode.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/decode.py deleted file mode 100644 index 2d61192..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/decode.py +++ /dev/null @@ -1,571 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import torch -import torch.nn as nn -from .utils import _gather_feat, _tranpose_and_gather_feat - -def _nms(heat, kernel=3): - pad = (kernel - 1) // 2 - - hmax = nn.functional.max_pool2d( - heat, (kernel, kernel), stride=1, padding=pad) - keep = (hmax == heat).float() - return heat * keep - -def _left_aggregate(heat): - ''' - heat: batchsize x channels x h x w - ''' - shape = heat.shape - heat = heat.reshape(-1, heat.shape[3]) - heat = heat.transpose(1, 0).contiguous() - ret = heat.clone() - for i in range(1, heat.shape[0]): - inds = (heat[i] >= heat[i - 1]) - ret[i] += ret[i - 1] * inds.float() - return (ret - heat).transpose(1, 0).reshape(shape) - -def _right_aggregate(heat): - ''' - heat: batchsize x channels x h x w - ''' - shape = heat.shape - heat = heat.reshape(-1, heat.shape[3]) - heat = heat.transpose(1, 0).contiguous() - ret = heat.clone() - for i in range(heat.shape[0] - 2, -1, -1): - inds = (heat[i] >= heat[i +1]) - ret[i] += ret[i + 1] * inds.float() - return (ret - heat).transpose(1, 0).reshape(shape) - -def _top_aggregate(heat): - ''' - heat: batchsize x channels x h x w - ''' - heat = heat.transpose(3, 2) - shape = heat.shape - heat = heat.reshape(-1, heat.shape[3]) - heat = heat.transpose(1, 0).contiguous() - ret = heat.clone() - for i in range(1, heat.shape[0]): - inds = (heat[i] >= heat[i - 1]) - ret[i] += ret[i - 1] * inds.float() - return (ret - heat).transpose(1, 0).reshape(shape).transpose(3, 2) - -def _bottom_aggregate(heat): - ''' - heat: batchsize x channels x h x w - 
''' - heat = heat.transpose(3, 2) - shape = heat.shape - heat = heat.reshape(-1, heat.shape[3]) - heat = heat.transpose(1, 0).contiguous() - ret = heat.clone() - for i in range(heat.shape[0] - 2, -1, -1): - inds = (heat[i] >= heat[i + 1]) - ret[i] += ret[i + 1] * inds.float() - return (ret - heat).transpose(1, 0).reshape(shape).transpose(3, 2) - -def _h_aggregate(heat, aggr_weight=0.1): - return aggr_weight * _left_aggregate(heat) + \ - aggr_weight * _right_aggregate(heat) + heat - -def _v_aggregate(heat, aggr_weight=0.1): - return aggr_weight * _top_aggregate(heat) + \ - aggr_weight * _bottom_aggregate(heat) + heat - -''' -# Slow for large number of categories -def _topk(scores, K=40): - batch, cat, height, width = scores.size() - topk_scores, topk_inds = torch.topk(scores.view(batch, -1), K) - - topk_clses = (topk_inds / (height * width)).int() - - topk_inds = topk_inds % (height * width) - topk_ys = (topk_inds / width).int().float() - topk_xs = (topk_inds % width).int().float() - return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs -''' -def _topk_channel(scores, K=40): - batch, cat, height, width = scores.size() - - topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K) - - topk_inds = topk_inds % (height * width) - topk_ys = (topk_inds / width).int().float() - topk_xs = (topk_inds % width).int().float() - - return topk_scores, topk_inds, topk_ys, topk_xs - -def _topk(scores, K=40): - batch, cat, height, width = scores.size() - - topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K) - - topk_inds = topk_inds % (height * width) - topk_ys = (topk_inds / width).int().float() - topk_xs = (topk_inds % width).int().float() - - topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K) - topk_clses = (topk_ind / K).int() - topk_inds = _gather_feat( - topk_inds.view(batch, -1, 1), topk_ind).view(batch, K) - topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K) - topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K) - - return topk_score, topk_inds, topk_clses, topk_ys, topk_xs - - -def agnex_ct_decode( - t_heat, l_heat, b_heat, r_heat, ct_heat, - t_regr=None, l_regr=None, b_regr=None, r_regr=None, - K=40, scores_thresh=0.1, center_thresh=0.1, aggr_weight=0.0, num_dets=1000 -): - batch, cat, height, width = t_heat.size() - - ''' - t_heat = torch.sigmoid(t_heat) - l_heat = torch.sigmoid(l_heat) - b_heat = torch.sigmoid(b_heat) - r_heat = torch.sigmoid(r_heat) - ct_heat = torch.sigmoid(ct_heat) - ''' - if aggr_weight > 0: - t_heat = _h_aggregate(t_heat, aggr_weight=aggr_weight) - l_heat = _v_aggregate(l_heat, aggr_weight=aggr_weight) - b_heat = _h_aggregate(b_heat, aggr_weight=aggr_weight) - r_heat = _v_aggregate(r_heat, aggr_weight=aggr_weight) - - # perform nms on heatmaps - t_heat = _nms(t_heat) - l_heat = _nms(l_heat) - b_heat = _nms(b_heat) - r_heat = _nms(r_heat) - - - t_heat[t_heat > 1] = 1 - l_heat[l_heat > 1] = 1 - b_heat[b_heat > 1] = 1 - r_heat[r_heat > 1] = 1 - - t_scores, t_inds, _, t_ys, t_xs = _topk(t_heat, K=K) - l_scores, l_inds, _, l_ys, l_xs = _topk(l_heat, K=K) - b_scores, b_inds, _, b_ys, b_xs = _topk(b_heat, K=K) - r_scores, r_inds, _, r_ys, r_xs = _topk(r_heat, K=K) - - ct_heat_agn, ct_clses = torch.max(ct_heat, dim=1, keepdim=True) - - # import pdb; pdb.set_trace() - - t_ys = t_ys.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K) - t_xs = t_xs.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K) - l_ys = l_ys.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K) - l_xs = l_xs.view(batch, 1, K, 1, 
1).expand(batch, K, K, K, K) - b_ys = b_ys.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K) - b_xs = b_xs.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K) - r_ys = r_ys.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K) - r_xs = r_xs.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K) - - box_ct_xs = ((l_xs + r_xs + 0.5) / 2).long() - box_ct_ys = ((t_ys + b_ys + 0.5) / 2).long() - - ct_inds = box_ct_ys * width + box_ct_xs - ct_inds = ct_inds.view(batch, -1) - ct_heat_agn = ct_heat_agn.view(batch, -1, 1) - ct_clses = ct_clses.view(batch, -1, 1) - ct_scores = _gather_feat(ct_heat_agn, ct_inds) - clses = _gather_feat(ct_clses, ct_inds) - - t_scores = t_scores.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K) - l_scores = l_scores.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K) - b_scores = b_scores.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K) - r_scores = r_scores.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K) - ct_scores = ct_scores.view(batch, K, K, K, K) - scores = (t_scores + l_scores + b_scores + r_scores + 2 * ct_scores) / 6 - - # reject boxes based on classes - top_inds = (t_ys > l_ys) + (t_ys > b_ys) + (t_ys > r_ys) - top_inds = (top_inds > 0) - left_inds = (l_xs > t_xs) + (l_xs > b_xs) + (l_xs > r_xs) - left_inds = (left_inds > 0) - bottom_inds = (b_ys < t_ys) + (b_ys < l_ys) + (b_ys < r_ys) - bottom_inds = (bottom_inds > 0) - right_inds = (r_xs < t_xs) + (r_xs < l_xs) + (r_xs < b_xs) - right_inds = (right_inds > 0) - - sc_inds = (t_scores < scores_thresh) + (l_scores < scores_thresh) + \ - (b_scores < scores_thresh) + (r_scores < scores_thresh) + \ - (ct_scores < center_thresh) - sc_inds = (sc_inds > 0) - - scores = scores - sc_inds.float() - scores = scores - top_inds.float() - scores = scores - left_inds.float() - scores = scores - bottom_inds.float() - scores = scores - right_inds.float() - - scores = scores.view(batch, -1) - scores, inds = torch.topk(scores, num_dets) - scores = scores.unsqueeze(2) - - if t_regr is not None and l_regr is not None \ - and b_regr is not None and r_regr is not None: - t_regr = _tranpose_and_gather_feat(t_regr, t_inds) - t_regr = t_regr.view(batch, K, 1, 1, 1, 2) - l_regr = _tranpose_and_gather_feat(l_regr, l_inds) - l_regr = l_regr.view(batch, 1, K, 1, 1, 2) - b_regr = _tranpose_and_gather_feat(b_regr, b_inds) - b_regr = b_regr.view(batch, 1, 1, K, 1, 2) - r_regr = _tranpose_and_gather_feat(r_regr, r_inds) - r_regr = r_regr.view(batch, 1, 1, 1, K, 2) - - t_xs = t_xs + t_regr[..., 0] - t_ys = t_ys + t_regr[..., 1] - l_xs = l_xs + l_regr[..., 0] - l_ys = l_ys + l_regr[..., 1] - b_xs = b_xs + b_regr[..., 0] - b_ys = b_ys + b_regr[..., 1] - r_xs = r_xs + r_regr[..., 0] - r_ys = r_ys + r_regr[..., 1] - else: - t_xs = t_xs + 0.5 - t_ys = t_ys + 0.5 - l_xs = l_xs + 0.5 - l_ys = l_ys + 0.5 - b_xs = b_xs + 0.5 - b_ys = b_ys + 0.5 - r_xs = r_xs + 0.5 - r_ys = r_ys + 0.5 - - bboxes = torch.stack((l_xs, t_ys, r_xs, b_ys), dim=5) - bboxes = bboxes.view(batch, -1, 4) - bboxes = _gather_feat(bboxes, inds) - - clses = clses.contiguous().view(batch, -1, 1) - clses = _gather_feat(clses, inds).float() - - t_xs = t_xs.contiguous().view(batch, -1, 1) - t_xs = _gather_feat(t_xs, inds).float() - t_ys = t_ys.contiguous().view(batch, -1, 1) - t_ys = _gather_feat(t_ys, inds).float() - l_xs = l_xs.contiguous().view(batch, -1, 1) - l_xs = _gather_feat(l_xs, inds).float() - l_ys = l_ys.contiguous().view(batch, -1, 1) - l_ys = _gather_feat(l_ys, inds).float() - b_xs = b_xs.contiguous().view(batch, -1, 1) - b_xs = _gather_feat(b_xs, inds).float() - b_ys = 
b_ys.contiguous().view(batch, -1, 1) - b_ys = _gather_feat(b_ys, inds).float() - r_xs = r_xs.contiguous().view(batch, -1, 1) - r_xs = _gather_feat(r_xs, inds).float() - r_ys = r_ys.contiguous().view(batch, -1, 1) - r_ys = _gather_feat(r_ys, inds).float() - - - detections = torch.cat([bboxes, scores, t_xs, t_ys, l_xs, l_ys, - b_xs, b_ys, r_xs, r_ys, clses], dim=2) - - return detections - -def exct_decode( - t_heat, l_heat, b_heat, r_heat, ct_heat, - t_regr=None, l_regr=None, b_regr=None, r_regr=None, - K=40, scores_thresh=0.1, center_thresh=0.1, aggr_weight=0.0, num_dets=1000 -): - batch, cat, height, width = t_heat.size() - ''' - t_heat = torch.sigmoid(t_heat) - l_heat = torch.sigmoid(l_heat) - b_heat = torch.sigmoid(b_heat) - r_heat = torch.sigmoid(r_heat) - ct_heat = torch.sigmoid(ct_heat) - ''' - - if aggr_weight > 0: - t_heat = _h_aggregate(t_heat, aggr_weight=aggr_weight) - l_heat = _v_aggregate(l_heat, aggr_weight=aggr_weight) - b_heat = _h_aggregate(b_heat, aggr_weight=aggr_weight) - r_heat = _v_aggregate(r_heat, aggr_weight=aggr_weight) - - # perform nms on heatmaps - t_heat = _nms(t_heat) - l_heat = _nms(l_heat) - b_heat = _nms(b_heat) - r_heat = _nms(r_heat) - - t_heat[t_heat > 1] = 1 - l_heat[l_heat > 1] = 1 - b_heat[b_heat > 1] = 1 - r_heat[r_heat > 1] = 1 - - t_scores, t_inds, t_clses, t_ys, t_xs = _topk(t_heat, K=K) - l_scores, l_inds, l_clses, l_ys, l_xs = _topk(l_heat, K=K) - b_scores, b_inds, b_clses, b_ys, b_xs = _topk(b_heat, K=K) - r_scores, r_inds, r_clses, r_ys, r_xs = _topk(r_heat, K=K) - - t_ys = t_ys.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K) - t_xs = t_xs.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K) - l_ys = l_ys.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K) - l_xs = l_xs.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K) - b_ys = b_ys.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K) - b_xs = b_xs.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K) - r_ys = r_ys.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K) - r_xs = r_xs.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K) - - t_clses = t_clses.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K) - l_clses = l_clses.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K) - b_clses = b_clses.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K) - r_clses = r_clses.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K) - box_ct_xs = ((l_xs + r_xs + 0.5) / 2).long() - box_ct_ys = ((t_ys + b_ys + 0.5) / 2).long() - ct_inds = t_clses.long() * (height * width) + box_ct_ys * width + box_ct_xs - ct_inds = ct_inds.view(batch, -1) - ct_heat = ct_heat.view(batch, -1, 1) - ct_scores = _gather_feat(ct_heat, ct_inds) - - t_scores = t_scores.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K) - l_scores = l_scores.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K) - b_scores = b_scores.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K) - r_scores = r_scores.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K) - ct_scores = ct_scores.view(batch, K, K, K, K) - scores = (t_scores + l_scores + b_scores + r_scores + 2 * ct_scores) / 6 - - # reject boxes based on classes - cls_inds = (t_clses != l_clses) + (t_clses != b_clses) + \ - (t_clses != r_clses) - cls_inds = (cls_inds > 0) - - top_inds = (t_ys > l_ys) + (t_ys > b_ys) + (t_ys > r_ys) - top_inds = (top_inds > 0) - left_inds = (l_xs > t_xs) + (l_xs > b_xs) + (l_xs > r_xs) - left_inds = (left_inds > 0) - bottom_inds = (b_ys < t_ys) + (b_ys < l_ys) + (b_ys < r_ys) - bottom_inds = (bottom_inds > 0) - right_inds = (r_xs < t_xs) + (r_xs < l_xs) + (r_xs < b_xs) - right_inds = (right_inds > 0) - 
- sc_inds = (t_scores < scores_thresh) + (l_scores < scores_thresh) + \ - (b_scores < scores_thresh) + (r_scores < scores_thresh) + \ - (ct_scores < center_thresh) - sc_inds = (sc_inds > 0) - - scores = scores - sc_inds.float() - scores = scores - cls_inds.float() - scores = scores - top_inds.float() - scores = scores - left_inds.float() - scores = scores - bottom_inds.float() - scores = scores - right_inds.float() - - scores = scores.view(batch, -1) - scores, inds = torch.topk(scores, num_dets) - scores = scores.unsqueeze(2) - - if t_regr is not None and l_regr is not None \ - and b_regr is not None and r_regr is not None: - t_regr = _tranpose_and_gather_feat(t_regr, t_inds) - t_regr = t_regr.view(batch, K, 1, 1, 1, 2) - l_regr = _tranpose_and_gather_feat(l_regr, l_inds) - l_regr = l_regr.view(batch, 1, K, 1, 1, 2) - b_regr = _tranpose_and_gather_feat(b_regr, b_inds) - b_regr = b_regr.view(batch, 1, 1, K, 1, 2) - r_regr = _tranpose_and_gather_feat(r_regr, r_inds) - r_regr = r_regr.view(batch, 1, 1, 1, K, 2) - - t_xs = t_xs + t_regr[..., 0] - t_ys = t_ys + t_regr[..., 1] - l_xs = l_xs + l_regr[..., 0] - l_ys = l_ys + l_regr[..., 1] - b_xs = b_xs + b_regr[..., 0] - b_ys = b_ys + b_regr[..., 1] - r_xs = r_xs + r_regr[..., 0] - r_ys = r_ys + r_regr[..., 1] - else: - t_xs = t_xs + 0.5 - t_ys = t_ys + 0.5 - l_xs = l_xs + 0.5 - l_ys = l_ys + 0.5 - b_xs = b_xs + 0.5 - b_ys = b_ys + 0.5 - r_xs = r_xs + 0.5 - r_ys = r_ys + 0.5 - - bboxes = torch.stack((l_xs, t_ys, r_xs, b_ys), dim=5) - bboxes = bboxes.view(batch, -1, 4) - bboxes = _gather_feat(bboxes, inds) - - clses = t_clses.contiguous().view(batch, -1, 1) - clses = _gather_feat(clses, inds).float() - - t_xs = t_xs.contiguous().view(batch, -1, 1) - t_xs = _gather_feat(t_xs, inds).float() - t_ys = t_ys.contiguous().view(batch, -1, 1) - t_ys = _gather_feat(t_ys, inds).float() - l_xs = l_xs.contiguous().view(batch, -1, 1) - l_xs = _gather_feat(l_xs, inds).float() - l_ys = l_ys.contiguous().view(batch, -1, 1) - l_ys = _gather_feat(l_ys, inds).float() - b_xs = b_xs.contiguous().view(batch, -1, 1) - b_xs = _gather_feat(b_xs, inds).float() - b_ys = b_ys.contiguous().view(batch, -1, 1) - b_ys = _gather_feat(b_ys, inds).float() - r_xs = r_xs.contiguous().view(batch, -1, 1) - r_xs = _gather_feat(r_xs, inds).float() - r_ys = r_ys.contiguous().view(batch, -1, 1) - r_ys = _gather_feat(r_ys, inds).float() - - - detections = torch.cat([bboxes, scores, t_xs, t_ys, l_xs, l_ys, - b_xs, b_ys, r_xs, r_ys, clses], dim=2) - - - return detections - -def ddd_decode(heat, rot, depth, dim, wh=None, reg=None, K=40): - batch, cat, height, width = heat.size() - # heat = torch.sigmoid(heat) - # perform nms on heatmaps - heat = _nms(heat) - - scores, inds, clses, ys, xs = _topk(heat, K=K) - if reg is not None: - reg = _tranpose_and_gather_feat(reg, inds) - reg = reg.view(batch, K, 2) - xs = xs.view(batch, K, 1) + reg[:, :, 0:1] - ys = ys.view(batch, K, 1) + reg[:, :, 1:2] - else: - xs = xs.view(batch, K, 1) + 0.5 - ys = ys.view(batch, K, 1) + 0.5 - - rot = _tranpose_and_gather_feat(rot, inds) - rot = rot.view(batch, K, 8) - depth = _tranpose_and_gather_feat(depth, inds) - depth = depth.view(batch, K, 1) - dim = _tranpose_and_gather_feat(dim, inds) - dim = dim.view(batch, K, 3) - clses = clses.view(batch, K, 1).float() - scores = scores.view(batch, K, 1) - xs = xs.view(batch, K, 1) - ys = ys.view(batch, K, 1) - - if wh is not None: - wh = _tranpose_and_gather_feat(wh, inds) - wh = wh.view(batch, K, 2) - detections = torch.cat( - [xs, ys, scores, rot, depth, dim, wh, clses], 
dim=2) - else: - detections = torch.cat( - [xs, ys, scores, rot, depth, dim, clses], dim=2) - - return detections - -def ctdet_decode(heat, wh, reg=None, cat_spec_wh=False, K=100): - batch, cat, height, width = heat.size() - - # heat = torch.sigmoid(heat) - # perform nms on heatmaps - heat = _nms(heat) - - scores, inds, clses, ys, xs = _topk(heat, K=K) - if reg is not None: - reg = _tranpose_and_gather_feat(reg, inds) - reg = reg.view(batch, K, 2) - xs = xs.view(batch, K, 1) + reg[:, :, 0:1] - ys = ys.view(batch, K, 1) + reg[:, :, 1:2] - else: - xs = xs.view(batch, K, 1) + 0.5 - ys = ys.view(batch, K, 1) + 0.5 - wh = _tranpose_and_gather_feat(wh, inds) - if cat_spec_wh: - wh = wh.view(batch, K, cat, 2) - clses_ind = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2).long() - wh = wh.gather(2, clses_ind).view(batch, K, 2) - else: - wh = wh.view(batch, K, 2) - clses = clses.view(batch, K, 1).float() - scores = scores.view(batch, K, 1) - bboxes = torch.cat([xs - wh[..., 0:1] / 2, - ys - wh[..., 1:2] / 2, - xs + wh[..., 0:1] / 2, - ys + wh[..., 1:2] / 2], dim=2) - detections = torch.cat([bboxes, scores, clses], dim=2) - - return detections - -def multi_pose_decode( - heat, wh, kps, reg=None, hm_hp=None, hp_offset=None, K=100): - batch, cat, height, width = heat.size() - num_joints = kps.shape[1] // 2 - # heat = torch.sigmoid(heat) - # perform nms on heatmaps - heat = _nms(heat) - scores, inds, clses, ys, xs = _topk(heat, K=K) - - kps = _tranpose_and_gather_feat(kps, inds) - kps = kps.view(batch, K, num_joints * 2) - kps[..., ::2] += xs.view(batch, K, 1).expand(batch, K, num_joints) - kps[..., 1::2] += ys.view(batch, K, 1).expand(batch, K, num_joints) - if reg is not None: - reg = _tranpose_and_gather_feat(reg, inds) - reg = reg.view(batch, K, 2) - xs = xs.view(batch, K, 1) + reg[:, :, 0:1] - ys = ys.view(batch, K, 1) + reg[:, :, 1:2] - else: - xs = xs.view(batch, K, 1) + 0.5 - ys = ys.view(batch, K, 1) + 0.5 - wh = _tranpose_and_gather_feat(wh, inds) - wh = wh.view(batch, K, 2) - clses = clses.view(batch, K, 1).float() - scores = scores.view(batch, K, 1) - - bboxes = torch.cat([xs - wh[..., 0:1] / 2, - ys - wh[..., 1:2] / 2, - xs + wh[..., 0:1] / 2, - ys + wh[..., 1:2] / 2], dim=2) - if hm_hp is not None: - hm_hp = _nms(hm_hp) - thresh = 0.1 - kps = kps.view(batch, K, num_joints, 2).permute( - 0, 2, 1, 3).contiguous() # b x J x K x 2 - reg_kps = kps.unsqueeze(3).expand(batch, num_joints, K, K, 2) - hm_score, hm_inds, hm_ys, hm_xs = _topk_channel(hm_hp, K=K) # b x J x K - if hp_offset is not None: - hp_offset = _tranpose_and_gather_feat( - hp_offset, hm_inds.view(batch, -1)) - hp_offset = hp_offset.view(batch, num_joints, K, 2) - hm_xs = hm_xs + hp_offset[:, :, :, 0] - hm_ys = hm_ys + hp_offset[:, :, :, 1] - else: - hm_xs = hm_xs + 0.5 - hm_ys = hm_ys + 0.5 - - mask = (hm_score > thresh).float() - hm_score = (1 - mask) * -1 + mask * hm_score - hm_ys = (1 - mask) * (-10000) + mask * hm_ys - hm_xs = (1 - mask) * (-10000) + mask * hm_xs - hm_kps = torch.stack([hm_xs, hm_ys], dim=-1).unsqueeze( - 2).expand(batch, num_joints, K, K, 2) - dist = (((reg_kps - hm_kps) ** 2).sum(dim=4) ** 0.5) - min_dist, min_ind = dist.min(dim=3) # b x J x K - hm_score = hm_score.gather(2, min_ind).unsqueeze(-1) # b x J x K x 1 - min_dist = min_dist.unsqueeze(-1) - min_ind = min_ind.view(batch, num_joints, K, 1, 1).expand( - batch, num_joints, K, 1, 2) - hm_kps = hm_kps.gather(3, min_ind) - hm_kps = hm_kps.view(batch, num_joints, K, 2) - l = bboxes[:, :, 0].view(batch, 1, K, 1).expand(batch, num_joints, K, 1) - t = 
bboxes[:, :, 1].view(batch, 1, K, 1).expand(batch, num_joints, K, 1) - r = bboxes[:, :, 2].view(batch, 1, K, 1).expand(batch, num_joints, K, 1) - b = bboxes[:, :, 3].view(batch, 1, K, 1).expand(batch, num_joints, K, 1) - mask = (hm_kps[..., 0:1] < l) + (hm_kps[..., 0:1] > r) + \ - (hm_kps[..., 1:2] < t) + (hm_kps[..., 1:2] > b) + \ - (hm_score < thresh) + (min_dist > (torch.max(b - t, r - l) * 0.3)) - mask = (mask > 0).float().expand(batch, num_joints, K, 2) - kps = (1 - mask) * hm_kps + mask * kps - kps = kps.permute(0, 2, 1, 3).contiguous().view( - batch, K, num_joints * 2) - detections = torch.cat([bboxes, scores, kps, clses], dim=2) - - return detections \ No newline at end of file diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/losses.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/losses.py deleted file mode 100644 index 757e1b1..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/losses.py +++ /dev/null @@ -1,231 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import torch -import torch.nn as nn -from .utils import _tranpose_and_gather_feat -import torch.nn.functional as F - - -def _slow_neg_loss(pred, gt): - '''focal loss from CornerNet''' - pos_inds = gt.eq(1) - neg_inds = gt.lt(1) - - neg_weights = torch.pow(1 - gt[neg_inds], 4) - - loss = 0 - pos_pred = pred[pos_inds] - neg_pred = pred[neg_inds] - - pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2) - neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2) * neg_weights - - num_pos = pos_inds.float().sum() - pos_loss = pos_loss.sum() - neg_loss = neg_loss.sum() - - if pos_pred.nelement() == 0: - loss = loss - neg_loss - else: - loss = loss - (pos_loss + neg_loss) / num_pos - return loss - - -def _neg_loss(pred, gt): - ''' Modified focal loss. Exactly the same as CornerNet. 
- Runs faster and costs a little bit more memory - Arguments: - pred (batch x c x h x w) - gt_regr (batch x c x h x w) - ''' - pos_inds = gt.eq(1).float() - neg_inds = gt.lt(1).float() - - neg_weights = torch.pow(1 - gt, 4) - - loss = 0 - - pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds - neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds - - num_pos = pos_inds.float().sum() - pos_loss = pos_loss.sum() - neg_loss = neg_loss.sum() - - if num_pos == 0: - loss = loss - neg_loss - else: - loss = loss - (pos_loss + neg_loss) / num_pos - return loss - -def _not_faster_neg_loss(pred, gt): - pos_inds = gt.eq(1).float() - neg_inds = gt.lt(1).float() - num_pos = pos_inds.float().sum() - neg_weights = torch.pow(1 - gt, 4) - - loss = 0 - trans_pred = pred * neg_inds + (1 - pred) * pos_inds - weight = neg_weights * neg_inds + pos_inds - all_loss = torch.log(1 - trans_pred) * torch.pow(trans_pred, 2) * weight - all_loss = all_loss.sum() - - if num_pos > 0: - all_loss /= num_pos - loss -= all_loss - return loss - -def _slow_reg_loss(regr, gt_regr, mask): - num = mask.float().sum() - mask = mask.unsqueeze(2).expand_as(gt_regr) - - regr = regr[mask] - gt_regr = gt_regr[mask] - - regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False) - regr_loss = regr_loss / (num + 1e-4) - return regr_loss - -def _reg_loss(regr, gt_regr, mask): - ''' L1 regression loss - Arguments: - regr (batch x max_objects x dim) - gt_regr (batch x max_objects x dim) - mask (batch x max_objects) - ''' - num = mask.float().sum() - mask = mask.unsqueeze(2).expand_as(gt_regr).float() - - regr = regr * mask - gt_regr = gt_regr * mask - - regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False) - regr_loss = regr_loss / (num + 1e-4) - return regr_loss - -class FocalLoss(nn.Module): - '''nn.Module warpper for focal loss''' - def __init__(self): - super(FocalLoss, self).__init__() - self.neg_loss = _neg_loss - - def forward(self, out, target): - return self.neg_loss(out, target) - -class RegLoss(nn.Module): - '''Regression loss for an output tensor - Arguments: - output (batch x dim x h x w) - mask (batch x max_objects) - ind (batch x max_objects) - target (batch x max_objects x dim) - ''' - def __init__(self): - super(RegLoss, self).__init__() - - def forward(self, output, mask, ind, target): - pred = _tranpose_and_gather_feat(output, ind) - loss = _reg_loss(pred, target, mask) - return loss - -class RegL1Loss(nn.Module): - def __init__(self): - super(RegL1Loss, self).__init__() - - def forward(self, output, mask, ind, target): - pred = _tranpose_and_gather_feat(output, ind) - mask = mask.unsqueeze(2).expand_as(pred).float() - # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean') - loss = F.l1_loss(pred * mask, target * mask, size_average=False) - loss = loss / (mask.sum() + 1e-4) - return loss - -class NormRegL1Loss(nn.Module): - def __init__(self): - super(NormRegL1Loss, self).__init__() - - def forward(self, output, mask, ind, target): - pred = _tranpose_and_gather_feat(output, ind) - mask = mask.unsqueeze(2).expand_as(pred).float() - # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean') - pred = pred / (target + 1e-4) - target = target * 0 + 1 - loss = F.l1_loss(pred * mask, target * mask, size_average=False) - loss = loss / (mask.sum() + 1e-4) - return loss - -class RegWeightedL1Loss(nn.Module): - def __init__(self): - super(RegWeightedL1Loss, self).__init__() - - def forward(self, output, mask, ind, target): - 
pred = _tranpose_and_gather_feat(output, ind) - mask = mask.float() - # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean') - loss = F.l1_loss(pred * mask, target * mask, size_average=False) - loss = loss / (mask.sum() + 1e-4) - return loss - -class L1Loss(nn.Module): - def __init__(self): - super(L1Loss, self).__init__() - - def forward(self, output, mask, ind, target): - pred = _tranpose_and_gather_feat(output, ind) - mask = mask.unsqueeze(2).expand_as(pred).float() - loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean') - return loss - -class BinRotLoss(nn.Module): - def __init__(self): - super(BinRotLoss, self).__init__() - - def forward(self, output, mask, ind, rotbin, rotres): - pred = _tranpose_and_gather_feat(output, ind) - loss = compute_rot_loss(pred, rotbin, rotres, mask) - return loss - -def compute_res_loss(output, target): - return F.smooth_l1_loss(output, target, reduction='elementwise_mean') - -# TODO: weight -def compute_bin_loss(output, target, mask): - mask = mask.expand_as(output) - output = output * mask.float() - return F.cross_entropy(output, target, reduction='elementwise_mean') - -def compute_rot_loss(output, target_bin, target_res, mask): - # output: (B, 128, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos, - # bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos] - # target_bin: (B, 128, 2) [bin1_cls, bin2_cls] - # target_res: (B, 128, 2) [bin1_res, bin2_res] - # mask: (B, 128, 1) - # import pdb; pdb.set_trace() - output = output.view(-1, 8) - target_bin = target_bin.view(-1, 2) - target_res = target_res.view(-1, 2) - mask = mask.view(-1, 1) - loss_bin1 = compute_bin_loss(output[:, 0:2], target_bin[:, 0], mask) - loss_bin2 = compute_bin_loss(output[:, 4:6], target_bin[:, 1], mask) - loss_res = torch.zeros_like(loss_bin1) - if target_bin[:, 0].nonzero().shape[0] > 0: - idx1 = target_bin[:, 0].nonzero()[:, 0] - valid_output1 = torch.index_select(output, 0, idx1.long()) - valid_target_res1 = torch.index_select(target_res, 0, idx1.long()) - loss_sin1 = compute_res_loss( - valid_output1[:, 2], torch.sin(valid_target_res1[:, 0])) - loss_cos1 = compute_res_loss( - valid_output1[:, 3], torch.cos(valid_target_res1[:, 0])) - loss_res += loss_sin1 + loss_cos1 - if target_bin[:, 1].nonzero().shape[0] > 0: - idx2 = target_bin[:, 1].nonzero()[:, 0] - valid_output2 = torch.index_select(output, 0, idx2.long()) - valid_target_res2 = torch.index_select(target_res, 0, idx2.long()) - loss_sin2 = compute_res_loss( - valid_output2[:, 6], torch.sin(valid_target_res2[:, 1])) - loss_cos2 = compute_res_loss( - valid_output2[:, 7], torch.cos(valid_target_res2[:, 1])) - loss_res += loss_sin2 + loss_cos2 - return loss_bin1 + loss_bin2 + loss_res diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/model.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/model.py deleted file mode 100644 index f65427a..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/model.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import torchvision.models as models -import torch -import torch.nn as nn -import os - -from .networks.msra_resnet import get_pose_net -from .networks.dlav0 import get_pose_net as get_dlav0 -from .networks.pose_dla_dcn import get_pose_net as get_dla_dcn -from .networks.resnet_dcn import get_pose_net as get_pose_net_dcn -from .networks.large_hourglass import get_large_hourglass_net - 
-_model_factory = { - 'res': get_pose_net, # default Resnet with deconv - 'dlav0': get_dlav0, # default DLAup - 'dla': get_dla_dcn, - 'resdcn': get_pose_net_dcn, - 'hourglass': get_large_hourglass_net, -} - -def create_model(arch, heads, head_conv): - num_layers = int(arch[arch.find('_') + 1:]) if '_' in arch else 0 - arch = arch[:arch.find('_')] if '_' in arch else arch - get_model = _model_factory[arch] - model = get_model(num_layers=num_layers, heads=heads, head_conv=head_conv) - return model - -def load_model(model, model_path, optimizer=None, resume=False, - lr=None, lr_step=None): - start_epoch = 0 - checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage) - print('loaded {}, epoch {}'.format(model_path, checkpoint['epoch'])) - state_dict_ = checkpoint['state_dict'] - state_dict = {} - - # convert data_parallal to model - for k in state_dict_: - if k.startswith('module') and not k.startswith('module_list'): - state_dict[k[7:]] = state_dict_[k] - else: - state_dict[k] = state_dict_[k] - model_state_dict = model.state_dict() - - # check loaded parameters and created model parameters - for k in state_dict: - if k in model_state_dict: - if state_dict[k].shape != model_state_dict[k].shape: - print('Skip loading parameter {}, required shape{}, '\ - 'loaded shape{}.'.format(k, model_state_dict[k].shape, state_dict[k].shape)) - state_dict[k] = model_state_dict[k] - else: - print('Drop parameter {}.'.format(k)) - for k in model_state_dict: - if not (k in state_dict): - print('No param {}.'.format(k)) - state_dict[k] = model_state_dict[k] - model.load_state_dict(state_dict, strict=False) - - # print(model) - # save compute gragh and weight test - # torch.save(model,"/data/wangchongjin/object_detect/CenterNet/exp/ctdet/test.pth") - # resume optimizer parameters - if optimizer is not None and resume: - if 'optimizer' in checkpoint: - optimizer.load_state_dict(checkpoint['optimizer']) - start_epoch = checkpoint['epoch'] - start_lr = lr - for step in lr_step: - if start_epoch >= step: - start_lr *= 0.1 - for param_group in optimizer.param_groups: - param_group['lr'] = start_lr - print('Resumed optimizer with start lr', start_lr) - else: - print('No optimizer parameters in checkpoint.') - if optimizer is not None: - return model, optimizer, start_epoch - else: - return model - -def save_model(path, epoch, model, optimizer=None): - if isinstance(model, torch.nn.DataParallel): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - data = {'epoch': epoch, - 'state_dict': state_dict} - if not (optimizer is None): - data['optimizer'] = optimizer.state_dict() - torch.save(data, path) - diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/__init__.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/dcn_v2.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/dcn_v2.py deleted file mode 100644 index 982bef5..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/dcn_v2.py +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/env python -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - -import math -import torch -from torch import nn -from torch.autograd import Function -from torch.nn.modules.utils import _pair -from 
torch.autograd.function import once_differentiable - -import _ext as _backend - - -class _DCNv2(Function): - @staticmethod - def forward(ctx, input, offset, mask, weight, bias, - stride, padding, dilation, deformable_groups): - ctx.stride = _pair(stride) - ctx.padding = _pair(padding) - ctx.dilation = _pair(dilation) - ctx.kernel_size = _pair(weight.shape[2:4]) - ctx.deformable_groups = deformable_groups - output = _backend.dcn_v2_forward(input, weight, bias, - offset, mask, - ctx.kernel_size[0], ctx.kernel_size[1], - ctx.stride[0], ctx.stride[1], - ctx.padding[0], ctx.padding[1], - ctx.dilation[0], ctx.dilation[1], - ctx.deformable_groups) - ctx.save_for_backward(input, offset, mask, weight, bias) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input, offset, mask, weight, bias = ctx.saved_tensors - grad_input, grad_offset, grad_mask, grad_weight, grad_bias = \ - _backend.dcn_v2_backward(input, weight, - bias, - offset, mask, - grad_output, - ctx.kernel_size[0], ctx.kernel_size[1], - ctx.stride[0], ctx.stride[1], - ctx.padding[0], ctx.padding[1], - ctx.dilation[0], ctx.dilation[1], - ctx.deformable_groups) - - return grad_input, grad_offset, grad_mask, grad_weight, grad_bias,\ - None, None, None, None, - - -dcn_v2_conv = _DCNv2.apply - - -class DCNv2(nn.Module): - - def __init__(self, in_channels, out_channels, - kernel_size, stride, padding, dilation=1, deformable_groups=1): - super(DCNv2, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = _pair(kernel_size) - self.stride = _pair(stride) - self.padding = _pair(padding) - self.dilation = _pair(dilation) - self.deformable_groups = deformable_groups - - self.weight = nn.Parameter(torch.Tensor( - out_channels, in_channels, *self.kernel_size)) - self.bias = nn.Parameter(torch.Tensor(out_channels)) - self.reset_parameters() - - def reset_parameters(self): - n = self.in_channels - for k in self.kernel_size: - n *= k - stdv = 1. 
/ math.sqrt(n) - self.weight.data.uniform_(-stdv, stdv) - self.bias.data.zero_() - - def forward(self, input, offset, mask): - assert 2 * self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \ - offset.shape[1] - assert self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \ - mask.shape[1] - return dcn_v2_conv(input, offset, mask, - self.weight, - self.bias, - self.stride, - self.padding, - self.dilation, - self.deformable_groups) - - -class DCN(DCNv2): - - def __init__(self, in_channels, out_channels, - kernel_size, stride, padding, - dilation=1, deformable_groups=1): - super(DCN, self).__init__(in_channels, out_channels, - kernel_size, stride, padding, dilation, deformable_groups) - - channels_ = self.deformable_groups * 3 * self.kernel_size[0] * self.kernel_size[1] - self.conv_offset_mask = nn.Conv2d(self.in_channels, - channels_, - kernel_size=self.kernel_size, - stride=self.stride, - padding=self.padding, - bias=True) - self.init_offset() - - def init_offset(self): - self.conv_offset_mask.weight.data.zero_() - self.conv_offset_mask.bias.data.zero_() - - def forward(self, input): - out = self.conv_offset_mask(input) - o1, o2, mask = torch.chunk(out, 3, dim=1) - offset = torch.cat((o1, o2), dim=1) - mask = torch.sigmoid(mask) - return dcn_v2_conv(input, offset, mask, - self.weight, self.bias, - self.stride, - self.padding, - self.dilation, - self.deformable_groups) - - - -class _DCNv2Pooling(Function): - @staticmethod - def forward(ctx, input, rois, offset, - spatial_scale, - pooled_size, - output_dim, - no_trans, - group_size=1, - part_size=None, - sample_per_part=4, - trans_std=.0): - ctx.spatial_scale = spatial_scale - ctx.no_trans = int(no_trans) - ctx.output_dim = output_dim - ctx.group_size = group_size - ctx.pooled_size = pooled_size - ctx.part_size = pooled_size if part_size is None else part_size - ctx.sample_per_part = sample_per_part - ctx.trans_std = trans_std - - output, output_count = \ - _backend.dcn_v2_psroi_pooling_forward(input, rois, offset, - ctx.no_trans, ctx.spatial_scale, - ctx.output_dim, ctx.group_size, - ctx.pooled_size, ctx.part_size, - ctx.sample_per_part, ctx.trans_std) - ctx.save_for_backward(input, rois, offset, output_count) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input, rois, offset, output_count = ctx.saved_tensors - grad_input, grad_offset = \ - _backend.dcn_v2_psroi_pooling_backward(grad_output, - input, - rois, - offset, - output_count, - ctx.no_trans, - ctx.spatial_scale, - ctx.output_dim, - ctx.group_size, - ctx.pooled_size, - ctx.part_size, - ctx.sample_per_part, - ctx.trans_std) - - return grad_input, None, grad_offset, \ - None, None, None, None, None, None, None, None - - -dcn_v2_pooling = _DCNv2Pooling.apply - - -class DCNv2Pooling(nn.Module): - - def __init__(self, - spatial_scale, - pooled_size, - output_dim, - no_trans, - group_size=1, - part_size=None, - sample_per_part=4, - trans_std=.0): - super(DCNv2Pooling, self).__init__() - self.spatial_scale = spatial_scale - self.pooled_size = pooled_size - self.output_dim = output_dim - self.no_trans = no_trans - self.group_size = group_size - self.part_size = pooled_size if part_size is None else part_size - self.sample_per_part = sample_per_part - self.trans_std = trans_std - - def forward(self, input, rois, offset): - assert input.shape[1] == self.output_dim - if self.no_trans: - offset = input.new() - return dcn_v2_pooling(input, rois, offset, - self.spatial_scale, - self.pooled_size, - self.output_dim, - 
self.no_trans, - self.group_size, - self.part_size, - self.sample_per_part, - self.trans_std) - - -class DCNPooling(DCNv2Pooling): - - def __init__(self, - spatial_scale, - pooled_size, - output_dim, - no_trans, - group_size=1, - part_size=None, - sample_per_part=4, - trans_std=.0, - deform_fc_dim=1024): - super(DCNPooling, self).__init__(spatial_scale, - pooled_size, - output_dim, - no_trans, - group_size, - part_size, - sample_per_part, - trans_std) - - self.deform_fc_dim = deform_fc_dim - - if not no_trans: - self.offset_mask_fc = nn.Sequential( - nn.Linear(self.pooled_size * self.pooled_size * - self.output_dim, self.deform_fc_dim), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_dim, self.deform_fc_dim), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_dim, self.pooled_size * - self.pooled_size * 3) - ) - self.offset_mask_fc[4].weight.data.zero_() - self.offset_mask_fc[4].bias.data.zero_() - - def forward(self, input, rois): - offset = input.new() - - if not self.no_trans: - - # do roi_align first - n = rois.shape[0] - roi = dcn_v2_pooling(input, rois, offset, - self.spatial_scale, - self.pooled_size, - self.output_dim, - True, # no trans - self.group_size, - self.part_size, - self.sample_per_part, - self.trans_std) - - # build mask and offset - offset_mask = self.offset_mask_fc(roi.view(n, -1)) - offset_mask = offset_mask.view( - n, 3, self.pooled_size, self.pooled_size) - o1, o2, mask = torch.chunk(offset_mask, 3, dim=1) - offset = torch.cat((o1, o2), dim=1) - mask = torch.sigmoid(mask) - - # do pooling with offset and mask - return dcn_v2_pooling(input, rois, offset, - self.spatial_scale, - self.pooled_size, - self.output_dim, - self.no_trans, - self.group_size, - self.part_size, - self.sample_per_part, - self.trans_std) * mask - # only roi_align - return dcn_v2_pooling(input, rois, offset, - self.spatial_scale, - self.pooled_size, - self.output_dim, - self.no_trans, - self.group_size, - self.part_size, - self.sample_per_part, - self.trans_std) diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/dist/DCNv2-0.1-py3.6-linux-x86_64.egg b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/dist/DCNv2-0.1-py3.6-linux-x86_64.egg deleted file mode 100644 index 652d0b8..0000000 Binary files a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/dist/DCNv2-0.1-py3.6-linux-x86_64.egg and /dev/null differ diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/dist/DCNv2-0.1-py3.7-linux-x86_64.egg b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/dist/DCNv2-0.1-py3.7-linux-x86_64.egg deleted file mode 100644 index abb0b86..0000000 Binary files a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/dist/DCNv2-0.1-py3.7-linux-x86_64.egg and /dev/null differ diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/make.sh b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/make.sh deleted file mode 100644 index f1f15c0..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/make.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/usr/bin/env bash -python setup.py build develop diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/setup.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/setup.py deleted file mode 100644 index 1082494..0000000 --- 
a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/setup.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import glob
-
-import torch
-
-from torch.utils.cpp_extension import CUDA_HOME
-from torch.utils.cpp_extension import CppExtension
-from torch.utils.cpp_extension import CUDAExtension
-
-from setuptools import find_packages
-from setuptools import setup
-
-requirements = ["torch", "torchvision"]
-
-def get_extensions():
-    this_dir = os.path.dirname(os.path.abspath(__file__))
-    extensions_dir = os.path.join(this_dir, "src")
-
-    main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
-    source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
-    source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
-
-    sources = main_file + source_cpu
-    extension = CppExtension
-    extra_compile_args = {"cxx": []}
-    define_macros = []
-
-    if torch.cuda.is_available() and CUDA_HOME is not None:
-        extension = CUDAExtension
-        sources += source_cuda
-        define_macros += [("WITH_CUDA", None)]
-        extra_compile_args["nvcc"] = [
-            "-DCUDA_HAS_FP16=1",
-            "-D__CUDA_NO_HALF_OPERATORS__",
-            "-D__CUDA_NO_HALF_CONVERSIONS__",
-            "-D__CUDA_NO_HALF2_OPERATORS__",
-        ]
-    else:
-        raise NotImplementedError('Cuda is not availabel')
-
-    sources = [os.path.join(extensions_dir, s) for s in sources]
-    include_dirs = [extensions_dir]
-    ext_modules = [
-        extension(
-            "_ext",
-            sources,
-            include_dirs=include_dirs,
-            define_macros=define_macros,
-            extra_compile_args=extra_compile_args,
-        )
-    ]
-    return ext_modules
-
-setup(
-    name="DCNv2",
-    version="0.1",
-    author="charlesshang",
-    url="https://github.com/charlesshang/DCNv2",
-    description="deformable convolutional networks",
-    packages=find_packages(exclude=("configs", "tests",)),
-    # install_requires=requirements,
-    ext_modules=get_extensions(),
-    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
-)
\ No newline at end of file
diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cpu/dcn_v2_cpu.cpp b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cpu/dcn_v2_cpu.cpp
deleted file mode 100644
index a68ccef..0000000
--- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cpu/dcn_v2_cpu.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-#include <vector>
-
-#include <ATen/ATen.h>
-#include <ATen/cuda/CUDAContext.h>
-
-
-at::Tensor
-dcn_v2_cpu_forward(const at::Tensor &input,
-                   const at::Tensor &weight,
-                   const at::Tensor &bias,
-                   const at::Tensor &offset,
-                   const at::Tensor &mask,
-                   const int kernel_h,
-                   const int kernel_w,
-                   const int stride_h,
-                   const int stride_w,
-                   const int pad_h,
-                   const int pad_w,
-                   const int dilation_h,
-                   const int dilation_w,
-                   const int deformable_group)
-{
-    AT_ERROR("Not implement on cpu");
-}
-
-std::vector<at::Tensor>
-dcn_v2_cpu_backward(const at::Tensor &input,
-                    const at::Tensor &weight,
-                    const at::Tensor &bias,
-                    const at::Tensor &offset,
-                    const at::Tensor &mask,
-                    const at::Tensor &grad_output,
-                    int kernel_h, int kernel_w,
-                    int stride_h, int stride_w,
-                    int pad_h, int pad_w,
-                    int dilation_h, int dilation_w,
-                    int deformable_group)
-{
-    AT_ERROR("Not implement on cpu");
-}
-
-std::tuple<at::Tensor, at::Tensor>
-dcn_v2_psroi_pooling_cpu_forward(const at::Tensor &input,
-                                 const at::Tensor &bbox,
-                                 const at::Tensor &trans,
-                                 const int no_trans,
-                                 const float spatial_scale,
-                                 const int output_dim,
-                                 const int group_size,
-                                 const int pooled_size,
-                                 const int part_size,
-                                 const int sample_per_part,
-                                 const float trans_std)
-{
-    AT_ERROR("Not implement on cpu");
-}
-
-std::tuple<at::Tensor, at::Tensor>
-dcn_v2_psroi_pooling_cpu_backward(const at::Tensor &out_grad,
-                                  const at::Tensor &input,
-                                  const at::Tensor &bbox,
-                                  const at::Tensor &trans,
-                                  const at::Tensor &top_count,
-                                  const int no_trans,
-                                  const float spatial_scale,
-                                  const int output_dim,
-                                  const int group_size,
-                                  const int pooled_size,
-                                  const int part_size,
-                                  const int sample_per_part,
-                                  const float trans_std)
-{
-    AT_ERROR("Not implement on cpu");
-}
\ No newline at end of file
diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cpu/vision.h b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cpu/vision.h
deleted file mode 100644
index d5fbf1f..0000000
--- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cpu/vision.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#pragma once
-#include <torch/extension.h>
-
-at::Tensor
-dcn_v2_cpu_forward(const at::Tensor &input,
-                   const at::Tensor &weight,
-                   const at::Tensor &bias,
-                   const at::Tensor &offset,
-                   const at::Tensor &mask,
-                   const int kernel_h,
-                   const int kernel_w,
-                   const int stride_h,
-                   const int stride_w,
-                   const int pad_h,
-                   const int pad_w,
-                   const int dilation_h,
-                   const int dilation_w,
-                   const int deformable_group);
-
-std::vector<at::Tensor>
-dcn_v2_cpu_backward(const at::Tensor &input,
-                    const at::Tensor &weight,
-                    const at::Tensor &bias,
-                    const at::Tensor &offset,
-                    const at::Tensor &mask,
-                    const at::Tensor &grad_output,
-                    int kernel_h, int kernel_w,
-                    int stride_h, int stride_w,
-                    int pad_h, int pad_w,
-                    int dilation_h, int dilation_w,
-                    int deformable_group);
-
-
-std::tuple<at::Tensor, at::Tensor>
-dcn_v2_psroi_pooling_cpu_forward(const at::Tensor &input,
-                                 const at::Tensor &bbox,
-                                 const at::Tensor &trans,
-                                 const int no_trans,
-                                 const float spatial_scale,
-                                 const int output_dim,
-                                 const int group_size,
-                                 const int pooled_size,
-                                 const int part_size,
-                                 const int sample_per_part,
-                                 const float trans_std);
-
-std::tuple<at::Tensor, at::Tensor>
-dcn_v2_psroi_pooling_cpu_backward(const at::Tensor &out_grad,
-                                  const at::Tensor &input,
-                                  const at::Tensor &bbox,
-                                  const at::Tensor &trans,
-                                  const at::Tensor &top_count,
-                                  const int no_trans,
-                                  const float spatial_scale,
-                                  const int output_dim,
-                                  const int group_size,
-                                  const int pooled_size,
-                                  const int part_size,
-                                  const int sample_per_part,
-                                  const float trans_std);
\ No newline at end of file
diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/dcn_v2_cuda.cu b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/dcn_v2_cuda.cu
deleted file mode 100644
index 767ed8f..0000000
--- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/dcn_v2_cuda.cu
+++ /dev/null
@@ -1,335 +0,0 @@
-#include <vector>
-#include "cuda/dcn_v2_im2col_cuda.h"
-
-#include <ATen/ATen.h>
-#include <ATen/cuda/CUDAContext.h>
-
-#include <THC/THC.h>
-#include <THC/THCAtomics.cuh>
-#include <THC/THCDeviceUtils.cuh>
-
-extern THCState *state;
-
-// author: Charles Shang
-// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
-
-// [batch gemm]
-// https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu
-
-__global__ void createBatchGemmBuffer(const float **input_b, float **output_b,
-                                      float **columns_b, const float **ones_b,
-                                      const float **weight_b, const float **bias_b,
-                                      float *input, float *output,
-                                      float *columns, float *ones,
-                                      float *weight, float *bias,
-                                      const int input_stride, const int output_stride,
-                                      const int columns_stride, const int ones_stride,
-                                      const int num_batches)
-{
-    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
-    if (idx < num_batches)
-    {
-        input_b[idx] = input + idx * input_stride;
-        output_b[idx] = output + idx * output_stride;
-        columns_b[idx] = columns + idx * columns_stride;
-        ones_b[idx] = ones + idx * ones_stride;
-        // share weights and bias within a Mini-Batch
-        weight_b[idx] = weight;
-        bias_b[idx] = bias;
-    }
-}
-
-at::Tensor
-dcn_v2_cuda_forward(const at::Tensor &input,
-                    const at::Tensor &weight,
-                    const at::Tensor &bias,
-                    const at::Tensor &offset,
-                    const at::Tensor &mask,
-                    const int kernel_h,
-                    const int kernel_w,
-                    const int stride_h,
-                    const int stride_w,
-                    const int pad_h,
-                    const int pad_w,
-                    const int dilation_h,
-                    const int dilation_w,
-                    const int deformable_group)
-{
-    using scalar_t = float;
-    // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
-    AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
-    AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
-    AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
-    AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
-    AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
-
-    const int batch = input.size(0);
-    const int channels = input.size(1);
-    const int height = input.size(2);
-    const int width = input.size(3);
-
-    const int channels_out = weight.size(0);
-    const int channels_kernel = weight.size(1);
-    const int kernel_h_ = weight.size(2);
-    const int kernel_w_ = weight.size(3);
-
-    // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
-    // printf("Channels: %d %d\n", channels, channels_kernel);
-    // printf("Channels: %d %d\n", channels_out, channels_kernel);
-
-    AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
-               "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_);
-
-    AT_ASSERTM(channels == channels_kernel,
-               "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel);
-
-    const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
-    const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
-
-    auto ones = at::ones({batch, height_out, width_out}, input.options());
-    auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
-    auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
-
-    // prepare for batch-wise computing, which is significantly faster than instance-wise computing
-    // when batch size is large.
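-    // Sketch of what the batched path below computes (a reading of the two
-    // THCudaBlas_SgemmBatched calls, not additional work): for every sample b,
-    //     output[b] = weight * columns[b] + bias * ones[b]^T
-    // where columns[b] is the im2col buffer of sample b, so one pointer-setup
-    // kernel plus two batched GEMMs replace a per-sample loop of cuBLAS calls.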
- // launch batch threads - int matrices_size = batch * sizeof(float *); - auto input_b = static_cast(THCudaMalloc(state, matrices_size)); - auto output_b = static_cast(THCudaMalloc(state, matrices_size)); - auto columns_b = static_cast(THCudaMalloc(state, matrices_size)); - auto ones_b = static_cast(THCudaMalloc(state, matrices_size)); - auto weight_b = static_cast(THCudaMalloc(state, matrices_size)); - auto bias_b = static_cast(THCudaMalloc(state, matrices_size)); - - const int block = 128; - const int grid = (batch + block - 1) / block; - - createBatchGemmBuffer<<>>( - input_b, output_b, - columns_b, ones_b, - weight_b, bias_b, - input.data(), - output.data(), - columns.data(), - ones.data(), - weight.data(), - bias.data(), - channels * width * height, - channels_out * width_out * height_out, - channels * kernel_h * kernel_w * height_out * width_out, - height_out * width_out, - batch); - - long m_ = channels_out; - long n_ = height_out * width_out; - long k_ = 1; - THCudaBlas_SgemmBatched(state, - 't', - 'n', - n_, - m_, - k_, - 1.0f, - ones_b, k_, - bias_b, k_, - 0.0f, - output_b, n_, - batch); - - modulated_deformable_im2col_cuda(THCState_getCurrentStream(state), - input.data(), - offset.data(), - mask.data(), - batch, channels, height, width, - height_out, width_out, kernel_h, kernel_w, - pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, - deformable_group, - columns.data()); - - long m = channels_out; - long n = height_out * width_out; - long k = channels * kernel_h * kernel_w; - THCudaBlas_SgemmBatched(state, - 'n', - 'n', - n, - m, - k, - 1.0f, - (const float **)columns_b, n, - weight_b, k, - 1.0f, - output_b, n, - batch); - - THCudaFree(state, input_b); - THCudaFree(state, output_b); - THCudaFree(state, columns_b); - THCudaFree(state, ones_b); - THCudaFree(state, weight_b); - THCudaFree(state, bias_b); - return output; -} - -__global__ void createBatchGemmBufferBackward( - float **grad_output_b, - float **columns_b, - float **ones_b, - float **weight_b, - float **grad_weight_b, - float **grad_bias_b, - float *grad_output, - float *columns, - float *ones, - float *weight, - float *grad_weight, - float *grad_bias, - const int grad_output_stride, - const int columns_stride, - const int ones_stride, - const int num_batches) -{ - const int idx = blockIdx.x * blockDim.x + threadIdx.x; - if (idx < num_batches) - { - grad_output_b[idx] = grad_output + idx * grad_output_stride; - columns_b[idx] = columns + idx * columns_stride; - ones_b[idx] = ones + idx * ones_stride; - - // share weights and bias within a Mini-Batch - weight_b[idx] = weight; - grad_weight_b[idx] = grad_weight; - grad_bias_b[idx] = grad_bias; - } -} - -std::vector dcn_v2_cuda_backward(const at::Tensor &input, - const at::Tensor &weight, - const at::Tensor &bias, - const at::Tensor &offset, - const at::Tensor &mask, - const at::Tensor &grad_output, - int kernel_h, int kernel_w, - int stride_h, int stride_w, - int pad_h, int pad_w, - int dilation_h, int dilation_w, - int deformable_group) -{ - - THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous"); - THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous"); - - AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); - AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); - AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); - AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); - AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); - - const int batch = 
input.size(0); - const int channels = input.size(1); - const int height = input.size(2); - const int width = input.size(3); - - const int channels_out = weight.size(0); - const int channels_kernel = weight.size(1); - const int kernel_h_ = weight.size(2); - const int kernel_w_ = weight.size(3); - - AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, - "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); - - AT_ASSERTM(channels == channels_kernel, - "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel); - - const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; - const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; - - auto ones = at::ones({height_out, width_out}, input.options()); - auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options()); - auto output = at::empty({batch, channels_out, height_out, width_out}, input.options()); - - auto grad_input = at::zeros_like(input); - auto grad_weight = at::zeros_like(weight); - auto grad_bias = at::zeros_like(bias); - auto grad_offset = at::zeros_like(offset); - auto grad_mask = at::zeros_like(mask); - - using scalar_t = float; - - for (int b = 0; b < batch; b++) - { - auto input_n = input.select(0, b); - auto offset_n = offset.select(0, b); - auto mask_n = mask.select(0, b); - auto grad_output_n = grad_output.select(0, b); - auto grad_input_n = grad_input.select(0, b); - auto grad_offset_n = grad_offset.select(0, b); - auto grad_mask_n = grad_mask.select(0, b); - - long m = channels * kernel_h * kernel_w; - long n = height_out * width_out; - long k = channels_out; - - THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f, - grad_output_n.data(), n, - weight.data(), m, 0.0f, - columns.data(), n); - - // gradient w.r.t. input coordinate data - modulated_deformable_col2im_coord_cuda(THCState_getCurrentStream(state), - columns.data(), - input_n.data(), - offset_n.data(), - mask_n.data(), - 1, channels, height, width, - height_out, width_out, kernel_h, kernel_w, - pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w, deformable_group, - grad_offset_n.data(), - grad_mask_n.data()); - // gradient w.r.t. input data - modulated_deformable_col2im_cuda(THCState_getCurrentStream(state), - columns.data(), - offset_n.data(), - mask_n.data(), - 1, channels, height, width, - height_out, width_out, kernel_h, kernel_w, - pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w, deformable_group, - grad_input_n.data()); - - // gradient w.r.t. weight, dWeight should accumulate across the batch and group - modulated_deformable_im2col_cuda(THCState_getCurrentStream(state), - input_n.data(), - offset_n.data(), - mask_n.data(), - 1, channels, height, width, - height_out, width_out, kernel_h, kernel_w, - pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w, deformable_group, - columns.data()); - - long m_ = channels_out; - long n_ = channels * kernel_h * kernel_w; - long k_ = height_out * width_out; - - THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f, - columns.data(), k_, - grad_output_n.data(), k_, 1.0f, - grad_weight.data(), n_); - - // gradient w.r.t. 
bias - // long m_ = channels_out; - // long k__ = height_out * width_out; - THCudaBlas_Sgemv(state, - 't', - k_, m_, 1.0f, - grad_output_n.data(), k_, - ones.data(), 1, 1.0f, - grad_bias.data(), 1); - } - - return { - grad_input, grad_offset, grad_mask, grad_weight, grad_bias - }; -} \ No newline at end of file diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/dcn_v2_im2col_cuda.cu b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/dcn_v2_im2col_cuda.cu deleted file mode 100644 index 4183793..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/dcn_v2_im2col_cuda.cu +++ /dev/null @@ -1,402 +0,0 @@ -#include "dcn_v2_im2col_cuda.h" -#include -#include -#include - -#include -#include - -#include -#include -#include - -#define CUDA_KERNEL_LOOP(i, n) \ - for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ - i < (n); \ - i += blockDim.x * gridDim.x) - -const int CUDA_NUM_THREADS = 1024; -inline int GET_BLOCKS(const int N) -{ - return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; -} - - -__device__ float dmcn_im2col_bilinear(const float *bottom_data, const int data_width, - const int height, const int width, float h, float w) -{ - int h_low = floor(h); - int w_low = floor(w); - int h_high = h_low + 1; - int w_high = w_low + 1; - - float lh = h - h_low; - float lw = w - w_low; - float hh = 1 - lh, hw = 1 - lw; - - float v1 = 0; - if (h_low >= 0 && w_low >= 0) - v1 = bottom_data[h_low * data_width + w_low]; - float v2 = 0; - if (h_low >= 0 && w_high <= width - 1) - v2 = bottom_data[h_low * data_width + w_high]; - float v3 = 0; - if (h_high <= height - 1 && w_low >= 0) - v3 = bottom_data[h_high * data_width + w_low]; - float v4 = 0; - if (h_high <= height - 1 && w_high <= width - 1) - v4 = bottom_data[h_high * data_width + w_high]; - - float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; - - float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); - return val; -} - -__device__ float dmcn_get_gradient_weight(float argmax_h, float argmax_w, - const int h, const int w, const int height, const int width) -{ - if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) - { - //empty - return 0; - } - - int argmax_h_low = floor(argmax_h); - int argmax_w_low = floor(argmax_w); - int argmax_h_high = argmax_h_low + 1; - int argmax_w_high = argmax_w_low + 1; - - float weight = 0; - if (h == argmax_h_low && w == argmax_w_low) - weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); - if (h == argmax_h_low && w == argmax_w_high) - weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); - if (h == argmax_h_high && w == argmax_w_low) - weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); - if (h == argmax_h_high && w == argmax_w_high) - weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); - return weight; -} - -__device__ float dmcn_get_coordinate_weight(float argmax_h, float argmax_w, - const int height, const int width, const float *im_data, - const int data_width, const int bp_dir) -{ - if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) - { - //empty - return 0; - } - - int argmax_h_low = floor(argmax_h); - int argmax_w_low = floor(argmax_w); - int argmax_h_high = argmax_h_low + 1; - int argmax_w_high = argmax_w_low + 1; - - float weight = 0; - - if (bp_dir == 0) - { - if (argmax_h_low >= 0 && argmax_w_low >= 0) - weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; - if (argmax_h_low >= 0 && 
argmax_w_high <= width - 1) - weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; - if (argmax_h_high <= height - 1 && argmax_w_low >= 0) - weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; - if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) - weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; - } - else if (bp_dir == 1) - { - if (argmax_h_low >= 0 && argmax_w_low >= 0) - weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; - if (argmax_h_low >= 0 && argmax_w_high <= width - 1) - weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; - if (argmax_h_high <= height - 1 && argmax_w_low >= 0) - weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; - if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) - weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; - } - - return weight; -} - -__global__ void modulated_deformable_im2col_gpu_kernel(const int n, - const float *data_im, const float *data_offset, const float *data_mask, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int channel_per_deformable_group, - const int batch_size, const int num_channels, const int deformable_group, - const int height_col, const int width_col, - float *data_col) -{ - // launch channels * batch_size * height_col * width_col cores - CUDA_KERNEL_LOOP(index, n) - { - // NOTE(CharlesShang): different from Dai Jifeng's MXNet implementation, col_buffer is of shape (c*kw*kh, N, oh, ow) - // here columns is of shape (N, c*kw*kh, oh * ow), need to adapt axis - - // index index of output matrix - const int w_col = index % width_col; - const int h_col = (index / width_col) % height_col; - // const int b_col = (index / width_col / height_col) % batch_size; - const int b_col = (index / width_col / height_col / num_channels) % batch_size; - // const int c_im = (index / width_col / height_col) / batch_size; - const int c_im = (index / width_col / height_col) % num_channels; - // const int c_col = c_im * kernel_h * kernel_w; - const int c_col = c_im * kernel_h * kernel_w; - - // compute deformable group index - const int deformable_group_index = c_im / channel_per_deformable_group; - - const int h_in = h_col * stride_h - pad_h; - const int w_in = w_col * stride_w - pad_w; - - // float *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; - float *data_col_ptr = data_col + ((b_col * num_channels * kernel_w * kernel_h + c_col) * height_col + h_col) * width_col + w_col; - //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; - const float *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; - const float *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; - - const float *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; - - for (int i = 0; i < kernel_h; ++i) - { - for (int j = 0; j < kernel_w; ++j) - { - const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + 
w_col; - const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; - const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; - const float offset_h = data_offset_ptr[data_offset_h_ptr]; - const float offset_w = data_offset_ptr[data_offset_w_ptr]; - const float mask = data_mask_ptr[data_mask_hw_ptr]; - float val = static_cast(0); - const float h_im = h_in + i * dilation_h + offset_h; - const float w_im = w_in + j * dilation_w + offset_w; - //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { - if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) - { - //const float map_h = i * dilation_h + offset_h; - //const float map_w = j * dilation_w + offset_w; - //const int cur_height = height - h_in; - //const int cur_width = width - w_in; - //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); - val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); - } - *data_col_ptr = val * mask; - // data_col_ptr += batch_size * height_col * width_col; - data_col_ptr += height_col * width_col; - } - } - } -} - -__global__ void modulated_deformable_col2im_gpu_kernel(const int n, - const float *data_col, const float *data_offset, const float *data_mask, - const int channels, const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int channel_per_deformable_group, - const int batch_size, const int deformable_group, - const int height_col, const int width_col, - float *grad_im) -{ - CUDA_KERNEL_LOOP(index, n) - { - const int j = (index / width_col / height_col / batch_size) % kernel_w; - const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; - const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; - // compute the start and end of the output - - const int deformable_group_index = c / channel_per_deformable_group; - - int w_out = index % width_col; - int h_out = (index / width_col) % height_col; - int b = (index / width_col / height_col) % batch_size; - int w_in = w_out * stride_w - pad_w; - int h_in = h_out * stride_h - pad_h; - - const float *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; - const float *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; - const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; - const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; - const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; - const float offset_h = data_offset_ptr[data_offset_h_ptr]; - const float offset_w = data_offset_ptr[data_offset_w_ptr]; - const float mask = data_mask_ptr[data_mask_hw_ptr]; - const float cur_inv_h_data = h_in + i * dilation_h + offset_h; - const float cur_inv_w_data = w_in + j * dilation_w + offset_w; - - const float cur_top_grad = data_col[index] * mask; - const int cur_h = (int)cur_inv_h_data; - const int cur_w = (int)cur_inv_w_data; - for (int dy = -2; dy <= 2; dy++) - { - for (int dx = -2; dx <= 2; dx++) - { - if (cur_h + dy >= 0 && cur_h + dy < height && - cur_w + dx >= 0 && cur_w + dx < width && - abs(cur_inv_h_data - (cur_h + dy)) < 1 && - 
abs(cur_inv_w_data - (cur_w + dx)) < 1) - { - int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; - float weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); - atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); - } - } - } - } -} - -__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n, - const float *data_col, const float *data_im, - const float *data_offset, const float *data_mask, - const int channels, const int height, const int width, - const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int channel_per_deformable_group, - const int batch_size, const int offset_channels, const int deformable_group, - const int height_col, const int width_col, - float *grad_offset, float *grad_mask) -{ - CUDA_KERNEL_LOOP(index, n) - { - float val = 0, mval = 0; - int w = index % width_col; - int h = (index / width_col) % height_col; - int c = (index / width_col / height_col) % offset_channels; - int b = (index / width_col / height_col) / offset_channels; - // compute the start and end of the output - - const int deformable_group_index = c / (2 * kernel_h * kernel_w); - const int col_step = kernel_h * kernel_w; - int cnt = 0; - const float *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; - const float *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; - const float *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; - const float *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; - - const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; - - for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) - { - const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; - const int bp_dir = offset_c % 2; - - int j = (col_pos / width_col / height_col / batch_size) % kernel_w; - int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; - int w_out = col_pos % width_col; - int h_out = (col_pos / width_col) % height_col; - int w_in = w_out * stride_w - pad_w; - int h_in = h_out * stride_h - pad_h; - const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); - const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); - const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); - const float offset_h = data_offset_ptr[data_offset_h_ptr]; - const float offset_w = data_offset_ptr[data_offset_w_ptr]; - const float mask = data_mask_ptr[data_mask_hw_ptr]; - float inv_h = h_in + i * dilation_h + offset_h; - float inv_w = w_in + j * dilation_w + offset_w; - if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) - { - inv_h = inv_w = -2; - } - else - { - mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); - } - const float weight = dmcn_get_coordinate_weight( - inv_h, inv_w, - height, width, data_im_ptr + cnt * height * width, width, bp_dir); - val += weight * 
data_col_ptr[col_pos] * mask; - cnt += 1; - } - // KERNEL_ASSIGN(grad_offset[index], offset_req, val); - grad_offset[index] = val; - if (offset_c % 2 == 0) - // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval); - grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; - } -} - -void modulated_deformable_im2col_cuda(cudaStream_t stream, - const float* data_im, const float* data_offset, const float* data_mask, - const int batch_size, const int channels, const int height_im, const int width_im, - const int height_col, const int width_col, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int deformable_group, float* data_col) { - // num_axes should be smaller than block size - const int channel_per_deformable_group = channels / deformable_group; - const int num_kernels = channels * batch_size * height_col * width_col; - modulated_deformable_im2col_gpu_kernel - <<>>( - num_kernels, data_im, data_offset, data_mask, height_im, width_im, kernel_h, kernel_w, - pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, - batch_size, channels, deformable_group, height_col, width_col, data_col); - - cudaError_t err = cudaGetLastError(); - if (err != cudaSuccess) - { - printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); - } - -} - -void modulated_deformable_col2im_cuda(cudaStream_t stream, - const float* data_col, const float* data_offset, const float* data_mask, - const int batch_size, const int channels, const int height_im, const int width_im, - const int height_col, const int width_col, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int deformable_group, float* grad_im){ - - const int channel_per_deformable_group = channels / deformable_group; - const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; - modulated_deformable_col2im_gpu_kernel - <<>>( - num_kernels, data_col, data_offset, data_mask, channels, height_im, width_im, - kernel_h, kernel_w, pad_h, pad_h, stride_h, stride_w, - dilation_h, dilation_w, channel_per_deformable_group, - batch_size, deformable_group, height_col, width_col, grad_im); - cudaError_t err = cudaGetLastError(); - if (err != cudaSuccess) - { - printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); - } - -} - -void modulated_deformable_col2im_coord_cuda(cudaStream_t stream, - const float* data_col, const float* data_im, const float* data_offset, const float* data_mask, - const int batch_size, const int channels, const int height_im, const int width_im, - const int height_col, const int width_col, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int deformable_group, - float* grad_offset, float* grad_mask) { - const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; - const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; - modulated_deformable_col2im_coord_gpu_kernel - <<>>( - num_kernels, data_col, data_im, 
data_offset, data_mask, channels, height_im, width_im, - kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, - dilation_h, dilation_w, channel_per_deformable_group, - batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, - grad_offset, grad_mask); - cudaError_t err = cudaGetLastError(); - if (err != cudaSuccess) - { - printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); - } -} \ No newline at end of file diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/dcn_v2_im2col_cuda.h b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/dcn_v2_im2col_cuda.h deleted file mode 100644 index c856831..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/dcn_v2_im2col_cuda.h +++ /dev/null @@ -1,101 +0,0 @@ - -/*! - ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** - * - * COPYRIGHT - * - * All contributions by the University of California: - * Copyright (c) 2014-2017 The Regents of the University of California (Regents) - * All rights reserved. - * - * All other contributions: - * Copyright (c) 2014-2017, the respective contributors - * All rights reserved. - * - * Caffe uses a shared copyright model: each contributor holds copyright over - * their contributions to Caffe. The project versioning records all such - * contribution and copyright details. If a contributor wants to further mark - * their specific copyright on a particular contribution, they should indicate - * their copyright solely in the commit message of the change when it is - * committed. - * - * LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * CONTRIBUTION AGREEMENT - * - * By contributing to the BVLC/caffe repository through pull-request, comment, - * or otherwise, the contributor releases their content to the - * license and copyright terms herein. 
- * - ***************** END Caffe Copyright Notice and Disclaimer ******************** - * - * Copyright (c) 2018 Microsoft - * Licensed under The MIT License [see LICENSE for details] - * \file modulated_deformable_im2col.h - * \brief Function definitions of converting an image to - * column matrix based on kernel, padding, dilation, and offset. - * These functions are mainly used in deformable convolution operators. - * \ref: https://arxiv.org/abs/1811.11168 - * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu - */ - -/***************** Adapted by Charles Shang *********************/ - -#ifndef DCN_V2_IM2COL_CUDA -#define DCN_V2_IM2COL_CUDA - -#ifdef __cplusplus -extern "C" -{ -#endif - - void modulated_deformable_im2col_cuda(cudaStream_t stream, - const float *data_im, const float *data_offset, const float *data_mask, - const int batch_size, const int channels, const int height_im, const int width_im, - const int height_col, const int width_col, const int kernel_h, const int kenerl_w, - const int pad_h, const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int deformable_group, float *data_col); - - void modulated_deformable_col2im_cuda(cudaStream_t stream, - const float *data_col, const float *data_offset, const float *data_mask, - const int batch_size, const int channels, const int height_im, const int width_im, - const int height_col, const int width_col, const int kernel_h, const int kenerl_w, - const int pad_h, const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int deformable_group, float *grad_im); - - void modulated_deformable_col2im_coord_cuda(cudaStream_t stream, - const float *data_col, const float *data_im, const float *data_offset, const float *data_mask, - const int batch_size, const int channels, const int height_im, const int width_im, - const int height_col, const int width_col, const int kernel_h, const int kenerl_w, - const int pad_h, const int pad_w, const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - const int deformable_group, - float *grad_offset, float *grad_mask); - -#ifdef __cplusplus -} -#endif - -#endif \ No newline at end of file diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/dcn_v2_psroi_pooling_cuda.cu b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/dcn_v2_psroi_pooling_cuda.cu deleted file mode 100644 index a959792..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/dcn_v2_psroi_pooling_cuda.cu +++ /dev/null @@ -1,410 +0,0 @@ -#include -#include -#include -#include - -#include -#include - -#include -#include -#include - -#define CUDA_KERNEL_LOOP(i, n) \ - for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ - i < (n); \ - i += blockDim.x * gridDim.x) - -const int CUDA_NUM_THREADS = 1024; -inline int GET_BLOCKS(const int N) -{ - return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; -} - -template -__device__ T bilinear_interp( - const T *data, - const T x, - const T y, - const int width, - const int height) -{ - int x1 = floor(x); - int x2 = ceil(x); - int y1 = floor(y); - int y2 = ceil(y); - T dist_x = static_cast(x - x1); - T dist_y = static_cast(y - y1); - T value11 = data[y1 * width + x1]; - T value12 = data[y2 * width + x1]; - T value21 = data[y1 * width + x2]; - T value22 = data[y2 * width + x2]; - T value = (1 - dist_x) * (1 - dist_y) * value11 + 
- (1 - dist_x) * dist_y * value12 + - dist_x * (1 - dist_y) * value21 + - dist_x * dist_y * value22; - return value; -} - -template -__global__ void DeformablePSROIPoolForwardKernel( - const int count, - const T *bottom_data, - const T spatial_scale, - const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const T *bottom_rois, const T *bottom_trans, - const int no_trans, - const T trans_std, - const int sample_per_part, - const int output_dim, - const int group_size, - const int part_size, - const int num_classes, - const int channels_each_class, - T *top_data, - T *top_count) -{ - CUDA_KERNEL_LOOP(index, count) - { - // The output is in order (n, ctop, ph, pw) - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int ctop = (index / pooled_width / pooled_height) % output_dim; - int n = index / pooled_width / pooled_height / output_dim; - - // [start, end) interval for spatial sampling - const T *offset_bottom_rois = bottom_rois + n * 5; - int roi_batch_ind = offset_bottom_rois[0]; - T roi_start_w = static_cast(round(offset_bottom_rois[1])) * spatial_scale - 0.5; - T roi_start_h = static_cast(round(offset_bottom_rois[2])) * spatial_scale - 0.5; - T roi_end_w = static_cast(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; - T roi_end_h = static_cast(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; - - // Force too small ROIs to be 1x1 - T roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 - T roi_height = max(roi_end_h - roi_start_h, 0.1); - - // Compute w and h at bottom - T bin_size_h = roi_height / static_cast(pooled_height); - T bin_size_w = roi_width / static_cast(pooled_width); - - T sub_bin_size_h = bin_size_h / static_cast(sample_per_part); - T sub_bin_size_w = bin_size_w / static_cast(sample_per_part); - - int part_h = floor(static_cast(ph) / pooled_height * part_size); - int part_w = floor(static_cast(pw) / pooled_width * part_size); - int class_id = ctop / channels_each_class; - T trans_x = no_trans ? static_cast(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std; - T trans_y = no_trans ? static_cast(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std; - - T wstart = static_cast(pw) * bin_size_w + roi_start_w; - wstart += trans_x * roi_width; - T hstart = static_cast(ph) * bin_size_h + roi_start_h; - hstart += trans_y * roi_height; - - T sum = 0; - int count = 0; - int gw = floor(static_cast(pw) * group_size / pooled_width); - int gh = floor(static_cast(ph) * group_size / pooled_height); - gw = min(max(gw, 0), group_size - 1); - gh = min(max(gh, 0), group_size - 1); - - const T *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; - for (int ih = 0; ih < sample_per_part; ih++) - { - for (int iw = 0; iw < sample_per_part; iw++) - { - T w = wstart + iw * sub_bin_size_w; - T h = hstart + ih * sub_bin_size_h; - // bilinear interpolation - if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) - { - continue; - } - w = min(max(w, 0.), width - 1.); - h = min(max(h, 0.), height - 1.); - int c = (ctop * group_size + gh) * group_size + gw; - T val = bilinear_interp(offset_bottom_data + c * height * width, w, h, width, height); - sum += val; - count++; - } - } - top_data[index] = count == 0 ? 
static_cast(0) : sum / count; - top_count[index] = count; - } -} - -template -__global__ void DeformablePSROIPoolBackwardAccKernel( - const int count, - const T *top_diff, - const T *top_count, - const int num_rois, - const T spatial_scale, - const int channels, - const int height, const int width, - const int pooled_height, const int pooled_width, - const int output_dim, - T *bottom_data_diff, T *bottom_trans_diff, - const T *bottom_data, - const T *bottom_rois, - const T *bottom_trans, - const int no_trans, - const T trans_std, - const int sample_per_part, - const int group_size, - const int part_size, - const int num_classes, - const int channels_each_class) -{ - CUDA_KERNEL_LOOP(index, count) - { - // The output is in order (n, ctop, ph, pw) - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int ctop = (index / pooled_width / pooled_height) % output_dim; - int n = index / pooled_width / pooled_height / output_dim; - - // [start, end) interval for spatial sampling - const T *offset_bottom_rois = bottom_rois + n * 5; - int roi_batch_ind = offset_bottom_rois[0]; - T roi_start_w = static_cast(round(offset_bottom_rois[1])) * spatial_scale - 0.5; - T roi_start_h = static_cast(round(offset_bottom_rois[2])) * spatial_scale - 0.5; - T roi_end_w = static_cast(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; - T roi_end_h = static_cast(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; - - // Force too small ROIs to be 1x1 - T roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 - T roi_height = max(roi_end_h - roi_start_h, 0.1); - - // Compute w and h at bottom - T bin_size_h = roi_height / static_cast(pooled_height); - T bin_size_w = roi_width / static_cast(pooled_width); - - T sub_bin_size_h = bin_size_h / static_cast(sample_per_part); - T sub_bin_size_w = bin_size_w / static_cast(sample_per_part); - - int part_h = floor(static_cast(ph) / pooled_height * part_size); - int part_w = floor(static_cast(pw) / pooled_width * part_size); - int class_id = ctop / channels_each_class; - T trans_x = no_trans ? static_cast(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * trans_std; - T trans_y = no_trans ? 
static_cast(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * trans_std; - - T wstart = static_cast(pw) * bin_size_w + roi_start_w; - wstart += trans_x * roi_width; - T hstart = static_cast(ph) * bin_size_h + roi_start_h; - hstart += trans_y * roi_height; - - if (top_count[index] <= 0) - { - continue; - } - T diff_val = top_diff[index] / top_count[index]; - const T *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width; - T *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width; - int gw = floor(static_cast(pw) * group_size / pooled_width); - int gh = floor(static_cast(ph) * group_size / pooled_height); - gw = min(max(gw, 0), group_size - 1); - gh = min(max(gh, 0), group_size - 1); - - for (int ih = 0; ih < sample_per_part; ih++) - { - for (int iw = 0; iw < sample_per_part; iw++) - { - T w = wstart + iw * sub_bin_size_w; - T h = hstart + ih * sub_bin_size_h; - // bilinear interpolation - if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) - { - continue; - } - w = min(max(w, 0.), width - 1.); - h = min(max(h, 0.), height - 1.); - int c = (ctop * group_size + gh) * group_size + gw; - // backward on feature - int x0 = floor(w); - int x1 = ceil(w); - int y0 = floor(h); - int y1 = ceil(h); - T dist_x = w - x0, dist_y = h - y0; - T q00 = (1 - dist_x) * (1 - dist_y); - T q01 = (1 - dist_x) * dist_y; - T q10 = dist_x * (1 - dist_y); - T q11 = dist_x * dist_y; - int bottom_index_base = c * height * width; - atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val); - atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val); - atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val); - atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val); - - if (no_trans) - { - continue; - } - T U00 = offset_bottom_data[bottom_index_base + y0 * width + x0]; - T U01 = offset_bottom_data[bottom_index_base + y1 * width + x0]; - T U10 = offset_bottom_data[bottom_index_base + y0 * width + x1]; - T U11 = offset_bottom_data[bottom_index_base + y1 * width + x1]; - T diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val; - diff_x *= roi_width; - T diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val; - diff_y *= roi_height; - - atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x); - atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y); - } - } - } -} - -std::tuple -dcn_v2_psroi_pooling_cuda_forward(const at::Tensor &input, - const at::Tensor &bbox, - const at::Tensor &trans, - const int no_trans, - const float spatial_scale, - const int output_dim, - const int group_size, - const int pooled_size, - const int part_size, - const int sample_per_part, - const float trans_std) -{ - AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); - AT_ASSERTM(bbox.type().is_cuda(), "rois must be a CUDA tensor"); - AT_ASSERTM(trans.type().is_cuda(), "trans must be a CUDA tensor"); - - const int batch = input.size(0); - const int channels = input.size(1); - const int height = input.size(2); - const int width = input.size(3); - const int channels_trans = no_trans ? 
2 : trans.size(1); - const int num_bbox = bbox.size(0); - - AT_ASSERTM(channels == output_dim, "input channels and output channels must equal"); - auto pooled_height = pooled_size; - auto pooled_width = pooled_size; - - auto out = at::empty({num_bbox, output_dim, pooled_height, pooled_width}, input.options()); - long out_size = num_bbox * output_dim * pooled_height * pooled_width; - auto top_count = at::zeros({num_bbox, output_dim, pooled_height, pooled_width}, input.options()); - - const int num_classes = no_trans ? 1 : channels_trans / 2; - const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; - - cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - - if (out.numel() == 0) - { - THCudaCheck(cudaGetLastError()); - return std::make_tuple(out, top_count); - } - - dim3 grid(std::min(THCCeilDiv(out_size, 512L), 4096L)); - dim3 block(512); - - AT_DISPATCH_FLOATING_TYPES(input.type(), "dcn_v2_psroi_pooling_cuda_forward", [&] { - DeformablePSROIPoolForwardKernel<<>>( - out_size, - input.contiguous().data(), - spatial_scale, - channels, - height, width, - pooled_height, - pooled_width, - bbox.contiguous().data(), - trans.contiguous().data(), - no_trans, - trans_std, - sample_per_part, - output_dim, - group_size, - part_size, - num_classes, - channels_each_class, - out.data(), - top_count.data()); - }); - THCudaCheck(cudaGetLastError()); - return std::make_tuple(out, top_count); -} - -std::tuple -dcn_v2_psroi_pooling_cuda_backward(const at::Tensor &out_grad, - const at::Tensor &input, - const at::Tensor &bbox, - const at::Tensor &trans, - const at::Tensor &top_count, - const int no_trans, - const float spatial_scale, - const int output_dim, - const int group_size, - const int pooled_size, - const int part_size, - const int sample_per_part, - const float trans_std) -{ - AT_ASSERTM(out_grad.type().is_cuda(), "out_grad must be a CUDA tensor"); - AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); - AT_ASSERTM(bbox.type().is_cuda(), "bbox must be a CUDA tensor"); - AT_ASSERTM(trans.type().is_cuda(), "trans must be a CUDA tensor"); - AT_ASSERTM(top_count.type().is_cuda(), "top_count must be a CUDA tensor"); - - const int batch = input.size(0); - const int channels = input.size(1); - const int height = input.size(2); - const int width = input.size(3); - const int channels_trans = no_trans ? 2 : trans.size(1); - const int num_bbox = bbox.size(0); - - AT_ASSERTM(channels == output_dim, "input channels and output channels must equal"); - auto pooled_height = pooled_size; - auto pooled_width = pooled_size; - long out_size = num_bbox * output_dim * pooled_height * pooled_width; - const int num_classes = no_trans ? 1 : channels_trans / 2; - const int channels_each_class = no_trans ? 
output_dim : output_dim / num_classes; - - auto input_grad = at::zeros({batch, channels, height, width}, out_grad.options()); - auto trans_grad = at::zeros_like(trans); - - if (input_grad.numel() == 0) - { - THCudaCheck(cudaGetLastError()); - return std::make_tuple(input_grad, trans_grad); - } - - dim3 grid(std::min(THCCeilDiv(out_size, 512L), 4096L)); - dim3 block(512); - cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - - AT_DISPATCH_FLOATING_TYPES(out_grad.type(), "dcn_v2_psroi_pooling_cuda_backward", [&] { - DeformablePSROIPoolBackwardAccKernel<<>>( - out_size, - out_grad.contiguous().data(), - top_count.contiguous().data(), - num_bbox, - spatial_scale, - channels, - height, - width, - pooled_height, - pooled_width, - output_dim, - input_grad.contiguous().data(), - trans_grad.contiguous().data(), - input.contiguous().data(), - bbox.contiguous().data(), - trans.contiguous().data(), - no_trans, - trans_std, - sample_per_part, - group_size, - part_size, - num_classes, - channels_each_class); - }); - THCudaCheck(cudaGetLastError()); - return std::make_tuple(input_grad, trans_grad); -} \ No newline at end of file diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/vision.h b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/vision.h deleted file mode 100644 index e42a2a7..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/cuda/vision.h +++ /dev/null @@ -1,60 +0,0 @@ -#pragma once -#include - -at::Tensor -dcn_v2_cuda_forward(const at::Tensor &input, - const at::Tensor &weight, - const at::Tensor &bias, - const at::Tensor &offset, - const at::Tensor &mask, - const int kernel_h, - const int kernel_w, - const int stride_h, - const int stride_w, - const int pad_h, - const int pad_w, - const int dilation_h, - const int dilation_w, - const int deformable_group); - -std::vector -dcn_v2_cuda_backward(const at::Tensor &input, - const at::Tensor &weight, - const at::Tensor &bias, - const at::Tensor &offset, - const at::Tensor &mask, - const at::Tensor &grad_output, - int kernel_h, int kernel_w, - int stride_h, int stride_w, - int pad_h, int pad_w, - int dilation_h, int dilation_w, - int deformable_group); - - -std::tuple -dcn_v2_psroi_pooling_cuda_forward(const at::Tensor &input, - const at::Tensor &bbox, - const at::Tensor &trans, - const int no_trans, - const float spatial_scale, - const int output_dim, - const int group_size, - const int pooled_size, - const int part_size, - const int sample_per_part, - const float trans_std); - -std::tuple -dcn_v2_psroi_pooling_cuda_backward(const at::Tensor &out_grad, - const at::Tensor &input, - const at::Tensor &bbox, - const at::Tensor &trans, - const at::Tensor &top_count, - const int no_trans, - const float spatial_scale, - const int output_dim, - const int group_size, - const int pooled_size, - const int part_size, - const int sample_per_part, - const float trans_std); \ No newline at end of file diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/dcn_v2.h b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/dcn_v2.h deleted file mode 100644 index 23f5caf..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/dcn_v2.h +++ /dev/null @@ -1,145 +0,0 @@ -#pragma once - -#include "cpu/vision.h" - -#ifdef WITH_CUDA -#include "cuda/vision.h" -#endif - -at::Tensor -dcn_v2_forward(const at::Tensor &input, - const 
at::Tensor &weight, - const at::Tensor &bias, - const at::Tensor &offset, - const at::Tensor &mask, - const int kernel_h, - const int kernel_w, - const int stride_h, - const int stride_w, - const int pad_h, - const int pad_w, - const int dilation_h, - const int dilation_w, - const int deformable_group) -{ - if (input.type().is_cuda()) - { -#ifdef WITH_CUDA - return dcn_v2_cuda_forward(input, weight, bias, offset, mask, - kernel_h, kernel_w, - stride_h, stride_w, - pad_h, pad_w, - dilation_h, dilation_w, - deformable_group); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - AT_ERROR("Not implemented on the CPU"); -} - -std::vector -dcn_v2_backward(const at::Tensor &input, - const at::Tensor &weight, - const at::Tensor &bias, - const at::Tensor &offset, - const at::Tensor &mask, - const at::Tensor &grad_output, - int kernel_h, int kernel_w, - int stride_h, int stride_w, - int pad_h, int pad_w, - int dilation_h, int dilation_w, - int deformable_group) -{ - if (input.type().is_cuda()) - { -#ifdef WITH_CUDA - return dcn_v2_cuda_backward(input, - weight, - bias, - offset, - mask, - grad_output, - kernel_h, kernel_w, - stride_h, stride_w, - pad_h, pad_w, - dilation_h, dilation_w, - deformable_group); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - AT_ERROR("Not implemented on the CPU"); -} - -std::tuple -dcn_v2_psroi_pooling_forward(const at::Tensor &input, - const at::Tensor &bbox, - const at::Tensor &trans, - const int no_trans, - const float spatial_scale, - const int output_dim, - const int group_size, - const int pooled_size, - const int part_size, - const int sample_per_part, - const float trans_std) -{ - if (input.type().is_cuda()) - { -#ifdef WITH_CUDA - return dcn_v2_psroi_pooling_cuda_forward(input, - bbox, - trans, - no_trans, - spatial_scale, - output_dim, - group_size, - pooled_size, - part_size, - sample_per_part, - trans_std); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - AT_ERROR("Not implemented on the CPU"); -} - -std::tuple -dcn_v2_psroi_pooling_backward(const at::Tensor &out_grad, - const at::Tensor &input, - const at::Tensor &bbox, - const at::Tensor &trans, - const at::Tensor &top_count, - const int no_trans, - const float spatial_scale, - const int output_dim, - const int group_size, - const int pooled_size, - const int part_size, - const int sample_per_part, - const float trans_std) -{ - if (input.type().is_cuda()) - { -#ifdef WITH_CUDA - return dcn_v2_psroi_pooling_cuda_backward(out_grad, - input, - bbox, - trans, - top_count, - no_trans, - spatial_scale, - output_dim, - group_size, - pooled_size, - part_size, - sample_per_part, - trans_std); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - AT_ERROR("Not implemented on the CPU"); -} \ No newline at end of file diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/vision.cpp b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/vision.cpp deleted file mode 100644 index ff54233..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/src/vision.cpp +++ /dev/null @@ -1,9 +0,0 @@ - -#include "dcn_v2.h" - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("dcn_v2_forward", &dcn_v2_forward, "dcn_v2_forward"); - m.def("dcn_v2_backward", &dcn_v2_backward, "dcn_v2_backward"); - m.def("dcn_v2_psroi_pooling_forward", &dcn_v2_psroi_pooling_forward, "dcn_v2_psroi_pooling_forward"); - m.def("dcn_v2_psroi_pooling_backward", &dcn_v2_psroi_pooling_backward, 
"dcn_v2_psroi_pooling_backward"); -} diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/test.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/test.py deleted file mode 100644 index 3bd5bd2..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/DCNv2/test.py +++ /dev/null @@ -1,270 +0,0 @@ -#!/usr/bin/env python -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - -import time -import torch -import torch.nn as nn -from torch.autograd import gradcheck - -from dcn_v2 import dcn_v2_conv, DCNv2, DCN -from dcn_v2 import dcn_v2_pooling, DCNv2Pooling, DCNPooling - -deformable_groups = 1 -N, inC, inH, inW = 2, 2, 4, 4 -outC = 2 -kH, kW = 3, 3 - - -def conv_identify(weight, bias): - weight.data.zero_() - bias.data.zero_() - o, i, h, w = weight.shape - y = h//2 - x = w//2 - for p in range(i): - for q in range(o): - if p == q: - weight.data[q, p, y, x] = 1.0 - - -def check_zero_offset(): - conv_offset = nn.Conv2d(inC, deformable_groups * 2 * kH * kW, - kernel_size=(kH, kW), - stride=(1, 1), - padding=(1, 1), - bias=True).cuda() - - conv_mask = nn.Conv2d(inC, deformable_groups * 1 * kH * kW, - kernel_size=(kH, kW), - stride=(1, 1), - padding=(1, 1), - bias=True).cuda() - - dcn_v2 = DCNv2(inC, outC, (kH, kW), - stride=1, padding=1, dilation=1, - deformable_groups=deformable_groups).cuda() - - conv_offset.weight.data.zero_() - conv_offset.bias.data.zero_() - conv_mask.weight.data.zero_() - conv_mask.bias.data.zero_() - conv_identify(dcn_v2.weight, dcn_v2.bias) - - input = torch.randn(N, inC, inH, inW).cuda() - offset = conv_offset(input) - mask = conv_mask(input) - mask = torch.sigmoid(mask) - output = dcn_v2(input, offset, mask) - output *= 2 - d = (input - output).abs().max() - if d < 1e-10: - print('Zero offset passed') - else: - print('Zero offset failed') - print(input) - print(output) - -def check_gradient_dconv(): - - input = torch.rand(N, inC, inH, inW).cuda() * 0.01 - input.requires_grad = True - - offset = torch.randn(N, deformable_groups * 2 * kW * kH, inH, inW).cuda() * 2 - # offset.data.zero_() - # offset.data -= 0.5 - offset.requires_grad = True - - mask = torch.rand(N, deformable_groups * 1 * kW * kH, inH, inW).cuda() - # mask.data.zero_() - mask.requires_grad = True - mask = torch.sigmoid(mask) - - weight = torch.randn(outC, inC, kH, kW).cuda() - weight.requires_grad = True - - bias = torch.rand(outC).cuda() - bias.requires_grad = True - - stride = 1 - padding = 1 - dilation = 1 - - print('check_gradient_dconv: ', - gradcheck(dcn_v2_conv, (input, offset, mask, weight, bias, - stride, padding, dilation, deformable_groups), - eps=1e-3, atol=1e-4, rtol=1e-2)) - - -def check_pooling_zero_offset(): - - input = torch.randn(2, 16, 64, 64).cuda().zero_() - input[0, :, 16:26, 16:26] = 1. - input[1, :, 10:20, 20:30] = 2. 
- rois = torch.tensor([ - [0, 65, 65, 103, 103], - [1, 81, 41, 119, 79], - ]).cuda().float() - pooling = DCNv2Pooling(spatial_scale=1.0 / 4, - pooled_size=7, - output_dim=16, - no_trans=True, - group_size=1, - trans_std=0.0).cuda() - - out = pooling(input, rois, input.new()) - s = ', '.join(['%f' % out[i, :, :, :].mean().item() - for i in range(rois.shape[0])]) - print(s) - - dpooling = DCNv2Pooling(spatial_scale=1.0 / 4, - pooled_size=7, - output_dim=16, - no_trans=False, - group_size=1, - trans_std=0.0).cuda() - offset = torch.randn(20, 2, 7, 7).cuda().zero_() - dout = dpooling(input, rois, offset) - s = ', '.join(['%f' % dout[i, :, :, :].mean().item() - for i in range(rois.shape[0])]) - print(s) - - -def check_gradient_dpooling(): - input = torch.randn(2, 3, 5, 5).cuda() * 0.01 - N = 4 - batch_inds = torch.randint(2, (N, 1)).cuda().float() - x = torch.rand((N, 1)).cuda().float() * 15 - y = torch.rand((N, 1)).cuda().float() * 15 - w = torch.rand((N, 1)).cuda().float() * 10 - h = torch.rand((N, 1)).cuda().float() * 10 - rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1) - offset = torch.randn(N, 2, 3, 3).cuda() - input.requires_grad = True - offset.requires_grad = True - - spatial_scale = 1.0 / 4 - pooled_size = 3 - output_dim = 3 - no_trans = 0 - group_size = 1 - trans_std = 0.0 - sample_per_part = 4 - part_size = pooled_size - - print('check_gradient_dpooling:', - gradcheck(dcn_v2_pooling, (input, rois, offset, - spatial_scale, - pooled_size, - output_dim, - no_trans, - group_size, - part_size, - sample_per_part, - trans_std), - eps=1e-4)) - - -def example_dconv(): - input = torch.randn(2, 64, 128, 128).cuda() - # wrap all things (offset and mask) in DCN - dcn = DCN(64, 64, kernel_size=(3, 3), stride=1, - padding=1, deformable_groups=2).cuda() - # print(dcn.weight.shape, input.shape) - output = dcn(input) - targert = output.new(*output.size()) - targert.data.uniform_(-0.01, 0.01) - error = (targert - output).mean() - error.backward() - print(output.shape) - - -def example_dpooling(): - input = torch.randn(2, 32, 64, 64).cuda() - batch_inds = torch.randint(2, (20, 1)).cuda().float() - x = torch.randint(256, (20, 1)).cuda().float() - y = torch.randint(256, (20, 1)).cuda().float() - w = torch.randint(64, (20, 1)).cuda().float() - h = torch.randint(64, (20, 1)).cuda().float() - rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1) - offset = torch.randn(20, 2, 7, 7).cuda() - input.requires_grad = True - offset.requires_grad = True - - # normal roi_align - pooling = DCNv2Pooling(spatial_scale=1.0 / 4, - pooled_size=7, - output_dim=32, - no_trans=True, - group_size=1, - trans_std=0.1).cuda() - - # deformable pooling - dpooling = DCNv2Pooling(spatial_scale=1.0 / 4, - pooled_size=7, - output_dim=32, - no_trans=False, - group_size=1, - trans_std=0.1).cuda() - - out = pooling(input, rois, offset) - dout = dpooling(input, rois, offset) - print(out.shape) - print(dout.shape) - - target_out = out.new(*out.size()) - target_out.data.uniform_(-0.01, 0.01) - target_dout = dout.new(*dout.size()) - target_dout.data.uniform_(-0.01, 0.01) - e = (target_out - out).mean() - e.backward() - e = (target_dout - dout).mean() - e.backward() - - -def example_mdpooling(): - input = torch.randn(2, 32, 64, 64).cuda() - input.requires_grad = True - batch_inds = torch.randint(2, (20, 1)).cuda().float() - x = torch.randint(256, (20, 1)).cuda().float() - y = torch.randint(256, (20, 1)).cuda().float() - w = torch.randint(64, (20, 1)).cuda().float() - h = torch.randint(64, (20, 1)).cuda().float() - rois = 
torch.cat((batch_inds, x, y, x + w, y + h), dim=1) - - # modulated deformable pooling (V2) - dpooling = DCNPooling(spatial_scale=1.0 / 4, - pooled_size=7, - output_dim=32, - no_trans=False, - group_size=1, - trans_std=0.1, - deform_fc_dim=1024).cuda() - - dout = dpooling(input, rois) - target = dout.new(*dout.size()) - target.data.uniform_(-0.1, 0.1) - error = (target - dout).mean() - error.backward() - print(dout.shape) - - -if __name__ == '__main__': - - example_dconv() - example_dpooling() - example_mdpooling() - - check_pooling_zero_offset() - # zero offset check - if inC == outC: - check_zero_offset() - - check_gradient_dpooling() - check_gradient_dconv() - # """ - # ****** Note: the "backward is not reentrant" error may not be a serious problem, - # ****** since the max error is less than 1e-7; - # ****** still looking into what triggers it - # """ diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/dlav0.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/dlav0.py deleted file mode 100644 index 2807adf..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/dlav0.py +++ /dev/null @@ -1,647 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math -from os.path import join - -import torch -from torch import nn -import torch.utils.model_zoo as model_zoo - -import numpy as np - -BatchNorm = nn.BatchNorm2d - -def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'): - return join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash)) - - -def conv3x3(in_planes, out_planes, stride=1): - "3x3 convolution with padding" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class BasicBlock(nn.Module): - def __init__(self, inplanes, planes, stride=1, dilation=1): - super(BasicBlock, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, - stride=stride, padding=dilation, - bias=False, dilation=dilation) - self.bn1 = BatchNorm(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, - stride=1, padding=dilation, - bias=False, dilation=dilation) - self.bn2 = BatchNorm(planes) - self.stride = stride - - def forward(self, x, residual=None): - if residual is None: - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - out += residual - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 2 - - def __init__(self, inplanes, planes, stride=1, dilation=1): - super(Bottleneck, self).__init__() - expansion = Bottleneck.expansion - bottle_planes = planes // expansion - self.conv1 = nn.Conv2d(inplanes, bottle_planes, - kernel_size=1, bias=False) - self.bn1 = BatchNorm(bottle_planes) - self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3, - stride=stride, padding=dilation, - bias=False, dilation=dilation) - self.bn2 = BatchNorm(bottle_planes) - self.conv3 = nn.Conv2d(bottle_planes, planes, - kernel_size=1, bias=False) - self.bn3 = BatchNorm(planes) - self.relu = nn.ReLU(inplace=True) - self.stride = stride - - def forward(self, x, residual=None): - if residual is None: - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out
= self.bn3(out) - - out += residual - out = self.relu(out) - - return out - - -class BottleneckX(nn.Module): - expansion = 2 - cardinality = 32 - - def __init__(self, inplanes, planes, stride=1, dilation=1): - super(BottleneckX, self).__init__() - cardinality = BottleneckX.cardinality - # dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0))) - # bottle_planes = dim * cardinality - bottle_planes = planes * cardinality // 32 - self.conv1 = nn.Conv2d(inplanes, bottle_planes, - kernel_size=1, bias=False) - self.bn1 = BatchNorm(bottle_planes) - self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3, - stride=stride, padding=dilation, bias=False, - dilation=dilation, groups=cardinality) - self.bn2 = BatchNorm(bottle_planes) - self.conv3 = nn.Conv2d(bottle_planes, planes, - kernel_size=1, bias=False) - self.bn3 = BatchNorm(planes) - self.relu = nn.ReLU(inplace=True) - self.stride = stride - - def forward(self, x, residual=None): - if residual is None: - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - out += residual - out = self.relu(out) - - return out - - -class Root(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size, residual): - super(Root, self).__init__() - self.conv = nn.Conv2d( - in_channels, out_channels, 1, - stride=1, bias=False, padding=(kernel_size - 1) // 2) - self.bn = BatchNorm(out_channels) - self.relu = nn.ReLU(inplace=True) - self.residual = residual - - def forward(self, *x): - children = x - x = self.conv(torch.cat(x, 1)) - x = self.bn(x) - if self.residual: - x += children[0] - x = self.relu(x) - - return x - - -class Tree(nn.Module): - def __init__(self, levels, block, in_channels, out_channels, stride=1, - level_root=False, root_dim=0, root_kernel_size=1, - dilation=1, root_residual=False): - super(Tree, self).__init__() - if root_dim == 0: - root_dim = 2 * out_channels - if level_root: - root_dim += in_channels - if levels == 1: - self.tree1 = block(in_channels, out_channels, stride, - dilation=dilation) - self.tree2 = block(out_channels, out_channels, 1, - dilation=dilation) - else: - self.tree1 = Tree(levels - 1, block, in_channels, out_channels, - stride, root_dim=0, - root_kernel_size=root_kernel_size, - dilation=dilation, root_residual=root_residual) - self.tree2 = Tree(levels - 1, block, out_channels, out_channels, - root_dim=root_dim + out_channels, - root_kernel_size=root_kernel_size, - dilation=dilation, root_residual=root_residual) - if levels == 1: - self.root = Root(root_dim, out_channels, root_kernel_size, - root_residual) - self.level_root = level_root - self.root_dim = root_dim - self.downsample = None - self.project = None - self.levels = levels - if stride > 1: - self.downsample = nn.MaxPool2d(stride, stride=stride) - if in_channels != out_channels: - self.project = nn.Sequential( - nn.Conv2d(in_channels, out_channels, - kernel_size=1, stride=1, bias=False), - BatchNorm(out_channels) - ) - - def forward(self, x, residual=None, children=None): - children = [] if children is None else children - bottom = self.downsample(x) if self.downsample else x - residual = self.project(bottom) if self.project else bottom - if self.level_root: - children.append(bottom) - x1 = self.tree1(x, residual) - if self.levels == 1: - x2 = self.tree2(x1) - x = self.root(x2, x1, *children) - else: - children.append(x1) - x = self.tree2(x1, children=children) - return x - - -class 
DLA(nn.Module): - def __init__(self, levels, channels, num_classes=1000, - block=BasicBlock, residual_root=False, return_levels=False, - pool_size=7, linear_root=False): - super(DLA, self).__init__() - self.channels = channels - self.return_levels = return_levels - self.num_classes = num_classes - self.base_layer = nn.Sequential( - nn.Conv2d(3, channels[0], kernel_size=7, stride=1, - padding=3, bias=False), - BatchNorm(channels[0]), - nn.ReLU(inplace=True)) - self.level0 = self._make_conv_level( - channels[0], channels[0], levels[0]) - self.level1 = self._make_conv_level( - channels[0], channels[1], levels[1], stride=2) - self.level2 = Tree(levels[2], block, channels[1], channels[2], 2, - level_root=False, - root_residual=residual_root) - self.level3 = Tree(levels[3], block, channels[2], channels[3], 2, - level_root=True, root_residual=residual_root) - self.level4 = Tree(levels[4], block, channels[3], channels[4], 2, - level_root=True, root_residual=residual_root) - self.level5 = Tree(levels[5], block, channels[4], channels[5], 2, - level_root=True, root_residual=residual_root) - - self.avgpool = nn.AvgPool2d(pool_size) - self.fc = nn.Conv2d(channels[-1], num_classes, kernel_size=1, - stride=1, padding=0, bias=True) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. / n)) - elif isinstance(m, BatchNorm): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def _make_level(self, block, inplanes, planes, blocks, stride=1): - downsample = None - if stride != 1 or inplanes != planes: - downsample = nn.Sequential( - nn.MaxPool2d(stride, stride=stride), - nn.Conv2d(inplanes, planes, - kernel_size=1, stride=1, bias=False), - BatchNorm(planes), - ) - - layers = [] - layers.append(block(inplanes, planes, stride, downsample=downsample)) - for i in range(1, blocks): - layers.append(block(inplanes, planes)) - - return nn.Sequential(*layers) - - def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): - modules = [] - for i in range(convs): - modules.extend([ - nn.Conv2d(inplanes, planes, kernel_size=3, - stride=stride if i == 0 else 1, - padding=dilation, bias=False, dilation=dilation), - BatchNorm(planes), - nn.ReLU(inplace=True)]) - inplanes = planes - return nn.Sequential(*modules) - - def forward(self, x): - y = [] - x = self.base_layer(x) - for i in range(6): - x = getattr(self, 'level{}'.format(i))(x) - y.append(x) - if self.return_levels: - return y - else: - x = self.avgpool(x) - x = self.fc(x) - x = x.view(x.size(0), -1) - - return x - - def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'): - fc = self.fc - if name.endswith('.pth'): - model_weights = torch.load(data + name) - else: - model_url = get_model_url(data, name, hash) - model_weights = model_zoo.load_url(model_url) - num_classes = len(model_weights[list(model_weights.keys())[-1]]) - self.fc = nn.Conv2d( - self.channels[-1], num_classes, - kernel_size=1, stride=1, padding=0, bias=True) - self.load_state_dict(model_weights) - self.fc = fc - - -def dla34(pretrained, **kwargs): # DLA-34 - model = DLA([1, 1, 1, 2, 2, 1], - [16, 32, 64, 128, 256, 512], - block=BasicBlock, **kwargs) - if pretrained: - model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86') - return model - - -def dla46_c(pretrained=None, **kwargs): # DLA-46-C - Bottleneck.expansion = 2 - model = DLA([1, 1, 1, 2, 2, 1], - [16, 32, 64, 64, 128, 256], - block=Bottleneck, **kwargs) - if pretrained is not None: 
- model.load_pretrained_model(pretrained, 'dla46_c') - return model - - -def dla46x_c(pretrained=None, **kwargs): # DLA-X-46-C - BottleneckX.expansion = 2 - model = DLA([1, 1, 1, 2, 2, 1], - [16, 32, 64, 64, 128, 256], - block=BottleneckX, **kwargs) - if pretrained is not None: - model.load_pretrained_model(pretrained, 'dla46x_c') - return model - - -def dla60x_c(pretrained, **kwargs): # DLA-X-60-C - BottleneckX.expansion = 2 - model = DLA([1, 1, 1, 2, 3, 1], - [16, 32, 64, 64, 128, 256], - block=BottleneckX, **kwargs) - if pretrained: - model.load_pretrained_model(data='imagenet', name='dla60x_c', hash='b870c45c') - return model - - -def dla60(pretrained=None, **kwargs): # DLA-60 - Bottleneck.expansion = 2 - model = DLA([1, 1, 1, 2, 3, 1], - [16, 32, 128, 256, 512, 1024], - block=Bottleneck, **kwargs) - if pretrained is not None: - model.load_pretrained_model(pretrained, 'dla60') - return model - - -def dla60x(pretrained=None, **kwargs): # DLA-X-60 - BottleneckX.expansion = 2 - model = DLA([1, 1, 1, 2, 3, 1], - [16, 32, 128, 256, 512, 1024], - block=BottleneckX, **kwargs) - if pretrained is not None: - model.load_pretrained_model(pretrained, 'dla60x') - return model - - -def dla102(pretrained=None, **kwargs): # DLA-102 - Bottleneck.expansion = 2 - model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024], - block=Bottleneck, residual_root=True, **kwargs) - if pretrained is not None: - model.load_pretrained_model(pretrained, 'dla102') - return model - - -def dla102x(pretrained=None, **kwargs): # DLA-X-102 - BottleneckX.expansion = 2 - model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024], - block=BottleneckX, residual_root=True, **kwargs) - if pretrained is not None: - model.load_pretrained_model(pretrained, 'dla102x') - return model - - -def dla102x2(pretrained=None, **kwargs): # DLA-X-102 64 - BottleneckX.cardinality = 64 - model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024], - block=BottleneckX, residual_root=True, **kwargs) - if pretrained is not None: - model.load_pretrained_model(pretrained, 'dla102x2') - return model - - -def dla169(pretrained=None, **kwargs): # DLA-169 - Bottleneck.expansion = 2 - model = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024], - block=Bottleneck, residual_root=True, **kwargs) - if pretrained is not None: - model.load_pretrained_model(pretrained, 'dla169') - return model - - -def set_bn(bn): - global BatchNorm - BatchNorm = bn - dla.BatchNorm = bn - - -class Identity(nn.Module): - def __init__(self): - super(Identity, self).__init__() - - def forward(self, x): - return x - - -def fill_up_weights(up): - w = up.weight.data - f = math.ceil(w.size(2) / 2) - c = (2 * f - 1 - f % 2) / (2. 
* f) - for i in range(w.size(2)): - for j in range(w.size(3)): - w[0, 0, i, j] = \ - (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c)) - for c in range(1, w.size(0)): - w[c, 0, :, :] = w[0, 0, :, :] - - -class IDAUp(nn.Module): - def __init__(self, node_kernel, out_dim, channels, up_factors): - super(IDAUp, self).__init__() - self.channels = channels - self.out_dim = out_dim - for i, c in enumerate(channels): - if c == out_dim: - proj = Identity() - else: - proj = nn.Sequential( - nn.Conv2d(c, out_dim, - kernel_size=1, stride=1, bias=False), - BatchNorm(out_dim), - nn.ReLU(inplace=True)) - f = int(up_factors[i]) - if f == 1: - up = Identity() - else: - up = nn.ConvTranspose2d( - out_dim, out_dim, f * 2, stride=f, padding=f // 2, - output_padding=0, groups=out_dim, bias=False) - fill_up_weights(up) - setattr(self, 'proj_' + str(i), proj) - setattr(self, 'up_' + str(i), up) - - for i in range(1, len(channels)): - node = nn.Sequential( - nn.Conv2d(out_dim * 2, out_dim, - kernel_size=node_kernel, stride=1, - padding=node_kernel // 2, bias=False), - BatchNorm(out_dim), - nn.ReLU(inplace=True)) - setattr(self, 'node_' + str(i), node) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. / n)) - elif isinstance(m, BatchNorm): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def forward(self, layers): - assert len(self.channels) == len(layers), \ - '{} vs {} layers'.format(len(self.channels), len(layers)) - layers = list(layers) - for i, l in enumerate(layers): - upsample = getattr(self, 'up_' + str(i)) - project = getattr(self, 'proj_' + str(i)) - layers[i] = upsample(project(l)) - x = layers[0] - y = [] - for i in range(1, len(layers)): - node = getattr(self, 'node_' + str(i)) - x = node(torch.cat([x, layers[i]], 1)) - y.append(x) - return x, y - - -class DLAUp(nn.Module): - def __init__(self, channels, scales=(1, 2, 4, 8, 16), in_channels=None): - super(DLAUp, self).__init__() - if in_channels is None: - in_channels = channels - self.channels = channels - channels = list(channels) - scales = np.array(scales, dtype=int) - for i in range(len(channels) - 1): - j = -i - 2 - setattr(self, 'ida_{}'.format(i), - IDAUp(3, channels[j], in_channels[j:], - scales[j:] // scales[j])) - scales[j + 1:] = scales[j] - in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]] - - def forward(self, layers): - layers = list(layers) - assert len(layers) > 1 - for i in range(len(layers) - 1): - ida = getattr(self, 'ida_{}'.format(i)) - x, y = ida(layers[-i - 2:]) - layers[-i - 1:] = y - return x - -def fill_fc_weights(layers): - for m in layers.modules(): - if isinstance(m, nn.Conv2d): - nn.init.normal_(m.weight, std=0.001) - # torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu') - # torch.nn.init.xavier_normal_(m.weight.data) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - -class DLASeg(nn.Module): - def __init__(self, base_name, heads, - pretrained=True, down_ratio=4, head_conv=256): - super(DLASeg, self).__init__() - assert down_ratio in [2, 4, 8, 16] - self.heads = heads - self.first_level = int(np.log2(down_ratio)) - self.base = globals()[base_name]( - pretrained=pretrained, return_levels=True) - channels = self.base.channels - scales = [2 ** i for i in range(len(channels[self.first_level:]))] - self.dla_up = DLAUp(channels[self.first_level:], scales=scales) - ''' - self.fc = nn.Sequential( - nn.Conv2d(channels[self.first_level], classes, kernel_size=1, - stride=1, 
padding=0, bias=True) - ) - ''' - - for head in self.heads: - classes = self.heads[head] - if head_conv > 0: - fc = nn.Sequential( - nn.Conv2d(channels[self.first_level], head_conv, - kernel_size=3, padding=1, bias=True), - nn.ReLU(inplace=True), - nn.Conv2d(head_conv, classes, - kernel_size=1, stride=1, - padding=0, bias=True)) - if 'hm' in head: - fc[-1].bias.data.fill_(-2.19) - else: - fill_fc_weights(fc) - else: - fc = nn.Conv2d(channels[self.first_level], classes, - kernel_size=1, stride=1, - padding=0, bias=True) - if 'hm' in head: - fc.bias.data.fill_(-2.19) - else: - fill_fc_weights(fc) - self.__setattr__(head, fc) - - ''' - up_factor = 2 ** self.first_level - if up_factor > 1: - up = nn.ConvTranspose2d(classes, classes, up_factor * 2, - stride=up_factor, padding=up_factor // 2, - output_padding=0, groups=classes, - bias=False) - fill_up_weights(up) - up.weight.requires_grad = False - else: - up = Identity() - self.up = up - self.softmax = nn.LogSoftmax(dim=1) - - - for m in self.fc.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. / n)) - elif isinstance(m, BatchNorm): - m.weight.data.fill_(1) - m.bias.data.zero_() - ''' - - def forward(self, x): - x = self.base(x) - x = self.dla_up(x[self.first_level:]) - # x = self.fc(x) - # y = self.softmax(self.up(x)) - ret = {} - for head in self.heads: - ret[head] = self.__getattr__(head)(x) - return [ret] - - ''' - def optim_parameters(self, memo=None): - for param in self.base.parameters(): - yield param - for param in self.dla_up.parameters(): - yield param - for param in self.fc.parameters(): - yield param - ''' -''' -def dla34up(classes, pretrained_base=None, **kwargs): - model = DLASeg('dla34', classes, pretrained_base=pretrained_base, **kwargs) - return model - - -def dla60up(classes, pretrained_base=None, **kwargs): - model = DLASeg('dla60', classes, pretrained_base=pretrained_base, **kwargs) - return model - - -def dla102up(classes, pretrained_base=None, **kwargs): - model = DLASeg('dla102', classes, - pretrained_base=pretrained_base, **kwargs) - return model - - -def dla169up(classes, pretrained_base=None, **kwargs): - model = DLASeg('dla169', classes, - pretrained_base=pretrained_base, **kwargs) - return model -''' - -def get_pose_net(num_layers, heads, head_conv=256, down_ratio=4): #add_conv=256 - model = DLASeg('dla{}'.format(num_layers), heads, - pretrained=True, - down_ratio=down_ratio, - head_conv=head_conv) - return model diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/pose_dla_dcn.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/pose_dla_dcn.py deleted file mode 100644 index 7eb7ab0..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/networks/pose_dla_dcn.py +++ /dev/null @@ -1,494 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import math -import logging -import numpy as np -from os.path import join - -import torch -from torch import nn -import torch.nn.functional as F -import torch.utils.model_zoo as model_zoo - -from .DCNv2.dcn_v2 import DCN - -BN_MOMENTUM = 0.1 -logger = logging.getLogger(__name__) - -def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'): - return join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash)) - - -def conv3x3(in_planes, out_planes, stride=1): - "3x3 convolution with padding" - return nn.Conv2d(in_planes, 
out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class BasicBlock(nn.Module): - def __init__(self, inplanes, planes, stride=1, dilation=1): - super(BasicBlock, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, - stride=stride, padding=dilation, - bias=False, dilation=dilation) - self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) - self.relu = nn.ReLU(inplace=True) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, - stride=1, padding=dilation, - bias=False, dilation=dilation) - self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) - self.stride = stride - - def forward(self, x, residual=None): - if residual is None: - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - out += residual - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 2 - - def __init__(self, inplanes, planes, stride=1, dilation=1): - super(Bottleneck, self).__init__() - expansion = Bottleneck.expansion - bottle_planes = planes // expansion - self.conv1 = nn.Conv2d(inplanes, bottle_planes, - kernel_size=1, bias=False) - self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM) - self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3, - stride=stride, padding=dilation, - bias=False, dilation=dilation) - self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM) - self.conv3 = nn.Conv2d(bottle_planes, planes, - kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) - self.relu = nn.ReLU(inplace=True) - self.stride = stride - - def forward(self, x, residual=None): - if residual is None: - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - out += residual - out = self.relu(out) - - return out - - -class BottleneckX(nn.Module): - expansion = 2 - cardinality = 32 - - def __init__(self, inplanes, planes, stride=1, dilation=1): - super(BottleneckX, self).__init__() - cardinality = BottleneckX.cardinality - # dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0))) - # bottle_planes = dim * cardinality - bottle_planes = planes * cardinality // 32 - self.conv1 = nn.Conv2d(inplanes, bottle_planes, - kernel_size=1, bias=False) - self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM) - self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3, - stride=stride, padding=dilation, bias=False, - dilation=dilation, groups=cardinality) - self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM) - self.conv3 = nn.Conv2d(bottle_planes, planes, - kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM) - self.relu = nn.ReLU(inplace=True) - self.stride = stride - - def forward(self, x, residual=None): - if residual is None: - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - out += residual - out = self.relu(out) - - return out - - -class Root(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size, residual): - super(Root, self).__init__() - self.conv = nn.Conv2d( - in_channels, out_channels, 1, - stride=1, bias=False, padding=(kernel_size - 1) // 2) - self.bn = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM) - self.relu = 
nn.ReLU(inplace=True) - self.residual = residual - - def forward(self, *x): - children = x - x = self.conv(torch.cat(x, 1)) - x = self.bn(x) - if self.residual: - x += children[0] - x = self.relu(x) - - return x - - -class Tree(nn.Module): - def __init__(self, levels, block, in_channels, out_channels, stride=1, - level_root=False, root_dim=0, root_kernel_size=1, - dilation=1, root_residual=False): - super(Tree, self).__init__() - if root_dim == 0: - root_dim = 2 * out_channels - if level_root: - root_dim += in_channels - if levels == 1: - self.tree1 = block(in_channels, out_channels, stride, - dilation=dilation) - self.tree2 = block(out_channels, out_channels, 1, - dilation=dilation) - else: - self.tree1 = Tree(levels - 1, block, in_channels, out_channels, - stride, root_dim=0, - root_kernel_size=root_kernel_size, - dilation=dilation, root_residual=root_residual) - self.tree2 = Tree(levels - 1, block, out_channels, out_channels, - root_dim=root_dim + out_channels, - root_kernel_size=root_kernel_size, - dilation=dilation, root_residual=root_residual) - if levels == 1: - self.root = Root(root_dim, out_channels, root_kernel_size, - root_residual) - self.level_root = level_root - self.root_dim = root_dim - self.downsample = None - self.project = None - self.levels = levels - if stride > 1: - self.downsample = nn.MaxPool2d(stride, stride=stride) - if in_channels != out_channels: - self.project = nn.Sequential( - nn.Conv2d(in_channels, out_channels, - kernel_size=1, stride=1, bias=False), - nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM) - ) - - def forward(self, x, residual=None, children=None): - children = [] if children is None else children - bottom = self.downsample(x) if self.downsample else x - residual = self.project(bottom) if self.project else bottom - if self.level_root: - children.append(bottom) - x1 = self.tree1(x, residual) - if self.levels == 1: - x2 = self.tree2(x1) - x = self.root(x2, x1, *children) - else: - children.append(x1) - x = self.tree2(x1, children=children) - return x - - -class DLA(nn.Module): - def __init__(self, levels, channels, num_classes=1000, - block=BasicBlock, residual_root=False, linear_root=False): - super(DLA, self).__init__() - self.channels = channels - self.num_classes = num_classes - self.base_layer = nn.Sequential( - nn.Conv2d(3, channels[0], kernel_size=7, stride=1, - padding=3, bias=False), - nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM), - nn.ReLU(inplace=True)) - self.level0 = self._make_conv_level( - channels[0], channels[0], levels[0]) - self.level1 = self._make_conv_level( - channels[0], channels[1], levels[1], stride=2) - self.level2 = Tree(levels[2], block, channels[1], channels[2], 2, - level_root=False, - root_residual=residual_root) - self.level3 = Tree(levels[3], block, channels[2], channels[3], 2, - level_root=True, root_residual=residual_root) - self.level4 = Tree(levels[4], block, channels[3], channels[4], 2, - level_root=True, root_residual=residual_root) - self.level5 = Tree(levels[5], block, channels[4], channels[5], 2, - level_root=True, root_residual=residual_root) - - # for m in self.modules(): - # if isinstance(m, nn.Conv2d): - # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - # m.weight.data.normal_(0, math.sqrt(2. 
/ n)) - # elif isinstance(m, nn.BatchNorm2d): - # m.weight.data.fill_(1) - # m.bias.data.zero_() - - def _make_level(self, block, inplanes, planes, blocks, stride=1): - downsample = None - if stride != 1 or inplanes != planes: - downsample = nn.Sequential( - nn.MaxPool2d(stride, stride=stride), - nn.Conv2d(inplanes, planes, - kernel_size=1, stride=1, bias=False), - nn.BatchNorm2d(planes, momentum=BN_MOMENTUM), - ) - - layers = [] - layers.append(block(inplanes, planes, stride, downsample=downsample)) - for i in range(1, blocks): - layers.append(block(inplanes, planes)) - - return nn.Sequential(*layers) - - def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): - modules = [] - for i in range(convs): - modules.extend([ - nn.Conv2d(inplanes, planes, kernel_size=3, - stride=stride if i == 0 else 1, - padding=dilation, bias=False, dilation=dilation), - nn.BatchNorm2d(planes, momentum=BN_MOMENTUM), - nn.ReLU(inplace=True)]) - inplanes = planes - return nn.Sequential(*modules) - - def forward(self, x): - y = [] - x = self.base_layer(x) - for i in range(6): - x = getattr(self, 'level{}'.format(i))(x) - y.append(x) - return y - - def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'): - # fc = self.fc - if name.endswith('.pth'): - model_weights = torch.load(data + name) - else: - model_url = get_model_url(data, name, hash) - model_weights = model_zoo.load_url(model_url) - num_classes = len(model_weights[list(model_weights.keys())[-1]]) - self.fc = nn.Conv2d( - self.channels[-1], num_classes, - kernel_size=1, stride=1, padding=0, bias=True) - self.load_state_dict(model_weights) - # self.fc = fc - - -def dla34(pretrained=True, **kwargs): # DLA-34 - model = DLA([1, 1, 1, 2, 2, 1], - [16, 32, 64, 128, 256, 512], - block=BasicBlock, **kwargs) - if pretrained: - model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86') - return model - -class Identity(nn.Module): - - def __init__(self): - super(Identity, self).__init__() - - def forward(self, x): - return x - - -def fill_fc_weights(layers): - for m in layers.modules(): - if isinstance(m, nn.Conv2d): - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - -def fill_up_weights(up): - w = up.weight.data - f = math.ceil(w.size(2) / 2) - c = (2 * f - 1 - f % 2) / (2. 
* f) - for i in range(w.size(2)): - for j in range(w.size(3)): - w[0, 0, i, j] = \ - (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c)) - for c in range(1, w.size(0)): - w[c, 0, :, :] = w[0, 0, :, :] - - -class DeformConv(nn.Module): - def __init__(self, chi, cho): - super(DeformConv, self).__init__() - self.actf = nn.Sequential( - nn.BatchNorm2d(cho, momentum=BN_MOMENTUM), - nn.ReLU(inplace=True) - ) - self.conv = DCN(chi, cho, kernel_size=(3,3), stride=1, padding=1, dilation=1, deformable_groups=1) - - def forward(self, x): - x = self.conv(x) - x = self.actf(x) - return x - - -class IDAUp(nn.Module): - - def __init__(self, o, channels, up_f): - super(IDAUp, self).__init__() - for i in range(1, len(channels)): - c = channels[i] - f = int(up_f[i]) - proj = DeformConv(c, o) - node = DeformConv(o, o) - - up = nn.ConvTranspose2d(o, o, f * 2, stride=f, - padding=f // 2, output_padding=0, - groups=o, bias=False) - fill_up_weights(up) - - setattr(self, 'proj_' + str(i), proj) - setattr(self, 'up_' + str(i), up) - setattr(self, 'node_' + str(i), node) - - - def forward(self, layers, startp, endp): - for i in range(startp + 1, endp): - upsample = getattr(self, 'up_' + str(i - startp)) - project = getattr(self, 'proj_' + str(i - startp)) - layers[i] = upsample(project(layers[i])) - node = getattr(self, 'node_' + str(i - startp)) - layers[i] = node(layers[i] + layers[i - 1]) - - - -class DLAUp(nn.Module): - def __init__(self, startp, channels, scales, in_channels=None): - super(DLAUp, self).__init__() - self.startp = startp - if in_channels is None: - in_channels = channels - self.channels = channels - channels = list(channels) - scales = np.array(scales, dtype=int) - for i in range(len(channels) - 1): - j = -i - 2 - setattr(self, 'ida_{}'.format(i), - IDAUp(channels[j], in_channels[j:], - scales[j:] // scales[j])) - scales[j + 1:] = scales[j] - in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]] - - def forward(self, layers): - out = [layers[-1]] # start with 32 - for i in range(len(layers) - self.startp - 1): - ida = getattr(self, 'ida_{}'.format(i)) - ida(layers, len(layers) -i - 2, len(layers)) - out.insert(0, layers[-1]) - return out - - -class Interpolate(nn.Module): - def __init__(self, scale, mode): - super(Interpolate, self).__init__() - self.scale = scale - self.mode = mode - - def forward(self, x): - x = F.interpolate(x, scale_factor=self.scale, mode=self.mode, align_corners=False) - return x - - -class DLASeg(nn.Module): - def __init__(self, base_name, heads, pretrained, down_ratio, final_kernel, - last_level, head_conv, out_channel=0): - super(DLASeg, self).__init__() - assert down_ratio in [2, 4, 8, 16] - self.first_level = int(np.log2(down_ratio)) - self.last_level = last_level - self.base = globals()[base_name](pretrained=pretrained) - channels = self.base.channels - scales = [2 ** i for i in range(len(channels[self.first_level:]))] - self.dla_up = DLAUp(self.first_level, channels[self.first_level:], scales) - - if out_channel == 0: - out_channel = channels[self.first_level] - - self.ida_up = IDAUp(out_channel, channels[self.first_level:self.last_level], - [2 ** i for i in range(self.last_level - self.first_level)]) - - self.heads = heads - for head in self.heads: - classes = self.heads[head] - if head_conv > 0: - fc = nn.Sequential( - nn.Conv2d(channels[self.first_level], head_conv, - kernel_size=3, padding=1, bias=True), - nn.ReLU(inplace=True), - nn.Conv2d(head_conv, classes, - kernel_size=final_kernel, stride=1, - padding=final_kernel // 2, bias=True)) - if 'hm' in 
head: - fc[-1].bias.data.fill_(-2.19) - else: - fill_fc_weights(fc) - else: - fc = nn.Conv2d(channels[self.first_level], classes, - kernel_size=final_kernel, stride=1, - padding=final_kernel // 2, bias=True) - if 'hm' in head: - fc.bias.data.fill_(-2.19) - else: - fill_fc_weights(fc) - self.__setattr__(head, fc) - - def forward(self, x): - x = self.base(x) - x = self.dla_up(x) - - y = [] - for i in range(self.last_level - self.first_level): - y.append(x[i].clone()) - self.ida_up(y, 0, len(y)) - - z = {} - for head in self.heads: - z[head] = self.__getattr__(head)(y[-1]) - return [z] - - -def get_pose_net(num_layers, heads, head_conv=256, down_ratio=4): - #pretrained=True - model = DLASeg('dla{}'.format(num_layers), heads, - pretrained=False, - down_ratio=down_ratio, - final_kernel=1, - last_level=5, - head_conv=head_conv) - return model - diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/scatter_gather.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/scatter_gather.py deleted file mode 100644 index 9a46058..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/scatter_gather.py +++ /dev/null @@ -1,38 +0,0 @@ -import torch -from torch.autograd import Variable -from torch.nn.parallel._functions import Scatter, Gather - - -def scatter(inputs, target_gpus, dim=0, chunk_sizes=None): - r""" - Slices variables into approximately equal chunks and - distributes them across given GPUs. Duplicates - references to objects that are not variables. Does not - support Tensors. - """ - def scatter_map(obj): - if isinstance(obj, Variable): - return Scatter.apply(target_gpus, chunk_sizes, dim, obj) - assert not torch.is_tensor(obj), "Tensors not supported in scatter." - if isinstance(obj, tuple): - return list(zip(*map(scatter_map, obj))) - if isinstance(obj, list): - return list(map(list, zip(*map(scatter_map, obj)))) - if isinstance(obj, dict): - return list(map(type(obj), zip(*map(scatter_map, obj.items())))) - return [obj for targets in target_gpus] - - return scatter_map(inputs) - - -def scatter_kwargs(inputs, kwargs, target_gpus, dim=0, chunk_sizes=None): - r"""Scatter with support for kwargs dictionary""" - inputs = scatter(inputs, target_gpus, dim, chunk_sizes) if inputs else [] - kwargs = scatter(kwargs, target_gpus, dim, chunk_sizes) if kwargs else [] - if len(inputs) < len(kwargs): - inputs.extend([() for _ in range(len(kwargs) - len(inputs))]) - elif len(kwargs) < len(inputs): - kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))]) - inputs = tuple(inputs) - kwargs = tuple(kwargs) - return inputs, kwargs diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/utils.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/utils.py deleted file mode 100644 index 318038a..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/models/utils.py +++ /dev/null @@ -1,50 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import torch -import torch.nn as nn - -def _sigmoid(x): - y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4) - return y - -def _gather_feat(feat, ind, mask=None): - dim = feat.size(2) - ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim) - feat = feat.gather(1, ind) - if mask is not None: - mask = mask.unsqueeze(2).expand_as(feat) - feat = feat[mask] - feat = feat.view(-1, dim) - return feat - -def _tranpose_and_gather_feat(feat, ind): - feat = feat.permute(0, 2, 3, 1).contiguous() - feat = 
feat.view(feat.size(0), -1, feat.size(3)) - feat = _gather_feat(feat, ind) - return feat - -def flip_tensor(x): - return torch.flip(x, [3]) - # tmp = x.detach().cpu().numpy()[..., ::-1].copy() - # return torch.from_numpy(tmp).to(x.device) - -def flip_lr(x, flip_idx): - tmp = x.detach().cpu().numpy()[..., ::-1].copy() - shape = tmp.shape - for e in flip_idx: - tmp[:, e[0], ...], tmp[:, e[1], ...] = \ - tmp[:, e[1], ...].copy(), tmp[:, e[0], ...].copy() - return torch.from_numpy(tmp.reshape(shape)).to(x.device) - -def flip_lr_off(x, flip_idx): - tmp = x.detach().cpu().numpy()[..., ::-1].copy() - shape = tmp.shape - tmp = tmp.reshape(tmp.shape[0], 17, 2, - tmp.shape[2], tmp.shape[3]) - tmp[:, :, 0, :, :] *= -1 - for e in flip_idx: - tmp[:, e[0], ...], tmp[:, e[1], ...] = \ - tmp[:, e[1], ...].copy(), tmp[:, e[0], ...].copy() - return torch.from_numpy(tmp.reshape(shape)).to(x.device) \ No newline at end of file diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/__init__.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/ddd_utils.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/ddd_utils.py deleted file mode 100644 index e79c14e..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/ddd_utils.py +++ /dev/null @@ -1,131 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import cv2 - -def compute_box_3d(dim, location, rotation_y): - # dim: 3 - # location: 3 - # rotation_y: 1 - # return: 8 x 3 - c, s = np.cos(rotation_y), np.sin(rotation_y) - R = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]], dtype=np.float32) - l, w, h = dim[2], dim[1], dim[0] - x_corners = [l/2, l/2, -l/2, -l/2, l/2, l/2, -l/2, -l/2] - y_corners = [0,0,0,0,-h,-h,-h,-h] - z_corners = [w/2, -w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2] - - corners = np.array([x_corners, y_corners, z_corners], dtype=np.float32) - corners_3d = np.dot(R, corners) - corners_3d = corners_3d + np.array(location, dtype=np.float32).reshape(3, 1) - return corners_3d.transpose(1, 0) - -def project_to_image(pts_3d, P): - # pts_3d: n x 3 - # P: 3 x 4 - # return: n x 2 - pts_3d_homo = np.concatenate( - [pts_3d, np.ones((pts_3d.shape[0], 1), dtype=np.float32)], axis=1) - pts_2d = np.dot(P, pts_3d_homo.transpose(1, 0)).transpose(1, 0) - pts_2d = pts_2d[:, :2] / pts_2d[:, 2:] - # import pdb; pdb.set_trace() - return pts_2d - -def compute_orientation_3d(dim, location, rotation_y): - # dim: 3 - # location: 3 - # rotation_y: 1 - # return: 2 x 3 - c, s = np.cos(rotation_y), np.sin(rotation_y) - R = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]], dtype=np.float32) - orientation_3d = np.array([[0, dim[2]], [0, 0], [0, 0]], dtype=np.float32) - orientation_3d = np.dot(R, orientation_3d) - orientation_3d = orientation_3d + \ - np.array(location, dtype=np.float32).reshape(3, 1) - return orientation_3d.transpose(1, 0) - -def draw_box_3d(image, corners, c=(0, 0, 255)): - face_idx = [[0,1,5,4], - [1,2,6, 5], - [2,3,7,6], - [3,0,4,7]] - for ind_f in range(3, -1, -1): - f = face_idx[ind_f] - for j in range(4): - cv2.line(image, (corners[f[j], 0], corners[f[j], 1]), - (corners[f[(j+1)%4], 0], corners[f[(j+1)%4], 1]), c, 2, lineType=cv2.LINE_AA) - if ind_f == 0: - cv2.line(image, (corners[f[0], 0], corners[f[0], 1]), - (corners[f[2], 0], corners[f[2], 1]), c, 1, lineType=cv2.LINE_AA) 
- cv2.line(image, (corners[f[1], 0], corners[f[1], 1]), - (corners[f[3], 0], corners[f[3], 1]), c, 1, lineType=cv2.LINE_AA) - return image - -def unproject_2d_to_3d(pt_2d, depth, P): - # pts_2d: 2 - # depth: 1 - # P: 3 x 4 - # return: 3 - z = depth - P[2, 3] - x = (pt_2d[0] * depth - P[0, 3] - P[0, 2] * z) / P[0, 0] - y = (pt_2d[1] * depth - P[1, 3] - P[1, 2] * z) / P[1, 1] - pt_3d = np.array([x, y, z], dtype=np.float32) - return pt_3d - -def alpha2rot_y(alpha, x, cx, fx): - """ - Get rotation_y by alpha + theta - 180 - alpha : Observation angle of object, ranging [-pi..pi] - x : Object center x to the camera center (x-W/2), in pixels - rotation_y : Rotation ry around Y-axis in camera coordinates [-pi..pi] - """ - rot_y = alpha + np.arctan2(x - cx, fx) - if rot_y > np.pi: - rot_y -= 2 * np.pi - if rot_y < -np.pi: - rot_y += 2 * np.pi - return rot_y - -def rot_y2alpha(rot_y, x, cx, fx): - """ - Get alpha by rotation_y - theta - alpha : Observation angle of object, ranging [-pi..pi] - x : Object center x to the camera center (x-W/2), in pixels - rotation_y : Rotation ry around Y-axis in camera coordinates [-pi..pi] - """ - alpha = rot_y - np.arctan2(x - cx, fx) - if alpha > np.pi: - alpha -= 2 * np.pi - if alpha < -np.pi: - alpha += 2 * np.pi - return alpha - - -def ddd2locrot(center, alpha, dim, depth, calib): - # single image - locations = unproject_2d_to_3d(center, depth, calib) - locations[1] += dim[0] / 2 - rotation_y = alpha2rot_y(alpha, center[0], calib[0, 2], calib[0, 0]) - return locations, rotation_y - -def project_3d_bbox(location, dim, rotation_y, calib): - box_3d = compute_box_3d(dim, location, rotation_y) - box_2d = project_to_image(box_3d, calib) - return box_2d - - -if __name__ == '__main__': - calib = np.array( - [[7.070493000000e+02, 0.000000000000e+00, 6.040814000000e+02, 4.575831000000e+01], - [0.000000000000e+00, 7.070493000000e+02, 1.805066000000e+02, -3.454157000000e-01], - [0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 4.981016000000e-03]], - dtype=np.float32) - alpha = -0.20 - tl = np.array([712.40, 143.00], dtype=np.float32) - br = np.array([810.73, 307.92], dtype=np.float32) - ct = (tl + br) / 2 - rotation_y = 0.01 - print('alpha2rot_y', alpha2rot_y(alpha, ct[0], calib[0, 2], calib[0, 0])) - print('rotation_y', rotation_y) \ No newline at end of file diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/debugger.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/debugger.py deleted file mode 100644 index 3cf676b..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/debugger.py +++ /dev/null @@ -1,572 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import cv2 -from .ddd_utils import compute_box_3d, project_to_image, draw_box_3d - -class Debugger(object): - def __init__(self, ipynb=False, theme='black', - num_classes=-1, dataset=None, down_ratio=4): - self.ipynb = ipynb - if not self.ipynb: - import matplotlib.pyplot as plt - self.plt = plt - self.imgs = {} - self.theme = theme - colors = [(color_list[_]).astype(np.uint8) \ - for _ in range(len(color_list))] - self.colors = np.array(colors, dtype=np.uint8).reshape(len(colors), 1, 1, 3) - if self.theme == 'white': - self.colors = self.colors.reshape(-1)[::-1].reshape(len(colors), 1, 1, 3) - self.colors = np.clip(self.colors, 0., 0.6 * 255).astype(np.uint8) - self.dim_scale = 1 - if dataset == 'coco_hp': - self.names = ['p'] - self.num_class =
1 - self.num_joints = 17 - self.edges = [[0, 1], [0, 2], [1, 3], [2, 4], - [3, 5], [4, 6], [5, 6], - [5, 7], [7, 9], [6, 8], [8, 10], - [5, 11], [6, 12], [11, 12], - [11, 13], [13, 15], [12, 14], [14, 16]] - self.ec = [(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), - (255, 0, 0), (0, 0, 255), (255, 0, 255), - (255, 0, 0), (255, 0, 0), (0, 0, 255), (0, 0, 255), - (255, 0, 0), (0, 0, 255), (255, 0, 255), - (255, 0, 0), (255, 0, 0), (0, 0, 255), (0, 0, 255)] - self.colors_hp = [(255, 0, 255), (255, 0, 0), (0, 0, 255), - (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), - (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), - (255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255), - (255, 0, 0), (0, 0, 255)] - elif num_classes == 80 or dataset == 'coco': - self.names = coco_class_name - elif num_classes == 20 or dataset == 'pascal': - self.names = pascal_class_name - elif dataset == 'gta': - self.names = gta_class_name - self.focal_length = 935.3074360871937 - self.W = 1920 - self.H = 1080 - self.dim_scale = 3 - elif dataset == 'viper': - self.names = gta_class_name - self.focal_length = 1158 - self.W = 1920 - self.H = 1080 - self.dim_scale = 3 - elif num_classes == 3 or dataset == 'kitti': - self.names = kitti_class_name - self.focal_length = 721.5377 - self.W = 1242 - self.H = 375 - elif num_classes == 22 or dataset == "watermark": - self.names = watermark_class_name - num_classes = len(self.names) - self.down_ratio=down_ratio - # for bird view - self.world_size = 64 - self.out_size = 384 - - def add_img(self, img, img_id='default', revert_color=False): - if revert_color: - img = 255 - img - self.imgs[img_id] = img.copy() - - def add_mask(self, mask, bg, imgId = 'default', trans = 0.8): - self.imgs[imgId] = (mask.reshape( - mask.shape[0], mask.shape[1], 1) * 255 * trans + \ - bg * (1 - trans)).astype(np.uint8) - - def show_img(self, pause = False, imgId = 'default'): - cv2.imshow('{}'.format(imgId), self.imgs[imgId]) - if pause: - cv2.waitKey() - - def add_blend_img(self, back, fore, img_id='blend', trans=0.7): - if self.theme == 'white': - fore = 255 - fore - if fore.shape[0] != back.shape[0] or fore.shape[1] != back.shape[1]: - fore = cv2.resize(fore, (back.shape[1], back.shape[0])) - if len(fore.shape) == 2: - fore = fore.reshape(fore.shape[0], fore.shape[1], 1) - self.imgs[img_id] = (back * (1.
- trans) + fore * trans) - self.imgs[img_id][self.imgs[img_id] > 255] = 255 - self.imgs[img_id][self.imgs[img_id] < 0] = 0 - self.imgs[img_id] = self.imgs[img_id].astype(np.uint8).copy() - - ''' - # slow version - def gen_colormap(self, img, output_res=None): - # num_classes = len(self.colors) - img[img < 0] = 0 - h, w = img.shape[1], img.shape[2] - if output_res is None: - output_res = (h * self.down_ratio, w * self.down_ratio) - color_map = np.zeros((output_res[0], output_res[1], 3), dtype=np.uint8) - for i in range(img.shape[0]): - resized = cv2.resize(img[i], (output_res[1], output_res[0])) - resized = resized.reshape(output_res[0], output_res[1], 1) - cl = self.colors[i] if not (self.theme == 'white') \ - else 255 - self.colors[i] - color_map = np.maximum(color_map, (resized * cl).astype(np.uint8)) - return color_map - ''' - - - def gen_colormap(self, img, output_res=None): - img = img.copy() - c, h, w = img.shape[0], img.shape[1], img.shape[2] - if output_res is None: - output_res = (h * self.down_ratio, w * self.down_ratio) - img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32) - colors = np.array( - self.colors, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3) - if self.theme == 'white': - colors = 255 - colors - color_map = (img * colors).max(axis=2).astype(np.uint8) - color_map = cv2.resize(color_map, (output_res[0], output_res[1])) - return color_map - - ''' - # slow - def gen_colormap_hp(self, img, output_res=None): - # num_classes = len(self.colors) - # img[img < 0] = 0 - h, w = img.shape[1], img.shape[2] - if output_res is None: - output_res = (h * self.down_ratio, w * self.down_ratio) - color_map = np.zeros((output_res[0], output_res[1], 3), dtype=np.uint8) - for i in range(img.shape[0]): - resized = cv2.resize(img[i], (output_res[1], output_res[0])) - resized = resized.reshape(output_res[0], output_res[1], 1) - cl = self.colors_hp[i] if not (self.theme == 'white') else \ - (255 - np.array(self.colors_hp[i])) - color_map = np.maximum(color_map, (resized * cl).astype(np.uint8)) - return color_map - ''' - - def gen_colormap_hp(self, img, output_res=None): - c, h, w = img.shape[0], img.shape[1], img.shape[2] - if output_res is None: - output_res = (h * self.down_ratio, w * self.down_ratio) - img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32) - colors = np.array( - self.colors_hp, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3) - if self.theme == 'white': - colors = 255 - colors - color_map = (img * colors).max(axis=2).astype(np.uint8) - color_map = cv2.resize(color_map, (output_res[0], output_res[1])) - return color_map - - def add_rect(self, rect1, rect2, c, conf=1, img_id='default'): - cv2.rectangle( - self.imgs[img_id], (rect1[0], rect1[1]), (rect2[0], rect2[1]), c, 2) - if conf < 1: - cv2.circle(self.imgs[img_id], (rect1[0], rect1[1]), int(10 * conf), c, 1) - cv2.circle(self.imgs[img_id], (rect2[0], rect2[1]), int(10 * conf), c, 1) - cv2.circle(self.imgs[img_id], (rect1[0], rect2[1]), int(10 * conf), c, 1) - cv2.circle(self.imgs[img_id], (rect2[0], rect1[1]), int(10 * conf), c, 1) - - def add_coco_bbox(self, bbox, cat, conf=1, show_txt=True, img_id='default'): - bbox = np.array(bbox, dtype=np.int32) - # cat = (int(cat) + 1) % 80 - cat = int(cat) - # print('cat', cat, self.names[cat]) - c = self.colors[cat][0][0].tolist() - if self.theme == 'white': - c = (255 - np.array(c)).tolist() - txt = '{}{:.1f}'.format(self.names[cat], conf) - font = cv2.FONT_HERSHEY_SIMPLEX - cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0] - 
cv2.rectangle( - self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), c, 2) - if show_txt: - cv2.rectangle(self.imgs[img_id], - (bbox[0], bbox[1] - cat_size[1] - 2), - (bbox[0] + cat_size[0], bbox[1] - 2), c, -1) - cv2.putText(self.imgs[img_id], txt, (bbox[0], bbox[1] - 2), - font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA) - - def add_coco_hp(self, points, img_id='default'): - points = np.array(points, dtype=np.int32).reshape(self.num_joints, 2) - for j in range(self.num_joints): - cv2.circle(self.imgs[img_id], - (points[j, 0], points[j, 1]), 3, self.colors_hp[j], -1) - for j, e in enumerate(self.edges): - if points[e].min() > 0: - cv2.line(self.imgs[img_id], (points[e[0], 0], points[e[0], 1]), - (points[e[1], 0], points[e[1], 1]), self.ec[j], 2, - lineType=cv2.LINE_AA) - - def add_points(self, points, img_id='default'): - num_classes = len(points) - # assert num_classes == len(self.colors) - for i in range(num_classes): - for j in range(len(points[i])): - c = self.colors[i, 0, 0] - cv2.circle(self.imgs[img_id], (points[i][j][0] * self.down_ratio, - points[i][j][1] * self.down_ratio), - 5, (255, 255, 255), -1) - cv2.circle(self.imgs[img_id], (points[i][j][0] * self.down_ratio, - points[i][j][1] * self.down_ratio), - 3, (int(c[0]), int(c[1]), int(c[2])), -1) - - def show_all_imgs(self, pause=False, time=0): - if not self.ipynb: - for i, v in self.imgs.items(): - cv2.imshow('{}'.format(i), v) - if cv2.waitKey(0 if pause else 1) == 27: - import sys - sys.exit(0) - else: - self.ax = None - nImgs = len(self.imgs) - fig=self.plt.figure(figsize=(nImgs * 10,10)) - nCols = nImgs - nRows = nImgs // nCols - for i, (k, v) in enumerate(self.imgs.items()): - fig.add_subplot(1, nImgs, i + 1) - if len(v.shape) == 3: - self.plt.imshow(cv2.cvtColor(v, cv2.COLOR_BGR2RGB)) - else: - self.plt.imshow(v) - self.plt.show() - - def save_img(self, imgId='default', path='./cache/debug/', image_name = 'default'): - cv2.imwrite(path + '{}'.format(image_name), self.imgs[imgId]) - - def save_all_imgs(self, path='./cache/debug/', prefix='', genID=False): - if genID: - try: - idx = int(np.loadtxt(path + '/id.txt')) - except: - idx = 0 - prefix=idx - np.savetxt(path + '/id.txt', np.ones(1) * (idx + 1), fmt='%d') - for i, v in self.imgs.items(): - cv2.imwrite(path + '/{}{}.png'.format(prefix, i), v) - - def save_all_imgs_test(self, path='./cache/debug/', prefix='', genID=False, image_name= 'image'): - if genID: - try: - idx = int(np.loadtxt(path + '/id.txt')) - except: - idx = 0 - prefix=idx - np.savetxt(path + '/id.txt', np.ones(1) * (idx + 1), fmt='%d') - for i, v in self.imgs.items(): - cv2.imwrite(path + '/{}{}.png'.format(image_name, i), v) - - - - - def remove_side(self, img_id, img): - if not (img_id in self.imgs): - return - ws = img.sum(axis=2).sum(axis=0) - l = 0 - while ws[l] == 0 and l < len(ws): - l+= 1 - r = ws.shape[0] - 1 - while ws[r] == 0 and r > 0: - r -= 1 - hs = img.sum(axis=2).sum(axis=1) - t = 0 - while hs[t] == 0 and t < len(hs): - t += 1 - b = hs.shape[0] - 1 - while hs[b] == 0 and b > 0: - b -= 1 - self.imgs[img_id] = self.imgs[img_id][t:b+1, l:r+1].copy() - - def project_3d_to_bird(self, pt): - pt[0] += self.world_size / 2 - pt[1] = self.world_size - pt[1] - pt = pt * self.out_size / self.world_size - return pt.astype(np.int32) - - def add_ct_detection( - self, img, dets, show_box=False, show_txt=True, - center_thresh=0.5, img_id='det'): - # dets: max_preds x 5 - self.imgs[img_id] = img.copy() - if type(dets) == type({}): - for cat in dets: - for i in range(len(dets[cat])): - if 
dets[cat][i, 2] > center_thresh: - cl = (self.colors[cat, 0, 0]).tolist() - ct = dets[cat][i, :2].astype(np.int32) - if show_box: - w, h = dets[cat][i, -2], dets[cat][i, -1] - x, y = dets[cat][i, 0], dets[cat][i, 1] - bbox = np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2], - dtype=np.float32) - self.add_coco_bbox( - bbox, cat - 1, dets[cat][i, 2], - show_txt=show_txt, img_id=img_id) - else: - for i in range(len(dets)): - if dets[i, 2] > center_thresh: - # print('dets', dets[i]) - cat = int(dets[i, -1]) - cl = (self.colors[cat, 0, 0] if self.theme == 'black' else \ - 255 - self.colors[cat, 0, 0]).tolist() - ct = dets[i, :2].astype(np.int32) * self.down_ratio - cv2.circle(self.imgs[img_id], (ct[0], ct[1]), 3, cl, -1) - if show_box: - w, h = dets[i, -3] * self.down_ratio, dets[i, -2] * self.down_ratio - x, y = dets[i, 0] * self.down_ratio, dets[i, 1] * self.down_ratio - bbox = np.array([x - w / 2, y - h / 2, x + w / 2, y + h / 2], - dtype=np.float32) - self.add_coco_bbox(bbox, dets[i, -1], dets[i, 2], img_id=img_id) - - def add_3d_detection( - self, image_or_path, dets, calib, show_txt=False, - center_thresh=0.5, img_id='det'): - if isinstance(image_or_path, np.ndarray): - self.imgs[img_id] = image_or_path - else: - self.imgs[img_id] = cv2.imread(image_or_path) - for cat in dets: - for i in range(len(dets[cat])): - cl = (self.colors[cat - 1, 0, 0]).tolist() - if dets[cat][i, -1] > center_thresh: - dim = dets[cat][i, 5:8] - loc = dets[cat][i, 8:11] - rot_y = dets[cat][i, 11] - # loc[1] = loc[1] - dim[0] / 2 + dim[0] / 2 / self.dim_scale - # dim = dim / self.dim_scale - if loc[2] > 1: - box_3d = compute_box_3d(dim, loc, rot_y) - box_2d = project_to_image(box_3d, calib) - self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl) - - def compose_vis_add( - self, img_path, dets, calib, - center_thresh, pred, bev, img_id='out'): - self.imgs[img_id] = cv2.imread(img_path) - # h, w = self.imgs[img_id].shape[:2] - # pred = cv2.resize(pred, (h, w)) - h, w = pred.shape[:2] - hs, ws = self.imgs[img_id].shape[0] / h, self.imgs[img_id].shape[1] / w - self.imgs[img_id] = cv2.resize(self.imgs[img_id], (w, h)) - self.add_blend_img(self.imgs[img_id], pred, img_id) - for cat in dets: - for i in range(len(dets[cat])): - cl = (self.colors[cat - 1, 0, 0]).tolist() - if dets[cat][i, -1] > center_thresh: - dim = dets[cat][i, 5:8] - loc = dets[cat][i, 8:11] - rot_y = dets[cat][i, 11] - # loc[1] = loc[1] - dim[0] / 2 + dim[0] / 2 / self.dim_scale - # dim = dim / self.dim_scale - if loc[2] > 1: - box_3d = compute_box_3d(dim, loc, rot_y) - box_2d = project_to_image(box_3d, calib) - box_2d[:, 0] /= hs - box_2d[:, 1] /= ws - self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl) - self.imgs[img_id] = np.concatenate( - [self.imgs[img_id], self.imgs[bev]], axis=1) - - def add_2d_detection( - self, img, dets, show_box=False, show_txt=True, - center_thresh=0.5, img_id='det'): - self.imgs[img_id] = img - for cat in dets: - for i in range(len(dets[cat])): - cl = (self.colors[cat - 1, 0, 0]).tolist() - if dets[cat][i, -1] > center_thresh: - bbox = dets[cat][i, 1:5] - self.add_coco_bbox( - bbox, cat - 1, dets[cat][i, -1], - show_txt=show_txt, img_id=img_id) - - def add_bird_view(self, dets, center_thresh=0.3, img_id='bird'): - bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230 - for cat in dets: - cl = (self.colors[cat - 1, 0, 0]).tolist() - lc = (250, 152, 12) - for i in range(len(dets[cat])): - if dets[cat][i, -1] > center_thresh: - dim = dets[cat][i, 5:8] - loc = dets[cat][i, 8:11] 
- rot_y = dets[cat][i, 11] - rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]] - for k in range(4): - rect[k] = self.project_3d_to_bird(rect[k]) - # cv2.circle(bird_view, (rect[k][0], rect[k][1]), 2, lc, -1) - cv2.polylines( - bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)], - True,lc,2,lineType=cv2.LINE_AA) - for e in [[0, 1]]: - t = 4 if e == [0, 1] else 1 - cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]), - (rect[e[1]][0], rect[e[1]][1]), lc, t, - lineType=cv2.LINE_AA) - self.imgs[img_id] = bird_view - - def add_bird_views(self, dets_dt, dets_gt, center_thresh=0.3, img_id='bird'): - alpha = 0.5 - bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230 - for ii, (dets, lc, cc) in enumerate( - [(dets_gt, (12, 49, 250), (0, 0, 255)), - (dets_dt, (250, 152, 12), (255, 0, 0))]): - # cc = np.array(lc, dtype=np.uint8).reshape(1, 1, 3) - for cat in dets: - cl = (self.colors[cat - 1, 0, 0]).tolist() - for i in range(len(dets[cat])): - if dets[cat][i, -1] > center_thresh: - dim = dets[cat][i, 5:8] - loc = dets[cat][i, 8:11] - rot_y = dets[cat][i, 11] - rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]] - for k in range(4): - rect[k] = self.project_3d_to_bird(rect[k]) - if ii == 0: - cv2.fillPoly( - bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)], - lc,lineType=cv2.LINE_AA) - else: - cv2.polylines( - bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)], - True,lc,2,lineType=cv2.LINE_AA) - # for e in [[0, 1], [1, 2], [2, 3], [3, 0]]: - for e in [[0, 1]]: - t = 4 if e == [0, 1] else 1 - cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]), - (rect[e[1]][0], rect[e[1]][1]), lc, t, - lineType=cv2.LINE_AA) - self.imgs[img_id] = bird_view - - -kitti_class_name = [ - 'p', 'v', 'b' -] - -gta_class_name = [ - 'p', 'v' -] - -watermark_class_name = [ - "365Taofang", "Fangtianxia", - "Fangchanchaoshi", "Beikezhaofang", - "Dazhongdianping", "Xianyu", - "58tongcheng", "Anjuke", - "Kujiale", "Soufang", - "Qichezhijia", "Renrenche", - "Souhuershou", "Youxin", - "Baixingwang", "Diyichewang", - "Beikezhaofang1", "Lianjia", - "Lianjia1", "Xinlangershoufang", - "Tubatu", "Loupanwang"] - - -pascal_class_name = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", - "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", - "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] - -coco_class_name = [ - 'person', 'bicycle', 'car', 'motorcycle', 'airplane', - 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', - 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', - 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', - 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', - 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', - 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', - 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', - 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', - 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', - 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', - 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', - 'scissors', 'teddy bear', 'hair drier', 'toothbrush' -] - -color_list = np.array( - [ - 1.000, 1.000, 1.000, - 0.850, 0.325, 0.098, - 0.929, 0.694, 0.125, - 0.494, 0.184, 0.556, - 0.466, 0.674, 0.188, - 0.301, 0.745, 0.933, - 0.635, 0.078, 0.184, - 0.300, 0.300, 0.300, - 0.600, 0.600, 0.600, - 1.000, 0.000, 0.000, - 1.000, 0.500, 0.000, - 0.749, 
0.749, 0.000, - 0.000, 1.000, 0.000, - 0.000, 0.000, 1.000, - 0.667, 0.000, 1.000, - 0.333, 0.333, 0.000, - 0.333, 0.667, 0.000, - 0.333, 1.000, 0.000, - 0.667, 0.333, 0.000, - 0.667, 0.667, 0.000, - 0.667, 1.000, 0.000, - 1.000, 0.333, 0.000, - 1.000, 0.667, 0.000, - 1.000, 1.000, 0.000, - 0.000, 0.333, 0.500, - 0.000, 0.667, 0.500, - 0.000, 1.000, 0.500, - 0.333, 0.000, 0.500, - 0.333, 0.333, 0.500, - 0.333, 0.667, 0.500, - 0.333, 1.000, 0.500, - 0.667, 0.000, 0.500, - 0.667, 0.333, 0.500, - 0.667, 0.667, 0.500, - 0.667, 1.000, 0.500, - 1.000, 0.000, 0.500, - 1.000, 0.333, 0.500, - 1.000, 0.667, 0.500, - 1.000, 1.000, 0.500, - 0.000, 0.333, 1.000, - 0.000, 0.667, 1.000, - 0.000, 1.000, 1.000, - 0.333, 0.000, 1.000, - 0.333, 0.333, 1.000, - 0.333, 0.667, 1.000, - 0.333, 1.000, 1.000, - 0.667, 0.000, 1.000, - 0.667, 0.333, 1.000, - 0.667, 0.667, 1.000, - 0.667, 1.000, 1.000, - 1.000, 0.000, 1.000, - 1.000, 0.333, 1.000, - 1.000, 0.667, 1.000, - 0.167, 0.000, 0.000, - 0.333, 0.000, 0.000, - 0.500, 0.000, 0.000, - 0.667, 0.000, 0.000, - 0.833, 0.000, 0.000, - 1.000, 0.000, 0.000, - 0.000, 0.167, 0.000, - 0.000, 0.333, 0.000, - 0.000, 0.500, 0.000, - 0.000, 0.667, 0.000, - 0.000, 0.833, 0.000, - 0.000, 1.000, 0.000, - 0.000, 0.000, 0.167, - 0.000, 0.000, 0.333, - 0.000, 0.000, 0.500, - 0.000, 0.000, 0.667, - 0.000, 0.000, 0.833, - 0.000, 0.000, 1.000, - 0.000, 0.000, 0.000, - 0.143, 0.143, 0.143, - 0.286, 0.286, 0.286, - 0.429, 0.429, 0.429, - 0.571, 0.571, 0.571, - 0.714, 0.714, 0.714, - 0.857, 0.857, 0.857, - 0.000, 0.447, 0.741, - 0.50, 0.5, 0 - ] - ).astype(np.float32) -color_list = color_list.reshape((-1, 3)) * 255 diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/image.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/image.py deleted file mode 100644 index e5d3583..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/image.py +++ /dev/null @@ -1,223 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import cv2 -import random - -def flip(img): - return img[:, :, ::-1].copy() - -def transform_preds(coords, center, scale, output_size): - target_coords = np.zeros(coords.shape) - trans = get_affine_transform(center, scale, 0, output_size, inv=1) - for p in range(coords.shape[0]): - target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans) - return target_coords - - -def get_affine_transform(center, - scale, - rot, - output_size, - shift=np.array([0, 0], dtype=np.float32), - inv=0): - if not isinstance(scale, np.ndarray) and not isinstance(scale, list): - scale = np.array([scale, scale], dtype=np.float32) - - scale_tmp = scale - src_w = scale_tmp[0] - dst_w = output_size[0] - dst_h = output_size[1] - - rot_rad = np.pi * rot / 180 - src_dir = get_dir([0, src_w * -0.5], rot_rad) - dst_dir = np.array([0, dst_w * -0.5], np.float32) - - src = np.zeros((3, 2), dtype=np.float32) - dst = np.zeros((3, 2), dtype=np.float32) - src[0, :] = center + scale_tmp * shift - src[1, :] = center + src_dir + scale_tmp * shift - dst[0, :] = [dst_w * 0.5, dst_h * 0.5] - dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir - - src[2:, :] = get_3rd_point(src[0, :], src[1, :]) - dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) - - if inv: - trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) - else: - trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) - - return trans - - -def affine_transform(pt, t): - new_pt 
= np.array([pt[0], pt[1], 1.], dtype=np.float32).T - new_pt = np.dot(t, new_pt) - return new_pt[:2] - - -def get_3rd_point(a, b): - direct = a - b - return b + np.array([-direct[1], direct[0]], dtype=np.float32) - - -def get_dir(src_point, rot_rad): - sn, cs = np.sin(rot_rad), np.cos(rot_rad) - - src_result = [0, 0] - src_result[0] = src_point[0] * cs - src_point[1] * sn - src_result[1] = src_point[0] * sn + src_point[1] * cs - - return src_result - - -def crop(img, center, scale, output_size, rot=0): - trans = get_affine_transform(center, scale, rot, output_size) - - dst_img = cv2.warpAffine(img, - trans, - (int(output_size[0]), int(output_size[1])), - flags=cv2.INTER_LINEAR) - - return dst_img - - -def gaussian_radius(det_size, min_overlap=0.7): - height, width = det_size - - a1 = 1 - b1 = (height + width) - c1 = width * height * (1 - min_overlap) / (1 + min_overlap) - sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1) - r1 = (b1 + sq1) / 2 - - a2 = 4 - b2 = 2 * (height + width) - c2 = (1 - min_overlap) * width * height - sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2) - r2 = (b2 + sq2) / 2 - - a3 = 4 * min_overlap - b3 = -2 * min_overlap * (height + width) - c3 = (min_overlap - 1) * width * height - sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3) - r3 = (b3 + sq3) / 2 - return min(r1, r2, r3) - - -def gaussian2D(shape, sigma=1): - m, n = [(ss - 1.) / 2. for ss in shape] - y, x = np.ogrid[-m:m+1,-n:n+1] - - h = np.exp(-(x * x + y * y) / (2 * sigma * sigma)) - h[h < np.finfo(h.dtype).eps * h.max()] = 0 - return h - -def draw_umich_gaussian(heatmap, center, radius, k=1): - diameter = 2 * radius + 1 - gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6) - - x, y = int(center[0]), int(center[1]) - - height, width = heatmap.shape[0:2] - - left, right = min(x, radius), min(width - x, radius + 1) - top, bottom = min(y, radius), min(height - y, radius + 1) - - masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] - masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right] - if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug - np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap) - return heatmap - -def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False): - diameter = 2 * radius + 1 - gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6) - value = np.array(value, dtype=np.float32).reshape(-1, 1, 1) - dim = value.shape[0] - reg = np.ones((dim, diameter*2+1, diameter*2+1), dtype=np.float32) * value - if is_offset and dim == 2: - delta = np.arange(diameter*2+1) - radius - reg[0] = reg[0] - delta.reshape(1, -1) - reg[1] = reg[1] - delta.reshape(-1, 1) - - x, y = int(center[0]), int(center[1]) - - height, width = heatmap.shape[0:2] - - left, right = min(x, radius), min(width - x, radius + 1) - top, bottom = min(y, radius), min(height - y, radius + 1) - - masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] - masked_regmap = regmap[:, y - top:y + bottom, x - left:x + right] - masked_gaussian = gaussian[radius - top:radius + bottom, - radius - left:radius + right] - masked_reg = reg[:, radius - top:radius + bottom, - radius - left:radius + right] - if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug - idx = (masked_gaussian >= masked_heatmap).reshape( - 1, masked_gaussian.shape[0], masked_gaussian.shape[1]) - masked_regmap = (1-idx) * masked_regmap + idx * masked_reg - regmap[:, y - top:y + bottom, x - left:x + right] = masked_regmap - return regmap - - -def 
draw_msra_gaussian(heatmap, center, sigma): - tmp_size = sigma * 3 - mu_x = int(center[0] + 0.5) - mu_y = int(center[1] + 0.5) - w, h = heatmap.shape[0], heatmap.shape[1] - ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)] - br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)] - if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0: - return heatmap - size = 2 * tmp_size + 1 - x = np.arange(0, size, 1, np.float32) - y = x[:, np.newaxis] - x0 = y0 = size // 2 - g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2)) - g_x = max(0, -ul[0]), min(br[0], h) - ul[0] - g_y = max(0, -ul[1]), min(br[1], w) - ul[1] - img_x = max(0, ul[0]), min(br[0], h) - img_y = max(0, ul[1]), min(br[1], w) - heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum( - heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]], - g[g_y[0]:g_y[1], g_x[0]:g_x[1]]) - return heatmap - -def grayscale(image): - return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) - -def lighting_(data_rng, image, alphastd, eigval, eigvec): - alpha = data_rng.normal(scale=alphastd, size=(3, )) - image += np.dot(eigvec, eigval * alpha) - -def blend_(alpha, image1, image2): - image1 *= alpha - image2 *= (1 - alpha) - image1 += image2 - -def saturation_(data_rng, image, gs, gs_mean, var): - alpha = 1. + data_rng.uniform(low=-var, high=var) - blend_(alpha, image, gs[:, :, None]) - -def brightness_(data_rng, image, gs, gs_mean, var): - alpha = 1. + data_rng.uniform(low=-var, high=var) - image *= alpha - -def contrast_(data_rng, image, gs, gs_mean, var): - alpha = 1. + data_rng.uniform(low=-var, high=var) - blend_(alpha, image, gs_mean) - -def color_aug(data_rng, image, eig_val, eig_vec): - functions = [brightness_, contrast_, saturation_] - random.shuffle(functions) - - gs = grayscale(image) - gs_mean = gs.mean() - for f in functions: - f(data_rng, image, gs, gs_mean, 0.4) - lighting_(data_rng, image, 0.1, eig_val, eig_vec) diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/oracle_utils.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/oracle_utils.py deleted file mode 100644 index 3cbe737..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/oracle_utils.py +++ /dev/null @@ -1,42 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import numba - -@numba.jit(nopython=True, nogil=True) -def gen_oracle_map(feat, ind, w, h): - # feat: B x maxN x featDim - # ind: B x maxN - batch_size = feat.shape[0] - max_objs = feat.shape[1] - feat_dim = feat.shape[2] - out = np.zeros((batch_size, feat_dim, h, w), dtype=np.float32) - vis = np.zeros((batch_size, h, w), dtype=np.uint8) - ds = [(0, 1), (0, -1), (1, 0), (-1, 0)] - for i in range(batch_size): - queue_ind = np.zeros((h*w*2, 2), dtype=np.int32) - queue_feat = np.zeros((h*w*2, feat_dim), dtype=np.float32) - head, tail = 0, 0 - for j in range(max_objs): - if ind[i][j] > 0: - x, y = ind[i][j] % w, ind[i][j] // w - out[i, :, y, x] = feat[i][j] - vis[i, y, x] = 1 - queue_ind[tail] = x, y - queue_feat[tail] = feat[i][j] - tail += 1 - while tail - head > 0: - x, y = queue_ind[head] - f = queue_feat[head] - head += 1 - for (dx, dy) in ds: - xx, yy = x + dx, y + dy - if xx >= 0 and yy >= 0 and xx < w and yy < h and vis[i, yy, xx] < 1: - out[i, :, yy, xx] = f - vis[i, yy, xx] = 1 - queue_ind[tail] = xx, yy - queue_feat[tail] = f - tail += 1 - return out \ No newline at end of file diff --git 
a/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/post_process.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/post_process.py deleted file mode 100644 index 3ef72c2..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/post_process.py +++ /dev/null @@ -1,114 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -from .image import transform_preds -from .ddd_utils import ddd2locrot - - -def get_pred_depth(depth): - return depth - -def get_alpha(rot): - # output: (B, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos, - # bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos] - # return rot[:, 0] - idx = rot[:, 1] > rot[:, 5] - alpha1 = np.arctan(rot[:, 2] / rot[:, 3]) + (-0.5 * np.pi) - alpha2 = np.arctan(rot[:, 6] / rot[:, 7]) + ( 0.5 * np.pi) - return alpha1 * idx + alpha2 * (1 - idx) - - -def ddd_post_process_2d(dets, c, s, opt): - # dets: batch x max_dets x dim - # return 1-based class det list - ret = [] - include_wh = dets.shape[2] > 16 - for i in range(dets.shape[0]): - top_preds = {} - dets[i, :, :2] = transform_preds( - dets[i, :, 0:2], c[i], s[i], (opt.output_w, opt.output_h)) - classes = dets[i, :, -1] - for j in range(opt.num_classes): - inds = (classes == j) - top_preds[j + 1] = np.concatenate([ - dets[i, inds, :3].astype(np.float32), - get_alpha(dets[i, inds, 3:11])[:, np.newaxis].astype(np.float32), - get_pred_depth(dets[i, inds, 11:12]).astype(np.float32), - dets[i, inds, 12:15].astype(np.float32)], axis=1) - if include_wh: - top_preds[j + 1] = np.concatenate([ - top_preds[j + 1], - transform_preds( - dets[i, inds, 15:17], c[i], s[i], (opt.output_w, opt.output_h)) - .astype(np.float32)], axis=1) - ret.append(top_preds) - return ret - -def ddd_post_process_3d(dets, calibs): - # dets: batch x max_dets x dim - # return 1-based class det list - ret = [] - for i in range(len(dets)): - preds = {} - for cls_ind in dets[i].keys(): - preds[cls_ind] = [] - for j in range(len(dets[i][cls_ind])): - center = dets[i][cls_ind][j][:2] - score = dets[i][cls_ind][j][2] - alpha = dets[i][cls_ind][j][3] - depth = dets[i][cls_ind][j][4] - dimensions = dets[i][cls_ind][j][5:8] - wh = dets[i][cls_ind][j][8:10] - locations, rotation_y = ddd2locrot( - center, alpha, dimensions, depth, calibs[0]) - bbox = [center[0] - wh[0] / 2, center[1] - wh[1] / 2, - center[0] + wh[0] / 2, center[1] + wh[1] / 2] - pred = [alpha] + bbox + dimensions.tolist() + \ - locations.tolist() + [rotation_y, score] - preds[cls_ind].append(pred) - preds[cls_ind] = np.array(preds[cls_ind], dtype=np.float32) - ret.append(preds) - return ret - -def ddd_post_process(dets, c, s, calibs, opt): - # dets: batch x max_dets x dim - # return 1-based class det list - dets = ddd_post_process_2d(dets, c, s, opt) - dets = ddd_post_process_3d(dets, calibs) - return dets - - -def ctdet_post_process(dets, c, s, h, w, num_classes): - # dets: batch x max_dets x dim - # return 1-based class det dict - ret = [] - for i in range(dets.shape[0]): - top_preds = {} - dets[i, :, :2] = transform_preds( - dets[i, :, 0:2], c[i], s[i], (w, h)) - dets[i, :, 2:4] = transform_preds( - dets[i, :, 2:4], c[i], s[i], (w, h)) - classes = dets[i, :, -1] - for j in range(num_classes): - inds = (classes == j) - top_preds[j + 1] = np.concatenate([ - dets[i, inds, :4].astype(np.float32), - dets[i, inds, 4:5].astype(np.float32)], axis=1).tolist() - ret.append(top_preds) - return ret - - -def multi_pose_post_process(dets, c, s, h, w): - # 
dets: batch x max_dets x 40 - # return list of 39 in image coord - ret = [] - for i in range(dets.shape[0]): - bbox = transform_preds(dets[i, :, :4].reshape(-1, 2), c[i], s[i], (w, h)) - pts = transform_preds(dets[i, :, 5:39].reshape(-1, 2), c[i], s[i], (w, h)) - top_preds = np.concatenate( - [bbox.reshape(-1, 4), dets[i, :, 4:5], - pts.reshape(-1, 34)], axis=1).astype(np.float32).tolist() - ret.append({np.ones(1, dtype=np.int32)[0]: top_preds}) - return ret diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/utils.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/utils.py deleted file mode 100644 index 49d90a2..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/myutils/utils.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import torch - -class AverageMeter(object): - """Computes and stores the average and current value""" - def __init__(self): - self.reset() - - def reset(self): - self.val = 0 - self.avg = 0 - self.sum = 0 - self.count = 0 - - def update(self, val, n=1): - self.val = val - self.sum += val * n - self.count += n - if self.count > 0: - self.avg = self.sum / self.count \ No newline at end of file diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/processor.py b/DLPredictOnline/demo/model/pytorch/watermark-centernet/processor.py deleted file mode 100644 index eae9350..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/processor.py +++ /dev/null @@ -1,118 +0,0 @@ -import numpy as np -import cv2 -import torch -import torch.nn as nn -from myutils.image import get_affine_transform -from models.utils import _gather_feat - -input_h = 512 -input_w = 512 -mean = [0.408, 0.447, 0.470] -std = [0.289, 0.274, 0.278] -down_ratio = 4 -scale = 1 -num_classes = 22 -max_per_image = 100 - - -def pre_process(image, scale, meta=None): - height, width = image.shape[0:2] - new_height = int(height * scale) - new_width = int(width * scale) - - inp_height, inp_width = input_h, input_w - c = np.array([new_width / 2., new_height / 2.], dtype=np.float32) - s = max(height, width) * 1.0 - - trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height]) - resized_image = cv2.resize(image, (new_width, new_height)) - - inp_image = cv2.warpAffine( - resized_image, trans_input, (inp_width, inp_height), - flags=cv2.INTER_LINEAR) - inp_image = ((inp_image / 255. 
- mean) / std).astype(np.float32) - images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width) - images = torch.from_numpy(images) - a = torch.rand(1, 3, 512, 512) - print(images.device) - meta = {'c': c, 's': s, 'out_height': inp_height // down_ratio, 'out_width': inp_width // down_ratio} - return a, meta - - -def merge_outputs(detections): - results = {} - for j in range(1, num_classes + 1): - results[j] = np.concatenate( - [detection[j] for detection in detections], axis=0).astype(np.float32) - scores = np.hstack( - [results[j][:, 4] for j in range(1, num_classes + 1)]) - if len(scores) > max_per_image: - kth = len(scores) - max_per_image - thresh = np.partition(scores, kth)[kth] - for j in range(1, num_classes + 1): - keep_inds = (results[j][:, 4] >= thresh) - results[j] = results[j][keep_inds] - return results - - -def preprocess(image_bytes, **kwargs): - image = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), cv2.IMREAD_COLOR) - height, width = image.shape[0:2] - new_height = int(height * scale) - new_width = int(width * scale) - - inp_height, inp_width = input_h, input_w - c = np.array([new_width / 2., new_height / 2.], dtype=np.float32) - s = max(height, width) * 1.0 - - trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height]) - resized_image = cv2.resize(image, (new_width, new_height)) - - inp_image = cv2.warpAffine( - resized_image, trans_input, (inp_width, inp_height), - flags=cv2.INTER_LINEAR) - inp_image = ((inp_image / 255. - mean) / std).astype(np.float32) - images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width) - images = torch.from_numpy(images) - print(images.device) - meta = {'c': c, 's': s, 'out_height': inp_height // down_ratio, 'out_width': inp_width // down_ratio} - return images - - -def _nms(heat, kernel=3): - pad = (kernel - 1) // 2 - - hmax = nn.functional.max_pool2d( - heat, (kernel, kernel), stride=1, padding=pad) - keep = (hmax == heat).float() - return heat * keep - - -def _topk(scores, K=40): - batch, cat, height, width = scores.size() - - topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K) - - topk_inds = topk_inds % (height * width) - topk_ys = (topk_inds / width).int().float() - topk_xs = (topk_inds % width).int().float() - - topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K) - topk_clses = (topk_ind / K).int() - topk_inds = _gather_feat( - topk_inds.view(batch, -1, 1), topk_ind).view(batch, K) - topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K) - topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K) - - return topk_score, topk_inds, topk_clses, topk_ys, topk_xs - - -def postprocess(output, **kwargs): - output = output[-1] - heat = output['hm'].sigmoid_() - heat = _nms(heat) - scores, inds, clses, ys, xs = _topk(heat, K=100) - scores = scores.cpu().detach() - clses = clses.cpu().detach().numpy() - res = np.concatenate((scores, clses), axis=1) - return res \ No newline at end of file diff --git a/DLPredictOnline/demo/model/pytorch/watermark-centernet/requirements.txt b/DLPredictOnline/demo/model/pytorch/watermark-centernet/requirements.txt deleted file mode 100644 index 9475444..0000000 --- a/DLPredictOnline/demo/model/pytorch/watermark-centernet/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -opencv-python -Cython -numba -progress -matplotlib -easydict \ No newline at end of file diff --git a/DLPredictOnline/demo/model/tensorflow/wideAndDeep/README.MD b/DLPredictOnline/demo/model/tensorflow/wideAndDeep/README.MD index 
1678a81..b842f82 100644
--- a/DLPredictOnline/demo/model/tensorflow/wideAndDeep/README.MD
+++ b/DLPredictOnline/demo/model/tensorflow/wideAndDeep/README.MD
@@ -1,9 +1,11 @@
 # Wide&Deep Model Example
 
+A binary classification model trained on the public Census Income Data Set
+
 Client program: [TensorflowWideAndDeep](../../../src/main/java/com/bj58/ailab/demo/client/TensorflowWideAndDeep.java)
 
 ## Data
-[data.txt](./data.txt) sample data
+[census_input.csv](./census_input.csv) sample data
 
 ## Model Files
 Named by version, so the downloaded model archive must be unpacked into the corresponding version directory
diff --git a/DLPredictOnline/demo/model/tensorflow/wideAndDeep/census_input.csv b/DLPredictOnline/demo/model/tensorflow/wideAndDeep/census_input.csv
new file mode 100644
index 0000000..beb27c2
--- /dev/null
+++ b/DLPredictOnline/demo/model/tensorflow/wideAndDeep/census_input.csv
@@ -0,0 +1,100 @@
+51,Private,179479,HS-grad,9,Widowed,Exec-managerial,Not-in-family,White,Female,3325,0,40,Yugoslavia
+53,Private,153064,5th-6th,3,Married-civ-spouse,Exec-managerial,Husband,White,Male,7688,0,10,Yugoslavia
+49,Private,406518,HS-grad,9,Divorced,Priv-house-serv,Unmarried,White,Female,0,0,16,Yugoslavia
+53,Self-emp-not-inc,146325,HS-grad,9,Married-civ-spouse,Craft-repair,Husband,White,Male,0,0,40,Yugoslavia
+55,Private,160631,HS-grad,9,Married-civ-spouse,Machine-op-inspct,Husband,White,Male,4508,0,8,Yugoslavia
+19,Self-emp-not-inc,159269,HS-grad,9,Never-married,Other-service,Own-child,White,Male,0,0,53,Yugoslavia
+30,Self-emp-not-inc,227429,HS-grad,9,Never-married,Exec-managerial,Not-in-family,White,Male,0,0,45,Yugoslavia
+51,Self-emp-not-inc,136708,HS-grad,9,Married-civ-spouse,Sales,Husband,Asian-Pac-Islander,Male,3103,0,84,Vietnam
+49,Private,146121,5th-6th,3,Married-spouse-absent,Machine-op-inspct,Unmarried,Asian-Pac-Islander,Female,0,0,20,Vietnam
+41,Private,117747,Bachelors,13,Divorced,Other-service,Unmarried,Asian-Pac-Islander,Female,0,0,40,Vietnam
+27,Self-emp-not-inc,301514,Some-college,10,Married-civ-spouse,Sales,Husband,Asian-Pac-Islander,Male,0,0,40,Vietnam
+43,Private,215624,Some-college,10,Married-civ-spouse,Handlers-cleaners,Husband,Asian-Pac-Islander,Male,0,0,40,Vietnam
+26,Private,186454,Bachelors,13,Never-married,Prof-specialty,Not-in-family,Asian-Pac-Islander,Male,4650,0,40,Vietnam
+23,Private,347873,Assoc-acdm,12,Never-married,Prof-specialty,Not-in-family,Asian-Pac-Islander,Female,0,0,30,Vietnam
+38,Private,198751,HS-grad,9,Divorced,Handlers-cleaners,Not-in-family,Asian-Pac-Islander,Male,0,0,40,Vietnam
+45,Private,111706,1st-4th,2,Never-married,Machine-op-inspct,Unmarried,Asian-Pac-Islander,Female,0,0,40,Vietnam
+38,Private,339025,HS-grad,9,Married-civ-spouse,Handlers-cleaners,Husband,Asian-Pac-Islander,Male,0,1579,40,Vietnam
+52,Private,261671,HS-grad,9,Married-civ-spouse,Machine-op-inspct,Husband,Asian-Pac-Islander,Male,0,0,40,Vietnam
+53,Self-emp-not-inc,206288,Bachelors,13,Married-civ-spouse,Craft-repair,Husband,Asian-Pac-Islander,Male,0,0,35,Vietnam
+25,Private,150804,HS-grad,9,Never-married,Transport-moving,Not-in-family,Asian-Pac-Islander,Male,0,0,40,Vietnam
+22,Private,138994,HS-grad,9,Never-married,Sales,Own-child,Asian-Pac-Islander,Female,0,0,40,Vietnam
+43,Private,154568,HS-grad,9,Married-civ-spouse,Tech-support,Husband,Asian-Pac-Islander,Male,0,0,40,Vietnam
+42,Private,139012,Assoc-voc,11,Married-civ-spouse,Prof-specialty,Husband,Asian-Pac-Islander,Male,0,0,40,Vietnam
+20,Private,138994,HS-grad,9,Never-married,Sales,Own-child,Asian-Pac-Islander,Female,0,0,40,Vietnam
+22,Private,157783,HS-grad,9,Married-civ-spouse,Other-service,Husband,Asian-Pac-Islander,Male,0,0,35,Vietnam
+35,Local-gov,304252,Assoc-acdm,12,Divorced,Exec-managerial,Not-in-family,Asian-Pac-Islander,Female,0,0,40,Vietnam +25,Private,226802,11th,7,Never-married,Machine-op-inspct,Own-child,Black,Male,0,0,40,United-States +38,Private,89814,HS-grad,9,Married-civ-spouse,Farming-fishing,Husband,White,Male,0,0,50,United-States +28,Local-gov,336951,Assoc-acdm,12,Married-civ-spouse,Protective-serv,Husband,White,Male,0,0,40,United-States +44,Private,160323,Some-college,10,Married-civ-spouse,Machine-op-inspct,Husband,Black,Male,7688,0,40,United-States +18,?,103497,Some-college,10,Never-married,?,Own-child,White,Female,0,0,30,United-States +34,Private,198693,10th,6,Never-married,Other-service,Not-in-family,White,Male,0,0,30,United-States +29,?,227026,HS-grad,9,Never-married,?,Unmarried,Black,Male,0,0,40,United-States +63,Self-emp-not-inc,104626,Prof-school,15,Married-civ-spouse,Prof-specialty,Husband,White,Male,3103,0,32,United-States +24,Private,369667,Some-college,10,Never-married,Other-service,Unmarried,White,Female,0,0,40,United-States +55,Private,104996,7th-8th,4,Married-civ-spouse,Craft-repair,Husband,White,Male,0,0,10,United-States +65,Private,184454,HS-grad,9,Married-civ-spouse,Machine-op-inspct,Husband,White,Male,6418,0,40,United-States +36,Federal-gov,212465,Bachelors,13,Married-civ-spouse,Adm-clerical,Husband,White,Male,0,0,40,United-States +26,Private,82091,HS-grad,9,Never-married,Adm-clerical,Not-in-family,White,Female,0,0,39,United-States +58,?,299831,HS-grad,9,Married-civ-spouse,?,Husband,White,Male,0,0,35,United-States +48,Private,279724,HS-grad,9,Married-civ-spouse,Machine-op-inspct,Husband,White,Male,3103,0,48,United-States +43,Private,346189,Masters,14,Married-civ-spouse,Exec-managerial,Husband,White,Male,0,0,50,United-States +20,State-gov,444554,Some-college,10,Never-married,Other-service,Own-child,White,Male,0,0,25,United-States +43,Private,128354,HS-grad,9,Married-civ-spouse,Adm-clerical,Wife,White,Female,0,0,30,United-States +37,Private,60548,HS-grad,9,Widowed,Machine-op-inspct,Unmarried,White,Female,0,0,20,United-States +34,Private,107914,Bachelors,13,Married-civ-spouse,Tech-support,Husband,White,Male,0,0,47,United-States +34,Private,238588,Some-college,10,Never-married,Other-service,Own-child,Black,Female,0,0,35,United-States +72,?,132015,7th-8th,4,Divorced,?,Not-in-family,White,Female,0,0,6,United-States +25,Private,205947,Bachelors,13,Married-civ-spouse,Prof-specialty,Husband,White,Male,0,0,40,United-States +45,Self-emp-not-inc,432824,HS-grad,9,Married-civ-spouse,Craft-repair,Husband,White,Male,7298,0,90,United-States +22,Private,236427,HS-grad,9,Never-married,Adm-clerical,Own-child,White,Male,0,0,20,United-States +23,Private,134446,HS-grad,9,Separated,Machine-op-inspct,Unmarried,Black,Male,0,0,54,United-States +54,Private,99516,HS-grad,9,Married-civ-spouse,Craft-repair,Husband,White,Male,0,0,35,United-States +32,Self-emp-not-inc,109282,Some-college,10,Never-married,Prof-specialty,Not-in-family,White,Male,0,0,60,United-States +46,State-gov,106444,Some-college,10,Married-civ-spouse,Exec-managerial,Husband,Black,Male,7688,0,38,United-States +56,Self-emp-not-inc,186651,11th,7,Widowed,Other-service,Unmarried,White,Female,0,0,50,United-States +24,Self-emp-not-inc,188274,Bachelors,13,Never-married,Sales,Not-in-family,White,Male,0,0,50,United-States +23,Local-gov,258120,Some-college,10,Married-civ-spouse,Protective-serv,Husband,White,Male,0,0,40,United-States +26,Private,43311,HS-grad,9,Divorced,Exec-managerial,Unmarried,White,Female,0,0,40,United-States 
+65,?,191846,HS-grad,9,Married-civ-spouse,?,Husband,White,Male,0,0,40,United-States +36,Local-gov,403681,Bachelors,13,Married-civ-spouse,Prof-specialty,Husband,White,Male,0,0,40,United-States +17,Private,269430,10th,6,Never-married,Machine-op-inspct,Not-in-family,White,Male,0,0,40,United-States +20,Private,257509,HS-grad,9,Never-married,Craft-repair,Own-child,White,Male,0,0,40,United-States +65,Private,136384,Masters,14,Married-civ-spouse,Prof-specialty,Husband,White,Male,0,0,50,United-States +44,Self-emp-inc,120277,Assoc-voc,11,Married-civ-spouse,Sales,Husband,White,Male,0,0,45,United-States +36,Private,465326,HS-grad,9,Married-civ-spouse,Farming-fishing,Husband,White,Male,0,0,40,United-States +29,Private,103634,11th,7,Married-civ-spouse,Other-service,Husband,White,Male,0,0,40,United-States +20,State-gov,138371,Some-college,10,Never-married,Farming-fishing,Own-child,White,Male,0,0,32,United-States +28,Private,242832,Assoc-voc,11,Married-civ-spouse,Prof-specialty,Wife,White,Female,0,0,36,United-States +54,Private,186272,Some-college,10,Married-civ-spouse,Transport-moving,Husband,White,Male,3908,0,50,United-States +52,Private,201062,11th,7,Separated,Priv-house-serv,Not-in-family,Black,Female,0,0,18,United-States +56,Self-emp-inc,131916,HS-grad,9,Widowed,Exec-managerial,Not-in-family,White,Female,0,0,50,United-States +18,Private,54440,Some-college,10,Never-married,Other-service,Own-child,White,Male,0,0,20,United-States +39,Private,280215,HS-grad,9,Divorced,Handlers-cleaners,Own-child,Black,Male,0,0,40,United-States +21,Private,214399,Some-college,10,Never-married,Other-service,Own-child,White,Female,0,1721,24,United-States +22,Private,54164,HS-grad,9,Never-married,Other-service,Not-in-family,White,Male,14084,0,60,United-States +21,Private,110677,Some-college,10,Never-married,Adm-clerical,Own-child,White,Female,0,0,40,United-States +63,Private,145985,HS-grad,9,Married-civ-spouse,Craft-repair,Husband,White,Male,0,0,40,United-States +34,Local-gov,382078,Bachelors,13,Married-civ-spouse,Exec-managerial,Husband,White,Male,3103,0,50,United-States +42,Self-emp-inc,170721,HS-grad,9,Married-civ-spouse,Exec-managerial,Husband,White,Male,5178,0,50,United-States +33,Private,269705,HS-grad,9,Married-civ-spouse,Handlers-cleaners,Husband,White,Male,0,0,40,United-States +30,Private,101135,Bachelors,13,Never-married,Exec-managerial,Not-in-family,White,Female,0,0,50,United-States +39,Private,118429,Some-college,10,Divorced,Sales,Not-in-family,White,Male,0,0,40,United-States +26,Private,31208,Masters,14,Never-married,Exec-managerial,Not-in-family,White,Female,0,0,40,United-States +33,Private,281384,HS-grad,9,Never-married,Machine-op-inspct,Own-child,White,Female,0,0,40,United-States +47,Local-gov,171807,HS-grad,9,Divorced,Adm-clerical,Not-in-family,White,Female,0,0,40,United-States +41,Self-emp-inc,445382,Assoc-acdm,12,Married-civ-spouse,Craft-repair,Husband,White,Male,15024,0,60,United-States +19,Private,105460,Some-college,10,Never-married,Other-service,Own-child,White,Male,0,0,20,United-States +46,Private,170338,HS-grad,9,Separated,Transport-moving,Not-in-family,White,Male,0,0,40,United-States +43,Private,102606,HS-grad,9,Married-civ-spouse,Sales,Husband,White,Male,0,0,48,United-States +55,Private,323887,Some-college,10,Married-civ-spouse,Exec-managerial,Husband,White,Male,15024,0,45,United-States +46,Private,175622,Assoc-voc,11,Married-civ-spouse,Tech-support,Husband,White,Male,0,0,40,United-States +21,Private,388946,Some-college,10,Separated,Handlers-cleaners,Not-in-family,White,Female,0,0,40,United-States 
+17,?,165361,10th,6,Never-married,?,Own-child,White,Male,0,0,40,United-States
+41,Private,75012,HS-grad,9,Married-civ-spouse,Machine-op-inspct,Husband,White,Male,0,0,50,United-States
+69,Self-emp-inc,174379,HS-grad,9,Married-civ-spouse,Sales,Husband,White,Male,0,0,30,United-States
+50,Private,312477,HS-grad,9,Married-civ-spouse,Transport-moving,Husband,White,Male,0,0,40,United-States
+20,Private,72055,Some-college,10,Never-married,Adm-clerical,Not-in-family,White,Female,0,0,40,United-States
+45,Self-emp-inc,67001,Some-college,10,Married-civ-spouse,Machine-op-inspct,Husband,White,Male,0,0,50,United-States
+23,Private,213734,Bachelors,13,Never-married,Exec-managerial,Not-in-family,White,Male,0,0,40,United-States
\ No newline at end of file
diff --git a/DLPredictOnline/demo/model/tensorflow/wideAndDeep/data.txt b/DLPredictOnline/demo/model/tensorflow/wideAndDeep/data.txt
deleted file mode 100644
index b400873..0000000
--- a/DLPredictOnline/demo/model/tensorflow/wideAndDeep/data.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-4|9|1|10|16009|1850|20|24|20|23|1.0|0.37|0.37|0.37|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.3|0.54|0.89|0.0|0.0|0.0|0.0|0.0|0.0|0.2540983606557377|0.270935960591133|0.24258760107816713|0.0|0.1|0.5|0.01694915254237288|0.024691358024691357|0.04411764705882353|2|0|0|0|0|0|0|0|1|0|1|1|0|3|0|0|0|0.084|0.05
-4|9|1|8|1809|2669|35|35|20|23|0.0|0.0|0.0|0.0|0.3|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|9|0|0|0|0|0|0|0|1|1|1|1|1|2|1|0|0|1.0|0.85
-4|9|1|10|16009|1850|20|22|20|23|1.0|0.37|0.37|0.37|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.0|0.73|1.0|1.0|0.0|0.0|0.0|0.2|0.3|0.3|0.4228571428571429|0.6203703703703703|0.6203703703703703|0.3|0.3|0.3|0.05|0.041237113402061855|0.041237113402061855|2|0|0|0|0|0|0|0|1|0|1|1|1|3|1|0|0|0.03|0.045
diff --git a/DLPredictOnline/demo/src/main/java/com/bj58/ailab/demo/client/PyTorchClient.java b/DLPredictOnline/demo/src/main/java/com/bj58/ailab/demo/client/PyTorchClient.java
index d710672..7d87170 100644
--- a/DLPredictOnline/demo/src/main/java/com/bj58/ailab/demo/client/PyTorchClient.java
+++ b/DLPredictOnline/demo/src/main/java/com/bj58/ailab/demo/client/PyTorchClient.java
@@ -31,7 +31,7 @@ import java.util.concurrent.TimeUnit;
 
 /**
- * PyTorch image watermark recognition model example
+ * PyTorch image digit recognition model example
  * @author 58
  * Model files are located in demo/model/pytorch
 **/
@@ -87,9 +87,9 @@ public void printResult(PredictionProtos.SeldonMessage response){
     }
 
     public static void client(WpaiDLPredictOnlineServiceGrpc.WpaiDLPredictOnlineServiceBlockingStub blockingStub){
-        String imagePath = "data";
+        String imagePath = "test_data";
         if (CommonUtil.checkSystemIsWin()){
-            imagePath = "demo\\model\\pytorch\\watermark-centernet\\data";
+            imagePath = "demo\\model\\pytorch\\mnist\\test_data";
         }
         System.out.println(System.getProperty("user.dir"));
         PyTorchClient pyTorchClient = new PyTorchClient();
@@ -100,4 +100,5 @@ public static void client(WpaiDLPredictOnlineServiceGrpc.WpaiDLPredictOnlineServ
         pyTorchClient.printResult(response);
     }
 }
+
 }
diff --git a/DLPredictOnline/demo/src/main/java/com/bj58/ailab/demo/client/TensorflowWideAndDeep.java b/DLPredictOnline/demo/src/main/java/com/bj58/ailab/demo/client/TensorflowWideAndDeep.java
index 9a23d2a..12ec2aa 100644
--- a/DLPredictOnline/demo/src/main/java/com/bj58/ailab/demo/client/TensorflowWideAndDeep.java
+++ b/DLPredictOnline/demo/src/main/java/com/bj58/ailab/demo/client/TensorflowWideAndDeep.java
@@ -27,10 +27,7 @@ import tensorflow.serving.Model;
 import
tensorflow.serving.Predict; -import java.io.BufferedReader; -import java.io.File; -import java.io.FileReader; -import java.io.IOException; +import java.io.*; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -76,90 +73,38 @@ private Map buildFeature(FeatureType featureType, String featur public Predict.PredictRequest getRequest(List testDataArrayList){ - List inputStrs = testDataArrayList.stream().map(o -> { - - String[] elems = o.toString().split("\\|", -1); - Map inputFeatures = new HashMap(1000); - - buildFeature(FeatureType.INT_TYPE, "week", inputFeatures, elems[0]); - buildFeature(FeatureType.INT_TYPE, "hour", inputFeatures, elems[1]); - buildFeature(FeatureType.INT_TYPE, "page_id", inputFeatures, "0"); - buildFeature(FeatureType.INT_TYPE, "pos_in_page", inputFeatures, "0"); - buildFeature(FeatureType.INT_TYPE, "city_id", inputFeatures, elems[2]); - buildFeature(FeatureType.INT_TYPE, "cate", inputFeatures, elems[3]); - buildFeature(FeatureType.STRING_TYPE, "region", inputFeatures, elems[4]); - buildFeature(FeatureType.STRING_TYPE, "shangquan", inputFeatures, elems[5]); - buildFeature(FeatureType.INT_TYPE, "areaId", inputFeatures, elems[6]); - buildFeature(FeatureType.INT_TYPE, "priceId", inputFeatures, elems[7]); - buildFeature(FeatureType.INT_TYPE, "pAreaId", inputFeatures, elems[8]); - buildFeature(FeatureType.INT_TYPE, "pPriceId", inputFeatures, elems[9]); - - buildFeature(FeatureType.FLOAT_TYPE, "cateCount", inputFeatures, elems[10]); - buildFeature(FeatureType.FLOAT_TYPE, "regionCount", inputFeatures, elems[11]); - buildFeature(FeatureType.FLOAT_TYPE, "shangquanCount", inputFeatures, elems[12]); - buildFeature(FeatureType.FLOAT_TYPE, "areaCount", inputFeatures, elems[13]); - buildFeature(FeatureType.FLOAT_TYPE, "priceCount", inputFeatures, elems[14]); - - buildFeature(FeatureType.FLOAT_TYPE, "uDc3", inputFeatures, elems[15]); - buildFeature(FeatureType.FLOAT_TYPE, "uDc7", inputFeatures, elems[16]); - buildFeature(FeatureType.FLOAT_TYPE, "uDc15", inputFeatures, elems[17]); - buildFeature(FeatureType.FLOAT_TYPE, "uDt3", inputFeatures, elems[18]); - buildFeature(FeatureType.FLOAT_TYPE, "uDt7", inputFeatures, elems[19]); - buildFeature(FeatureType.FLOAT_TYPE, "uDt15", inputFeatures, elems[20]); - buildFeature(FeatureType.FLOAT_TYPE, "uDm3", inputFeatures, elems[21]); - buildFeature(FeatureType.FLOAT_TYPE, "uDm7", inputFeatures, elems[22]); - buildFeature(FeatureType.FLOAT_TYPE, "uDm15", inputFeatures, elems[23]); - buildFeature(FeatureType.FLOAT_TYPE, "uDct3", inputFeatures, elems[24]); - buildFeature(FeatureType.FLOAT_TYPE, "uDct7", inputFeatures, elems[25]); - buildFeature(FeatureType.FLOAT_TYPE, "uDct15", inputFeatures, elems[26]); - - buildFeature(FeatureType.FLOAT_TYPE, "proDc3", inputFeatures, elems[27]); - buildFeature(FeatureType.FLOAT_TYPE, "proDc7", inputFeatures, elems[28]); - buildFeature(FeatureType.FLOAT_TYPE, "proDc15", inputFeatures, elems[29]); - buildFeature(FeatureType.FLOAT_TYPE, "proDt3", inputFeatures, elems[30]); - buildFeature(FeatureType.FLOAT_TYPE, "proDt7", inputFeatures, elems[31]); - buildFeature(FeatureType.FLOAT_TYPE, "proDt15", inputFeatures, elems[32]); - buildFeature(FeatureType.FLOAT_TYPE, "proDm3", inputFeatures, elems[33]); - buildFeature(FeatureType.FLOAT_TYPE, "proDm7", inputFeatures, elems[34]); - buildFeature(FeatureType.FLOAT_TYPE, "proDm15", inputFeatures, elems[35]); - buildFeature(FeatureType.FLOAT_TYPE, "proCtr3", inputFeatures, elems[36]); - buildFeature(FeatureType.FLOAT_TYPE, "proCtr7", inputFeatures, 
elems[37]); - buildFeature(FeatureType.FLOAT_TYPE, "proCtr15", inputFeatures, elems[38]); - buildFeature(FeatureType.FLOAT_TYPE, "proDct3", inputFeatures, elems[39]); - buildFeature(FeatureType.FLOAT_TYPE, "proDct7", inputFeatures, elems[40]); - buildFeature(FeatureType.FLOAT_TYPE, "proDct15", inputFeatures, elems[41]); - buildFeature(FeatureType.FLOAT_TYPE, "proCtrt3", inputFeatures, elems[42]); - buildFeature(FeatureType.FLOAT_TYPE, "proCtrt7", inputFeatures, elems[43]); - buildFeature(FeatureType.FLOAT_TYPE, "proCtrt15", inputFeatures, elems[44]); - - buildFeature(FeatureType.INT_TYPE, "shangquanRank", inputFeatures, elems[45]); - - buildFeature(FeatureType.FLOAT_TYPE, "xzl_l1", inputFeatures, elems[46]); - buildFeature(FeatureType.FLOAT_TYPE, "xzl_l2", inputFeatures, elems[47]); - buildFeature(FeatureType.FLOAT_TYPE, "xzl_l3", inputFeatures, elems[48]); - buildFeature(FeatureType.FLOAT_TYPE, "xzl_l4", inputFeatures, elems[49]); - buildFeature(FeatureType.FLOAT_TYPE, "xzl_l5", inputFeatures, elems[50]); - buildFeature(FeatureType.FLOAT_TYPE, "xzl_l6", inputFeatures, elems[51]); - buildFeature(FeatureType.FLOAT_TYPE, "xzl_l7", inputFeatures, elems[52]); - buildFeature(FeatureType.FLOAT_TYPE, "sp_l1", inputFeatures, elems[53]); - buildFeature(FeatureType.FLOAT_TYPE, "sp_l2", inputFeatures, elems[54]); - buildFeature(FeatureType.FLOAT_TYPE, "sp_l3", inputFeatures, elems[55]); - buildFeature(FeatureType.FLOAT_TYPE, "sp_l4", inputFeatures, elems[56]); - buildFeature(FeatureType.FLOAT_TYPE, "sp_l5", inputFeatures, elems[57]); - buildFeature(FeatureType.INT_TYPE, "sp_l6", inputFeatures, elems[58]); - buildFeature(FeatureType.FLOAT_TYPE, "other_sp", inputFeatures, elems[59]); - buildFeature(FeatureType.FLOAT_TYPE, "other_ax", inputFeatures, elems[60]); - buildFeature(FeatureType.FLOAT_TYPE, "other_qj", inputFeatures, elems[61]); - buildFeature(FeatureType.FLOAT_TYPE, "price", inputFeatures, elems[62]); - buildFeature(FeatureType.FLOAT_TYPE, "area", inputFeatures, elems[63]); - - buildFeature(FeatureType.INT_TYPE, "sample_age", inputFeatures, "0"); - - - Features featuresSerializeToString = Features.newBuilder().putAllFeature(inputFeatures).build(); - ByteString inputStr = Example.newBuilder().setFeatures(featuresSerializeToString).build().toByteString(); - return inputStr; - }).collect(Collectors.toList()); + List inputStrs = + testDataArrayList.stream() + .map( + o -> { + String[] elems = o.toString().split(",", -1); + Map inputFeatures = new HashMap(1000); + buildFeature(FeatureType.FLOAT_TYPE, "age", inputFeatures, elems[0]); + buildFeature(FeatureType.STRING_TYPE, "workclass", inputFeatures, elems[1]); + buildFeature(FeatureType.FLOAT_TYPE, "fnlwgt", inputFeatures, elems[2]); + buildFeature(FeatureType.STRING_TYPE, "education", inputFeatures, elems[3]); + buildFeature(FeatureType.FLOAT_TYPE, "education_num", inputFeatures, elems[4]); + buildFeature(FeatureType.STRING_TYPE, "marital_status", inputFeatures, elems[5]); + buildFeature(FeatureType.STRING_TYPE, "occupation", inputFeatures, elems[6]); + buildFeature(FeatureType.STRING_TYPE, "relationship", inputFeatures, elems[7]); + buildFeature(FeatureType.STRING_TYPE, "race", inputFeatures, elems[8]); + buildFeature(FeatureType.STRING_TYPE, "gender", inputFeatures, elems[9]); + + buildFeature(FeatureType.FLOAT_TYPE, "capital_gain", inputFeatures, elems[10]); + buildFeature(FeatureType.FLOAT_TYPE, "capital_loss", inputFeatures, elems[11]); + buildFeature(FeatureType.FLOAT_TYPE, "hours_per_week", inputFeatures, elems[12]); + 
buildFeature(FeatureType.STRING_TYPE, "native_country", inputFeatures, elems[13]);
+
+                  Features featuresSerializeToString =
+                      Features.newBuilder().putAllFeature(inputFeatures).build();
+                  ByteString inputStr =
+                      Example.newBuilder()
+                          .setFeatures(featuresSerializeToString)
+                          .build()
+                          .toByteString();
+                  return inputStr;
+                })
+            .collect(Collectors.toList());
 
         TensorShapeProto.Builder tensorShapeBuilder = TensorShapeProto.newBuilder();
@@ -192,8 +137,8 @@ public void printResult(Predict.PredictResponse response){
         }
 
         List predict = outputs.getFloatValList();
         int step = 2;
-        for (int i = 1; i < predict.size(); i = i + step) {
-            System.out.println(predict.get(i));
+        for (int i = 0; i < predict.size(); i = i + step) {
+            System.out.println(predict.get(i) + "," + predict.get(i + 1));
         }
@@ -210,9 +155,9 @@ public List getData(String dataFile) throws IOException {
     }
 
     public static void tensorflowClient(WpaiDLPredictOnlineServiceGrpc.WpaiDLPredictOnlineServiceBlockingStub blockingStub){
-        String dataFile = "data.txt";
+        String dataFile = "census_input.csv";
         if (CommonUtil.checkSystemIsWin()){
-            dataFile = "demo\\model\\tensorflow\\wideAndDeep\\data.txt";
+            dataFile = "demo\\model\\tensorflow\\wideAndDeep\\census_input.csv";
         }
         TensorflowWideAndDeep tensorflowWideAndDeep = new TensorflowWideAndDeep();
         List dataList = null;
diff --git a/DLPredictOnline/demo/src/main/java/com/bj58/ailab/demo/client/WpaiClient.java b/DLPredictOnline/demo/src/main/java/com/bj58/ailab/demo/client/WpaiClient.java
index 6262a69..a3e20bb 100644
--- a/DLPredictOnline/demo/src/main/java/com/bj58/ailab/demo/client/WpaiClient.java
+++ b/DLPredictOnline/demo/src/main/java/com/bj58/ailab/demo/client/WpaiClient.java
@@ -80,7 +80,8 @@ public WpaiClient(ManagedChannel channel1) {
     }
 
     public void greet(String name) {
-        TensorflowDssm.tensorflowClient(blockingStub);
+//        TensorflowDssm.tensorflowClient(blockingStub);
+        TensorflowWideAndDeep.tensorflowClient(blockingStub);
     }
 
     public void shutdown() throws InterruptedException {
diff --git a/README.MD b/README.MD
index 2a49b82..e59a556 100644
--- a/README.MD
+++ b/README.MD
@@ -129,8 +129,8 @@ For the detailed Caffe procedure, see the project [Readme](./CaffePredictOnline/README.md).
 3. Introduction to the model inference implementations for different deep learning frameworks such as Tensorflow/PyTorch/Caffe
 4. Inference service deployment walked through with concrete examples
    (1). Tensorflow model example 1: deployment of the [qa_match question-answer matching model](./DLPredictOnline/demo/model/tensorflow/sptm)
-   (2). Tensorflow model example 2: deployment of the [recommendation-ranking wide&deep model](./DLPredictOnline/demo/model/tensorflow/wideAndDeep)
-   (3). PyTorch model example: deployment of the [image watermark recognition model](./DLPredictOnline/demo/model/pytorch/watermark-centernet)
+   (2). Tensorflow model example 2: deployment of the [binary classification wide&deep model](./DLPredictOnline/demo/model/tensorflow/wideAndDeep)
+   (3). PyTorch model example: deployment of the [image digit recognition model](./DLPredictOnline/demo/model/pytorch/mnist)
    (4). Caffe model example: deployment of the [image classification model](./DLPredictOnline/demo/model/caffe)
 
 For a replay of the livestream, see the 58AILab WeChat official account article: [Livestream replay | An analysis of the general-purpose deep learning inference service dl_inference open-source project](https://mp.weixin.qq.com/s/TYj2cqeWETmK6kEXrdaSzg)
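The reworked TensorflowWideAndDeep client above types each census_input.csv column via `buildFeature` (float features for numeric columns, string features for categorical ones) and packs every row into a serialized `tf.train.Example`. As a way to sanity-check a request outside the Java demo, here is a minimal Python sketch of the same row-to-Example mapping; it is not a file from this repo, and the column names and float/string typing are simply read off the `buildFeature` calls above:

```python
# Minimal sketch: build the serialized tf.train.Example that the wide&deep
# request carries for one census_input.csv row.
import tensorflow as tf

# (name, is_numeric) pairs in census_input.csv column order,
# matching the Java client's buildFeature calls.
COLUMNS = [
    ("age", True), ("workclass", False), ("fnlwgt", True),
    ("education", False), ("education_num", True),
    ("marital_status", False), ("occupation", False),
    ("relationship", False), ("race", False), ("gender", False),
    ("capital_gain", True), ("capital_loss", True),
    ("hours_per_week", True), ("native_country", False),
]

def row_to_serialized_example(line):
    feature = {}
    for (name, is_numeric), raw in zip(COLUMNS, line.strip().split(",")):
        if is_numeric:
            # FLOAT_TYPE columns become single-value FloatList features.
            feature[name] = tf.train.Feature(
                float_list=tf.train.FloatList(value=[float(raw)]))
        else:
            # STRING_TYPE columns become single-value BytesList features.
            feature[name] = tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[raw.encode("utf-8")]))
    example = tf.train.Example(features=tf.train.Features(feature=feature))
    return example.SerializeToString()

row = ("51,Private,179479,HS-grad,9,Widowed,Exec-managerial,"
       "Not-in-family,White,Female,3325,0,40,Yugoslavia")
print(len(row_to_serialized_example(row)))  # size of one request element
```

Each serialized Example becomes one element of the request's input tensor, and the response's flat float list carries one two-value score pair per input row, which is why the updated `printResult` now starts at index 0 and prints two values per step of 2.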
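Separately, the deleted watermark-centernet processor.py records the hook contract the PyTorch serving side expects from a model package: `preprocess(image_bytes, **kwargs)` turns the raw request bytes into an input tensor, and `postprocess(output, **kwargs)` reduces the network output to a plain result. For orientation only, a minimal MNIST-style processor under that same contract could look like the sketch below; this is not the file shipped in the published mnist model archive, and the 0.1307/0.3081 normalization constants are an assumption borrowed from common MNIST practice:

```python
# Hypothetical minimal processor.py for a 10-class MNIST model, following the
# preprocess/postprocess signatures seen in the deleted CenterNet processor.
import cv2
import numpy as np
import torch

def preprocess(image_bytes, **kwargs):
    # Decode request bytes to a 28x28 grayscale image and normalize it.
    img = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, (28, 28)).astype(np.float32) / 255.0
    img = (img - 0.1307) / 0.3081  # assumed MNIST mean/std
    return torch.from_numpy(img).reshape(1, 1, 28, 28)

def postprocess(output, **kwargs):
    # Turn raw logits into per-digit probabilities as a plain numpy array,
    # analogous to how the CenterNet processor returned a numpy result.
    return torch.softmax(output, dim=1).detach().cpu().numpy()
```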