# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
from paddle import _C_ops, _legacy_C_ops
from paddle.tensor.math import _add_with_axis
from paddle.utils import convert_to_list
from ..base import core
from ..base.data_feeder import check_type, check_variable_and_dtype
from ..base.framework import Variable, in_dygraph_mode, in_dynamic_or_pir_mode
from ..base.layer_helper import LayerHelper
from ..framework import _current_expected_place
from ..nn import BatchNorm2D, Conv2D, Layer, ReLU, Sequential
from ..nn.initializer import Normal
__all__ = [
'yolo_loss',
'yolo_box',
'prior_box',
'box_coder',
'deform_conv2d',
'DeformConv2D',
'distribute_fpn_proposals',
'generate_proposals',
'read_file',
'decode_jpeg',
'roi_pool',
'RoIPool',
'psroi_pool',
'PSRoIPool',
'roi_align',
'RoIAlign',
'nms',
'matrix_nms',
]
def yolo_loss(
x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.0,
):
r"""
This operator generates the YOLOv3 loss based on the given prediction results and ground
truth boxes.
The output of the previous network is in shape [N, C, H, W], where H and W
should be the same and specify the grid size. Each grid point predicts a
given number of bounding boxes; this number, hereafter denoted as S,
is the number of anchor clusters in each scale. In the second (channel)
dimension, C should be equal to S * (class_num + 5), where class_num is the
number of object categories in the source dataset (such as 80 in the COCO dataset).
So, apart from the 4 box location coordinates x, y, w, h, the channel dimension
also includes the confidence score and the class one-hot key of each anchor box.
Assume the 4 location coordinates are :math:`t_x, t_y, t_w, t_h`, the box predictions
should be as follows:
$$
b_x = \\sigma(t_x) + c_x
$$
$$
b_y = \\sigma(t_y) + c_y
$$
$$
b_w = p_w e^{t_w}
$$
$$
b_h = p_h e^{t_h}
$$
In the equations above, :math:`c_x, c_y` are the coordinates of the top-left corner of the current grid
and :math:`p_w, p_h` are specified by the anchors.
As for the confidence score, it is the logistic regression value of the IoU between
anchor boxes and ground truth boxes. The score of the anchor box with the
max IoU should be 1, and if an anchor box has an IoU greater than the ignore
threshold, the confidence score loss of that anchor box will be ignored.
Therefore, the YOLOv3 loss consists of three major parts: box location loss,
objectness loss and classification loss. The L1 loss is used for
box coordinates (w, h), sigmoid cross entropy loss is used for box
coordinates (x, y), objectness loss and classification loss.
Each ground truth box finds the best matching anchor box among all anchors.
Prediction of this anchor box will incur all three parts of losses, and
prediction of anchor boxes with no GT box matched will only incur objectness
loss.
In order to trade off box coordinate losses between big boxes and small
boxes, box coordinate losses will be multiplied by a scale weight, which is
calculated as follows.
$$
weight_{box} = 2.0 - t_w * t_h
$$
Final loss will be represented as follows.
$$
loss = (loss_{xy} + loss_{wh}) * weight_{box} + loss_{conf} + loss_{class}
$$
When :attr:`use_label_smooth` is set to :attr:`True`, the classification
target will be smoothed when calculating the classification loss: the target of
positive samples will be smoothed to :math:`1.0 - 1.0 / class\_num` and the target of
negative samples will be smoothed to :math:`1.0 / class\_num`.
When :attr:`gt_score` is given, which is the mixup score of the ground truth
boxes, all losses incurred by a ground truth box will be multiplied by its
mixup score.
Args:
x (Tensor): The input tensor of the YOLOv3 loss operator. This is a 4-D
tensor with shape of [N, C, H, W]. H and W should be the same,
and the second dimension(C) stores box locations, confidence
score and classification one-hot keys of each anchor box.
The data type is float32 or float64.
gt_box (Tensor): ground truth boxes, should be in shape of [N, B, 4].
In the third dimension, x, y, w, h should be stored;
x, y are the center coordinates of the boxes, w, h are the
width and height, and x, y, w, h should be divided by the
input image height to scale to [0, 1].
N is the batch number and B is the max box number in
an image. The data type is float32 or float64.
gt_label (Tensor): class id of ground truth boxes, should be in shape
of [N, B]. The data type is int32.
anchors (list|tuple): The anchor width and height, it will be parsed
pair by pair.
anchor_mask (list|tuple): The mask index of anchors used in current
YOLOv3 loss calculation.
class_num (int): The number of classes.
ignore_thresh (float): The ignore threshold to ignore confidence loss.
downsample_ratio (int): The downsample ratio from network input to YOLOv3
loss input, so 32, 16, 8 should be set for the
first, second, and third YOLOv3 loss operators.
gt_score (Tensor, optional): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool, optional): Whether to use label smooth. Default True.
name (str, optional): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
scale_x_y (float, optional): Scale the center point of decoded bounding box.
Default 1.0.
Returns:
Tensor: A 1-D tensor with shape [N], the value of yolov3 loss
Examples:
.. code-block:: python
>>> import paddle
>>> x = paddle.rand([2, 14, 8, 8]).astype('float32')
>>> gt_box = paddle.rand([2, 10, 4]).astype('float32')
>>> gt_label = paddle.rand([2, 10]).astype('int32')
>>> loss = paddle.vision.ops.yolo_loss(x,
... gt_box=gt_box,
... gt_label=gt_label,
... anchors=[10, 13, 16, 30],
... anchor_mask=[0, 1],
... class_num=2,
... ignore_thresh=0.7,
... downsample_ratio=8,
... use_label_smooth=True,
... scale_x_y=1.)
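>>> # Added illustrative check (not part of the original example): the channel
>>> # dimension of x must equal S * (class_num + 5), where S is the number
>>> # of anchors selected by anchor_mask. Here S = 2 and class_num = 2,
>>> # so C = 2 * (2 + 5) = 14, which matches x's shape [2, 14, 8, 8].
>>> S, class_num = 2, 2
>>> assert S * (class_num + 5) == 14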
"""
if in_dynamic_or_pir_mode():
loss = _C_ops.yolo_loss(
x,
gt_box,
gt_label,
gt_score,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
use_label_smooth,
scale_x_y,
)
return loss
else:
helper = LayerHelper('yolov3_loss', **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'yolo_loss')
check_variable_and_dtype(
gt_box, 'gt_box', ['float32', 'float64'], 'yolo_loss'
)
check_variable_and_dtype(gt_label, 'gt_label', 'int32', 'yolo_loss')
check_type(anchors, 'anchors', (list, tuple), 'yolo_loss')
check_type(anchor_mask, 'anchor_mask', (list, tuple), 'yolo_loss')
check_type(class_num, 'class_num', int, 'yolo_loss')
check_type(ignore_thresh, 'ignore_thresh', float, 'yolo_loss')
check_type(use_label_smooth, 'use_label_smooth', bool, 'yolo_loss')
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(
dtype='int32'
)
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask,
},
attrs=attrs,
)
return loss
def yolo_box(
x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.0,
iou_aware=False,
iou_aware_factor=0.5,
):
r"""
This operator generates YOLO detection boxes from the output of a YOLOv3 network.
The output of the previous network is in shape [N, C, H, W], where H and W
should be the same and specify the grid size. Each grid point predicts a
given number of boxes; this number, hereafter denoted as S,
is the number of anchors. In the second (channel) dimension, C should be
equal to S * (5 + class_num) if :attr:`iou_aware` is false,
otherwise C should be equal to S * (6 + class_num), where class_num is the
number of object categories in the source dataset (such as 80 in the COCO dataset).
So, apart from the 4 box location coordinates x, y, w, h, the channel
dimension also includes the confidence score and the class one-hot key of each anchor
box.
Assume the 4 location coordinates are :math:`t_x, t_y, t_w, t_h`, the box
predictions should be as follows:
$$
b_x = \\sigma(t_x) + c_x
$$
$$
b_y = \\sigma(t_y) + c_y
$$
$$
b_w = p_w e^{t_w}
$$
$$
b_h = p_h e^{t_h}
$$
In the equations above, :math:`c_x, c_y` are the coordinates of the top-left corner of the current grid
and :math:`p_w, p_h` are specified by the anchors.
The logistic regression value of the 5th channel of each anchor prediction box
represents the confidence score of each prediction box, and the logistic
regression values of the last :attr:`class_num` channels of each anchor prediction
box represent the classification scores. Boxes with confidence scores less than
:attr:`conf_thresh` should be ignored, and the final box scores are the product of
confidence scores and classification scores.
$$
score_{pred} = score_{conf} * score_{class}
$$
Args:
x (Tensor): The input tensor of YoloBox operator is a 4-D tensor with
shape of [N, C, H, W]. The second dimension(C) stores box
locations, confidence score and classification one-hot keys
of each anchor box. Generally, X should be the output of
YOLOv3 network. The data type is float32 or float64.
img_size (Tensor): The image size tensor of the YoloBox operator. This is a
2-D tensor with shape of [N, 2]. This tensor holds
height and width of each input image used for resizing
output box in input image scale. The data type is int32.
anchors (list|tuple): The anchor width and height, it will be parsed pair
by pair.
class_num (int): The number of classes.
conf_thresh (float): The confidence scores threshold of detection boxes.
Boxes with confidence scores under threshold should
be ignored.
downsample_ratio (int): The downsample ratio from network input to
:attr:`yolo_box` operator input, so 32, 16, 8
should be set for the first, second, and third
:attr:`yolo_box` layer.
clip_bbox (bool, optional): Whether to clip the output bounding box within the :attr:`img_size`
boundary. Default True.
name (str, optional): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`.
scale_x_y (float, optional): Scale the center point of decoded bounding box. Default 1.0
iou_aware (bool, optional): Whether to use IoU-aware confidence. Default False.
iou_aware_factor (float, optional): The IoU-aware factor. Default 0.5.
Returns:
Tensor: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Examples:
.. code-block:: python
>>> import paddle
>>> x = paddle.rand([2, 14, 8, 8]).astype('float32')
>>> img_size = paddle.ones((2, 2)).astype('int32')
>>> boxes, scores = paddle.vision.ops.yolo_box(x,
... img_size=img_size,
... anchors=[10, 13, 16, 30],
... class_num=2,
... conf_thresh=0.01,
... downsample_ratio=8,
... clip_bbox=True,
... scale_x_y=1.)
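>>> # Added illustrative check (not part of the original example): with
>>> # iou_aware=False the channel dimension of x must equal S * (5 + class_num),
>>> # where S is the number of anchor pairs. Here S = 2 and class_num = 2,
>>> # so C = 2 * (5 + 2) = 14; with iou_aware=True it would be
>>> # S * (6 + class_num) = 16 instead.
>>> S, class_num = 2, 2
>>> assert S * (5 + class_num) == 14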
"""
if in_dynamic_or_pir_mode():
boxes, scores = _C_ops.yolo_box(
x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox,
scale_x_y,
iou_aware,
iou_aware_factor,
)
return boxes, scores
else:
helper = LayerHelper('yolo_box', **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'yolo_box')
check_variable_and_dtype(img_size, 'img_size', 'int32', 'yolo_box')
check_type(anchors, 'anchors', (list, tuple), 'yolo_box')
check_type(conf_thresh, 'conf_thresh', float, 'yolo_box')
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
"iou_aware": iou_aware,
"iou_aware_factor": iou_aware_factor,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs,
)
return boxes, scores
def prior_box(
input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.0],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
min_max_aspect_ratios_order=False,
name=None,
):
r"""
This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the count of min_sizes, max_sizes and aspect_ratios. The size of each
box is in the (min_size, max_size) interval, and the boxes are generated in
sequence according to the aspect_ratios.
Args:
input (Tensor): 4-D tensor(NCHW), the data type should be float32 or float64.
image (Tensor): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes (list|tuple|float): the min sizes of generated prior boxes.
max_sizes (list|tuple|None, optional): the max sizes of generated prior boxes.
Default: None, which means [] and max sizes will not be used.
aspect_ratios (list|tuple|float, optional): the aspect ratios of generated
prior boxes. Default: [1.0].
variance (list|tuple, optional): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip (bool, optional): Whether to flip the aspect ratios. Default: False.
clip (bool, optional): Whether to clip out-of-boundary boxes. Default: False.
steps (list|tuple, optional): Prior box steps across width and height. If
steps[0] or steps[1] equals 0.0, the corresponding prior box step across
the width or height of the input will be calculated automatically.
Default: [0., 0.].
offset (float, optional): Prior box center offset. Default: 0.5.
min_max_aspect_ratios_order (bool, optional): If set to True, the output prior boxes are
in the order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note that this order affects the weight order of the
following convolution layer, but does not affect the final
detection results. Default: False.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: the output prior boxes and the expanded variances of PriorBox.
The prior boxes are a 4-D tensor with layout [H, W, num_priors, 4], where
num_priors is the total box count at each position of the input.
The expanded variances are a 4-D tensor with the same shape as the prior boxes.
Examples:
.. code-block:: python
>>> import paddle
>>> input = paddle.rand((1, 3, 6, 9), dtype=paddle.float32)
>>> image = paddle.rand((1, 3, 9, 12), dtype=paddle.float32)
>>> box, var = paddle.vision.ops.prior_box(
... input=input,
... image=image,
... min_sizes=[2.0, 4.0],
... clip=True,
... flip=True)
...
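>>> # Added illustrative note: the returned box follows the layout
>>> # [H, W, num_priors, 4], where H and W come from the input feature map
>>> # (6 and 9 above) and num_priors depends on min_sizes, max_sizes and the
>>> # effective aspect ratios.
>>> H, W = input.shape[2], input.shape[3]  # 6, 9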
"""
def _is_list_or_tuple_(data):
return isinstance(data, (list, tuple))
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not _is_list_or_tuple_(steps):
steps = [steps]
if not len(steps) == 2:
raise ValueError('steps should be (step_w, step_h)')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
cur_max_sizes = None
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
cur_max_sizes = max_sizes
if in_dynamic_or_pir_mode():
step_w, step_h = steps
if max_sizes is None:
max_sizes = []
box, var = _C_ops.prior_box(
input,
image,
min_sizes,
max_sizes,
aspect_ratios,
variance,
flip,
clip,
step_w,
step_h,
offset,
min_max_aspect_ratios_order,
)
return box, var
else:
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box'
)
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order,
}
if cur_max_sizes is not None:
attrs['max_sizes'] = cur_max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input, "Image": image},
outputs={"Boxes": box, "Variances": var},
attrs=attrs,
)
box.stop_gradient = True
var.stop_gradient = True
return box, var
def box_coder(
prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
axis=0,
name=None,
):
r"""
Encode/Decode the target bounding box with the priorbox information.
The encoding schema is described below:
.. math::
ox &= (tx - px) / pw / pxv
oy &= (ty - py) / ph / pyv
ow &= log(abs(tw / pw)) / pwv
oh &= log(abs(th / ph)) / phv
The decoding schema is described below:
.. math::
ox &= (pw * pxv * tx + px) - tw / 2
oy &= (ph * pyv * ty + py) - th / 2
ow &= exp(pwv * tw) * pw + tw / 2
oh &= exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
Args:
prior_box (Tensor): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var (Tensor|List|tuple|None): prior_box_var supports three types
of input. One is a Tensor with shape [M, 4] which holds M groups of
variances, with data type float32 or float64. The second is a list or tuple
consisting of 4 elements shared by all boxes, with data type float32 or float64.
The third is None, which means the variances are not involved in the calculation.
target_box (Tensor): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
code_type (str, optional): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
box_normalized (bool, optional): Whether treat the priorbox as a normalized box.
Set true by default.
axis (int, optional): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
name (str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tensor: output boxes, when code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
>>> import paddle
>>> # For encode
>>> prior_box_encode = paddle.rand((80, 4), dtype=paddle.float32)
>>> prior_box_var_encode = paddle.rand((80, 4), dtype=paddle.float32)
>>> target_box_encode = paddle.rand((20, 4), dtype=paddle.float32)
>>> output_encode = paddle.vision.ops.box_coder(
... prior_box=prior_box_encode,
... prior_box_var=prior_box_var_encode,
... target_box=target_box_encode,
... code_type="encode_center_size")
...
>>> # For decode
>>> prior_box_decode = paddle.rand((80, 4), dtype=paddle.float32)
>>> prior_box_var_decode = paddle.rand((80, 4), dtype=paddle.float32)
>>> target_box_decode = paddle.rand((20, 80, 4), dtype=paddle.float32)
>>> output_decode = paddle.vision.ops.box_coder(
... prior_box=prior_box_decode,
... prior_box_var=prior_box_var_decode,
... target_box=target_box_decode,
... code_type="decode_center_size",
... box_normalized=False)
...
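>>> # Added illustrative sketch of the encoding schema above, using plain
>>> # Python on center-size values: (px, py, pw, ph) is the prior box
>>> # center/size, (pxv, pyv, pwv, phv) its variance and (tx, ty, tw, th)
>>> # the target box center/size. The values below are made up for
>>> # illustration, and the conversion from [xmin, ymin, xmax, ymax]
>>> # corners is omitted.
>>> import math
>>> px, py, pw, ph = 0.5, 0.5, 0.4, 0.4
>>> pxv, pyv, pwv, phv = 0.1, 0.1, 0.2, 0.2
>>> tx, ty, tw, th = 0.55, 0.45, 0.5, 0.3
>>> ox = (tx - px) / pw / pxv
>>> oy = (ty - py) / ph / pyv
>>> ow = math.log(abs(tw / pw)) / pwv
>>> oh = math.log(abs(th / ph)) / phv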
"""
if in_dynamic_or_pir_mode():
if isinstance(prior_box_var, (core.eager.Tensor, paddle.pir.Value)):
output_box = _C_ops.box_coder(
prior_box,
prior_box_var,
target_box,
code_type,
box_normalized,
axis,
[],
)
elif isinstance(prior_box_var, (list, tuple)):
prior_box_var = list(prior_box_var)
assert (
len(prior_box_var) == 4
), "Input prior_box_var must be Variable or list|tuple with 4 elements."
output_box = _C_ops.box_coder(
prior_box,
None,
target_box,
code_type,
box_normalized,
axis,
prior_box_var,
)
else:
raise TypeError(
"Input prior_box_var must be Variable or list|tuple"
)
return output_box
else:
check_variable_and_dtype(
prior_box, 'prior_box', ['float32', 'float64'], 'box_coder'
)
check_variable_and_dtype(
target_box, 'target_box', ['float32', 'float64'], 'box_coder'
)
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype
)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis,
}
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, (list, tuple)):
attrs['variance'] = prior_box_var
assert (
len(attrs['variance']) == 4
), "Input prior_box_var must be Variable or list|tuple with 4 elements."
else:
raise TypeError(
"Input prior_box_var must be Variable or list|tuple"
)
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box},
)
return output_box
def deform_conv2d(
x,
offset,
weight,
bias=None,
stride=1,
padding=0,
dilation=1,
deformable_groups=1,
groups=1,
mask=None,
name=None,
):
r"""
Compute 2-D deformable convolution on 4-D input.
Given input image x and output feature map y, the deformable convolution operation can be expressed as follows:
Deformable Convolution v2:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}
Deformable Convolution v1:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}
Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
and :math:`\Delta m_k` is 1 in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
<https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.
Example:
- Input:
x shape: :math:`(N, C_{in}, H_{in}, W_{in})`
weight shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
offset shape: :math:`(N, 2 * H_f * W_f, H_{out}, W_{out})`
mask shape: :math:`(N, H_f * W_f, H_{out}, W_{out})`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\
W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Args:
x (Tensor): The input image with [N, C, H, W] format. A Tensor with type
float32, float64.
offset (Tensor): The input coordinate offset of deformable convolution layer.
A Tensor with type float32, float64.
weight (Tensor): The convolution kernel with shape [M, C/g, kH, kW], where M is
the number of output channels, g is the number of groups, kH is the filter's
height, kW is the filter's width.
bias (Tensor, optional): The bias with shape [M,]. Default: None.
stride (int|list|tuple, optional): The stride size. If stride is a list/tuple, it must
contain two integers, (stride_H, stride_W). Otherwise, the
stride_H = stride_W = stride. Default: 1.
padding (int|list|tuple, optional): The padding size. If padding is a list/tuple, it must
contain two integers, (padding_H, padding_W). Otherwise, the
padding_H = padding_W = padding. Default: 0.
dilation (int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must
contain two integers, (dilation_H, dilation_W). Otherwise, the
dilation_H = dilation_W = dilation. Default: 1.
deformable_groups (int): The number of deformable group partitions.
Default: 1.
groups (int, optional): The group number of the deformable conv layer. According to
grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: 1.
mask (Tensor, optional): The input mask of deformable convolution layer.
A Tensor with type float32, float64. It should be None when you use
deformable convolution v1. Default: None.
name (str, optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Tensor: 4-D Tensor storing the deformable convolution result.\
A Tensor with type float32, float64.
Examples:
.. code-block:: python
>>> #deformable conv v2:
>>> import paddle
>>> input = paddle.rand((8, 1, 28, 28))
>>> kh, kw = 3, 3
>>> weight = paddle.rand((16, 1, kh, kw))
>>> # offset shape should be [bs, 2 * kh * kw, out_h, out_w]
>>> # mask shape should be [bs, kh * kw, out_h, out_w]
>>> # In this case, for an input of 28, stride of 1
>>> # and kernel size of 3, without padding, the output size is 26
>>> offset = paddle.rand((8, 2 * kh * kw, 26, 26))
>>> mask = paddle.rand((8, kh * kw, 26, 26))
>>> out = paddle.vision.ops.deform_conv2d(input, offset, weight, mask=mask)
>>> print(out.shape)
[8, 16, 26, 26]
>>> #deformable conv v1:
>>> import paddle
>>> input = paddle.rand((8, 1, 28, 28))
>>> kh, kw = 3, 3
>>> weight = paddle.rand((16, 1, kh, kw))
>>> # offset shape should be [bs, 2 * kh * kw, out_h, out_w]
>>> # In this case, for an input of 28, stride of 1
>>> # and kernel size of 3, without padding, the output size is 26
>>> offset = paddle.rand((8, 2 * kh * kw, 26, 26))
>>> out = paddle.vision.ops.deform_conv2d(input, offset, weight)
>>> print(out.shape)
[8, 16, 26, 26]
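>>> # Added illustrative check: the spatial size 26 used for offset and mask
>>> # follows the output-size formula above with padding 0, dilation 1 and
>>> # stride 1.
>>> H_in, pad, dil, stride = 28, 0, 1, 1
>>> (H_in + 2 * pad - (dil * (kh - 1) + 1)) // stride + 1
26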
"""
stride = convert_to_list(stride, 2, 'stride')
padding = convert_to_list(padding, 2, 'padding')
dilation = convert_to_list(dilation, 2, 'dilation')
use_deform_conv2d_v1 = True if mask is None else False
if in_dygraph_mode():
pre_bias = _C_ops.deformable_conv(
x,
offset,
weight,
mask,
stride,
padding,
dilation,
deformable_groups,
groups,
1,
)
if bias is not None:
out = _add_with_axis(pre_bias, bias, axis=1)
else:
out = pre_bias
else:
check_variable_and_dtype(
x, "x", ['float32', 'float64'], 'deform_conv2d'
)
check_variable_and_dtype(
offset, "offset", ['float32', 'float64'], 'deform_conv2d'
)
num_channels = x.shape[1]
helper = LayerHelper('deformable_conv', **locals())
dtype = helper.input_dtype()
stride = convert_to_list(stride, 2, 'stride')
padding = convert_to_list(padding, 2, 'padding')
dilation = convert_to_list(dilation, 2, 'dilation')
pre_bias = helper.create_variable_for_type_inference(dtype)
if use_deform_conv2d_v1:
op_type = 'deformable_conv_v1'
inputs = {
'Input': x,
'Filter': weight,
'Offset': offset,
}
else:
op_type = 'deformable_conv'
inputs = {
'Input': x,
'Filter': weight,
'Offset': offset,
'Mask': mask,
}
outputs = {"Output": pre_bias}
attrs = {
'strides': stride,
'paddings': padding,
'dilations': dilation,
'groups': groups,
'deformable_groups': deformable_groups,
'im2col_step': 1,
}
helper.append_op(
type=op_type, inputs=inputs, outputs=outputs, attrs=attrs
)
if bias is not None:
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='elementwise_add',
inputs={'X': [pre_bias], 'Y': [bias]},
outputs={'Out': [out]},
attrs={'axis': 1},
)
else:
out = pre_bias
return out
class DeformConv2D(Layer):
r"""
Compute 2-D deformable convolution on 4-D input.
Given input image x and output feature map y, the deformable convolution operation can be expressed as follows:
Deformable Convolution v2:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}
Deformable Convolution v1:
.. math::
y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}
Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
and :math:`\Delta m_k` is 1 in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
<https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.
Example:
- Input:
x shape: :math:`(N, C_{in}, H_{in}, W_{in})`
weight shape: :math:`(C_{out}, C_{in}, H_f, W_f)`
offset shape: :math:`(N, 2 * H_f * W_f, H_{out}, W_{out})`
mask shape: :math:`(N, H_f * W_f, H_{out}, W_{out})`
- Output:
Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`
Where
.. math::
H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\
W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
Parameters:
in_channels(int): The number of input channels in the input image.