Commit b318c3ec, authored Apr 20, 2019 by Rodrigo Berriel; committed by Francisco Massa, Apr 20, 2019.
Add a switch for POST_NMS per batch/image during training (#695)
Parent: 4466eb5a
Showing 2 changed files with 10 additions and 3 deletions:
maskrcnn_benchmark/config/defaults.py (+3, -0)
maskrcnn_benchmark/modeling/rpn/inference.py (+7, -3)
maskrcnn_benchmark/config/defaults.py

@@ -165,6 +165,9 @@ _C.MODEL.RPN.MIN_SIZE = 0
 # all FPN levels
 _C.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN = 2000
 _C.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST = 2000
+# Apply the post NMS per batch (default) or per image during training
+# (default is True to be consistent with Detectron, see Issue #672)
+_C.MODEL.RPN.FPN_POST_NMS_PER_BATCH = True
 # Custom rpn head, empty to use default conv or separable conv
 _C.MODEL.RPN.RPN_HEAD = "SingleConvRPNHead"
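The new key lives in the yacs-based config, so it can be flipped without editing defaults.py. A minimal sketch of doing that from Python; it assumes the usual maskrcnn_benchmark.config.cfg entry point and the yacs merge_from_list API, and the print calls are only there for illustration:

# Illustrative override of the switch added above (not part of this commit).
from maskrcnn_benchmark.config import cfg

print(cfg.MODEL.RPN.FPN_POST_NMS_PER_BATCH)  # True: Detectron-compatible default

# Ask for per-image selection of the FPN post-NMS proposals during training.
cfg.merge_from_list(["MODEL.RPN.FPN_POST_NMS_PER_BATCH", False])
print(cfg.MODEL.RPN.FPN_POST_NMS_PER_BATCH)  # False

The same key/value pair can normally also be appended to the command line of tools/train_net.py, which forwards trailing arguments to merge_from_list.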
maskrcnn_benchmark/modeling/rpn/inference.py
@@ -24,6 +24,7 @@ class RPNPostProcessor(torch.nn.Module):
         min_size,
         box_coder=None,
         fpn_post_nms_top_n=None,
+        fpn_post_nms_per_batch=True,
     ):
         """
         Arguments:
@@ -47,6 +48,7 @@ class RPNPostProcessor(torch.nn.Module):
         if fpn_post_nms_top_n is None:
             fpn_post_nms_top_n = post_nms_top_n
         self.fpn_post_nms_top_n = fpn_post_nms_top_n
+        self.fpn_post_nms_per_batch = fpn_post_nms_per_batch

     def add_gt_proposals(self, proposals, targets):
         """
@@ -154,9 +156,9 @@ class RPNPostProcessor(torch.nn.Module):
         # different behavior during training and during testing:
         # during training, post_nms_top_n is over *all* the proposals combined, while
         # during testing, it is over the proposals for each image
-        # TODO resolve this difference and make it consistent. It should be per image,
-        # and not per batch
-        if self.training:
+        # NOTE: it should be per image, and not per batch. However, to be consistent
+        # with Detectron, the default is per batch (see Issue #672)
+        if self.training and self.fpn_post_nms_per_batch:
             objectness = torch.cat(
                 [boxlist.get_field("objectness") for boxlist in boxlists], dim=0
             )
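As an aside to the hunk above: the flag only changes how the post-NMS budget is spent during training. Per batch, the top scores are taken over all images combined, so images compete with each other; per image, each image keeps its own top N. A small self-contained sketch of the two strategies (toy scores and N, not code from the repository):

import torch

torch.manual_seed(0)
# Toy objectness scores for a batch of two images with five proposals each.
objectness_per_image = [torch.rand(5), torch.rand(5)]
post_nms_top_n = 4

# FPN_POST_NMS_PER_BATCH = True (Detectron-compatible default): rank all
# proposals in the batch together, so one image may keep more than another.
flat = torch.cat(objectness_per_image, dim=0)
_, keep = flat.topk(min(post_nms_top_n, flat.numel()), sorted=True)
print("per-batch keep (indices into the concatenated scores):", keep.tolist())

# FPN_POST_NMS_PER_BATCH = False: rank proposals independently per image.
for i, scores in enumerate(objectness_per_image):
    _, keep_i = scores.topk(min(post_nms_top_n, scores.numel()), sorted=True)
    print(f"image {i} keep:", keep_i.tolist())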
@@ -189,6 +191,7 @@ def make_rpn_postprocessor(config, rpn_box_coder, is_train):
     if not is_train:
         pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TEST
         post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TEST
+    fpn_post_nms_per_batch = config.MODEL.RPN.FPN_POST_NMS_PER_BATCH
     nms_thresh = config.MODEL.RPN.NMS_THRESH
     min_size = config.MODEL.RPN.MIN_SIZE
     box_selector = RPNPostProcessor(
@@ -198,5 +201,6 @@ def make_rpn_postprocessor(config, rpn_box_coder, is_train):
         min_size=min_size,
         box_coder=rpn_box_coder,
         fpn_post_nms_top_n=fpn_post_nms_top_n,
+        fpn_post_nms_per_batch=fpn_post_nms_per_batch,
     )
     return box_selector
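To close the loop, a hedged sketch of how the flag travels from the config into a constructed post-processor. It relies on call sites that already exist in maskrcnn_benchmark (make_rpn_postprocessor and BoxCoder); the BoxCoder weights below are the customary RPN values and are not introduced by this commit:

from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.rpn.inference import make_rpn_postprocessor

# Opt out of the per-batch behavior for this run (illustrative override).
cfg.merge_from_list(["MODEL.RPN.FPN_POST_NMS_PER_BATCH", False])

rpn_box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
box_selector_train = make_rpn_postprocessor(cfg, rpn_box_coder, is_train=True)

# The switch only matters in training; at test time selection is already per image.
print(box_selector_train.fpn_post_nms_per_batch)  # False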