Commit 143bb0c1 authored by Ilija Radosavovic, committed by Facebook GitHub Bot

Ensure that multi-dataset testing results don't get overwritten

Reviewed By: rbgirshick

Differential Revision: D7082759

fbshipit-source-id: b3257e418944dcfb3c15ff71404c39451d4e225d
parent 92f6cd68
...@@ -1034,8 +1034,7 @@ def cache_cfg_urls(): ...@@ -1034,8 +1034,7 @@ def cache_cfg_urls():
def get_output_dir(training=True): def get_output_dir(training=True):
"""Get the output directory determined by the current global config.""" """Get the output directory determined by the current global config."""
dataset = __C.TRAIN.DATASETS if training else __C.TEST.DATASETS dataset = ':'.join(__C.TRAIN.DATASETS) if training else __C.TEST.DATASET
dataset = ':'.join(dataset)
tag = 'train' if training else 'test' tag = 'train' if training else 'test'
# <output-dir>/<train|test>/<dataset>/<model-type>/ # <output-dir>/<train|test>/<dataset>/<model-type>/
outdir = osp.join(__C.OUTPUT_DIR, tag, dataset, __C.MODEL.TYPE) outdir = osp.join(__C.OUTPUT_DIR, tag, dataset, __C.MODEL.TYPE)
......
...@@ -39,6 +39,7 @@ from caffe2.python import core ...@@ -39,6 +39,7 @@ from caffe2.python import core
from caffe2.python import workspace from caffe2.python import workspace
from core.config import cfg from core.config import cfg
from core.config import get_output_dir
from datasets import task_evaluation from datasets import task_evaluation
from datasets.json_dataset import JsonDataset from datasets.json_dataset import JsonDataset
from modeling import model_builder from modeling import model_builder
...@@ -53,8 +54,9 @@ import utils.subprocess as subprocess_utils ...@@ -53,8 +54,9 @@ import utils.subprocess as subprocess_utils
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def generate_rpn_on_dataset(output_dir, multi_gpu=False, gpu_id=0): def generate_rpn_on_dataset(multi_gpu=False, gpu_id=0):
"""Run inference on a dataset.""" """Run inference on a dataset."""
output_dir = get_output_dir(training=False)
dataset = JsonDataset(cfg.TEST.DATASET) dataset = JsonDataset(cfg.TEST.DATASET)
test_timer = Timer() test_timer = Timer()
test_timer.tic() test_timer.tic()
...@@ -65,9 +67,7 @@ def generate_rpn_on_dataset(output_dir, multi_gpu=False, gpu_id=0): ...@@ -65,9 +67,7 @@ def generate_rpn_on_dataset(output_dir, multi_gpu=False, gpu_id=0):
) )
else: else:
# Processes entire dataset range by default # Processes entire dataset range by default
_boxes, _scores, _ids, rpn_file = generate_rpn_on_range( _boxes, _scores, _ids, rpn_file = generate_rpn_on_range(gpu_id=gpu_id)
output_dir, gpu_id=gpu_id
)
test_timer.toc() test_timer.toc()
logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time)) logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
return evaluate_proposal_file(dataset, rpn_file, output_dir) return evaluate_proposal_file(dataset, rpn_file, output_dir)
...@@ -101,7 +101,7 @@ def multi_gpu_generate_rpn_on_dataset(num_images, output_dir): ...@@ -101,7 +101,7 @@ def multi_gpu_generate_rpn_on_dataset(num_images, output_dir):
return boxes, scores, ids, rpn_file return boxes, scores, ids, rpn_file
def generate_rpn_on_range(output_dir, ind_range=None, gpu_id=0): def generate_rpn_on_range(ind_range=None, gpu_id=0):
"""Run inference on all images in a dataset or over an index range of images """Run inference on all images in a dataset or over an index range of images
in a dataset using a single GPU. in a dataset using a single GPU.
""" """
...@@ -112,6 +112,7 @@ def generate_rpn_on_range(output_dir, ind_range=None, gpu_id=0): ...@@ -112,6 +112,7 @@ def generate_rpn_on_range(output_dir, ind_range=None, gpu_id=0):
assert cfg.MODEL.RPN_ONLY or cfg.MODEL.FASTER_RCNN assert cfg.MODEL.RPN_ONLY or cfg.MODEL.FASTER_RCNN
roidb, start_ind, end_ind, total_num_images = get_roidb(ind_range) roidb, start_ind, end_ind, total_num_images = get_roidb(ind_range)
output_dir = get_output_dir(training=False)
logger.info( logger.info(
'Output will be saved to: {:s}'.format(os.path.abspath(output_dir)) 'Output will be saved to: {:s}'.format(os.path.abspath(output_dir))
) )
......
...@@ -31,6 +31,7 @@ import yaml ...@@ -31,6 +31,7 @@ import yaml
from caffe2.python import workspace from caffe2.python import workspace
from core.config import cfg from core.config import cfg
from core.config import get_output_dir
from core.rpn_generator import generate_rpn_on_dataset from core.rpn_generator import generate_rpn_on_dataset
from core.rpn_generator import generate_rpn_on_range from core.rpn_generator import generate_rpn_on_range
from core.test import im_detect_all from core.test import im_detect_all
...@@ -62,7 +63,7 @@ def get_eval_functions(): ...@@ -62,7 +63,7 @@ def get_eval_functions():
return parent_func, child_func return parent_func, child_func
def run_inference(output_dir, ind_range=None, multi_gpu_testing=False, gpu_id=0): def run_inference(ind_range=None, multi_gpu_testing=False, gpu_id=0):
parent_func, child_func = get_eval_functions() parent_func, child_func = get_eval_functions()
is_parent = ind_range is None is_parent = ind_range is None
...@@ -80,7 +81,7 @@ def run_inference(output_dir, ind_range=None, multi_gpu_testing=False, gpu_id=0) ...@@ -80,7 +81,7 @@ def run_inference(output_dir, ind_range=None, multi_gpu_testing=False, gpu_id=0)
cfg.TEST.DATASET = cfg.TEST.DATASETS[i] cfg.TEST.DATASET = cfg.TEST.DATASETS[i]
if cfg.TEST.PRECOMPUTED_PROPOSALS: if cfg.TEST.PRECOMPUTED_PROPOSALS:
cfg.TEST.PROPOSAL_FILE = cfg.TEST.PROPOSAL_FILES[i] cfg.TEST.PROPOSAL_FILE = cfg.TEST.PROPOSAL_FILES[i]
results = parent_func(output_dir, multi_gpu=multi_gpu_testing) results = parent_func(multi_gpu=multi_gpu_testing)
all_results.update(results) all_results.update(results)
return all_results return all_results
...@@ -89,11 +90,12 @@ def run_inference(output_dir, ind_range=None, multi_gpu_testing=False, gpu_id=0) ...@@ -89,11 +90,12 @@ def run_inference(output_dir, ind_range=None, multi_gpu_testing=False, gpu_id=0)
# In this case test_net was called via subprocess.Popen to execute on a # In this case test_net was called via subprocess.Popen to execute on a
# range of inputs on a single dataset (i.e., use cfg.TEST.DATASET and # range of inputs on a single dataset (i.e., use cfg.TEST.DATASET and
# don't loop over cfg.TEST.DATASETS) # don't loop over cfg.TEST.DATASETS)
return child_func(output_dir, ind_range=ind_range, gpu_id=gpu_id) return child_func(ind_range=ind_range, gpu_id=gpu_id)
def test_net_on_dataset(output_dir, multi_gpu=False, gpu_id=0): def test_net_on_dataset(multi_gpu=False, gpu_id=0):
"""Run inference on a dataset.""" """Run inference on a dataset."""
output_dir = get_output_dir(training=False)
dataset = JsonDataset(cfg.TEST.DATASET) dataset = JsonDataset(cfg.TEST.DATASET)
test_timer = Timer() test_timer = Timer()
test_timer.tic() test_timer.tic()
...@@ -103,7 +105,7 @@ def test_net_on_dataset(output_dir, multi_gpu=False, gpu_id=0): ...@@ -103,7 +105,7 @@ def test_net_on_dataset(output_dir, multi_gpu=False, gpu_id=0):
num_images, output_dir num_images, output_dir
) )
else: else:
all_boxes, all_segms, all_keyps = test_net(output_dir, gpu_id=gpu_id) all_boxes, all_segms, all_keyps = test_net(gpu_id=gpu_id)
test_timer.toc() test_timer.toc()
logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time)) logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
results = task_evaluation.evaluate_all( results = task_evaluation.evaluate_all(
...@@ -153,7 +155,7 @@ def multi_gpu_test_net_on_dataset(num_images, output_dir): ...@@ -153,7 +155,7 @@ def multi_gpu_test_net_on_dataset(num_images, output_dir):
return all_boxes, all_segms, all_keyps return all_boxes, all_segms, all_keyps
def test_net(output_dir, ind_range=None, gpu_id=0): def test_net(ind_range=None, gpu_id=0):
"""Run inference on all images in a dataset or over an index range of images """Run inference on all images in a dataset or over an index range of images
in a dataset using a single GPU. in a dataset using a single GPU.
""" """
...@@ -164,6 +166,7 @@ def test_net(output_dir, ind_range=None, gpu_id=0): ...@@ -164,6 +166,7 @@ def test_net(output_dir, ind_range=None, gpu_id=0):
assert cfg.TEST.DATASET != '', \ assert cfg.TEST.DATASET != '', \
'TEST.DATASET must be set to the dataset name to test' 'TEST.DATASET must be set to the dataset name to test'
output_dir = get_output_dir(training=False)
roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset( roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
ind_range ind_range
) )
......
...@@ -33,7 +33,6 @@ from caffe2.python import workspace ...@@ -33,7 +33,6 @@ from caffe2.python import workspace
from core.config import assert_and_infer_cfg from core.config import assert_and_infer_cfg
from core.config import cfg from core.config import cfg
from core.config import get_output_dir
from core.config import merge_cfg_from_file from core.config import merge_cfg_from_file
from core.config import merge_cfg_from_list from core.config import merge_cfg_from_list
from core.test_engine import run_inference from core.test_engine import run_inference
...@@ -93,9 +92,8 @@ def parse_args(): ...@@ -93,9 +92,8 @@ def parse_args():
def main(ind_range=None, multi_gpu_testing=False): def main(ind_range=None, multi_gpu_testing=False):
output_dir = get_output_dir(training=False)
all_results = run_inference( all_results = run_inference(
output_dir, ind_range=ind_range, multi_gpu_testing=multi_gpu_testing ind_range=ind_range, multi_gpu_testing=multi_gpu_testing
) )
if not ind_range: if not ind_range:
task_evaluation.check_expected_results( task_evaluation.check_expected_results(
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment