Commit 2941e77a authored by Ilija Radosavovic, committed by Facebook Github Bot

Provide config options for controlling data loader queue sizes

Reviewed By: rbgirshick

Differential Revision: D7513510

fbshipit-source-id: cf46a61f8e1e9f81154fc43974be5abc4afec2f4
parent 8f2f5cf9
@@ -190,7 +190,7 @@ __C.TRAIN.AUTO_RESUME = True
# ---------------------------------------------------------------------------- #
# Data loader options
# Data loader options (see lib/roi_data/loader.py for more info)
# ---------------------------------------------------------------------------- #
__C.DATA_LOADER = AttrDict()
@@ -199,6 +199,12 @@ __C.DATA_LOADER = AttrDict()
# training; 4 seems to be the sweet spot in our experience)
__C.DATA_LOADER.NUM_THREADS = 4
# Size of the shared minibatch queue
__C.DATA_LOADER.MINIBATCH_QUEUE_SIZE = 64
# Capacity of the per GPU blobs queue
__C.DATA_LOADER.BLOBS_QUEUE_CAPACITY = 8
# ---------------------------------------------------------------------------- #
# Inference ('test') options
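
# Editor's note (not part of the commit): the two new keys can be overridden like
# any other config entry. A minimal sketch, assuming Detectron's usual cfg /
# merge_cfg_from_list helpers in core.config; the values shown are just the
# defaults added above.
from core.config import cfg, merge_cfg_from_list

# Same effect as passing the key/value pairs on the command line,
# as in the benchmark usage example further down.
merge_cfg_from_list([
    'DATA_LOADER.NUM_THREADS', '4',
    'DATA_LOADER.MINIBATCH_QUEUE_SIZE', '64',
    'DATA_LOADER.BLOBS_QUEUE_CAPACITY', '8',
])
assert cfg.DATA_LOADER.MINIBATCH_QUEUE_SIZE == 64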
@@ -382,7 +382,10 @@ def add_training_inputs(model, roidb=None):
if roidb is not None:
# To make debugging easier you can set cfg.DATA_LOADER.NUM_THREADS = 1
model.roi_data_loader = RoIDataLoader(
roidb, num_loaders=cfg.DATA_LOADER.NUM_THREADS
roidb,
num_loaders=cfg.DATA_LOADER.NUM_THREADS,
minibatch_queue_size=cfg.DATA_LOADER.MINIBATCH_QUEUE_SIZE,
blobs_queue_capacity=cfg.DATA_LOADER.BLOBS_QUEUE_CAPACITY
)
orig_num_op = len(model.net._net.op)
blob_names = roi_data.minibatch.get_minibatch_blob_names(
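
# Editor's note (not part of the commit): the benchmark further down builds a
# loader the same way. A rough standalone sketch of that flow, assuming the
# RoIDataLoader import path lib/roi_data/loader.py mentioned in the config
# comment and a prebuilt roidb; start/shutdown are assumptions about the
# loader's lifecycle, not shown in this diff.
from core.config import cfg
from roi_data.loader import RoIDataLoader

loader = RoIDataLoader(
    roidb,  # list of roidb entry dicts prepared elsewhere
    num_loaders=cfg.DATA_LOADER.NUM_THREADS,
    minibatch_queue_size=cfg.DATA_LOADER.MINIBATCH_QUEUE_SIZE,
    blobs_queue_capacity=cfg.DATA_LOADER.BLOBS_QUEUE_CAPACITY
)
blob_names = loader.get_output_names()  # as used by the benchmark below
loader.start()     # assumed: begin filling the shared minibatch queue
# ... run the dequeue net / training net here ...
loader.shutdown()  # assumed: stop loader threads cleanly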
@@ -15,9 +15,12 @@
# Example usage:
# data_loader_benchmark.par \
# TRAIN.DATASETS voc_2007_trainval \
# NUM_GPUS 2 \
# TRAIN.PROPOSAL_FILES /path/to/voc_2007_trainval/proposals.pkl
# TRAIN.DATASETS "('voc_2007_trainval',)" \
# TRAIN.PROPOSAL_FILES /path/to/voc_2007_trainval/proposals.pkl \
# DATA_LOADER.NUM_THREADS 4 \
# DATA_LOADER.MINIBATCH_QUEUE_SIZE 64 \
# DATA_LOADER.BLOBS_QUEUE_CAPACITY 8
from __future__ import absolute_import
from __future__ import division
@@ -45,21 +48,6 @@ import utils.logging
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--loaders', dest='num_loaders',
help='Number of data loading threads',
default=4, type=int)
parser.add_argument(
'--dequeuers', dest='num_dequeuers',
help='Number of dequeuers',
default=1, type=int)
parser.add_argument(
'--minibatch-queue-size', dest='minibatch_queue_size',
help='Size of minibatch queue',
default=64, type=int)
parser.add_argument(
'--blobs-queue-capacity', dest='blobs_queue_capacity',
default=8, type=int)
parser.add_argument(
'--num-batches', dest='num_batches',
help='Number of minibatches to run',
@@ -105,9 +93,10 @@ def main(opts):
logger.info('{:d} roidb entries'.format(len(roidb)))
roi_data_loader = RoIDataLoader(
roidb,
num_loaders=opts.num_loaders,
minibatch_queue_size=opts.minibatch_queue_size,
blobs_queue_capacity=opts.blobs_queue_capacity)
num_loaders=cfg.DATA_LOADER.NUM_THREADS,
minibatch_queue_size=cfg.DATA_LOADER.MINIBATCH_QUEUE_SIZE,
blobs_queue_capacity=cfg.DATA_LOADER.BLOBS_QUEUE_CAPACITY
)
blob_names = roi_data_loader.get_output_names()
net = core.Net('dequeue_net')
@@ -141,10 +130,13 @@ def main(opts):
for _ in range(opts.x_factor):
workspace.RunNetOnce(net)
total_time += (time.time() - start_t) / opts.x_factor
logger.info('{:d}/{:d}: Average dequeue time: {:.3f}s [{:d}/{:d}]'.
format(i + 1, opts.num_batches, total_time / (i + 1),
roi_data_loader._minibatch_queue.qsize(),
opts.minibatch_queue_size))
logger.info(
'{:d}/{:d}: Average dequeue time: {:.3f}s [{:d}/{:d}]'.format(
i + 1, opts.num_batches, total_time / (i + 1),
roi_data_loader._minibatch_queue.qsize(),
cfg.DATA_LOADER.MINIBATCH_QUEUE_SIZE
)
)
# Sleep to simulate the time taken by running a little network
time.sleep(opts.sleep_time)
# To inspect: