Commit 88dd4b1d authored by Ashwin Bharambe, committed by Facebook Github Bot

Prepare for python3 compatibility [1]

Summary:
This is a first step towards python3 compatibility. Specifically, it
tackles:

 - `cPickle`: uses six.moves
 - `Queue`: uses six.moves
 - `urllib2`: uses six.moves and changes `urllib2.urlopen` to `urllib.request.urlopen` (see the sketch below)
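
A minimal sketch of these replacements (the URL below is only a hypothetical example):

    from six.moves import cPickle as pickle  # was: import cPickle as pickle
    from six.moves import queue as Queue     # was: import Queue
    from six.moves import urllib             # was: import urllib2

    # urllib2.urlopen(url) becomes urllib.request.urlopen(url)
    response = urllib.request.urlopen('https://example.com/model.pkl')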

It also changes all config values declared with "byte" types to "string" types.
Those values aren't un-encoded byte streams; they are specifically ASCII (or
Unicode-encoded) strings that are written and read by humans.
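
For illustration, a minimal sketch of the resulting pattern (the config value shown is only an example, not a specific line from this diff):

    import six

    # Config entries are now plain text strings, e.g. '' or 'generalized_rcnn',
    # rather than b'' or b'generalized_rcnn'
    model_type = 'generalized_rcnn'

    # six.string_types is (str,) on Python 3 and (basestring,) on Python 2,
    # so the same isinstance check works under both interpreters
    assert isinstance(model_type, six.string_types)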

Reviewed By: rbgirshick

Differential Revision: D9662024

fbshipit-source-id: b8372f685b57ec4260ae881a2f8bb7967f337b10
parent fd09e37a
@@ -44,12 +44,13 @@ from __future__ import unicode_literals
from ast import literal_eval
from future.utils import iteritems
from past.builtins import basestring
import copy
import io
import logging
import numpy as np
import os
import os.path as osp
import six
import yaml
from detectron.utils.collections import AttrDict
@@ -72,7 +73,7 @@ cfg = __C
__C.TRAIN = AttrDict()
# Initialize network with weights from this .pkl file
__C.TRAIN.WEIGHTS = b''
__C.TRAIN.WEIGHTS = ''
# Datasets to train on
# Available dataset list: detectron.datasets.dataset_catalog.datasets()
@@ -219,7 +220,7 @@ __C.DATA_LOADER.BLOBS_QUEUE_CAPACITY = 8
__C.TEST = AttrDict()
# Initialize network with weights from this .pkl file
__C.TEST.WEIGHTS = b''
__C.TEST.WEIGHTS = ''
# Datasets to test on
# Available dataset list: detectron.datasets.dataset_catalog.datasets()
@@ -297,11 +298,11 @@ __C.TEST.BBOX_AUG.ENABLED = False
# Heuristic used to combine predicted box scores
# Valid options: ('ID', 'AVG', 'UNION')
__C.TEST.BBOX_AUG.SCORE_HEUR = b'UNION'
__C.TEST.BBOX_AUG.SCORE_HEUR = 'UNION'
# Heuristic used to combine predicted box coordinates
# Valid options: ('ID', 'AVG', 'UNION')
__C.TEST.BBOX_AUG.COORD_HEUR = b'UNION'
__C.TEST.BBOX_AUG.COORD_HEUR = 'UNION'
# Horizontal flip at the original scale (id transform)
__C.TEST.BBOX_AUG.H_FLIP = False
@@ -338,7 +339,7 @@ __C.TEST.MASK_AUG.ENABLED = False
# Heuristic used to combine mask predictions
# SOFT prefix indicates that the computation is performed on soft masks
# Valid options: ('SOFT_AVG', 'SOFT_MAX', 'LOGIT_AVG')
__C.TEST.MASK_AUG.HEUR = b'SOFT_AVG'
__C.TEST.MASK_AUG.HEUR = 'SOFT_AVG'
# Horizontal flip at the original scale (id transform)
__C.TEST.MASK_AUG.H_FLIP = False
@@ -373,7 +374,7 @@ __C.TEST.KPS_AUG.ENABLED = False
# Heuristic used to combine keypoint predictions
# Valid options: ('HM_AVG', 'HM_MAX')
__C.TEST.KPS_AUG.HEUR = b'HM_AVG'
__C.TEST.KPS_AUG.HEUR = 'HM_AVG'
# Horizontal flip at the original scale (id transform)
__C.TEST.KPS_AUG.H_FLIP = False
@@ -405,7 +406,7 @@ __C.TEST.SOFT_NMS = AttrDict()
# Use soft NMS instead of standard NMS if set to True
__C.TEST.SOFT_NMS.ENABLED = False
# See soft NMS paper for definition of these options
__C.TEST.SOFT_NMS.METHOD = b'linear'
__C.TEST.SOFT_NMS.METHOD = 'linear'
__C.TEST.SOFT_NMS.SIGMA = 0.5
# For the soft NMS overlap threshold, we simply use TEST.NMS
@@ -423,7 +424,7 @@ __C.TEST.BBOX_VOTE.VOTE_TH = 0.8
# The method used to combine scores when doing bounding box voting
# Valid options include ('ID', 'AVG', 'IOU_AVG', 'GENERALIZED_AVG', 'QUASI_SUM')
__C.TEST.BBOX_VOTE.SCORING_METHOD = b'ID'
__C.TEST.BBOX_VOTE.SCORING_METHOD = 'ID'
# Hyperparameter used by the scoring method (it has different meanings for
# different methods)
@@ -438,13 +439,13 @@ __C.MODEL = AttrDict()
# The type of model to use
# The string must match a function in the modeling.model_builder module
# (e.g., 'generalized_rcnn', 'mask_rcnn', ...)
__C.MODEL.TYPE = b''
__C.MODEL.TYPE = ''
# The backbone conv body to use
# The string must match a function that is imported in modeling.model_builder
# (e.g., 'FPN.add_fpn_ResNet101_conv5_body' to specify a ResNet-101-FPN
# backbone)
__C.MODEL.CONV_BODY = b''
__C.MODEL.CONV_BODY = ''
# Number of classes in the dataset; must be set
# E.g., 81 for COCO (80 foreground + 1 background)
@@ -483,7 +484,7 @@ __C.MODEL.RPN_ONLY = False
# Caffe2 net execution type
# Use 'prof_dag' to get profiling statistics
__C.MODEL.EXECUTION_TYPE = b'dag'
__C.MODEL.EXECUTION_TYPE = 'dag'
# ---------------------------------------------------------------------------- #
@@ -567,7 +568,7 @@ __C.SOLVER.BASE_LR = 0.001
# Schedule type (see functions in utils.lr_policy for options)
# E.g., 'step', 'steps_with_decay', ...
__C.SOLVER.LR_POLICY = b'step'
__C.SOLVER.LR_POLICY = 'step'
# Some LR Policies (by example):
# 'step'
@@ -638,7 +639,7 @@ __C.FAST_RCNN = AttrDict()
# The type of RoI head to use for bounding box classification and regression
# The string must match a function this is imported in modeling.model_builder
# (e.g., 'head_builder.add_roi_2mlp_head' to specify a two hidden layer MLP)
__C.FAST_RCNN.ROI_BOX_HEAD = b''
__C.FAST_RCNN.ROI_BOX_HEAD = ''
# Hidden layer dimension when using an MLP for the RoI box head
__C.FAST_RCNN.MLP_HEAD_DIM = 1024
@@ -650,7 +651,7 @@ __C.FAST_RCNN.NUM_STACKED_CONVS = 4
# RoI transformation function (e.g., RoIPool or RoIAlign)
# (RoIPoolF is the same as RoIPool; ignore the trailing 'F')
__C.FAST_RCNN.ROI_XFORM_METHOD = b'RoIPoolF'
__C.FAST_RCNN.ROI_XFORM_METHOD = 'RoIPoolF'
# Number of grid sampling points in RoIAlign (usually use 2)
# Only applies to RoIAlign
@@ -740,13 +741,13 @@ __C.MRCNN = AttrDict()
# The type of RoI head to use for instance mask prediction
# The string must match a function this is imported in modeling.model_builder
# (e.g., 'mask_rcnn_heads.ResNet_mask_rcnn_fcn_head_v1up4convs')
__C.MRCNN.ROI_MASK_HEAD = b''
__C.MRCNN.ROI_MASK_HEAD = ''
# Resolution of mask predictions
__C.MRCNN.RESOLUTION = 14
# RoI transformation function and associated options
__C.MRCNN.ROI_XFORM_METHOD = b'RoIAlign'
__C.MRCNN.ROI_XFORM_METHOD = 'RoIAlign'
# RoI transformation function (e.g., RoIPool or RoIAlign)
__C.MRCNN.ROI_XFORM_RESOLUTION = 7
@@ -768,7 +769,7 @@ __C.MRCNN.UPSAMPLE_RATIO = 1
__C.MRCNN.USE_FC_OUTPUT = False
# Weight initialization method for the mask head and mask output layers
__C.MRCNN.CONV_INIT = b'GaussianFill'
__C.MRCNN.CONV_INIT = 'GaussianFill'
# Use class specific mask predictions if True (otherwise use class agnostic mask
# predictions)
@@ -789,7 +790,7 @@ __C.KRCNN = AttrDict()
# The type of RoI head to use for instance keypoint prediction
# The string must match a function this is imported in modeling.model_builder
# (e.g., 'keypoint_rcnn_heads.add_roi_pose_head_v1convX')
__C.KRCNN.ROI_KEYPOINTS_HEAD = b''
__C.KRCNN.ROI_KEYPOINTS_HEAD = ''
# Output size (and size loss is computed on), e.g., 56x56
__C.KRCNN.HEATMAP_SIZE = -1
@@ -824,17 +825,17 @@ __C.KRCNN.CONV_HEAD_DIM = 256
# Conv kernel size used in the keypoint head
__C.KRCNN.CONV_HEAD_KERNEL = 3
# Conv kernel weight filling function
__C.KRCNN.CONV_INIT = b'GaussianFill'
__C.KRCNN.CONV_INIT = 'GaussianFill'
# Use NMS based on OKS if True
__C.KRCNN.NMS_OKS = False
# Source of keypoint confidence
# Valid options: ('bbox', 'logit', 'prob')
__C.KRCNN.KEYPOINT_CONFIDENCE = b'bbox'
__C.KRCNN.KEYPOINT_CONFIDENCE = 'bbox'
# Standard ROI XFORM options (see FAST_RCNN or MRCNN options)
__C.KRCNN.ROI_XFORM_METHOD = b'RoIAlign'
__C.KRCNN.ROI_XFORM_METHOD = 'RoIAlign'
__C.KRCNN.ROI_XFORM_RESOLUTION = 7
__C.KRCNN.ROI_XFORM_SAMPLING_RATIO = 0
@@ -884,11 +885,11 @@ __C.RESNETS.WIDTH_PER_GROUP = 64
__C.RESNETS.STRIDE_1X1 = True
# Residual transformation function
__C.RESNETS.TRANS_FUNC = b'bottleneck_transformation'
__C.RESNETS.TRANS_FUNC = 'bottleneck_transformation'
# ResNet's stem function (conv1 and pool1)
__C.RESNETS.STEM_FUNC = b'basic_bn_stem'
__C.RESNETS.STEM_FUNC = 'basic_bn_stem'
# ResNet's shortcut function
__C.RESNETS.SHORTCUT_FUNC = b'basic_bn_shortcut'
__C.RESNETS.SHORTCUT_FUNC = 'basic_bn_shortcut'
# Apply dilation in stage "res5"
__C.RESNETS.RES5_DILATION = 1
@@ -946,10 +947,10 @@ __C.EPS = 1e-14
__C.ROOT_DIR = os.getcwd()
# Output basedir
__C.OUTPUT_DIR = b'/tmp'
__C.OUTPUT_DIR = '/tmp'
# Name (or path to) the matlab executable
__C.MATLAB = b'matlab'
__C.MATLAB = 'matlab'
# Reduce memory usage with memonger gradient blob sharing
__C.MEMONGER = True
@@ -976,11 +977,11 @@ __C.EXPECTED_RESULTS_ATOL = 0.005
# that the actual value is within mean +/- SIGMA_TOL * std
__C.EXPECTED_RESULTS_SIGMA_TOL = 4
# Set to send email in case of an EXPECTED_RESULTS failure
__C.EXPECTED_RESULTS_EMAIL = b''
__C.EXPECTED_RESULTS_EMAIL = ''
# Models and proposals referred to by URL are downloaded to a local cache
# specified by DOWNLOAD_CACHE
__C.DOWNLOAD_CACHE = b'/tmp/detectron-download-cache'
__C.DOWNLOAD_CACHE = '/tmp/detectron-download-cache'
# ---------------------------------------------------------------------------- #
@@ -1100,9 +1101,9 @@ def cache_cfg_urls():
def get_output_dir(datasets, training=True):
"""Get the output directory determined by the current global config."""
assert isinstance(datasets, (tuple, list, basestring)), \
assert isinstance(datasets, tuple([tuple, list] + list(six.string_types))), \
'datasets argument must be of type tuple, list or string'
is_string = isinstance(datasets, basestring)
is_string = isinstance(datasets, six.string_types)
dataset_name = datasets if is_string else ':'.join(datasets)
tag = 'train' if training else 'test'
# <output-dir>/<train|test>/<dataset-name>/<model-type>/
@@ -1114,11 +1115,12 @@ def get_output_dir(datasets, training=True):
def load_cfg(cfg_to_load):
"""Wrapper around yaml.load used for maintaining backward compatibility"""
assert isinstance(cfg_to_load, (file, basestring)), \
'Expected {} or {} got {}'.format(file, basestring, type(cfg_to_load))
if isinstance(cfg_to_load, file):
file_types = [file, io.IOBase] if six.PY2 else [io.IOBase] # noqa false positive
expected_types = tuple(file_types + list(six.string_types))
assert isinstance(cfg_to_load, expected_types), \
'Expected one of {}, got {}'.format(expected_types, type(cfg_to_load))
if isinstance(cfg_to_load, tuple(file_types)):
cfg_to_load = ''.join(cfg_to_load.readlines())
if isinstance(cfg_to_load, basestring):
for old_module, new_module in iteritems(_RENAMED_MODULES):
# yaml object encoding: !!python/object/new:<module>.<object>
old_module, new_module = 'new:' + old_module, 'new:' + new_module
@@ -1232,7 +1234,7 @@ def _decode_cfg_value(v):
if isinstance(v, dict):
return AttrDict(v)
# All remaining processing is only applied to strings
if not isinstance(v, basestring):
if not isinstance(v, six.string_types):
return v
# Try to interpret `v` as a:
# string, number, tuple, list, dict, boolean, or None
@@ -1270,7 +1272,7 @@ def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):
# Exceptions: numpy arrays, strings, tuple<->list
if isinstance(value_b, np.ndarray):
value_a = np.array(value_a, dtype=value_b.dtype)
elif isinstance(value_b, basestring):
elif isinstance(value_b, six.string_types):
value_a = str(value_a)
elif isinstance(value_a, tuple) and isinstance(value_b, list):
value_a = list(value_a)

@@ -26,11 +26,11 @@ from __future__ import print_function
from __future__ import unicode_literals
import copy
import cPickle as pickle
import logging
import numpy as np
import os
import scipy.sparse
from six.moves import cPickle as pickle
# Must happen before importing COCO API (which imports matplotlib)
import detectron.utils.env as envu

@@ -22,11 +22,11 @@
"""Python implementation of the PASCAL VOC devkit's AP evaluation code."""
import cPickle
import logging
import numpy as np
import os
import xml.etree.ElementTree as ET
from six.moves import cPickle
logger = logging.getLogger(__name__)

@@ -44,11 +44,11 @@ from collections import deque
from collections import OrderedDict
import logging
import numpy as np
import Queue
import signal
import threading
import time
import uuid
from six.moves import queue as Queue
from caffe2.python import core, workspace

@@ -28,9 +28,9 @@ from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cPickle as pickle
import cv2
import numpy as np
from six.moves import cPickle as pickle
from caffe2.proto import caffe2_pb2

@@ -22,9 +22,9 @@ from __future__ import unicode_literals
import contextlib
import logging
import Queue
import threading
import traceback
from six.moves import queue as Queue
log = logging.getLogger(__name__)

@@ -20,13 +20,13 @@ from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cPickle as pickle
import hashlib
import logging
import os
import re
import sys
import urllib2
from six.moves import cPickle as pickle
from six.moves import urllib
logger = logging.getLogger(__name__)
@@ -45,7 +45,9 @@ def cache_url(url_or_file, cache_dir):
path to the cached file. If the argument is not a URL, simply return it as
is.
"""
is_url = re.match(r'^(?:http)s?://', url_or_file, re.IGNORECASE) is not None
is_url = re.match(
r'^(?:http)s?://', url_or_file, re.IGNORECASE
) is not None
if not is_url:
return url_or_file
@@ -111,7 +113,7 @@ def download_url(
Credit:
https://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
"""
response = urllib2.urlopen(url)
response = urllib.request.urlopen(url)
total_size = response.info().getheader('Content-Length').strip()
total_size = int(total_size)
bytes_so_far = 0
@@ -140,5 +142,5 @@ def _get_file_md5sum(file_name):
def _get_reference_md5sum(url):
"""By convention the md5 hash for url is stored in url + '.md5sum'."""
url_md5sum = url + '.md5sum'
md5sum = urllib2.urlopen(url_md5sum).read().strip()
md5sum = urllib.request.urlopen(url_md5sum).read().strip()
return md5sum
@@ -21,12 +21,12 @@ from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import cPickle as pickle
import logging
import numpy as np
import os
import pprint
import yaml
from six.moves import cPickle as pickle
from caffe2.python import core
from caffe2.python import workspace

@@ -27,7 +27,7 @@ import os
import yaml
import numpy as np
import subprocess
import cPickle as pickle
from six.moves import cPickle as pickle
from six.moves import shlex_quote
from detectron.core.config import cfg

@@ -9,10 +9,10 @@ from __future__ import print_function
from __future__ import unicode_literals
import argparse
import cPickle as pickle
import numpy as np
import os
import sys
from six.moves import cPickle as pickle
import detectron.datasets.coco_to_cityscapes_id as cs

@@ -24,10 +24,10 @@ from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cPickle as pickle
import numpy as np
import scipy.io as sio
import sys
from six.moves import cPickle as pickle
from detectron.datasets.json_dataset import JsonDataset

@@ -26,10 +26,10 @@ from __future__ import print_function
from __future__ import unicode_literals
import argparse
import cPickle as pickle
import numpy as np
import os
import sys
from six.moves import cPickle as pickle
from caffe.proto import caffe_pb2
from caffe2.proto import caffe2_pb2

@@ -31,10 +31,10 @@ from __future__ import unicode_literals
from __future__ import unicode_literals
import argparse
import cPickle as pickle
import os
import sys
import yaml
from six.moves import cPickle as pickle
from detectron.core.config import cfg
from detectron.datasets import task_evaluation

@@ -23,10 +23,10 @@ from __future__ import print_function
from __future__ import unicode_literals
import argparse
import cPickle as pickle
import cv2
import os
import sys
from six.moves import cPickle as pickle
from detectron.datasets.json_dataset import JsonDataset
import detectron.utils.vis as vis_utils