Commit 4dc5e92e authored by Ashwin Bharambe and committed by Facebook Github Bot

Prepare for Python3 compatibility [3]

Summary:
A couple small issues pointed out by Ross.

 - Treating bytes as strings when handling a subprocess' output :/
 - Controlling the precision of logged floats using a workaround instead of
   FLOAT_REPR, which doesn't work with newer versions of Python 3.

Reviewed By: rbgirshick

Differential Revision: D9724292

fbshipit-source-id: a6aa1730f25df5d165291dc30b9350a9fff6fca6
parent 9185b699
@@ -24,6 +24,7 @@ import json
 import logging
 import numpy as np
 import os
+import six
 import uuid
 from pycocotools.cocoeval import COCOeval
@@ -86,6 +87,16 @@ def _write_coco_segms_results_file(
         'Writing segmentation results json to: {}'.format(
             os.path.abspath(res_file)))
     with open(res_file, 'w') as fid:
+        # "counts" is an array encoded by mask_util as a byte stream. Python 3's
+        # json writer, which always produces strings, cannot serialize a byte
+        # stream unless you decode it. Thankfully, utf-8 works out (which is
+        # also what pycocotools/_mask.pyx does).
+        if six.PY3:
+            for r in results:
+                rle = r['segmentation']
+                if 'counts' in rle:
+                    rle['counts'] = rle['counts'].decode("utf8")
         json.dump(results, fid)
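For context, a standalone sketch of the serialization failure this hunk works around (not part of this diff; the RLE values are made up). Under Python 3, pycocotools encodes 'counts' as bytes, which json.dump refuses to serialize until it is decoded:

import json

rle = {'size': [4, 4], 'counts': b'04210d0'}  # illustrative values only
try:
    json.dumps(rle)
except TypeError:
    # bytes are not JSON-serializable; utf-8 decoding yields an equivalent str
    rle['counts'] = rle['counts'].decode('utf8')
print(json.dumps(rle))  # now serializes cleanly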
...
@@ -160,6 +160,7 @@ def get_nvidia_info():
 def get_nvidia_smi_output():
     try:
         info = subprocess.check_output(["nvidia-smi"], stderr=subprocess.STDOUT)
+        info = info.decode("utf8")
     except Exception as e:
         info = "Executing nvidia-smi failed: " + str(e)
     return info.strip()
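The same bytes-versus-str pitfall in isolation, as a hedged sketch (run_and_capture is a hypothetical helper, not code from this repository): check_output returns bytes under Python 3, so without decoding, the captured text cannot be mixed with ordinary strings downstream.

import subprocess

def run_and_capture(cmd):
    # Hypothetical helper: capture a command's output as text on both
    # Python 2 and 3 by decoding the bytes that check_output returns.
    try:
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        return out.decode("utf8").strip()
    except Exception as e:
        return "Executing {} failed: {}".format(" ".join(cmd), str(e))

print(run_and_capture(["echo", "hello"]))  # hello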
@@ -28,11 +28,13 @@ import numpy as np
 import smtplib
 import sys
-# Print lower precision floating point values than default FLOAT_REPR
-json.encoder.FLOAT_REPR = lambda o: format(o, '.6f')
 def log_json_stats(stats, sort_keys=True):
+    # hack to control precision of top-level floats
+    stats = {
+        k: '{:.6f}'.format(v) if isinstance(v, float) else v
+        for k, v in stats.items()
+    }
     print('json_stats: {:s}'.format(json.dumps(stats, sort_keys=sort_keys)))
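A hedged usage sketch of the new approach (the stats values are invented): formatting top-level floats before json.dumps keeps the logged precision stable, whereas the old monkey-patch of json.encoder.FLOAT_REPR is ignored by newer versions of Python 3. Note that the floats are emitted as quoted strings, which is the trade-off this hack accepts.

import json

stats = {'iter': 20, 'loss': 0.123456789, 'lr': 0.0025}  # invented example values
stats = {
    k: '{:.6f}'.format(v) if isinstance(v, float) else v
    for k, v in stats.items()
}
print('json_stats: {:s}'.format(json.dumps(stats, sort_keys=True)))
# json_stats: {"iter": 20, "loss": "0.123457", "lr": "0.002500"}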
...
@@ -93,7 +93,7 @@ def process_in_parallel(
     outputs = []
     for i, p, start, end, subprocess_stdout in processes:
         log_subprocess_output(i, p, output_dir, tag, start, end)
-        if isinstance(subprocess_stdout, file):  # NOQA (Python 2 for now)
+        if i > 0:
             subprocess_stdout.close()
         range_file = os.path.join(
             output_dir, '%s_range_%s_%s.pkl' % (tag, start, end)
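A hedged aside on why the old guard had to go (not code from this diff): the file builtin no longer exists in Python 3, so isinstance(subprocess_stdout, file) raises NameError there. If a type check were still wanted, open file handles can instead be recognized via io.IOBase:

import io
import os

# Under Python 3 there is no `file` builtin; objects returned by open()
# derive from io.IOBase, so this check plays the same role.
with open(os.devnull, 'w') as fh:
    print(isinstance(fh, io.IOBase))  # True on Python 3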
@@ -119,10 +119,10 @@ def log_subprocess_output(i, p, output_dir, tag, start, end):
     logger.info('# ' + '-' * 76 + ' #')
     if i == 0:
         # Stream the piped stdout from the first subprocess in realtime
-        with open(outfile, 'w') as f:
+        with open(outfile, 'wb') as f:
             for line in iter(p.stdout.readline, b''):
-                print(line.rstrip())
-                f.write(str(line))
+                print(line.rstrip().decode("utf8"))
+                f.write(line)
         p.stdout.close()
         ret = p.wait()
     else:
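A minimal standalone sketch of the same streaming pattern (the command and the stream.log filename are arbitrary, not from this repository): under Python 3 a piped stdout yields bytes, so each line is decoded only for printing and written to the log file untouched, which is why the file is opened in 'wb'.

import subprocess

p = subprocess.Popen(["echo", "hello"], stdout=subprocess.PIPE)
with open("stream.log", "wb") as f:
    for line in iter(p.stdout.readline, b''):
        print(line.rstrip().decode("utf8"))  # decoded copy for the console
        f.write(line)                        # raw bytes for the log file
p.stdout.close()
p.wait()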
...