@@ -11,7 +11,7 @@ def cropMask(imageDir,saveDir,imagePath, border):
     if mask is None or mask.shape[0]==0:
         img = cv2.imread(imageDir+imagePath)
         if img is None:
-            print "Could not read: "+imageDir+imagePath
+            print("Could not read: "+imageDir+imagePath)
         mask = np.zeros(img.shape[:2],np.uint8)
         bgdModel = np.zeros((1,65),np.float64)
         fgdModel = np.zeros((1,65),np.float64)
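This first hunk shows the pattern that drives most of this patch: Python 2's `print` statement becomes the Python 3 `print()` function. A minimal sketch of the difference, using a hypothetical path, including the `__future__` import that lets a file run under both interpreters during a staged migration:

```python
# Works under Python 2.6+ and Python 3: makes print a function in both.
from __future__ import print_function

import sys

path = "images/page_001.png"  # hypothetical example path
print("Could not read: " + path)                  # valid in both versions
print("Could not read:", path, file=sys.stderr)   # file= only exists on the function form
```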
@@ -36,8 +36,8 @@ def cropMask(imageDir,saveDir,imagePath, border):
     return (labels!=maxLabel).astype('uint8')
 if len(sys.argv)<5:
-    print 'This evalutes using grab cut'
-    print 'usage: '+sys.argv[0]+' gtFile.csv imageDir saveIntermDir numThreads' # [reverse]'
+    print('This evaluates using GrabCut')
+    print('usage: '+sys.argv[0]+' gtFile.csv imageDir saveIntermDir numThreads') # [reverse]'
     exit(0)
 gtFile=sys.argv[1]
@@ -57,7 +57,7 @@ countIOU=0
 scale=1
-print 'eval on '+gtFile
+print('eval on '+gtFile)
 outFile = gtFile+'_fullgrab.res'
 #numLines=0
@@ -115,8 +115,8 @@ def worker(line):
             cv2.imwrite(saveDir+"ERR_"+imagePath.replace('/','_')+'.png',mask*255);
         return imagePath, float(intersection)/union
     except:
-        print 'Error: '
-        print sys.exc_info()
+        print('Error: ')
+        print(sys.exc_info())
 with open(gtFile) as f:
     pool = ThreadPool(numThreads)
@@ -129,6 +129,6 @@ with open(gtFile) as f:
     out.write('mean IOU for '+gtFile+': '+str(sumIOU/countIOU)+'\n')
     out.close()
-print 'mean IOU for '+gtFile+': '+str(sumIOU/countIOU)
+print('mean IOU for '+gtFile+': '+str(sumIOU/countIOU))
@@ -21,9 +21,9 @@ def cropMaskPortion(imgH,imgW,xp1,yp1,xp2,yp2,xp3,yp3,xp4,yp4):
     return mask
 if len(sys.argv)<3:
-    print 'Computes no-crop and mean-crop baselines, generates two files {gtFile}_fullno.res and {gtFile}_fullmean.res'
-    print 'usage: '+sys.argv[0]+' gtFile.csv imageDir [mean_x1 mean_y1 mean_x2 mean_y2 mean_x3 mean_y3 mean_x4 mean_y4]'
-    print ' (optional mean box)'
+    print('Computes no-crop and mean-crop baselines and generates two files: {gtFile}_fullno.res and {gtFile}_fullmean.res')
+    print('usage: '+sys.argv[0]+' gtFile.csv imageDir [mean_x1 mean_y1 mean_x2 mean_y2 mean_x3 mean_y3 mean_x4 mean_y4]')
+    print(' (optional mean box)')
     exit()
 gtFile=sys.argv[1]
@@ -38,7 +38,7 @@ countIOU=0
 scale=1
-print 'eval on '+gtFile
+print('eval on '+gtFile)
 outFile_no = gtFile+'_fullno.res'
 outFile_mean = gtFile+'_fullmean.res'
@@ -120,7 +120,7 @@ with open(gtFile) as f:
         xp4=float(sys.argv[9])
         yp4=float(sys.argv[10])
     else:
-        print str(xp1)+' '+str(yp1)+' '+str(xp2)+' '+str(yp2)+' '+str(xp3)+' '+str(yp3)+' '+str(xp4)+' '+str(yp4)
+        print(str(xp1)+' '+str(yp1)+' '+str(xp2)+' '+str(yp2)+' '+str(xp3)+' '+str(yp3)+' '+str(xp4)+' '+str(yp4))
     cc=0
     for line in lines:
@@ -6,7 +6,7 @@ import re
 import xml.etree.ElementTree as ET
 import os
 import sys
-from StringIO import StringIO
+from io import StringIO
 import cv2
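Python 3 moved `StringIO` into the `io` module, but also split text from bytes: `io.StringIO` accepts only `str`, and binary payloads need `io.BytesIO` instead. A quick sketch of the distinction, not taken from this codebase:

```python
from io import BytesIO, StringIO

buf = StringIO()
buf.write("text only")               # str in, str out
assert buf.getvalue() == "text only"

raw = BytesIO()
raw.write(b"\x89PNG")                # bytes in; writing str here raises TypeError
assert raw.getvalue() == b"\x89PNG"
```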
@@ -156,7 +156,7 @@ def clicker(event, x, y, flags, param):
 def segmenter(imDir,imagePath,dispHeight):
     global image,tl,tr,bl,br,tm,bm,lastDidList,orig,abnorm
-    print 'opening '+imDir+imagePath
+    print('opening '+imDir+imagePath)
     orig = cv2.imread(imDir+imagePath)
     scale = orig.shape[0]/dispHeight
     orig = cv2.resize(orig,(0,0),None,1.0/scale,1.0/scale)
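One unchanged context line in this hunk deserves attention during the same migration: `scale = orig.shape[0]/dispHeight` divides two integers if `dispHeight` arrives as an int, which truncates under Python 2 but produces a float under Python 3, silently changing the computed display scale. A small sketch with made-up sizes; `//` restores the old truncating behavior if that was the intent:

```python
h, dispHeight = 3000, 800    # hypothetical image height and display height

scale_new = h / dispHeight   # Python 3 true division: 3.75
scale_old = h // dispHeight  # floor division: 3, the old Python 2 result

print(scale_new, scale_old)  # 3.75 3
```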
@@ -234,8 +234,8 @@ def segmenter(imDir,imagePath,dispHeight):
     #return newWords, newWordBoxes
 if len(sys.argv)<4:
-    print 'usage: '+sys.argv[0]+' imgDir imgList outAnn.csv [displayHeight]'
-    print 'output format: imageFile, tlx, tly, trx, try, brx, bry, blx, bly, type (,tmx, tmy, bmx, bmy)'
+    print('usage: '+sys.argv[0]+' imgDir imgList outAnn.csv [displayHeight]')
+    print('output format: imageFile, tlx, tly, trx, try, brx, bry, blx, bly, type (,tmx, tmy, bmx, bmy)')
     exit(0)
 inFile = sys.argv[2]
@@ -257,34 +257,34 @@ try:
     did = check.read().splitlines()
     didCount=len(did)
     check.close()
-    print 'found '+outFile+', appending. Note: this is sychronizing based on count alone, if '+inFile+' hash changed, but sure to align '+outFile
+    print('found '+outFile+', appending. Note: this synchronizes based on count alone; if '+inFile+' has changed, be sure to align '+outFile)
 except IOError:
-    print ('making new out:'+outFile)
+    print('making new out: '+outFile)
     out = open(outFile,'w')
-print ' =============================================== '
-print ' !!! INSTRUCTIONS !!!'
-print ' If the page does not contain a single page, or '
-print ' an open book, mark as abnormal with INSERT (e.g.'
-print ' two seperate pages).'
-print ' Click on the four corners to include all the '
-print ' full pages in the image (including two pages if '
-print ' fully present).'
-print ' If two pages a full present also mark page seam '
-print ' (middle-click).'
-print ' On placing points, prioritize the following to '
-print ' be included/discluded from the polygons in the '
-print ' following order:'
-print ' 1. Including the present page(s) content.'
-print ' 2. Discluding other pages and background.'
-print ' 3. Discluding the present page(s) boudary.'
-print ' 4. Including the present page(s) white area.'
+print(' =============================================== ')
+print(' !!! INSTRUCTIONS !!!')
+print(' If the image does not contain a single page or ')
+print(' an open book, mark it as abnormal with INSERT ')
+print(' (e.g. two separate pages).')
+print(' Click on the four corners to include all the ')
+print(' full pages in the image (including two pages if ')
+print(' fully present).')
+print(' If two pages are fully present, also mark the ')
+print(' page seam (middle-click).')
+print(' When placing points, prioritize what to include ')
+print(' in or exclude from the polygons in the ')
+print(' following order:')
+print(' 1. Including the present page(s) content.')
+print(' 2. Excluding other pages and background.')
+print(' 3. Excluding the present page(s) boundary.')
+print(' 4. Including the present page(s) white area.')
 #print ' book). If a corner is torn, click where it ought'
 #print ' to be, based on page edges. The page seem on an '
 #print ' open book is the page edge.'
-print ' Use ESC to exit or the latest page you finished '
-print ' will be lost.'
+print(' Use ESC to exit or the latest page you finished ')
+print(' will be lost.')
 #i=didCount
 i=0
@@ -299,7 +299,7 @@ doneOne=False
 while i<len(images) and not end:
     if i%10==9:
         showControls()
-    print(str(i+1)+' of '+str(len(images)))
+    print((str(i+1)+' of '+str(len(images))))
     if len(did)>i:
         line = did[i].strip().split(',')
@@ -322,7 +322,7 @@ while i<len(images) and not end:
     if undo and i>0 and doneOne:
         prevSeg=''
-        print(str(i)+' of '+str(len(images)))
+        print((str(i)+' of '+str(len(images))))
         prevSeg, undo, end = segmenter(imDir, images[i-1],dispHeight)
     else:
         out.write(prevSeg)
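The `print((...))` lines in the two hunks above are a 2to3 artifact: the statements were already written with parentheses, so the tool wraps them a second time. The double-wrapped form prints the same string, because the inner parentheses are only grouping, but the pattern turns dangerous the moment a comma is involved; a sketch:

```python
msg = "5 of 120"
print(msg)       # 5 of 120
print((msg))     # 5 of 120 -- inner parens are grouping: harmless, just noisy
print((msg, 7))  # ('5 of 120', 7) -- with a comma it prints a tuple instead
```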
@@ -130,7 +130,7 @@ def fill_holes(img):
     # Floodfill from point (0, 0)
     if img[0,0] != 0:
-        print "WARNING: Filling something you shouldn't"
+        print("WARNING: Filling something you shouldn't")
     cv2.floodFill(im_floodfill, mask, (0,0), 255);
     # Invert floodfilled image
@@ -5,7 +5,7 @@ import cv2
 import os
 if len(sys.argv) < 4:
-    print "python %s manifest.txt dataset_dir out_dir" % __file__
+    print("python %s manifest.txt dataset_dir out_dir" % __file__)
     exit()
 manifest_file = sys.argv[1]
@@ -17,11 +17,11 @@ try:
 except:
     pass
-file_list = map(lambda s: s.strip(), open(manifest_file, 'r').readlines())
+file_list = [s.strip() for s in open(manifest_file, 'r').readlines()]
 for line in file_list:
     tokens = line.split(',')
     f = tokens[0]
-    coords = map(float, tokens[1:9])
+    coords = list(map(float, tokens[1:9]))
     resolved = os.path.join(dataset_dir, f)
     im = cv2.imread(resolved, 0)
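The `map()` changes in this hunk (and the matching ones below) are the load-bearing part of this migration: Python 3's `map` returns a one-shot iterator instead of a list, so code that indexes the result, calls `len()` on it, or iterates it twice silently gets nothing the second time. A small sketch of the failure mode, using a made-up manifest line:

```python
tokens = "img.png,10.0,20.0,300.0,20.0,300.0,400.0,10.0,400.0".split(',')

lazy = map(float, tokens[1:9])
assert len(list(lazy)) == 8    # first pass consumes the iterator
assert list(lazy) == []        # second pass is empty under Python 3

coords = list(map(float, tokens[1:9]))         # materialize once,
assert coords[0] == 10.0 and len(coords) == 8  # then index/len/reuse freely
```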
@@ -34,11 +34,11 @@ def main(args):
     net = caffe.Net(args.net_file, args.weight_file, caffe.TEST)
     presolve(net, args)
-    file_list = map(lambda s: s.strip(), open(args.test_manifest, 'r').readlines())
+    file_list = [s.strip() for s in open(args.test_manifest, 'r').readlines()]
     fd = open(args.out_file, 'w')
     for idx, line in enumerate(file_list):
         if idx % args.print_count == 0:
-            print "Processed %d/%d Images" % (idx, len(file_list))
+            print("Processed %d/%d Images" % (idx, len(file_list)))
         tokens = line.split(',')
         f = tokens[0]
         resolved = os.path.join(args.dataset_dir, f)
@@ -94,7 +94,7 @@ def get_args():
         help="Print interval")
     args = parser.parse_args()
-    print args
+    print(args)
     return args
@@ -45,11 +45,11 @@ def main(args):
     net = caffe.Net(NET_FILE, WEIGHT_FILE, caffe.TEST)
     presolve(net, args)
-    file_list = map(lambda s: s.strip(), open(args.manifest, 'r').readlines())
+    file_list = [s.strip() for s in open(args.manifest, 'r').readlines()]
     fd = open(args.out_file, 'w')
     for idx, line in enumerate(file_list):
         if idx % args.print_count == 0:
-            print "Processed %d/%d Images" % (idx, len(file_list))
+            print("Processed %d/%d Images" % (idx, len(file_list)))
         tokens = line.split(',')
         f = tokens[0]
         resolved = os.path.join(args.image_dir, f)
@@ -97,7 +97,7 @@ def get_args():
         help="Print interval")
     args = parser.parse_args()
-    print args
+    print(args)
     return args
@@ -25,7 +25,7 @@ def dump_debug(out_dir, data, dump_images=False):
     pred_image_dir = os.path.join(out_dir, 'pred_images')
     safe_mkdir(pred_image_dir)
-    for idx in xrange(len(data['images'])):
+    for idx in range(len(data['images'])):
         fn = data['filenames'][idx]
         preds = data['predictions'][idx]
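`xrange` no longer exists in Python 3; `range` now behaves the way `xrange` did, yielding values lazily instead of allocating a list. These loop rewrites are therefore drop-in replacements with no memory penalty; a sketch:

```python
r = range(1000000)        # constant-size range object, not a million-element list
assert len(r) == 1000000  # still supports len() ...
assert r[-1] == 999999    # ... and indexing

assert sum(1 for _ in r) == 1000000  # iterates like xrange did, and unlike
assert sum(1 for _ in r) == 1000000  # map/zip it can be iterated repeatedly
```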
@@ -61,13 +61,13 @@ def prf(im1, im2):
 def update_predictions(net, data, args):
-    print "Starting Predictions"
+    print("Starting Predictions")
     total_iou = 0
     total_p = 0
     total_r = 0
     total_f = 0
-    for idx in xrange(len(data['images'])):
+    for idx in range(len(data['images'])):
         im = cv2.resize(data['images'][idx], (args.image_size, args.image_size))
         outputs = predict(net, im, 'out', args)
@@ -84,7 +84,7 @@ def update_predictions(net, data, args):
         if idx and idx % args.print_count == 0:
-            print "\tPredicted %d/%d" % (idx, len(data['images']))
+            print("\tPredicted %d/%d" % (idx, len(data['images'])))
     avg_iou = total_iou / len(data['images'])
     avg_p = total_p / len(data['images'])
     avg_r = total_r / len(data['images'])
@@ -94,11 +94,11 @@ def update_predictions(net, data, args):
 def load_data(manifest, _dir, size, color=False):
     dataset = collections.defaultdict(list)
-    file_list = map(lambda s: s.strip(), open(manifest, 'r').readlines())
+    file_list = [s.strip() for s in open(manifest, 'r').readlines()]
     for line in file_list:
         tokens = line.split(',')
         f = tokens[0]
-        coords = map(float, tokens[1:9])
+        coords = list(map(float, tokens[1:9]))
         dataset['filenames'].append(f)
@@ -120,7 +120,7 @@ def load_data(manifest, _dir, size, color=False):
 def preprocess_data(data, args):
-    for idx in xrange(len(data['images'])):
+    for idx in range(len(data['images'])):
         im = data['images'][idx]
         im = args.scale * (im - args.mean)
         data['images'][idx] = im
@@ -148,7 +148,7 @@ def presolve(net, args):
 def set_input_data(net, data, args):
-    for batch_idx in xrange(args.batch_size):
+    for batch_idx in range(args.batch_size):
         im_idx = random.randint(0, len(data['images']) - 1)
         im = data['images'][im_idx]
         gt = data['gt'][im_idx]
@@ -168,7 +168,7 @@ def main(args):
     preprocess_data(train_data, args)
     preprocess_data(val_data, args)
-    print "Done loading data"
+    print("Done loading data")
     solver = caffe.SGDSolver(args.solver_file)
     max_iters, snapshot_interval = get_solver_params(args.solver_file)
@@ -179,56 +179,56 @@ def main(args):
     train_r, val_r = [], []
     train_f, val_f = [], []
-    for iter_num in xrange(max_iters + 1):
+    for iter_num in range(max_iters + 1):
         set_input_data(solver.net, train_data, args)
         solver.step(1)
         if iter_num and iter_num % snapshot_interval == 0:
-            print "Validation Prediction: %d" % iter_num
+            print("Validation Prediction: %d" % iter_num)
             avg_iou, avg_p, avg_r, avg_f = update_predictions(solver.net, val_data, args)
             val_iou.append((iter_num, avg_iou))
             val_p.append((iter_num, avg_p))
             val_r.append((iter_num, avg_r))
             val_f.append((iter_num, avg_f))
             if args.debug_dir:
-                print "Dumping images"
+                print("Dumping images")
                 out_dir = os.path.join(args.debug_dir, 'val_%d' % iter_num)
                 dump_debug(out_dir, val_data)
         if iter_num >= args.min_interval and iter_num % args.gt_interval == 0:
-            print "Train Prediction: %d" % iter_num
+            print("Train Prediction: %d" % iter_num)
             avg_iou, avg_p, avg_r, avg_f = update_predictions(solver.net, train_data, args)
             train_iou.append((iter_num, avg_iou))
             train_p.append((iter_num, avg_p))
             train_r.append((iter_num, avg_r))
             train_f.append((iter_num, avg_f))
| print "Train IOU: ", train_iou | |||||
| print "Val IOU: ", val_iou | |||||
| print("Train IOU: ", train_iou) | |||||
| print() | |||||
| print("Val IOU: ", val_iou) | |||||
     if args.debug_dir:
-        plt.plot(*zip(*train_iou), label='train')
-        plt.plot(*zip(*val_iou), label='val')
+        plt.plot(*list(zip(*train_iou)), label='train')
+        plt.plot(*list(zip(*val_iou)), label='val')
         plt.legend()
         plt.savefig(os.path.join(args.debug_dir, 'iou.png'))
         plt.clf()
-        plt.plot(*zip(*train_p), label='train')
-        plt.plot(*zip(*val_p), label='val')
+        plt.plot(*list(zip(*train_p)), label='train')
+        plt.plot(*list(zip(*val_p)), label='val')
         plt.legend()
         plt.savefig(os.path.join(args.debug_dir, 'precision.png'))
         plt.clf()
-        plt.plot(*zip(*train_r), label='train')
-        plt.plot(*zip(*val_r), label='val')
+        plt.plot(*list(zip(*train_r)), label='train')
+        plt.plot(*list(zip(*val_r)), label='val')
         plt.legend()
         plt.savefig(os.path.join(args.debug_dir, 'recall.png'))
         plt.clf()
-        plt.plot(*zip(*train_f), label='train')
-        plt.plot(*zip(*val_f), label='val')
+        plt.plot(*list(zip(*train_f)), label='train')
+        plt.plot(*list(zip(*val_f)), label='val')
         plt.legend()
         plt.savefig(os.path.join(args.debug_dir, 'fmeasure.png'))
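`zip` is also a one-shot iterator in Python 3, which is why 2to3 conservatively wraps it in `list(...)` above. In these particular `plt.plot` calls the wrapper is arguably redundant, since `*`-unpacking drains the iterator anyway, but it would matter if any result were reused; a sketch of both points, with made-up (iteration, IOU) pairs:

```python
train_iou = [(1000, 0.71), (2000, 0.78), (3000, 0.81)]  # hypothetical values

xs, ys = zip(*train_iou)      # *-unpacking consumes the iterator just fine
assert xs == (1000, 2000, 3000)
assert ys == (0.71, 0.78, 0.81)

pairs = zip(*train_iou)
assert len(list(pairs)) == 2  # one pass works...
assert list(pairs) == []      # ...a second sees nothing; list(zip(...)) avoids this
```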
@@ -287,7 +287,7 @@ def get_args():
         help="How often to print progress")
     args = parser.parse_args()
-    print args
+    print(args)
     return args