From e693649ec0c19cc868f229c7091fcf7b1a6fd66e Mon Sep 17 00:00:00 2001
From: Liam McKay
Date: Wed, 18 Mar 2020 12:17:41 -0700
Subject: [PATCH 1/9] opencv image_analysis and python image transfer

---
 open_cv_img_analysis_test.py | 122 +++++++++++++++++++++++++++++++++++
 pregui_img_analysis_3.py     |  59 +++++++++++++++--
 run.sh                       |   4 +-
 transfer_imgs_1.py           | 120 ++++++++++++++++++++++++++++++++++
 4 files changed, 296 insertions(+), 9 deletions(-)
 create mode 100644 open_cv_img_analysis_test.py
 create mode 100644 transfer_imgs_1.py

diff --git a/open_cv_img_analysis_test.py b/open_cv_img_analysis_test.py
new file mode 100644
index 0000000..daa9128
--- /dev/null
+++ b/open_cv_img_analysis_test.py
@@ -0,0 +1,122 @@
+# import the necessary packages
+import numpy as np
+import argparse
+import cv2
+from skimage.util import img_as_ubyte
+from skimage import io
+from glob import glob
+import os
+import time
+
+title_window = 'Linear Blend'
+
+global trackbar_name1
+global trackbar_name2
+
+global thresh1_double_binary
+global thresh2_double_binary
+global thresh1_binary
+global thresh2_binary
+global i
+
+
+
+def on_trackbar_canny_1(val):
+    global thresh1_double_binary
+    thresh1_double_binary=val
+    redraw_edges()
+
+def on_trackbar_canny_2(val):
+    global thresh2_double_binary
+    thresh2_double_binary=val
+    redraw_edges()
+
+def on_trackbar_binary_1(val):
+    global thresh1_binary
+    thresh1_binary=val
+    redraw_edges()
+
+def on_trackbar_binary_2(val):
+    global thresh2_binary
+    thresh2_binary=val
+    redraw_edges()
+
+
+def redraw_edges(found_circles=0):
+    circles=[]
+    zstack = io.imread(overlayed_image[i])
+    output = img_as_ubyte(zstack[:, :, 0]).copy()
+    edged = image.copy()
+    # ret, thresh = cv2.threshold(image, thresh1_binary, thresh2_binary, cv2.THRESH_BINARY)
+    ret, thresh = cv2.threshold(image, thresh1_binary, thresh2_binary, cv2.THRESH_TOZERO_INV)
+    ret, thresh = cv2.threshold(thresh, 0, 255, cv2.THRESH_BINARY)
+    print("bw:",thresh1_binary, thresh2_binary)
+    edged = cv2.Canny(thresh, 101, 112)
+    print("edged:", thresh1_double_binary, thresh2_double_binary)
+    circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=3, minDist=400, minRadius=475, maxRadius=495)
+    if circles is not None:
+        found_circles += 1
+        # convert the (x, y) coordinates and radius of the circles to integers
+        circles = np.round(circles[0, :]).astype("int")
+        # loop over the (x, y) coordinates and radius of the circles
+        for (x, y, r) in circles:
+            # draw the circle in the output image, then draw a rectangle
+            # corresponding to the center of the circle
+            cv2.circle(output, (x, y), r, (255, 0, 0), 4)
+            cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
+    # else:
+    #     print(image_path)
+
+    cv2.imshow(title_window, np.hstack([output, edged, thresh]))
+    # show the output image
+    # global contours
+    # global hierarchy
+    # contours, hierarchy = cv2.findContours(edged,
+    #                                        cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
+
+# construct the argument parser and parse the arguments
+ap = argparse.ArgumentParser()
+ap.add_argument("-d", "--dir", required=True, help="Path to the dir of images overlay")
+args = vars(ap.parse_args())
+
+# load the image, clone it for output, and then convert it to grayscale
+found_circles = 0
+
+thresh1_binary = 255
+thresh1_double_binary = 255
+thresh2_binary = 255
+thresh2_double_binary = 255
+
+# overlay_files = list(sorted(glob(args["dir"])))
+
+dropimages = list(sorted(glob(os.path.join(args["dir"],"organizedWells","wellNum_*")), key=lambda s: int(s.split('wellNum_')[1])))
+overlayed_image = list(sorted(glob(os.path.join(args["dir"],"overlayed","*.jpg")))) + + +for wellNum, drop in enumerate(dropimages): + image_list = sorted(glob(os.path.join(drop,"*"))) + if len(image_list) == 3: + image_path = image_list[2] + else: + image_path = image_list[0] + + zstack = io.imread(image_path) + image = img_as_ubyte(zstack[:, :, 0]) # finds the top x-y pixels in the z-stack + print(image_path) + output = image.copy() + cv2.namedWindow(title_window) + + cv2.createTrackbar('thresh1 canny %d' % thresh1_double_binary, title_window, 0, thresh1_double_binary, on_trackbar_canny_1) + cv2.createTrackbar('thresh1 threshold %d' % thresh1_binary, title_window , 0, thresh1_binary, on_trackbar_binary_1) + cv2.createTrackbar('thresh2 canny %d' % thresh2_double_binary, title_window, 0, thresh2_double_binary, on_trackbar_canny_2) + cv2.createTrackbar('thresh2 threshold %d' % thresh2_binary, title_window , 0, thresh2_binary, on_trackbar_binary_2) + + i = wellNum + on_trackbar_canny_1(thresh1_double_binary) + on_trackbar_canny_2(thresh2_double_binary) + on_trackbar_binary_1(thresh1_binary) + on_trackbar_binary_2(thresh2_binary) + + cv2.waitKey(0) + exit() \ No newline at end of file diff --git a/pregui_img_analysis_3.py b/pregui_img_analysis_3.py index a721ce8..3523cdc 100644 --- a/pregui_img_analysis_3.py +++ b/pregui_img_analysis_3.py @@ -21,6 +21,7 @@ import time as ti import json from tqdm import tqdm +import cv2 #Function Definitions #Params @@ -66,16 +67,60 @@ def draw_circles_on_image_center(image,cx,cy,radii,colour,dotsize): image[circy,circx] = colour image[cy[0]-dotsize:cy[0]+dotsize,cx[0]-dotsize:cx[0]+dotsize] = colour -def save_canny_save_fit(path,sig,low,high,temp): #sig=3,low = 0, high = 30 +def save_canny_save_fit(path, temp): #sig=3,low = 0, high = 30 + # circles=[] + # zstack = io.imread(overlayed_image[i]) + # output = img_as_ubyte(zstack[:, :, 0]).copy() + # edged = image.copy() + # # ret, thresh = cv2.threshold(image, thresh1_binary, thresh2_binary, cv2.THRESH_BINARY) + # print("bw:",thresh1_binary, thresh2_binary) + # edged = cv2.Canny(thresh, thresh1_canny, thresh2_canny) + # print("edged:",thresh1_canny, thresh2_canny) + # circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=3, minDist=400, minRadius=475, maxRadius=495) + # if circles is not None: + # found_circles += 1 + # # convert the (x, y) coordinates and radius of the circles to integers + # circles = np.round(circles[0, :]).astype("int") + # # loop over the (x, y) coordinates and radius of the circles + # for (x, y, r) in circles: + # # draw the circle in the output image, then draw a rectangle + # # corresponding to the center of the circle + # cv2.circle(output, (x, y), r, (255, 0, 0), 4) + # cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1) + + accum_d, cx_d, cy_d, radii_d, cx_w, cy_w, radii_w = [0,0,0,0,0,0,0] # initialize variables + zstack = io.imread(path) image = img_as_ubyte(zstack[:,:,0]) # finds the top x-y pixels in the z-stack - edges = canny(image,sigma=sig,low_threshold=low,high_threshold=high) # edge detection - accum_d, cx_d, cy_d, radii_d = circular_hough_transform(135,145,2,edges,1) #edge detection on drop, params: r1,r2,stepsize,image,peaknum. 
Key params to change are r1&r2 for start & end radius - + + _, thresh = cv2.threshold(image, 38, 179, cv2.THRESH_TOZERO_INV) # tested march 17 2020 on plate 10818 + _, thresh = cv2.threshold(thresh, 0, 255, cv2.THRESH_BINARY) # tested march 17 2020 on plate 10818 + + edged = cv2.Canny(thresh, 101, 112) + drop_circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=3, minDist=600, minRadius=135, maxRadius=145) + + if drop_circles is not None: + drop_circles = np.round(drop_circles[0, :]).astype("int") + cx_d, cy_d, radii_d = drop_circles[0] # always take first circle found + + cv2.imshow('drop_img', edged) + if temp == '20C': ### This works well for echo RT plate type for rockmaker - accum_w, cx_w, cy_w, radii_w = circular_hough_transform(479,495,1,edges,1) #edge detection on well. Units for both are in pixels + circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=3, minDist=600, minRadius=475, maxRadius=495) else: ### This works well for echo 4C plate type for rockmaker - accum_w, cx_w, cy_w, radii_w = circular_hough_transform(459,475,1,edges,1) #edge detection on well. Units for both are in pixels + circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=3, minDist=600, minRadius=459, maxRadius=475) + + if circles is not None: + circles = np.round(circles[0, :]).astype("int") + cx_w, cy_w, radii_w = circles[0] # always take first circle found + + + # + # # edges = canny(image,sigma=sig,low_threshold=low,high_threshold=high) # edge detection + # if temp == '20C': ### This works well for echo RT plate type for rockmaker + # accum_w, cx_w, cy_w, radii_w = circular_hough_transform(479,495,1,edges,1) #edge detection on well. Units for both are in pixels + # else: ### This works well for echo 4C plate type for rockmaker + # accum_w, cx_w, cy_w, radii_w = circular_hough_transform(459,475,1,edges,1) #edge detection on well. 
Units for both are in pixels
 
     return cx_d,cy_d,radii_d, cx_w, cy_w, radii_w
 
 
@@ -137,7 +182,7 @@ def main():
     print("Finding pixel location of wells.")
     for im_idx, im_path in tqdm(sorted(dict_image_path_subwells.items())):
         if im_path:
-            cx_d,cy_d,radii_d, cx_w, cy_w, radii_w = save_canny_save_fit(im_path,3,0,50,plate_temperature) ### calling this function for 4c or 20c temp
+            cx_d,cy_d,radii_d, cx_w, cy_w, radii_w = save_canny_save_fit(im_path, plate_temperature) ### calling this function for 4c or 20c temp
             # cx_d,cy_d,radii_d, cx_w, cy_w, radii_w = [0,0,0,0,0,0] # time saving code (will output zeros)
             ### radii radius of the drop circle
             ### everything _w is for the well
diff --git a/run.sh b/run.sh
index 559c46e..5f0e3b1 100755
--- a/run.sh
+++ b/run.sh
@@ -10,6 +10,6 @@
 temperature=$2
 plateID=$3
 
 bash transfer_imgs_1.sh ${plateID} ${output_dir} ${temperature} 169.230.29.134
-python bounding_box_overlay_2.py ${output_dir}
-python pregui_img_analysis_3.py ${output_dir}
+python3 bounding_box_overlay_2.py ${output_dir}
+python3 pregui_img_analysis_3.py ${output_dir}
diff --git a/transfer_imgs_1.py b/transfer_imgs_1.py
new file mode 100644
index 0000000..034b9e5
--- /dev/null
+++ b/transfer_imgs_1.py
@@ -0,0 +1,120 @@
+### Testing for the correct number of input arguments
+### expecting arguments for plateID, output folder, plate temperature, and rock drive IP
+
+import argparse
+import os
+from glob import glob
+from os.path import join, exists
+import subprocess  # runs bash commands in python
+
+from tqdm import tqdm
+
+
+def argparse_reader():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('plateID', type=int,
+                        help='RockMaker Plate ID (4 or 5 digit code on barcode or 2nd number on RockMaker screen experiment file)')
+    parser.add_argument('output_plate_folder', type=str, help='Output folder for images and json')
+    parser.add_argument('plate_temp', type=int, help='Temperature of plate stored at')
+    parser.add_argument('rock_drive_IP_address', type=str, help="IP address of rock_drive storage (images)")
+    return parser
+
+
+def main():
+    args = argparse_reader().parse_args()
+
+    plateID = args.plateID
+    output_dir = args.output_plate_folder
+    temperature = args.plate_temp
+    rock_drive_ip = args.rock_drive_IP_address
+
+    if not exists(join(output_dir)):
+        os.mkdir(join(output_dir))
+
+    rsync_log = ["rsync", "-nmav", "--include", "*/", "--exclude", "*_th.jpg", "--include", "*.jpg", "-e", "ssh",
+                 "xray@" + rock_drive_ip + ":/volume1/RockMakerStorage/WellImages/" + str(plateID)[
+                                                                                      2:] + '/plateID_' + str(
+                     plateID) + '/', str(output_dir) + '/']
+
+    print(*rsync_log)
+    rsync = subprocess.run(rsync_log, capture_output=True)
+    rsync_out = rsync.stdout.decode("utf-8")
+    with open(join(output_dir, "log_rsync_init_file_list.txt"), 'w') as log_rsync_file_out:
+        log_rsync_file_out.write(rsync.stdout.decode("utf-8"))
+
+    # get all batches
+    batches = set()
+    with open(join(output_dir, "log_rsync_init_file_list.txt"), 'r', encoding='utf-8') as log_rsync_file:
+        for file in sorted(log_rsync_file.readlines()):
+            if 'batchID_' in file:
+                batches.add(int(
+                    file.split('batchID_')[1].split('wellNum')[0].replace("/", '').replace("\\", '').replace('n', '')))
+    batches = sorted(list(batches))
+    batchID_overview = batches[-1]  # last in list
+    batchID_drop = batches[0]  # first in list
+    print("batch IDs selected: ", *batches)
+
+    # ### Create a list of files to transfer in a text file for rsync to transfer using the --files-from option
+    # ### first cat: add drop images to file list
+    # ### second cat: add overview drop location files to file list
+    # ###
third cat: add overview extended focus images to file list + # #cat log_rsync_init_file_list.txt | grep "${batchID_drop}\|${batchID_overview}" | grep ".jpg" > files_to_transfer.txt + + # get unique image names + # Tries to grab all the images in the same batch first, because doing two batches is two different batches of images + # i think batch id are different times taking the pictures so if you try to match a extended focus to a drop image from two different imaging times(aka batches) it will not match exactly and could be the problem with the well not matching + image_names = set() + path_names_only_necessary = list() + for line in rsync_out.split('\n'): + if ".jpg" in line: + image_name = line.split('/')[-1] + if image_name not in image_names: + path_names_only_necessary.append(line) + image_names.add(line.split('/')[-1]) + print() + + drop_images_paths = [] + overview_drop_location_paths = [] + overview_extended_focus_paths = [] + # sort by image name + i = 1 + for path in list(sorted([line for line in path_names_only_necessary], key=lambda line: line.split('/')[-1])): + if "ef.jpg" in path and i == 1: + drop_images_paths.append(path) + i += 1 + elif "dl.jpg" in path: + overview_drop_location_paths.append(path) + elif "ef.jpg" in path and i == 2: + overview_extended_focus_paths.append(path) + i = 1 + + print("drop image:", len(drop_images_paths), " images \noverview ef:", len(overview_extended_focus_paths), + " images \noverview drop location:", len(overview_drop_location_paths), "images") + with open(join(output_dir, "files_to_transfer.txt"), 'w') as files_to_transfer: + for path in tqdm([*drop_images_paths, *overview_drop_location_paths, *overview_extended_focus_paths]): + files_to_transfer.write(path + "\n") + + rsync_download = [ + "rsync", "--files-from=" + output_dir + "/files_to_transfer.txt", "-e", "ssh", + "xray@" + rock_drive_ip + ":" + join("/volume1", + "RockMakerStorage", + "WellImages", + str(plateID)[2:], + 'plateID_' + str(plateID)), + join(str(output_dir), "")] + # print(*rsync_download) + # print(path) + with tqdm(total=len(glob(join(output_dir,"batchID_*","wellNum_*","profileID_*","*")))): + subprocess.run(rsync_download) + + +# ","cat ${output_dir}/log_rsync_init_file_list.txt | grep "${batchID_overview}" | grep "dl.jpg" >> ${output_dir}/files_to_transfer.txt +# cat ${output_dir}/log_rsync_init_file_list.txt | grep "${batchID_overview}" | grep "dl.jpg" | sed 's/dl/ef/' >> ${output_dir}/files_to_transfer.txt +# echo ${plateID} > ${output_dir}/plateid.txt +# echo ${temperature} > ${output_dir}/temperature.txt + +# ### transfer files using rsync +# echo "2nd password prompt will appear below, should only take ~30 sec to download all images on UCSFwpa" +# rsync --progress -mav --rsync-path "/bin/rsync" --files-from=${output_dir}/files_to_transfer.txt xray@${rock_drive_ip}:"/volume1/RockMakerStorage/WellImages/${plateID: -3}/plateID_${plateID}" ${output_dir}/ > ${output_dir}/log_rsync_transferred.txt + +main() From 8af8c69f9f62132238352ba82700610ab0e23b0e Mon Sep 17 00:00:00 2001 From: Liam McKay Date: Wed, 18 Mar 2020 12:33:17 -0700 Subject: [PATCH 2/9] ignore folders --- .gitignore | 1 + transfer_imgs_1.py | 42 ++++++++++++++++-------------------------- 2 files changed, 17 insertions(+), 26 deletions(-) diff --git a/.gitignore b/.gitignore index 73514cc..dc65fa5 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ *.pyc */ .DS_Store +/* \ No newline at end of file diff --git a/transfer_imgs_1.py b/transfer_imgs_1.py index 034b9e5..72a593b 100644 --- 
a/transfer_imgs_1.py +++ b/transfer_imgs_1.py @@ -55,14 +55,14 @@ def main(): print("batch IDs selected: ", *batches) # ### Create a list of files to transfer in a text file for rsync to transfer using the --files-from option - # ### first cat: add drop images to file list - # ### second cat: add overview drop location files to file list - # ### third cat: add overview extended focus images to file list - # #cat log_rsync_init_file_list.txt | grep "${batchID_drop}\|${batchID_overview}" | grep ".jpg" > files_to_transfer.txt # get unique image names # Tries to grab all the images in the same batch first, because doing two batches is two different batches of images - # i think batch id are different times taking the pictures so if you try to match a extended focus to a drop image from two different imaging times(aka batches) it will not match exactly and could be the problem with the well not matching + """ + i think batch id are different times taking the pictures so if you try to match a extended focus to a drop + image from two different imaging times(aka batches) it will not match exactly and could be the problem with + the well not matching + """ image_names = set() path_names_only_necessary = list() for line in rsync_out.split('\n'): @@ -95,26 +95,16 @@ def main(): files_to_transfer.write(path + "\n") rsync_download = [ - "rsync", "--files-from=" + output_dir + "/files_to_transfer.txt", "-e", "ssh", - "xray@" + rock_drive_ip + ":" + join("/volume1", - "RockMakerStorage", - "WellImages", - str(plateID)[2:], - 'plateID_' + str(plateID)), - join(str(output_dir), "")] - # print(*rsync_download) - # print(path) - with tqdm(total=len(glob(join(output_dir,"batchID_*","wellNum_*","profileID_*","*")))): - subprocess.run(rsync_download) - - -# ","cat ${output_dir}/log_rsync_init_file_list.txt | grep "${batchID_overview}" | grep "dl.jpg" >> ${output_dir}/files_to_transfer.txt -# cat ${output_dir}/log_rsync_init_file_list.txt | grep "${batchID_overview}" | grep "dl.jpg" | sed 's/dl/ef/' >> ${output_dir}/files_to_transfer.txt -# echo ${plateID} > ${output_dir}/plateid.txt -# echo ${temperature} > ${output_dir}/temperature.txt - -# ### transfer files using rsync -# echo "2nd password prompt will appear below, should only take ~30 sec to download all images on UCSFwpa" -# rsync --progress -mav --rsync-path "/bin/rsync" --files-from=${output_dir}/files_to_transfer.txt xray@${rock_drive_ip}:"/volume1/RockMakerStorage/WellImages/${plateID: -3}/plateID_${plateID}" ${output_dir}/ > ${output_dir}/log_rsync_transferred.txt + "rsync", "--info=progress2", "--files-from=" + output_dir + "/files_to_transfer.txt", "-e", "ssh", + "xray@" + rock_drive_ip + ":" + join("/volume1", + "RockMakerStorage", + "WellImages", + str(plateID)[2:], + 'plateID_' + str(plateID)), + join(str(output_dir), "") + ] + print(*rsync_download) + subprocess.run(rsync_download) + main() From 7984974d36d9a9c6bcfac65099ff57868ae9de5e Mon Sep 17 00:00:00 2001 From: Liam McKay Date: Wed, 18 Mar 2020 13:11:16 -0700 Subject: [PATCH 3/9] readme, formatting --- bounding_box_overlay_2.py | 339 +++++++++++++++++++------------------- readme.md | 19 ++- transfer_imgs_1.py | 4 +- 3 files changed, 194 insertions(+), 168 deletions(-) diff --git a/bounding_box_overlay_2.py b/bounding_box_overlay_2.py index f546006..1dc0a29 100644 --- a/bounding_box_overlay_2.py +++ b/bounding_box_overlay_2.py @@ -5,173 +5,182 @@ import os import organizeImages as oI from tqdm import tqdm -def overlay_images(overview_dl_fh, overview_ef_fh, zoom_fh,output_fh): - ### This is 
the main function of the script - - ### Get image with signal from red box in drop location image - box_im_open = red_box_subt(overview_dl_fh,4) - # box_im_open.show() - - ### Get the dimensions of the signal for the red box, by calculating the bounding box - box_dims=find_bounding_box(box_im_open,4) - - ### Overlay resized drop image onto the original overview image, using the bounding box dimensions - overlay_open = align_drop_to_box(overview_ef_fh,zoom_fh,box_dims) - - ### Save the overlayed image - overlay_open.save(output_fh,format="JPEG") - #return overlay_open - -def red_box_subt(w_box_fh,scaling_factor): - ### Funciton to read in overview image with red drop location box and convert to grey scale image of red box signal - ### Open images - #ef_open = Im.open(wo_box_fh) - dl_open = Im.open(w_box_fh) - - ### Check and get size - #assert ef_open.size == dl_open.size - dl_im_width, dl_im_height = dl_open.size - search_w = int(dl_im_width/scaling_factor) - search_h = int(dl_im_height/scaling_factor) - #print(search_w, search_h) - - ### Resize image for speed - dl_thumb = dl_open.resize((search_w,search_h),resample=Im.BICUBIC) - #ef_thumb = ef_open.resize((search_w,search_h),resample=Im.BICUBIC) - - ### Create new image object - new_dl_box_img = Im.new("L", (search_w,search_h)) - new_pixels = new_dl_box_img.load() - - ### Transform to custom greyscale and subtract - threshold_val = 50 - #print("time_to_start_loop: %s"%(time.time()-t0)) - for i in range(0, search_w): - for j in range(0, search_h): - ### Get pixel are recalculate signal for red box as greyscale - #pixel_ef = ef_thumb.getpixel((i, j)) - pixel_dl = dl_thumb.getpixel((i, j)) - - ### This is an old way of calculating the signal - #average_ef_bkgd = np.average([pixel_ef[1],pixel_ef[2]]) - #average_dl_bkgd = np.average([pixel_dl[1],pixel_dl[2]]) - #complex_r = np.round(max(pixel_dl[0]-average_dl_bkgd-(pixel_ef[0]-average_ef_bkgd),0)) - - complex_r = pixel_dl[0]-(pixel_dl[1]+pixel_dl[2])/2 - - ### This is an old way of calculating the signal - #complex_r = max(int(np.round((pixel_dl[0]-pixel_ef[0]+pixel_ef[1]-pixel_dl[1]+pixel_ef[2]-pixel_dl[2])/4.)),0) - #complex_r = min(255,np.round((pixel_dl[0]/255.)*(abs(pixel_ef[1]-pixel_dl[0])+abs(pixel_dl[1]-pixel_ef[1])+abs(pixel_dl[2]-pixel_ef[2])-50))) - - ### Threshold the new pixel value (complex_r) - if complex_r < threshold_val: - complex_r=0 - ### Set pixel value in new image - new_pixels[i,j] = (int(complex_r)) - #new_dl_box_img.show() - dl_open.close() - ### return new image with calculated pixel values - return new_dl_box_img - - -def find_bounding_box(box_open,scaling_factor): - ### Function finds the oringal size of the red box signal - x0,y0,x1,y1 = box_open.getbbox() - - return (x0*scaling_factor,y0*scaling_factor,scaling_factor*(x1-x0),scaling_factor*(y1-y0)) - -def align_drop_to_box(over_ef,drop_fh,box): - ### This funciton figures out the correct alignment of the drop to the overview and overlays the images - - ### open the image and compare aspect ratios of the drop location to the drop image - drop_open = Im.open(drop_fh) - #print("drop_f_size: {}".format(drop_open.size)) - drop_ratio = drop_open.size[0]/float(drop_open.size[1]) - ##=print("drop_ratio: {}".format(drop_ratio)) - #print("box: {}".format(box)) - box_ratio = box[2]/float(box[3]) - #print("box_ratio: {}".format(box_ratio)) - - ### The calcualtion for the alignment of the images is different depending on the ratio of the aspect ratios - if drop_ratio <= box_ratio: - ### X-axis based scaling - ### resize the 
drop image and calculate the alignemnt - resize_ratio = box[2] / float(drop_open.size[0]) - new_w = int(np.round(drop_open.size[0] * resize_ratio)) - new_h = int(np.round(drop_open.size[1] * resize_ratio)) - drop_resized=drop_open.resize((new_w,new_h)) - new_x = box[0] - new_y = int(np.round(((box[3] - new_h) / 2) + box[1])) - else: - ### Y-axis based scaling - ### resize the drop image and calculate the alignemnt - resize_ratio = box[3] / float(drop_open.size[1]) - new_w = int(np.round(drop_open.size[0] * resize_ratio)) - new_h = int(np.round(drop_open.size[1] * resize_ratio)) - drop_resized=drop_open.resize((new_w,new_h)) - new_x = int(np.round(((box[2] - new_w) / 2) + box[0])) - new_y = box[1] - - ### open overview image and do the overlay - overview_open=Im.open(over_ef) - overview_open.paste(drop_resized,box=(new_x,new_y)) - #overview_open.show() - return overview_open + + +def overlay_images(overview_dl_fh, overview_ef_fh, zoom_fh, output_fh): + ### This is the main function of the script + + ### Get image with signal from red box in drop location image + box_im_open = red_box_subt(overview_dl_fh, 4) + # box_im_open.show() + + ### Get the dimensions of the signal for the red box, by calculating the bounding box + box_dims = find_bounding_box(box_im_open, 4) + + ### Overlay resized drop image onto the original overview image, using the bounding box dimensions + overlay_open = align_drop_to_box(overview_ef_fh, zoom_fh, box_dims) + + ### Save the overlayed image + overlay_open.save(output_fh, format="JPEG") + # return overlay_open + + +def red_box_subt(w_box_fh, scaling_factor): + ### Funciton to read in overview image with red drop location box and convert to grey scale image of red box signal + ### Open images + # ef_open = Im.open(wo_box_fh) + dl_open = Im.open(w_box_fh) + + ### Check and get size + # assert ef_open.size == dl_open.size + dl_im_width, dl_im_height = dl_open.size + search_w = int(dl_im_width / scaling_factor) + search_h = int(dl_im_height / scaling_factor) + # print(search_w, search_h) + + ### Resize image for speed + dl_thumb = dl_open.resize((search_w, search_h), resample=Im.BICUBIC) + # ef_thumb = ef_open.resize((search_w,search_h),resample=Im.BICUBIC) + + ### Create new image object + new_dl_box_img = Im.new("L", (search_w, search_h)) + new_pixels = new_dl_box_img.load() + + ### Transform to custom greyscale and subtract + threshold_val = 50 + # print("time_to_start_loop: %s"%(time.time()-t0)) + for i in range(0, search_w): + for j in range(0, search_h): + ### Get pixel are recalculate signal for red box as greyscale + # pixel_ef = ef_thumb.getpixel((i, j)) + pixel_dl = dl_thumb.getpixel((i, j)) + + ### This is an old way of calculating the signal + # average_ef_bkgd = np.average([pixel_ef[1],pixel_ef[2]]) + # average_dl_bkgd = np.average([pixel_dl[1],pixel_dl[2]]) + # complex_r = np.round(max(pixel_dl[0]-average_dl_bkgd-(pixel_ef[0]-average_ef_bkgd),0)) + + complex_r = pixel_dl[0] - (pixel_dl[1] + pixel_dl[2]) / 2 + + ### This is an old way of calculating the signal + # complex_r = max(int(np.round((pixel_dl[0]-pixel_ef[0]+pixel_ef[1]-pixel_dl[1]+pixel_ef[2]-pixel_dl[2])/4.)),0) + # complex_r = min(255,np.round((pixel_dl[0]/255.)*(abs(pixel_ef[1]-pixel_dl[0])+abs(pixel_dl[1]-pixel_ef[1])+abs(pixel_dl[2]-pixel_ef[2])-50))) + + ### Threshold the new pixel value (complex_r) + if complex_r < threshold_val: + complex_r = 0 + ### Set pixel value in new image + new_pixels[i, j] = (int(complex_r)) + # new_dl_box_img.show() + dl_open.close() + ### return new image with 
calculated pixel values + return new_dl_box_img + + +def find_bounding_box(box_open, scaling_factor): + ### Function finds the oringal size of the red box signal + x0, y0, x1, y1 = box_open.getbbox() + + return (x0 * scaling_factor, y0 * scaling_factor, scaling_factor * (x1 - x0), scaling_factor * (y1 - y0)) + + +def align_drop_to_box(over_ef, drop_fh, box): + ### This funciton figures out the correct alignment of the drop to the overview and overlays the images + + ### open the image and compare aspect ratios of the drop location to the drop image + drop_open = Im.open(drop_fh) + # print("drop_f_size: {}".format(drop_open.size)) + drop_ratio = drop_open.size[0] / float(drop_open.size[1]) + ##=print("drop_ratio: {}".format(drop_ratio)) + # print("box: {}".format(box)) + box_ratio = box[2] / float(box[3]) + # print("box_ratio: {}".format(box_ratio)) + + ### The calcualtion for the alignment of the images is different depending on the ratio of the aspect ratios + if drop_ratio <= box_ratio: + ### X-axis based scaling + ### resize the drop image and calculate the alignemnt + resize_ratio = box[2] / float(drop_open.size[0]) + new_w = int(np.round(drop_open.size[0] * resize_ratio)) + new_h = int(np.round(drop_open.size[1] * resize_ratio)) + drop_resized = drop_open.resize((new_w, new_h)) + new_x = box[0] + new_y = int(np.round(((box[3] - new_h) / 2) + box[1])) + else: + ### Y-axis based scaling + ### resize the drop image and calculate the alignemnt + resize_ratio = box[3] / float(drop_open.size[1]) + new_w = int(np.round(drop_open.size[0] * resize_ratio)) + new_h = int(np.round(drop_open.size[1] * resize_ratio)) + drop_resized = drop_open.resize((new_w, new_h)) + new_x = int(np.round(((box[2] - new_w) / 2) + box[0])) + new_y = box[1] + + ### open overview image and do the overlay + overview_open = Im.open(over_ef) + overview_open.paste(drop_resized, box=(new_x, new_y)) + # overview_open.show() + return overview_open + def main(): - ### save the time to later see how long script took - t0=time.time() - # save usage to a string to save space - usage = "Usage: python bounding_box_overlay.py [parent image directory]" - try: # case 1: catches if there is no argv 1 - # not the greatest, but works - imageDirectory = sys.argv[1] - except IndexError: # if there is, leave the program - print(usage) - exit(1) - if not os.path.exists(imageDirectory): # case 2: directory doesn't exist - print("Error: cannot find directory "+imageDirectory) - else: - oI.organizeImages(imageDirectory) - if not os.path.exists(os.path.join(imageDirectory,"overlayed")): - os.mkdir(os.path.join(imageDirectory,"overlayed")) - print("overlaying images.\n") - completedWells = 0 - - # generate wells a01-h12 - letters=list('abcdefgh'.upper()) - numbers = ["{:02d}".format(n) for n in range(1,13)] - wells =[[c+n for n in numbers] for c in letters] - wellflat=[] - [[wellflat.append(wells[i][j]) for j in range(len(wells[i]))] for i in range(len(wells))] - - for i in tqdm(range(1,97)): - filepaths = sorted(glob.glob(os.path.join(imageDirectory,'organizedWells','wellNum_'+str(i),'*'))) # find all images - subwell_list = [z.split("/d")[1].split("_")[0] for z in filepaths] - if len(filepaths) % 3 == 0: - for j in range(0,len(filepaths),3): - output_fh = os.path.join(imageDirectory,"overlayed","well_"+str(i)+"_subwell"+subwell_list[0+j]+"_overlay.jpg") - zoom_ef_fh=filepaths[0+j] - dl_fh=filepaths[1+j] - ef_fh=filepaths[2+j] - try: - overlayed_img = overlay_images(dl_fh,ef_fh,zoom_ef_fh,output_fh) - completedWells += 1 - except TypeError: - 
print("\nwellNum_"+str(i)+' Overlay Error: Could not get bounding box from box_open.getbbox(). Image wasn\'t loaded') - except OSError: - print("\nwellNum_{0} File Error: Could not open image file".format(i)) - - else: - print("\nwellNum_"+str(i)+" does not have the 3 required images for bounding box overlay. Continuing...") - - ### show how many wells have an overlay - print("Completed images:",completedWells) - - ### print the time it took to run the script - print("time to run: %s"%(time.time()-t0)) - + ### save the time to later see how long script took + t0 = time.time() + # save usage to a string to save space + usage = "Usage: python bounding_box_overlay.py [parent image directory]" + imageDirectory='' + try: # case 1: catches if there is no argv 1 + # not the greatest, but works + imageDirectory = sys.argv[1] + except IndexError: # if there is, leave the program + print(usage) + exit(1) + if not os.path.exists(imageDirectory): # case 2: directory doesn't exist + print("Error: cannot find directory " + imageDirectory) + else: + oI.organizeImages(imageDirectory) + if not os.path.exists(os.path.join(imageDirectory, "overlayed")): + os.mkdir(os.path.join(imageDirectory, "overlayed")) + print("overlaying images.\n") + completedWells = 0 + + # generate wells a01-h12 + letters = list('abcdefgh'.upper()) + numbers = ["{:02d}".format(n) for n in range(1, 13)] + wells = [[c + n for n in numbers] for c in letters] + wellflat = [] + [[wellflat.append(wells[i][j]) for j in range(len(wells[i]))] for i in range(len(wells))] + + for i in tqdm(range(1, 97)): + filepaths = sorted( + glob.glob(os.path.join(imageDirectory, 'organizedWells', 'wellNum_' + str(i), '*'))) # find all images + subwell_list = [z.split("/d")[1].split("_")[0] for z in filepaths] + if len(filepaths) % 3 == 0: + for j in range(0, len(filepaths), 3): + output_fh = os.path.join(imageDirectory, "overlayed", + "well_" + str(i) + "_subwell" + subwell_list[0 + j] + "_overlay.jpg") + zoom_ef_fh = filepaths[0 + j] + dl_fh = filepaths[1 + j] + ef_fh = filepaths[2 + j] + try: + overlayed_img = overlay_images(dl_fh, ef_fh, zoom_ef_fh, output_fh) + completedWells += 1 + except TypeError: + print("\nwellNum_" + str( + i) + ' Overlay Error: Could not get bounding box from box_open.getbbox(). Image wasn\'t loaded') + except OSError: + print("\nwellNum_{0} File Error: Could not open image file".format(i)) + + else: + print("\nwellNum_" + str( + i) + " does not have the 3 required images for bounding box overlay. Continuing...") + + ### show how many wells have an overlay + print("Completed images:", completedWells) + + ### print the time it took to run the script + print("time to run: %s" % (time.time() - t0)) if __name__ == "__main__": - main() + main() diff --git a/readme.md b/readme.md index 6ee0d34..50721e7 100644 --- a/readme.md +++ b/readme.md @@ -1,5 +1,22 @@ # Echo_pregui_script -Usage: `bash run.sh [output directory name] [plate temperature: 4c/20c] [plate_id number] ` +`usage: echo_pregui_run.py [-h] -ids PLATEID [PLATEID ...] 
-dir
+                              OUTPUT_PLATE_FOLDER -temp PLATE_TEMP`
+
+
+## Help:
+ - `-h, --help`
+
+   show this help message and exit
+ - `-ids PLATEID [PLATEID ...], --plateID PLATEID [PLATEID ...]`
+
+   RockMaker Plate ID (4 or 5 digit code on barcode or
+   2nd number on RockMaker screen experiment file)
+ - `-dir OUTPUT_PLATE_FOLDER, --output_plate_folder OUTPUT_PLATE_FOLDER`
+
+   Output folder for images and json
+ - `-temp PLATE_TEMP, --plate_temp PLATE_TEMP`
+
+   Temperature at which the plate is stored
 
 #### SAMPLE OUTPUT
diff --git a/transfer_imgs_1.py b/transfer_imgs_1.py
index 72a593b..d760331 100644
--- a/transfer_imgs_1.py
+++ b/transfer_imgs_1.py
@@ -106,5 +106,5 @@ def main():
     print(*rsync_download)
     subprocess.run(rsync_download)
 
-
-main()
+if __name__ == '__main__':
+    main()

From a9af858684d42ffc2a550f6d05498769ef022734 Mon Sep 17 00:00:00 2001
From: Liam McKay
Date: Tue, 24 Mar 2020 08:57:29 -0700
Subject: [PATCH 4/9] all python, new circle finding algorithm

---
 .gitignore                |   3 +-
 Plate.py                  |  80 ++++++++++
 bounding_box_overlay_2.py |  39 ++---
 echo_pregui_run.py        |  38 +++++
 organizeImages.py         |  86 ++++++++---
 pregui_img_analysis_3.py  | 297 +++++++++++++++++++++++---------------
 transfer_imgs_1.py        | 172 ++++++++++++----------
 7 files changed, 479 insertions(+), 236 deletions(-)
 create mode 100644 Plate.py
 create mode 100644 echo_pregui_run.py

diff --git a/.gitignore b/.gitignore
index dc65fa5..e9dad5c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,5 @@
 *.pyc
 */
 .DS_Store
-/*
\ No newline at end of file
+/*
+images/
\ No newline at end of file
diff --git a/Plate.py b/Plate.py
new file mode 100644
index 0000000..3518ce9
--- /dev/null
+++ b/Plate.py
@@ -0,0 +1,80 @@
+import numpy as np
+import pandas as pd
+import string
+import itertools
+import math
+
+
+class Plate():
+    def __init__(self, r, c, subwell_num):
+        """
+        General purpose plate lookup table. Converts a single number (Index) into a A01_1 format.
+        Index is ordered left-right
+
+        @param r: amount of rows
+        @param c: amount of columns
+        @param subwell_num: amount of subwells
+        """
+        self.row_num = r
+        self.col_num = c
+        self.subwell_num = subwell_num
+        self.leading_zeros = int(math.log10(round(self.col_num, 0))) + 1
+        self.zero_format = "{:0" + str(self.leading_zeros) + "d}"
+        self.matrix = self.create_plate_matrix()
+
+    def __str__(self):
+        """
+        Prints full table Pandas DataFrame as string
+        @return: Pandas DataFrame as string
+        """
+        return str(self.matrix)
+
+    def get_number_to_well_id(self, index):
+        """
+        Get well id (e.g. A01_1) from an Index (e.g.
1) + @param index: Index (counting left-right what number well is it) + @return: well_id + """ + well_location = self.matrix[self.matrix == index].dropna(axis='index', how='all').dropna(axis='columns', + how='all') + if well_location.size > 0: + col, subcol = well_location.columns.values[0] + row = well_location.index.values[0] + else: + raise LookupError("%d doesn't exist in the plate" % index) + return row+self.zero_format.format(int(col))+"_"+str(subcol) + + def create_plate_matrix(self): + cols = list(str(n) for n in list(range(1, self.col_num + 1))) + if self.row_num > len(string.ascii_letters): + rows = itertools.product(string.ascii_uppercase, repeat=self.row_num // 27 + 1) + rows = ["".join(pair) for pair in rows] + else: + rows = string.ascii_uppercase[:self.row_num] + + col_list = [[n] * self.subwell_num for n in cols] + col_list_flat = [] + for l in col_list: + for n in l: + col_list_flat.append(n) + + multi_index_col_well_subwell_tuples = list( + zip(col_list_flat, [i for _ in range(self.col_num) for i in range(1, self.subwell_num + 1)])) + col_multindex = pd.MultiIndex.from_tuples(multi_index_col_well_subwell_tuples, names=['well', 'subwell']) + well_ids = [[c + self.zero_format.format(int(n)) for n in cols] for c in rows] + well_ids_flat = [] + i = 1 + for well in well_ids: + well_subwell_one_letter = [] + for _ in well: + for subwell in range(1, self.subwell_num + 1): + well_subwell_one_letter.append(i) + i += 1 + well_ids_flat.append(well_subwell_one_letter) + + return pd.DataFrame(well_ids_flat, + index=[c for c in rows], + columns=col_multindex) + + diff --git a/bounding_box_overlay_2.py b/bounding_box_overlay_2.py index 1dc0a29..f4161d2 100644 --- a/bounding_box_overlay_2.py +++ b/bounding_box_overlay_2.py @@ -1,3 +1,5 @@ +import subprocess + from PIL import Image as Im import numpy as np import time, sys @@ -122,29 +124,16 @@ def align_drop_to_box(over_ef, drop_fh, box): # overview_open.show() return overview_open - -def main(): - ### save the time to later see how long script took - t0 = time.time() - # save usage to a string to save space - usage = "Usage: python bounding_box_overlay.py [parent image directory]" - imageDirectory='' - try: # case 1: catches if there is no argv 1 - # not the greatest, but works - imageDirectory = sys.argv[1] - except IndexError: # if there is, leave the program - print(usage) - exit(1) +def run(imageDirectory): if not os.path.exists(imageDirectory): # case 2: directory doesn't exist print("Error: cannot find directory " + imageDirectory) else: - oI.organizeImages(imageDirectory) if not os.path.exists(os.path.join(imageDirectory, "overlayed")): os.mkdir(os.path.join(imageDirectory, "overlayed")) print("overlaying images.\n") completedWells = 0 - # generate wells a01-h12 + # generate wells a01-h12 - only works on 96 well plates letters = list('abcdefgh'.upper()) numbers = ["{:02d}".format(n) for n in range(1, 13)] wells = [[c + n for n in numbers] for c in letters] @@ -162,6 +151,7 @@ def main(): zoom_ef_fh = filepaths[0 + j] dl_fh = filepaths[1 + j] ef_fh = filepaths[2 + j] + subprocess.run(['cp', ef_fh, os.path.join(imageDirectory,"overview",ef_fh)]) try: overlayed_img = overlay_images(dl_fh, ef_fh, zoom_ef_fh, output_fh) completedWells += 1 @@ -178,8 +168,23 @@ def main(): ### show how many wells have an overlay print("Completed images:", completedWells) - ### print the time it took to run the script - print("time to run: %s" % (time.time() - t0)) + +def main(): + ### save the time to later see how long script took + t0 = time.time() + 
# save usage to a string to save space + usage = "Usage: python bounding_box_overlay.py [parent image directory]" + imageDirectory='' + try: # case 1: catches if there is no argv 1 + # not the greatest, but works + imageDirectory = sys.argv[1] + except IndexError: # if there is, leave the program + print(usage) + exit(1) + + run(imageDirectory) + ### print the time it took to run the script + print("time to run: %s" % (time.time() - t0)) if __name__ == "__main__": diff --git a/echo_pregui_run.py b/echo_pregui_run.py new file mode 100644 index 0000000..268d9a3 --- /dev/null +++ b/echo_pregui_run.py @@ -0,0 +1,38 @@ +import argparse + +from transfer_imgs_1 import run as transfer_imgs +from bounding_box_overlay_2 import run as bounding_box_overlay +from pregui_img_analysis_3 import get_dict_image_to_well, createJson +from organizeImages import organizeImages, rename_overview_images_well_id + +def argparse_reader_main(): + parser = argparse.ArgumentParser() + parser.add_argument('-ids', '--plateID', nargs='+', type=int, + help='RockMaker Plate ID (4 or 5 digit code on barcode or 2nd number on RockMaker screen experiment file', + required=True) + parser.add_argument('-dir', '--output_plate_folder', type=str, help='Output folder for images and json', + required=True) + parser.add_argument('-temp', '--plate_temp', type=int, help='Temperature of plate stored at', required=True) + return parser + + +def main(): + args = argparse_reader_main().parse_args() + + plateID_list = args.plateID + output_dir = args.output_plate_folder + temperature = args.plate_temp + rock_drive_ip = "169.230.29.134" + + for plateID in plateID_list: + transfer_imgs(plateID, output_dir, rock_drive_ip) + organizeImages(output_dir) + rename_overview_images_well_id(output_dir) + bounding_box_overlay(output_dir) + img_well_dict = get_dict_image_to_well(output_dir) + createJson(plate_dir=output_dir, plate_id=plateID, plate_temperature=temperature, + dict_image_path_subwells=img_well_dict) + + +if __name__ == '__main__': + main() diff --git a/organizeImages.py b/organizeImages.py index e494135..e689a4b 100644 --- a/organizeImages.py +++ b/organizeImages.py @@ -1,29 +1,69 @@ import os import glob import shutil +import subprocess import sys +from Plate import Plate + + +def rename_overview_images_well_id(imageDirectory): + # prereq: have run delete drop images first + + image_path_list = sorted(glob.glob(os.path.join(imageDirectory, "overview", "*", "*")), key=lambda x: x[-18:]) + + # if (len(image_path_list)) % 96 == 0: # if divisible by 96, make a 96 well plate + # else: + # raise NotImplementedError("This script doesn't support other than 96 well plates") + + for path in image_path_list: + subwell = path[path[-18:].find('d') + 1] + wellNum = int(path.split("wellNum_")[1].split(os.path.sep)[0]) + well_id, _ = p.get_number_to_well_id(wellNum).split("_") + subprocess.run( + ["mv", path, os.path.join(imageDirectory, "overview", "wellNum_%d" % wellNum, well_id + "_" + subwell)]) + + def organizeImages(imageDirectory): - print("organizing images.") - try: - if os.path.exists(os.path.join(".",imageDirectory)): - newDirectory = os.path.join(imageDirectory,"organizedWells") - try: - os.mkdir(newDirectory) - except: - print(newDirectory, 'already exists. 
continuing') - for path in glob.glob(os.path.join(imageDirectory,"batchID*","*","profileID_1","*.jpg")): - a = path.split(os.path.join("a","").replace("a","")) - well_num = "".join([(a[x] if c==0 else '') for x,c in enumerate([s.find('well') for s in a])]) # just gets the wellNum_## folder name - if not os.path.exists(os.path.join(newDirectory,well_num)): - os.mkdir(os.path.join(newDirectory,well_num)) - os.system("cp " + path + " " + os.path.join(newDirectory,well_num,a[-1])) - else: - print("Error: cannot find image directory", imageDirectory) - exit(1) - except IndexError: - print("Usage: python organizeImages.py [parent image directory]") - exit(1) - -if __name__ == "__main__": - main() \ No newline at end of file + print("organizing images.") + try: + if os.path.exists(os.path.join(".", imageDirectory)): + newDirectory = os.path.join(imageDirectory, "organizedWells") + try: + os.mkdir(newDirectory) + except: + print(newDirectory, 'already exists. continuing') + + overview_img_dir = os.path.join(imageDirectory, "overview") + try: + os.mkdir(overview_img_dir) + except: + print(overview_img_dir, 'already exists. continuing') + + for path in sorted(glob.glob(os.path.join(imageDirectory, "batchID*", "*", "profileID_1", "*.jpg")), + key=lambda x: x[-18]): + a = path.split(os.path.sep) + well_num = "".join([(a[x] if c == 0 else '') for x, c in + enumerate([s.find('well') for s in a])]) # just gets the wellNum_## folder name + if not os.path.exists(os.path.join(newDirectory, well_num)): + os.mkdir(os.path.join(newDirectory, well_num)) + os.system("cp " + path + " " + os.path.join(newDirectory, well_num, a[-1])) + + if not os.path.exists(os.path.join(overview_img_dir, well_num)): + os.mkdir(os.path.join(overview_img_dir, well_num)) + if "ef.jpg" in path: + well_id = p.get_number_to_well_id(int(well_num.split("_")[1])).split("_")[0] + os.system("cp " + path + " " + os.path.join(overview_img_dir, well_num, + well_id + "_" + a[-1][1] + ".jpg")) + # overwrites the *ef.jpg copied, all files saved as well id - last img is saved + + else: + print("Error: cannot find image directory", imageDirectory) + exit(1) + + except IndexError: + print("Usage: python organizeImages.py [parent image directory]") + exit(1) + + +p = Plate(r=8, c=12, subwell_num=1) # don't need to worry about subwell (that's specified in img path) diff --git a/pregui_img_analysis_3.py b/pregui_img_analysis_3.py index 3523cdc..7d76ab2 100644 --- a/pregui_img_analysis_3.py +++ b/pregui_img_analysis_3.py @@ -1,8 +1,9 @@ -#python3.7.0 -#Project/dropk#/well/profileID/name_ef.jpg -#run this command above the echo project directory +# python3.7.0 +# Project/dropk#/well/profileID/name_ef.jpg +# run this command above the echo project directory import glob +import string import sys import pickle as pkl import matplotlib.pyplot as plt @@ -22,52 +23,70 @@ import json from tqdm import tqdm import cv2 - -#Function Definitions -#Params - #r1 lower radius bound - #r2 upper radius bound - #search step size between radii - #edge: a binary image which is the output of canny edge detection - #peak_num the # of circles searched for in hough space - -#Output: - #accums - #cx : circle center x - #cy : circle center y - #radii circle radii -def circular_hough_transform(r1,r2,step,edge, peak_num): # do we need to do this? 
difference in a radium in circle matters to the conversion rate +import argparse + + +def argparse_reader(): + parser = argparse.ArgumentParser() + parser.add_argument('plateID', type=int, + help='RockMaker Plate ID (4 or 5 digit code on barcode or 2nd number on RockMaker screen experiment file') + parser.add_argument('output_plate_folder', type=str, help='Output folder for images and json') + parser.add_argument('plate_temp', type=int, help='Temperature of plate stored at') + parser.add_argument('-json', '--generateJson', action='store_true', + help="JSON Output re-run well pixel finding algorithm") + return parser + + +# Function Definitions +# Params +# r1 lower radius bound +# r2 upper radius bound +# search step size between radii +# edge: a binary image which is the output of canny edge detection +# peak_num the # of circles searched for in hough space + +# Output: +# accums +# cx : circle center x +# cy : circle center y +# radii circle radii +def circular_hough_transform(r1, r2, step, edge, + peak_num): # do we need to do this? difference in a radium in circle matters to the conversion rate # be able to distinguish up to one pixel of the circle - hough_radii = np.arange(r1,r2,step) - hough_res = hough_circle(edge,hough_radii) - accums, cx, cy, radii = hough_circle_peaks(hough_res,hough_radii,total_num_peaks=peak_num) + hough_radii = np.arange(r1, r2, step) + hough_res = hough_circle(edge, hough_radii) + accums, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii, total_num_peaks=peak_num) return accums, cx, cy, radii -def single_radii_circular_hough_transform(r1,edge): - hough_res = hough_circle(edge,r1) - accums, cx, cy, radii = hough_circle_peaks(hough_res,r1,total_num_peaks=1) + +def single_radii_circular_hough_transform(r1, edge): + hough_res = hough_circle(edge, r1) + accums, cx, cy, radii = hough_circle_peaks(hough_res, r1, total_num_peaks=1) return accums, cx, cy, radii -#Params - #This functions draws a circle of radius r centered on x,y on an image. It draws the center of the circle - #image: input greyscale numpy 2darray - #cx: int center x - #cy: int center y - #color: single 8-bit channel int. i.e 0-255 -def draw_circles_on_image(image,cx,cy,radii,colour,dotsize): - for center_y, center_x, radius in zip(cy,cx,radii): - circy, circx = circle_perimeter(center_y,center_x,radius) - image[circy,circx] = colour - image[cy[0]-dotsize:cy[0]+dotsize,cx[0]-dotsize:cx[0]+dotsize] = colour - -def draw_circles_on_image_center(image,cx,cy,radii,colour,dotsize): - for center_y, center_x, radius in zip(cy,cx,radii): - circy, circx = circle_perimeter(center_y,center_x,radius) - image[circy,circx] = colour - image[cy[0]-dotsize:cy[0]+dotsize,cx[0]-dotsize:cx[0]+dotsize] = colour - -def save_canny_save_fit(path, temp): #sig=3,low = 0, high = 30 + +# Params +# This functions draws a circle of radius r centered on x,y on an image. It draws the center of the circle +# image: input greyscale numpy 2darray +# cx: int center x +# cy: int center y +# color: single 8-bit channel int. 
i.e 0-255 +def draw_circles_on_image(image, cx, cy, radii, colour, dotsize): + for center_y, center_x, radius in zip(cy, cx, radii): + circy, circx = circle_perimeter(center_y, center_x, radius) + image[circy, circx] = colour + image[cy[0] - dotsize:cy[0] + dotsize, cx[0] - dotsize:cx[0] + dotsize] = colour + + +def draw_circles_on_image_center(image, cx, cy, radii, colour, dotsize): + for center_y, center_x, radius in zip(cy, cx, radii): + circy, circx = circle_perimeter(center_y, center_x, radius) + image[circy, circx] = colour + image[cy[0] - dotsize:cy[0] + dotsize, cx[0] - dotsize:cx[0] + dotsize] = colour + + +def save_canny_save_fit(path, temp): # sig=3,low = 0, high = 30 # circles=[] # zstack = io.imread(overlayed_image[i]) # output = img_as_ubyte(zstack[:, :, 0]).copy() @@ -88,119 +107,130 @@ def save_canny_save_fit(path, temp): #sig=3,low = 0, high = 30 # cv2.circle(output, (x, y), r, (255, 0, 0), 4) # cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1) - accum_d, cx_d, cy_d, radii_d, cx_w, cy_w, radii_w = [0,0,0,0,0,0,0] # initialize variables + accum_d, cx_d, cy_d, radii_d, cx_w, cy_w, radii_w = [0, 0, 0, 0, 0, 0, 0] # initialize variables - zstack = io.imread(path) - image = img_as_ubyte(zstack[:,:,0]) # finds the top x-y pixels in the z-stack + zstack = io.imread(path) + image = img_as_ubyte(zstack[:, :, 0]) # finds the top x-y pixels in the z-stack + # image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) + _, thresh = cv2.threshold(image, 100, 255, cv2.THRESH_BINARY) # tested march 17 2020 on plate 10818 - _, thresh = cv2.threshold(image, 38, 179, cv2.THRESH_TOZERO_INV) # tested march 17 2020 on plate 10818 - _, thresh = cv2.threshold(thresh, 0, 255, cv2.THRESH_BINARY) # tested march 17 2020 on plate 10818 + # _, thresh = cv2.threshold(image, 0, 50, cv2.THRESH_BINARY) # tested march 17 2020 on plate 10818 edged = cv2.Canny(thresh, 101, 112) - drop_circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=3, minDist=600, minRadius=135, maxRadius=145) + # drop_circles = cv2.HoughCircles(image=thresh, method=cv2.HOUGH_GRADIENT, dp=3, minDist=100, minRadius=135, + # maxRadius=145) - if drop_circles is not None: - drop_circles = np.round(drop_circles[0, :]).astype("int") - cx_d, cy_d, radii_d = drop_circles[0] # always take first circle found + print(temp) - cv2.imshow('drop_img', edged) - - if temp == '20C': ### This works well for echo RT plate type for rockmaker - circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=3, minDist=600, minRadius=475, maxRadius=495) - else: ### This works well for echo 4C plate type for rockmaker - circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=3, minDist=600, minRadius=459, maxRadius=475) + if int(temp) == 20: ### This works well for echo RT plate type for rockmaker + circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=1, minDist=0, minRadius=475, + maxRadius=495) + else: ### This works well for echo 4C plate type for rockmaker + circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=1, minDist=0, minRadius=459, + maxRadius=475) if circles is not None: circles = np.round(circles[0, :]).astype("int") cx_w, cy_w, radii_w = circles[0] # always take first circle found + print(circles) + for (x, y, r) in circles: + # draw the circle in the output image, then draw a rectangle + # corresponding to the center of the circle + cv2.circle(thresh, (x, y), r, (255, 0, 0), 4) + cv2.rectangle(thresh, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1) + 
cv2.imshow("output", thresh) + cv2.waitKey(0) - - # # # edges = canny(image,sigma=sig,low_threshold=low,high_threshold=high) # edge detection # if temp == '20C': ### This works well for echo RT plate type for rockmaker # accum_w, cx_w, cy_w, radii_w = circular_hough_transform(479,495,1,edges,1) #edge detection on well. Units for both are in pixels # else: ### This works well for echo 4C plate type for rockmaker # accum_w, cx_w, cy_w, radii_w = circular_hough_transform(459,475,1,edges,1) #edge detection on well. Units for both are in pixels - return cx_d,cy_d,radii_d, cx_w, cy_w, radii_w - - -def main(): + return cx_d, cy_d, radii_d, cx_w, cy_w, radii_w - t0=ti.time() ### save time to know how long this script takes (this one takes longer than step 2) - - if len(sys.argv) != 2: - print('Usage: python pregui_analysis.py [plate_dir]') - print('Aborting script') - sys.exit() +def get_dict_image_to_well(plate_dir): current_directory = os.getcwd() - plate_dir = sys.argv[1] - - image_list=glob.glob(os.path.join(current_directory,plate_dir,"overlayed","*")) - print(os.path.join(current_directory,plate_dir,"overlayed","*")) - image_list.sort(key=lambda x: (int(x.split('well_')[1].split('_overlay')[0].split("_subwell")[0]))) + image_list = glob.glob(os.path.join(current_directory, plate_dir, "overlayed", "*")) + overview_list = list(sorted(glob.glob(os.path.join(current_directory,plate_dir, "overview", "*", "*")),key=lambda x: x)) + print(os.path.join(current_directory, plate_dir, "overlayed", "*")) + well_overlay_subwell_format = True + try: + image_list = sorted(image_list,key=lambda x: (int(x.split('well_')[1].split('_overlay')[0].split("_subwell")[0]))) + except IndexError: + if not image_list: + well_overlay_subwell_format = False + image_list = sorted(image_list,key=lambda x: x) dict_image_path_subwells = {} - for p in image_list: - well_subwell=p.split('well_')[1].split('_overlay')[0].replace("subwell","") - well,subwell = well_subwell.split("_") - well = "{:02d}".format(int(well)) - well_subwell = well+"_"+subwell - dict_image_path_subwells[well_subwell] = p - - # Try to find the plateid.txt file - try: - with open(os.path.join(current_directory,plate_dir,"plateid.txt"), 'r') as plate_id_file: - plate_id = int(plate_id_file.read().rstrip()) - except: - print("File Error: plateid.txt not found. JSON will not have plate_id key") - plate_id = "UNKNOWN_ID_at_"+plate_dir - + for p,o in zip(image_list,overview_list): + if well_overlay_subwell_format: + well_subwell = p.split('well_')[1].split('_overlay')[0].replace("subwell", "") + else: + well_subwell = p.split("overlayed")[1].replace(".jpg", "") + well, subwell = well_subwell.split("_") - try: - with open(os.path.join(current_directory,plate_dir,"temperature.txt"), 'r') as plate_id_file: - plate_temperature = plate_id_file.read().rstrip() - except: - print("File Error: temperature.txt not found. 
JSON will not have temperature defined") - plate_temperature = "UNKNOWN" + if well_overlay_subwell_format: + well = "{:02d}".format(int(well)) + well_subwell = well + "_" + subwell + dict_image_path_subwells[well_subwell.replace(os.path.sep,"")] = p,o + return dict_image_path_subwells - plateKeys = ["date_time","temperature"] - wellKeys = ["image_path","well_id","well_radius","well_x","well_y","drop_radius","drop_x","drop_y","offset_x","offset_y"] + +def createJson(plate_dir: str, plate_id: int, plate_temperature: int, dict_image_path_subwells: dict) -> None: + current_directory = os.getcwd() + + plateKeys = ["date_time", "temperature"] + wellKeys = ["image_path", "well_id", "well_radius", "well_x", "well_y", "drop_radius", "drop_x", "drop_y", + "offset_x", "offset_y"] ### Create json output dictionary a = {} a[plate_id] = {} - a[plate_id] = {key:0 for key in plateKeys} + a[plate_id] = {key: 0 for key in plateKeys} a[plate_id]["plate_id"] = plate_id a[plate_id]["date_time"] = datetime.now().isoformat(" ") a[plate_id]["temperature"] = plate_temperature - a[plate_id]["subwells"] = {} + a[plate_id]["subwells"] = {} if plate_temperature == "UNKNOWN": - print("File Error: Since the plate temperature could not be found, circles will be fit for 20C room temp. continuing...") + try: + raise FileNotFoundError( + "Since the plate temperature could not be found, circles will be fit for 20C room temp. continuing...") + except FileNotFoundError: + pass print("Finding pixel location of wells.") - for im_idx, im_path in tqdm(sorted(dict_image_path_subwells.items())): + for im_idx, im_paths in tqdm(sorted(dict_image_path_subwells.items())): + im_path, im_overview = im_paths if im_path: - cx_d,cy_d,radii_d, cx_w, cy_w, radii_w = save_canny_save_fit(im_path, plate_temperature) ### calling this function for 4c or 20c temp - # cx_d,cy_d,radii_d, cx_w, cy_w, radii_w = [0,0,0,0,0,0] # time saving code (will output zeros) - ### radii radius of the drop circle - ### everything _w is for the well - ### everything _d is for the drop - ### plan on keeping the drop information + cx_d, cy_d, radii_d, cx_w, cy_w, radii_w = save_canny_save_fit(im_overview, + plate_temperature) ### calling this function for 4c or 20c temp + else: + try: + raise FileNotFoundError("Well x,y,r will be zeros "+im_idx) + except FileNotFoundError: + cx_d, cy_d, radii_d, cx_w, cy_w, radii_w = [0, 0, 0, 0, 0, 0] # time saving code (will output zeros) + + # radii radius of the drop circle + # everything _w is for the well + # everything _d is for the drop + # plan on keeping the drop information offset_x = cx_d - cx_w offset_y = cy_w - cy_d - well,subwell = im_idx.split("_") + well, subwell = im_idx.split("_") - str_well_id = Plate.well_names[int(well)-1] + str_well_id = Plate.well_names[int(well) - 1] - letter_number_new_image_path = im_path.split('well')[0]+str_well_id+"_"+subwell+".jpg" - os.rename(im_path,letter_number_new_image_path) + letter_number_new_image_path = im_path + if 'well' in im_path: + letter_number_new_image_path = im_path.split('well')[0] + str_well_id + "_" + subwell + ".jpg" + os.rename(im_path, letter_number_new_image_path) # print(cx_w,cy_w,radii_w,cx_d,cy_d,radii_d,cx_w,cy_w,radii_d,name,im_path,0,0,0) str_currentWell = "{0}_{1}".format(str_well_id, subwell) - a[plate_id]["subwells"][str_currentWell] = {key:0 for key in wellKeys} + a[plate_id]["subwells"][str_currentWell] = {key: 0 for key in wellKeys} a[plate_id]["subwells"][str_currentWell]["image_path"] = letter_number_new_image_path 
a[plate_id]["subwells"][str_currentWell]["well_id"] = str_well_id a[plate_id]["subwells"][str_currentWell]["well_radius"] = int(radii_w) @@ -213,14 +243,47 @@ def main(): a[plate_id]["subwells"][str_currentWell]["offset_x"] = int(offset_x) a[plate_id]["subwells"][str_currentWell]["subwell"] = int(subwell) - print("created:", os.path.join(current_directory,plate_dir,plate_dir.replace(os.path.join("a","").replace("a",""),'')) + '.json') - with open(os.path.join(current_directory,plate_dir,plate_dir.replace(os.path.join("a","").replace("a",""),'')) + '.json', 'w') as fp: + print("created:", os.path.join(current_directory, plate_dir, + plate_dir.replace(os.path.join("a", "").replace("a", ""), '')) + '.json') + with open(os.path.join(current_directory, plate_dir, + plate_dir.replace(os.path.join("a", "").replace("a", ""), '')) + '.json', 'w') as fp: json.dump(a, fp) print('wrote to json') - print("time to run: %s minutes"%str(int(ti.time()-t0)/60)) + +def main(): + current_directory = os.getcwd() + + args = argparse_reader().parse_args() + + t0 = ti.time() ### save time to know how long this script takes (this one takes longer than step 2) + + plate_dir = args.output_plate_folder + plate_id = args.plateID + plate_temperature = args.plate_temp + run_only_json = args.generateJson + if run_only_json: + try: + with open(os.path.join(current_directory, plate_dir, "dict_image_path_subwells.json"), + 'r') as images_to_subwell_json: + d = dict(json.load(images_to_subwell_json)) + createJson(plate_dir=plate_dir, plate_id=plate_id, plate_temperature=plate_temperature, + dict_image_path_subwells=d) + exit(1) + except FileNotFoundError as e: + print(e) + exit(0) + + dict_image_path_subwells = get_dict_image_to_well(plate_dir=plate_dir) + with open(os.path.join(current_directory, plate_dir, "dict_image_path_subwells.json"), + 'w') as images_to_subwell_json: + json.dump(dict_image_path_subwells, images_to_subwell_json) + + createJson(plate_dir=plate_dir, plate_id=plate_id, plate_temperature=plate_temperature, + dict_image_path_subwells=dict_image_path_subwells) + + print("time to run: %s minutes" % str(int(ti.time() - t0) / 60)) if __name__ == "__main__": main() - \ No newline at end of file diff --git a/transfer_imgs_1.py b/transfer_imgs_1.py index d760331..75b2955 100644 --- a/transfer_imgs_1.py +++ b/transfer_imgs_1.py @@ -6,6 +6,7 @@ from glob import glob from os.path import join, exists import subprocess # runs bash commands in python +import sys from tqdm import tqdm @@ -15,96 +16,111 @@ def argparse_reader(): parser.add_argument('plateID', type=int, help='RockMaker Plate ID (4 or 5 digit code on barcode or 2nd number on RockMaker screen experiment file') parser.add_argument('output_plate_folder', type=str, help='Output folder for images and json') - parser.add_argument('plate_temp', type=int, help='Temperature of plate stored at') parser.add_argument('rock_drive_IP_address', type=str, help="IP addess of rock_drive storage (images)") return parser +def run(plateID, output_dir, rock_drive_ip): + if not exists(join(output_dir)): + os.mkdir(join(output_dir)) + + rsync_log = ["rsync", "-nmav", "--include", "*/", "--exclude", "*_th.jpg", "--include", "*.jpg", "-e", "ssh", + "xray@" + rock_drive_ip + ":/volume1/RockMakerStorage/WellImages/" + str(plateID)[ + 2:] + '/plateID_' + str( + plateID) + '/', str(output_dir) + '/'] + + print(*rsync_log) + rsync = subprocess.run(rsync_log, capture_output=True) + rsync_out = rsync.stdout.decode("utf-8") + with open(join(output_dir, "log_rsync_init_file_list.txt"), 'w') 
as log_rsync_file_out: + log_rsync_file_out.write(rsync.stdout.decode("utf-8")) + + # get all batches + batches = set() + with open(join(output_dir, "log_rsync_init_file_list.txt"), 'r', encoding='utf-8') as log_rsync_file: + for file in sorted(log_rsync_file.readlines()): + if 'batchID_' in file: + batches.add(int( + file.split('batchID_')[1].split('wellNum')[0].replace("/", '').replace("\\", '').replace('n', ''))) + batches = sorted(list(batches)) + # batchID_overview = batches[-1] # last in list + # batchID_drop = batches[0] # first in list + print("batch IDs selected: ", *batches) + + # ### Create a list of files to transfer in a text file for rsync to transfer using the --files-from option + + # get unique image names + # Tries to grab all the images in the same batch first, because doing two batches is two different batches of images + """ + i think batch id are different times taking the pictures so if you try to match a extended focus to a drop + image from two different imaging times(aka batches) it will not match exactly and could be the problem with + the well not matching + """ + image_names = set() + path_names_only_necessary = list() + for line in rsync_out.split('\n'): + if ".jpg" in line: + image_name = line.split('/')[-1] + if image_name not in image_names: + path_names_only_necessary.append(line) + image_names.add(line.split('/')[-1]) + print() + + drop_images_paths = [] + overview_drop_location_paths = [] + overview_extended_focus_paths = [] + # sort by image name + i = 1 + for path in list(sorted([line for line in path_names_only_necessary], key=lambda line: line.split(os.sep)[-1])): + if "ef.jpg" in path and i == 1: + drop_images_paths.append(path) + i += 1 + elif "dl.jpg" in path: + overview_drop_location_paths.append(path) + elif "ef.jpg" in path and i == 2: + overview_extended_focus_paths.append(path) + i = 1 + + print("drop image:", len(drop_images_paths), " images \noverview ef:", len(overview_extended_focus_paths), + " images \noverview drop location:", len(overview_drop_location_paths), "images") + with open(join(output_dir, "files_to_transfer.txt"), 'w') as files_to_transfer: + for path in tqdm([*drop_images_paths, *overview_drop_location_paths, *overview_extended_focus_paths]): + files_to_transfer.write(path + "\n") + + rsync_download = [ + "rsync", "-mav", "-P", "--files-from=" + output_dir + "/files_to_transfer.txt", "-e", "ssh", + "xray@" + rock_drive_ip + ":" + join("/volume1", + "RockMakerStorage", + "WellImages", + str(plateID)[2:], + 'plateID_' + str(plateID)), + join(str(output_dir), "") + ] + print(*rsync_download) + rsync_stdout_download = subprocess.run(rsync_download, capture_output=True).stdout.decode("utf-8") + downloaded_files = 0 + for line in rsync_stdout_download.split("\n"): + if ".jpg" in line: + downloaded_files += 1 + print("Downloaded Files = ", downloaded_files, "(should be 288 = 96*3)") + else: + try: + raise RuntimeWarning("Using files from previous download in "+output_dir) + except RuntimeWarning as e: + print(e) + pass + return + + def main(): args = argparse_reader().parse_args() plateID = args.plateID output_dir = args.output_plate_folder - temperature = args.plate_temp rock_drive_ip = args.rock_drive_IP_address - if not exists(join(output_dir)): - os.mkdir(join(output_dir)) + run(plateID=plateID, output_dir=output_dir, rock_drive_ip=rock_drive_ip) - rsync_log = ["rsync", "-nmav", "--include", "*/", "--exclude", "*_th.jpg", "--include", "*.jpg", "-e", "ssh", - "xray@" + rock_drive_ip + ":/volume1/RockMakerStorage/WellImages/" + 
str(plateID)[ - 2:] + '/plateID_' + str( - plateID) + '/', str(output_dir) + '/'] - - print(*rsync_log) - rsync = subprocess.run(rsync_log, capture_output=True) - rsync_out = rsync.stdout.decode("utf-8") - with open(join(output_dir, "log_rsync_init_file_list.txt"), 'w') as log_rsync_file_out: - log_rsync_file_out.write(rsync.stdout.decode("utf-8")) - - # get all batches - batches = set() - with open(join(output_dir, "log_rsync_init_file_list.txt"), 'r', encoding='utf-8') as log_rsync_file: - for file in sorted(log_rsync_file.readlines()): - if 'batchID_' in file: - batches.add(int( - file.split('batchID_')[1].split('wellNum')[0].replace("/", '').replace("\\", '').replace('n', ''))) - batches = sorted(list(batches)) - batchID_overview = batches[-1] # last in list - batchID_drop = batches[0] # first in list - print("batch IDs selected: ", *batches) - - # ### Create a list of files to transfer in a text file for rsync to transfer using the --files-from option - - # get unique image names - # Tries to grab all the images in the same batch first, because doing two batches is two different batches of images - """ - i think batch id are different times taking the pictures so if you try to match a extended focus to a drop - image from two different imaging times(aka batches) it will not match exactly and could be the problem with - the well not matching - """ - image_names = set() - path_names_only_necessary = list() - for line in rsync_out.split('\n'): - if ".jpg" in line: - image_name = line.split('/')[-1] - if image_name not in image_names: - path_names_only_necessary.append(line) - image_names.add(line.split('/')[-1]) - print() - - drop_images_paths = [] - overview_drop_location_paths = [] - overview_extended_focus_paths = [] - # sort by image name - i = 1 - for path in list(sorted([line for line in path_names_only_necessary], key=lambda line: line.split('/')[-1])): - if "ef.jpg" in path and i == 1: - drop_images_paths.append(path) - i += 1 - elif "dl.jpg" in path: - overview_drop_location_paths.append(path) - elif "ef.jpg" in path and i == 2: - overview_extended_focus_paths.append(path) - i = 1 - - print("drop image:", len(drop_images_paths), " images \noverview ef:", len(overview_extended_focus_paths), - " images \noverview drop location:", len(overview_drop_location_paths), "images") - with open(join(output_dir, "files_to_transfer.txt"), 'w') as files_to_transfer: - for path in tqdm([*drop_images_paths, *overview_drop_location_paths, *overview_extended_focus_paths]): - files_to_transfer.write(path + "\n") - - rsync_download = [ - "rsync", "--info=progress2", "--files-from=" + output_dir + "/files_to_transfer.txt", "-e", "ssh", - "xray@" + rock_drive_ip + ":" + join("/volume1", - "RockMakerStorage", - "WellImages", - str(plateID)[2:], - 'plateID_' + str(plateID)), - join(str(output_dir), "") - ] - print(*rsync_download) - subprocess.run(rsync_download) if __name__ == '__main__': main() From b75a41d2061de340309574214d8c30c5491cad96 Mon Sep 17 00:00:00 2001 From: Liam McKay Date: Tue, 24 Mar 2020 09:00:55 -0700 Subject: [PATCH 5/9] ignore local folder --- .gitignore | 3 ++- run.sh | 15 --------------- run_multiplePlates.sh | 33 --------------------------------- run_onePlate.sh | 18 ------------------ run_one_ip_change.sh | 15 --------------- transfer_imgs_1.sh | 40 ---------------------------------------- 6 files changed, 2 insertions(+), 122 deletions(-) delete mode 100755 run.sh delete mode 100755 run_multiplePlates.sh delete mode 100644 run_onePlate.sh delete mode 100644 
run_one_ip_change.sh delete mode 100644 transfer_imgs_1.sh diff --git a/.gitignore b/.gitignore index e9dad5c..6d90367 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,5 @@ */ .DS_Store /* -images/ \ No newline at end of file +images/ +ignore/ \ No newline at end of file diff --git a/run.sh b/run.sh deleted file mode 100755 index 5f0e3b1..0000000 --- a/run.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -### Testing for the corect number of input arguments -if [ ! $# -eq 3 ]; then - echo "Usage Error: incorrect number of arguments - sh run.sh [output_plate_folder] [plate_temperature 4c/20c] [plateID] " - exit 1 -fi -output_dir=$1 -temperature=$2 -plateID=$3 - -bash transfer_imgs_1.sh ${plateID} ${output_dir} ${temperature} 169.230.29.134 -python3 bounding_box_overlay_2.py ${output_dir} -python3 pregui_img_analysis_3.py ${output_dir} - diff --git a/run_multiplePlates.sh b/run_multiplePlates.sh deleted file mode 100755 index 8bf367d..0000000 --- a/run_multiplePlates.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -### Testing for the corect number of input arguments - - -### Take command-line arguments and set them to variables - - -if [ "$#" -ne 3 ]; then - output_dir=$1 - temperature=$2 - plateIDs=("${@:3}") - for plateID in ${plateIDs[@]} - do - echo "${plateID}" - bash transfer_imgs_1.sh ${plateID} ${output_dir}/${plateID} ${temperature} 169.230.29.134 - python bounding_box_overlay_2.py ${output_dir}/${plateID} - python pregui_img_analysis_3.py ${output_dir}/${plateID} - done -else - if [ ! $# -eq 3 ]; then - echo "Usage Error: incorrect number of arguments - sh run.sh [output_plate_folder] [plate_temperature 4c/20c] [plateID]" - exit 1 - fi - output_dir=$1 - temperature=$2 - plateID=$3 - - bash transfer_imgs_1.sh ${plateID} ${output_dir} ${temperature} 169.230.29.134 - python bounding_box_overlay_2.py ${output_dir} - python pregui_img_analysis_3.py ${output_dir} - -fi diff --git a/run_onePlate.sh b/run_onePlate.sh deleted file mode 100644 index 2b86a81..0000000 --- a/run_onePlate.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -### Testing for the corect number of input arguments -if [ ! $# -eq 3 ];then - echo "Usage Error: incorrect number of arguments -sh run.sh [plateID] [output_plate_folder] [plate_temperature 4c/20c]" - exit 1 -fi - -### Take command-line arguments and set them to variables -output_dir=$1 -temperature=$2 -plateID=$3 - -bash transfer_imgs_1.sh "${plateID} ${output_dir} ${temperature}" 169.230.29.134 -python bounding_box_overlay_2.py "${output_dir}" -python pregui_img_analysis_3.py "${output_dir}" - - diff --git a/run_one_ip_change.sh b/run_one_ip_change.sh deleted file mode 100644 index 559c46e..0000000 --- a/run_one_ip_change.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -### Testing for the corect number of input arguments -if [ ! $# -eq 3 ]; then - echo "Usage Error: incorrect number of arguments - sh run.sh [output_plate_folder] [plate_temperature 4c/20c] [plateID] " - exit 1 -fi -output_dir=$1 -temperature=$2 -plateID=$3 - -bash transfer_imgs_1.sh ${plateID} ${output_dir} ${temperature} 169.230.29.134 -python bounding_box_overlay_2.py ${output_dir} -python pregui_img_analysis_3.py ${output_dir} - diff --git a/transfer_imgs_1.sh b/transfer_imgs_1.sh deleted file mode 100644 index f3aa8f7..0000000 --- a/transfer_imgs_1.sh +++ /dev/null @@ -1,40 +0,0 @@ -### Testing for the corect number of input arguments -### expecting 1 arugment for PlateID -if [ ! 
$# -eq 4 ];then - echo "Usage Error: incorrect number of arguments -sh transfer_imgs.sh [plateID] [output_plate_folder] [plate_temperature 4c/20c] [rock_drive LAN 1 ip address]" - exit 1 -fi - -### Take command-line arguments and set them to variables -plateID=$1 -output_dir=$2 -temperature=$3 -rock_drive_ip=$4 - -### Make output directory -mkdir -p "${output_dir}/overlay" - -### Run rsync to grab all non-thumbnail image paths and store in file -rsync -nmav --rsync-path "/bin/rsync" --include "*/" --exclude "*_th.jpg" --include "*.jpg" xray@${rock_drive_ip}:"/volume1/RockMakerStorage/WellImages/${plateID: -3}/plateID_${plateID}/" ${output_dir}/ > ${output_dir}/log_rsync_init_file_list.txt - -### grab first and last batch IDs from rsync path list -batchID_overview=`grep ".jpg" ${output_dir}/log_rsync_init_file_list.txt | awk -F "batchID_|/wellNum" '{print $2}' | sort | uniq | head -n 1` -batchID_drop=`grep ".jpg" ${output_dir}/log_rsync_init_file_list.txt | awk -F "batchID_|/wellNum" '{print $2}' | sort | uniq | tail -n 1` - -echo "selected IDs: ${batchID_drop} ${batchID_overview}" - -### Create a list of files to transfer in a text file for rsync to transfer using the --files-from option -### first cat: add drop images to file list -### second cat: add overview drop location files to file list -### third cat: add overview extended focus images to file list -#cat log_rsync_init_file_list.txt | grep "${batchID_drop}\|${batchID_overview}" | grep ".jpg" > files_to_transfer.txt -cat ${output_dir}/log_rsync_init_file_list.txt | grep "${batchID_drop}" | grep ".jpg" > ${output_dir}/files_to_transfer.txt -cat ${output_dir}/log_rsync_init_file_list.txt | grep "${batchID_overview}" | grep "dl.jpg" >> ${output_dir}/files_to_transfer.txt -cat ${output_dir}/log_rsync_init_file_list.txt | grep "${batchID_overview}" | grep "dl.jpg" | sed 's/dl/ef/' >> ${output_dir}/files_to_transfer.txt -echo ${plateID} > ${output_dir}/plateid.txt -echo ${temperature} > ${output_dir}/temperature.txt - -### transfer files using rsync -echo "2nd password prompt will appear below, should only take ~30 sec to download all images on UCSFwpa" -rsync --progress -mav --rsync-path "/bin/rsync" --files-from=${output_dir}/files_to_transfer.txt xray@${rock_drive_ip}:"/volume1/RockMakerStorage/WellImages/${plateID: -3}/plateID_${plateID}" ${output_dir}/ > ${output_dir}/log_rsync_transferred.txt From da7bc3af09cd27313bfb1a6bf58ef9d3d95e72ad Mon Sep 17 00:00:00 2001 From: Liam McKay Date: Wed, 25 Mar 2020 14:09:05 -0700 Subject: [PATCH 6/9] open cv (faster), overlay is jank --- bounding_box_overlay_2.py | 148 ++++++++++++++++++++++++++------- organizeImages.py | 19 +++-- pregui_img_analysis_3.py | 167 +++++++++++++------------------------- 3 files changed, 183 insertions(+), 151 deletions(-) diff --git a/bounding_box_overlay_2.py b/bounding_box_overlay_2.py index f4161d2..2f542a2 100644 --- a/bounding_box_overlay_2.py +++ b/bounding_box_overlay_2.py @@ -1,30 +1,113 @@ import subprocess +import imutils from PIL import Image as Im import numpy as np import time, sys import glob import os + +from cv2 import cv2 + import organizeImages as oI from tqdm import tqdm +from Plate import Plate + + +def save_overview_img(original_fp, imageDirectory): + plate = Plate(r=8, c=12, subwell_num=1) # don't need to worry about subwell (that's specified in img path) + + original_fp = os.path.abspath(original_fp) + fp = original_fp.split(os.path.sep) + + well_num = "".join([(fp[x] if c == 0 else '') for x, c in + enumerate([s.find('well') for s in fp])]) # 
just gets the wellNum_## folder name + + subwell_number = fp[-1][1] + + well_id = plate.get_number_to_well_id(int(well_num.split("_")[1])).split("_")[0] + new_fp = os.path.join(imageDirectory, "overview", well_id + "_" + subwell_number + ".jpg") + + subprocess.run(["cp", original_fp, new_fp]) + return well_id + "_" + subwell_number + def overlay_images(overview_dl_fh, overview_ef_fh, zoom_fh, output_fh): ### This is the main function of the script ### Get image with signal from red box in drop location image - box_im_open = red_box_subt(overview_dl_fh, 4) - # box_im_open.show() - - ### Get the dimensions of the signal for the red box, by calculating the bounding box - box_dims = find_bounding_box(box_im_open, 4) - - ### Overlay resized drop image onto the original overview image, using the bounding box dimensions - overlay_open = align_drop_to_box(overview_ef_fh, zoom_fh, box_dims) + # box_im_open = red_box_subt(overview_dl_fh, 4) + overview_dl = cv2.imread(overview_dl_fh) + zoom = cv2.imread(zoom_fh) + overview_ef = cv2.imread(overview_ef_fh) + + mask = cv2.inRange(overview_dl, np.array([0, 2, 57]), np.array([69, 92, 255])) + # all pixels in our image that have a R >= 100, B >= 15, and G >= 17 along with R <= 200, B <= 56, and G <= 50 will be considered red. + # https://www.pyimagesearch.com/2014/08/04/opencv-python-color-detection/ + + output = cv2.bitwise_and(overview_dl, overview_dl, mask=mask) + _, output = cv2.threshold(output, 100, 255, cv2.THRESH_BINARY) + + grey = cv2.cvtColor(output, cv2.COLOR_RGB2GRAY) + blurred = cv2.GaussianBlur(grey, (5, 5), 1) + _, thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY) + + cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, + cv2.CHAIN_APPROX_SIMPLE) + cnts = imutils.grab_contours(cnts) + boundRect = [None] * len(cnts) + + contours_poly = [None] * len(cnts) + # centers = [None] * len(cnts) + # radius = [None] * len(cnts) + biggest_area = 0 + box_with_biggest_area = 0 + for i, c in enumerate(cnts): + contours_poly[i] = cv2.approxPolyDP(c, 3, True) + boundRect[i] = cv2.boundingRect(contours_poly[i]) + area = boundRect[i][2]*boundRect[i][3] + if area > biggest_area: + box_with_biggest_area = i + biggest_area = area + + # centers[i], radius[i] = cv2.minEnclosingCircle(contours_poly[i]) + + # for cases with blue boxes on overview drop location, this messes up. + # 1. try to take last contour + # 2. 
take largest area of bounding box + + # draw the contours + # c = c.astype("float") + # c = c.astype("int") + # cv2.drawContours(overview_dl, [c], -1, (0, 255, 0), 2) + + # cv2.rectangle(overview_dl, (int(boundRect[i][0]), int(boundRect[i][1])), + # (int(boundRect[i][0] + boundRect[i][2]), int(boundRect[i][1] + boundRect[i][3])), (255, 0, 0), + # thickness=-1) + + rows, cols, _ = zoom.shape + max_b = box_with_biggest_area + + b_x, b_y, b_w, b_h = int(boundRect[max_b][0]), int(boundRect[max_b][1]), int(boundRect[max_b][2]), int(boundRect[max_b][3]) + + # width of box found = new x width for zoom picture + xscale = b_w + # height of box found = new y height for zoom picture + yscale = b_h + + xscale = max(xscale,yscale) + yscale = max(xscale,yscale) + + if (xscale, yscale) > (0, 0): + drop = cv2.resize(zoom, (xscale, yscale), interpolation=cv2.INTER_AREA) + else: + drop = cv2.resize(zoom, (cols//2, rows//2), interpolation=cv2.INTER_AREA) - ### Save the overlayed image - overlay_open.save(output_fh, format="JPEG") - # return overlay_open + if (drop.shape[0]+b_y,drop.shape[1]+b_x) <= (overview_ef.shape[0], overview_ef.shape[1]): + overview_ef[b_y:b_y + drop.shape[0], b_x:b_x + drop.shape[1]] = drop + cv2.imwrite(output_fh, overview_ef) + return overview_ef def red_box_subt(w_box_fh, scaling_factor): @@ -124,6 +207,7 @@ def align_drop_to_box(over_ef, drop_fh, box): # overview_open.show() return overview_open + def run(imageDirectory): if not os.path.exists(imageDirectory): # case 2: directory doesn't exist print("Error: cannot find directory " + imageDirectory) @@ -133,40 +217,44 @@ def run(imageDirectory): print("overlaying images.\n") completedWells = 0 - # generate wells a01-h12 - only works on 96 well plates - letters = list('abcdefgh'.upper()) - numbers = ["{:02d}".format(n) for n in range(1, 13)] - wells = [[c + n for n in numbers] for c in letters] - wellflat = [] - [[wellflat.append(wells[i][j]) for j in range(len(wells[i]))] for i in range(len(wells))] - for i in tqdm(range(1, 97)): filepaths = sorted( glob.glob(os.path.join(imageDirectory, 'organizedWells', 'wellNum_' + str(i), '*'))) # find all images - subwell_list = [z.split("/d")[1].split("_")[0] for z in filepaths] if len(filepaths) % 3 == 0: for j in range(0, len(filepaths), 3): - output_fh = os.path.join(imageDirectory, "overlayed", - "well_" + str(i) + "_subwell" + subwell_list[0 + j] + "_overlay.jpg") zoom_ef_fh = filepaths[0 + j] dl_fh = filepaths[1 + j] ef_fh = filepaths[2 + j] - subprocess.run(['cp', ef_fh, os.path.join(imageDirectory,"overview",ef_fh)]) + + # save overview image (no drop location box) to overview folder + well_name = save_overview_img(ef_fh, imageDirectory) + + output_fp = os.path.join(imageDirectory, "overlayed", well_name + ".jpg") try: - overlayed_img = overlay_images(dl_fh, ef_fh, zoom_ef_fh, output_fh) + overlayed_img = overlay_images(dl_fh, ef_fh, zoom_ef_fh, output_fp) completedWells += 1 except TypeError: - print("\nwellNum_" + str( - i) + ' Overlay Error: Could not get bounding box from box_open.getbbox(). Image wasn\'t loaded') + try: + raise RuntimeWarning( + "wellNum_%d" % i + + 'error with overlay: Could not get bounding box from box_open.getbbox(). 
Image wasn\'t loaded') + except RuntimeWarning as e: + print(e) except OSError: - print("\nwellNum_{0} File Error: Could not open image file".format(i)) + try: + raise RuntimeWarning("wellNum_%d Could not open image file" % i) + except RuntimeWarning as e: + print(e) else: - print("\nwellNum_" + str( - i) + " does not have the 3 required images for bounding box overlay. Continuing...") + try: + raise RuntimeWarning("\nwellNum_" + str( + i) + " does not have the 3 required images for bounding box overlay. Continuing...") + except RuntimeWarning as e: + print(e) ### show how many wells have an overlay - print("Completed images:", completedWells) + print("Completed images (should be 96 for 96 well):", completedWells) def main(): @@ -174,7 +262,7 @@ def main(): t0 = time.time() # save usage to a string to save space usage = "Usage: python bounding_box_overlay.py [parent image directory]" - imageDirectory='' + imageDirectory = '' try: # case 1: catches if there is no argv 1 # not the greatest, but works imageDirectory = sys.argv[1] diff --git a/organizeImages.py b/organizeImages.py index e689a4b..9db99f1 100644 --- a/organizeImages.py +++ b/organizeImages.py @@ -21,7 +21,8 @@ def rename_overview_images_well_id(imageDirectory): wellNum = int(path.split("wellNum_")[1].split(os.path.sep)[0]) well_id, _ = p.get_number_to_well_id(wellNum).split("_") subprocess.run( - ["mv", path, os.path.join(imageDirectory, "overview", "wellNum_%d" % wellNum, well_id + "_" + subwell)]) + ["mv", path, + os.path.join(imageDirectory, "overview", "wellNum_%d" % wellNum, well_id + "_" + subwell + ".jpg")]) def organizeImages(imageDirectory): @@ -48,14 +49,14 @@ def organizeImages(imageDirectory): if not os.path.exists(os.path.join(newDirectory, well_num)): os.mkdir(os.path.join(newDirectory, well_num)) os.system("cp " + path + " " + os.path.join(newDirectory, well_num, a[-1])) - - if not os.path.exists(os.path.join(overview_img_dir, well_num)): - os.mkdir(os.path.join(overview_img_dir, well_num)) - if "ef.jpg" in path: - well_id = p.get_number_to_well_id(int(well_num.split("_")[1])).split("_")[0] - os.system("cp " + path + " " + os.path.join(overview_img_dir, well_num, - well_id + "_" + a[-1][1] + ".jpg")) - # overwrites the *ef.jpg copied, all files saved as well id - last img is saved + # + # if not os.path.exists(os.path.join(overview_img_dir, well_num)): + # os.mkdir(os.path.join(overview_img_dir, well_num)) + # if "ef.jpg" in path: + # well_id = p.get_number_to_well_id(int(well_num.split("_")[1])).split("_")[0] + # os.system("cp " + path + " " + os.path.join(overview_img_dir, well_num, + # well_id + "_" + a[-1][1] + ".jpg")) + # # overwrites the *ef.jpg copied, all files saved as well id - last img is saved else: print("Error: cannot find image directory", imageDirectory) diff --git a/pregui_img_analysis_3.py b/pregui_img_analysis_3.py index 7d76ab2..32475c3 100644 --- a/pregui_img_analysis_3.py +++ b/pregui_img_analysis_3.py @@ -3,22 +3,15 @@ # run this command above the echo project directory import glob -import string -import sys -import pickle as pkl -import matplotlib.pyplot as plt import numpy as np from skimage.transform import hough_circle, hough_circle_peaks -from skimage.feature import canny from skimage.draw import circle_perimeter from skimage.util import img_as_ubyte -from skimage.util import pad from skimage import io import os -from classes_only import Well_well_well as well from classes_only import Plate -from datetime import datetime, date, time +from datetime import datetime import time as ti 
import json from tqdm import tqdm @@ -26,6 +19,11 @@ import argparse +# https://stackoverflow.com/questions/11686720/is-there-a-numpy-builtin-to-reject-outliers-from-a-list +def reject_outliers(data, m=2): + return data[abs(data - np.mean(data)) <= m * np.std(data)] + + def argparse_reader(): parser = argparse.ArgumentParser() parser.add_argument('plateID', type=int, @@ -34,6 +32,8 @@ def argparse_reader(): parser.add_argument('plate_temp', type=int, help='Temperature of plate stored at') parser.add_argument('-json', '--generateJson', action='store_true', help="JSON Output re-run well pixel finding algorithm") + parser.add_argument('-debug', '--debug', action='store_true', + help='Shows images that well/drop were not found in analysis') return parser @@ -50,102 +50,49 @@ def argparse_reader(): # cx : circle center x # cy : circle center y # radii circle radii -def circular_hough_transform(r1, r2, step, edge, - peak_num): # do we need to do this? difference in a radium in circle matters to the conversion rate - # be able to distinguish up to one pixel of the circle - - hough_radii = np.arange(r1, r2, step) - hough_res = hough_circle(edge, hough_radii) - accums, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii, total_num_peaks=peak_num) - return accums, cx, cy, radii -def single_radii_circular_hough_transform(r1, edge): - hough_res = hough_circle(edge, r1) - accums, cx, cy, radii = hough_circle_peaks(hough_res, r1, total_num_peaks=1) - return accums, cx, cy, radii +def process_found_circles(circles): + x, y, r = np.average(reject_outliers(circles[:, 0])).astype(int), np.average(reject_outliers(circles[:, 1])).astype( + int), np.average(reject_outliers(circles[:, 2])).astype(int) + return x, y, r -# Params -# This functions draws a circle of radius r centered on x,y on an image. It draws the center of the circle -# image: input greyscale numpy 2darray -# cx: int center x -# cy: int center y -# color: single 8-bit channel int. 
i.e 0-255 -def draw_circles_on_image(image, cx, cy, radii, colour, dotsize): - for center_y, center_x, radius in zip(cy, cx, radii): - circy, circx = circle_perimeter(center_y, center_x, radius) - image[circy, circx] = colour - image[cy[0] - dotsize:cy[0] + dotsize, cx[0] - dotsize:cx[0] + dotsize] = colour - - -def draw_circles_on_image_center(image, cx, cy, radii, colour, dotsize): - for center_y, center_x, radius in zip(cy, cx, radii): - circy, circx = circle_perimeter(center_y, center_x, radius) - image[circy, circx] = colour - image[cy[0] - dotsize:cy[0] + dotsize, cx[0] - dotsize:cx[0] + dotsize] = colour - - -def save_canny_save_fit(path, temp): # sig=3,low = 0, high = 30 - # circles=[] - # zstack = io.imread(overlayed_image[i]) - # output = img_as_ubyte(zstack[:, :, 0]).copy() - # edged = image.copy() - # # ret, thresh = cv2.threshold(image, thresh1_binary, thresh2_binary, cv2.THRESH_BINARY) - # print("bw:",thresh1_binary, thresh2_binary) - # edged = cv2.Canny(thresh, thresh1_canny, thresh2_canny) - # print("edged:",thresh1_canny, thresh2_canny) - # circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=3, minDist=400, minRadius=475, maxRadius=495) - # if circles is not None: - # found_circles += 1 - # # convert the (x, y) coordinates and radius of the circles to integers - # circles = np.round(circles[0, :]).astype("int") - # # loop over the (x, y) coordinates and radius of the circles - # for (x, y, r) in circles: - # # draw the circle in the output image, then draw a rectangle - # # corresponding to the center of the circle - # cv2.circle(output, (x, y), r, (255, 0, 0), 4) - # cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1) - +def save_canny_save_fit(path, temp, debug=False): accum_d, cx_d, cy_d, radii_d, cx_w, cy_w, radii_w = [0, 0, 0, 0, 0, 0, 0] # initialize variables zstack = io.imread(path) image = img_as_ubyte(zstack[:, :, 0]) # finds the top x-y pixels in the z-stack - # image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) - _, thresh = cv2.threshold(image, 100, 255, cv2.THRESH_BINARY) # tested march 17 2020 on plate 10818 - - # _, thresh = cv2.threshold(image, 0, 50, cv2.THRESH_BINARY) # tested march 17 2020 on plate 10818 - + ret, thresh = cv2.threshold(image, 30, 74, cv2.THRESH_TOZERO_INV) + ret, thresh = cv2.threshold(thresh, 0, 255, cv2.THRESH_BINARY) # brightens grey to white TO ZERO threshold image edged = cv2.Canny(thresh, 101, 112) - # drop_circles = cv2.HoughCircles(image=thresh, method=cv2.HOUGH_GRADIENT, dp=3, minDist=100, minRadius=135, - # maxRadius=145) - print(temp) + drop_circles = cv2.HoughCircles(image=image, method=cv2.HOUGH_GRADIENT, dp=3, minDist=1, minRadius=135, + maxRadius=145) if int(temp) == 20: ### This works well for echo RT plate type for rockmaker - circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=1, minDist=0, minRadius=475, + circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=3, minDist=1, minRadius=475, maxRadius=495) else: ### This works well for echo 4C plate type for rockmaker - circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=1, minDist=0, minRadius=459, + circles = cv2.HoughCircles(image=edged, method=cv2.HOUGH_GRADIENT, dp=3, minDist=1, minRadius=459, maxRadius=475) + image = cv2.UMat(image) if circles is not None: circles = np.round(circles[0, :]).astype("int") - cx_w, cy_w, radii_w = circles[0] # always take first circle found - print(circles) - for (x, y, r) in circles: - # draw the circle in the output image, then draw a rectangle - # 
corresponding to the center of the circle - cv2.circle(thresh, (x, y), r, (255, 0, 0), 4) - cv2.rectangle(thresh, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1) - cv2.imshow("output", thresh) - cv2.waitKey(0) - - # # edges = canny(image,sigma=sig,low_threshold=low,high_threshold=high) # edge detection - # if temp == '20C': ### This works well for echo RT plate type for rockmaker - # accum_w, cx_w, cy_w, radii_w = circular_hough_transform(479,495,1,edges,1) #edge detection on well. Units for both are in pixels - # else: ### This works well for echo 4C plate type for rockmaker - # accum_w, cx_w, cy_w, radii_w = circular_hough_transform(459,475,1,edges,1) #edge detection on well. Units for both are in pixels + cx_w, cy_w, radii_w = process_found_circles(circles) + else: + if debug: + cv2.imshow("could not find well, press key to continue", np.concatenate([image, edged], axis=1)) + cv2.waitKey(0) + + if drop_circles is not None: + drop_circles = np.round(drop_circles[0, :]).astype("int") + cx_d, cy_d, radii_d = process_found_circles(drop_circles) + else: + if debug: + cv2.imshow("could not find drop, press key to continue", image) + cv2.waitKey(0) return cx_d, cy_d, radii_d, cx_w, cy_w, radii_w @@ -154,32 +101,30 @@ def get_dict_image_to_well(plate_dir): current_directory = os.getcwd() image_list = glob.glob(os.path.join(current_directory, plate_dir, "overlayed", "*")) - overview_list = list(sorted(glob.glob(os.path.join(current_directory,plate_dir, "overview", "*", "*")),key=lambda x: x)) - print(os.path.join(current_directory, plate_dir, "overlayed", "*")) + overview_list = list( + sorted(glob.glob(os.path.join(current_directory, plate_dir, "overview", "*")), key=lambda x: x)) + print("overviewimgs = ", len(overview_list)) well_overlay_subwell_format = True - try: - image_list = sorted(image_list,key=lambda x: (int(x.split('well_')[1].split('_overlay')[0].split("_subwell")[0]))) - except IndexError: - if not image_list: - well_overlay_subwell_format = False - image_list = sorted(image_list,key=lambda x: x) + # try: + # image_list = sorted(image_list,key=lambda x: (int(x.split('well_')[1].split('_overlay')[0].split("_subwell")[0]))) + # except IndexError: + # if not image_list: + image_list = sorted(image_list, key=lambda x: x) dict_image_path_subwells = {} - for p,o in zip(image_list,overview_list): - if well_overlay_subwell_format: - well_subwell = p.split('well_')[1].split('_overlay')[0].replace("subwell", "") - else: - well_subwell = p.split("overlayed")[1].replace(".jpg", "") + for p, o in zip(image_list, overview_list): + # if well_overlay_subwell_format: + # well_subwell = p.split('well_')[1].split('_overlay')[0].replace("subwell", "") + # else: + well_subwell = p.split("overlayed")[1].replace(".jpg", "").replace(os.sep, '') well, subwell = well_subwell.split("_") - if well_overlay_subwell_format: - well = "{:02d}".format(int(well)) well_subwell = well + "_" + subwell - dict_image_path_subwells[well_subwell.replace(os.path.sep,"")] = p,o + dict_image_path_subwells[well_subwell.replace(os.path.sep, "")] = p, o return dict_image_path_subwells -def createJson(plate_dir: str, plate_id: int, plate_temperature: int, dict_image_path_subwells: dict) -> None: +def createJson(plate_dir: str, plate_id: int, plate_temperature: int, dict_image_path_subwells: dict, debug=False) -> None: current_directory = os.getcwd() plateKeys = ["date_time", "temperature"] @@ -205,10 +150,10 @@ def createJson(plate_dir: str, plate_id: int, plate_temperature: int, dict_image im_path, im_overview = im_paths if 
im_path: cx_d, cy_d, radii_d, cx_w, cy_w, radii_w = save_canny_save_fit(im_overview, - plate_temperature) ### calling this function for 4c or 20c temp + plate_temperature, debug) ### calling this function for 4c or 20c temp else: try: - raise FileNotFoundError("Well x,y,r will be zeros "+im_idx) + raise FileNotFoundError("Well x,y,r will be zeros " + im_idx) except FileNotFoundError: cx_d, cy_d, radii_d, cx_w, cy_w, radii_w = [0, 0, 0, 0, 0, 0] # time saving code (will output zeros) @@ -220,19 +165,17 @@ def createJson(plate_dir: str, plate_id: int, plate_temperature: int, dict_image offset_y = cy_w - cy_d well, subwell = im_idx.split("_") - str_well_id = Plate.well_names[int(well) - 1] - letter_number_new_image_path = im_path if 'well' in im_path: - letter_number_new_image_path = im_path.split('well')[0] + str_well_id + "_" + subwell + ".jpg" + letter_number_new_image_path = im_path.split('well')[0] + well + "_" + subwell + ".jpg" os.rename(im_path, letter_number_new_image_path) # print(cx_w,cy_w,radii_w,cx_d,cy_d,radii_d,cx_w,cy_w,radii_d,name,im_path,0,0,0) - str_currentWell = "{0}_{1}".format(str_well_id, subwell) + str_currentWell = "{0}_{1}".format(well, subwell) a[plate_id]["subwells"][str_currentWell] = {key: 0 for key in wellKeys} a[plate_id]["subwells"][str_currentWell]["image_path"] = letter_number_new_image_path - a[plate_id]["subwells"][str_currentWell]["well_id"] = str_well_id + a[plate_id]["subwells"][str_currentWell]["well_id"] = well a[plate_id]["subwells"][str_currentWell]["well_radius"] = int(radii_w) a[plate_id]["subwells"][str_currentWell]["well_x"] = int(cx_w) a[plate_id]["subwells"][str_currentWell]["well_y"] = int(cy_w) @@ -268,7 +211,7 @@ def main(): 'r') as images_to_subwell_json: d = dict(json.load(images_to_subwell_json)) createJson(plate_dir=plate_dir, plate_id=plate_id, plate_temperature=plate_temperature, - dict_image_path_subwells=d) + dict_image_path_subwells=d, debug=args.debug) exit(1) except FileNotFoundError as e: print(e) @@ -280,7 +223,7 @@ def main(): json.dump(dict_image_path_subwells, images_to_subwell_json) createJson(plate_dir=plate_dir, plate_id=plate_id, plate_temperature=plate_temperature, - dict_image_path_subwells=dict_image_path_subwells) + dict_image_path_subwells=dict_image_path_subwells, debug=args.debug) print("time to run: %s minutes" % str(int(ti.time() - t0) / 60)) From 157dd001a9e147b04a83a7979bc46dcd8b0e70fe Mon Sep 17 00:00:00 2001 From: Liam McKay Date: Thu, 26 Mar 2020 17:43:56 -0700 Subject: [PATCH 7/9] cropping methods cicle convex and box --- bounding_box_overlay_2.py | 386 ++++++++++++++++++++++++++++---------- 1 file changed, 285 insertions(+), 101 deletions(-) diff --git a/bounding_box_overlay_2.py b/bounding_box_overlay_2.py index 2f542a2..9fcd0b5 100644 --- a/bounding_box_overlay_2.py +++ b/bounding_box_overlay_2.py @@ -14,10 +14,11 @@ from Plate import Plate +plate = Plate(r=8, c=12, subwell_num=1) # don't need to worry about subwell (that's specified in img path) +white = (255, 255, 255) # white - color -def save_overview_img(original_fp, imageDirectory): - plate = Plate(r=8, c=12, subwell_num=1) # don't need to worry about subwell (that's specified in img path) +def save_overview_img(original_fp, imageDirectory): original_fp = os.path.abspath(original_fp) fp = original_fp.split(os.path.sep) @@ -33,133 +34,316 @@ def save_overview_img(original_fp, imageDirectory): return well_id + "_" + subwell_number -def overlay_images(overview_dl_fh, overview_ef_fh, zoom_fh, output_fh): - ### This is the main function of the script 
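# A minimal, standalone sketch of the red-box localisation idea this patch relies on:
# mask the red drop-location rectangle by colour, then take the bounding box of the
# largest contour. The helper name and image path are assumptions for illustration;
# the BGR bounds are the ones used in this file, and the two-value findContours
# return assumes OpenCV 4 (as elsewhere in this file).
import cv2
import numpy as np

def locate_red_box(dl_path="A01_dl.jpg"):
    dl = cv2.imread(dl_path)
    mask = cv2.inRange(dl, np.array([0, 2, 57]), np.array([69, 92, 255]))  # red box, BGR bounds
    cnts, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not cnts:
        return None
    return cv2.boundingRect(max(cnts, key=cv2.contourArea))  # (x, y, w, h)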
+def align_drop_to_box_opencv(b_x, b_y, b_w, b_h, zoom, overview_ef, b_w_mask_2=None): + if b_w_mask_2 is None: + b_w_mask_2 = cv2.bitwise_not(np.zeros((zoom.shape[0], zoom.shape[1]), np.uint8)) # make white box zoom size + box = [b_x, b_y, b_w, b_h] - ### Get image with signal from red box in drop location image - # box_im_open = red_box_subt(overview_dl_fh, 4) - overview_dl = cv2.imread(overview_dl_fh) - zoom = cv2.imread(zoom_fh) - overview_ef = cv2.imread(overview_ef_fh) + drop_ratio = zoom.shape[0] / float(zoom.shape[1]) + # print("drop_ratio: {}".format(drop_ratio)) + # print("box: {}".format(box)) + box_ratio = b_w / float(b_h) + # print("box_ratio: {}".format(box_ratio)) + + ### The calcualtion for the alignment of the images is different depending on the ratio of the aspect ratios + if drop_ratio <= box_ratio: + ### X-axis based scaling + ### resize the drop image and calculate the alignemnt + resize_ratio = box[2] / float(zoom.shape[1]) + new_w = int(np.round(zoom.shape[1] * resize_ratio)) + new_h = int(np.round(zoom.shape[0] * resize_ratio)) + # drop_resized = drop_open.resize((new_w, new_h)) + new_x = box[0] + new_y = int(np.round(((box[3] - new_h) / 2) + box[1])) + else: + ### Y-axis based scaling + ### resize the drop image and calculate the alignemnt + resize_ratio = box[3] / float(zoom.shape[0]) + new_w = int(np.round(zoom.shape[1] * resize_ratio)) + new_h = int(np.round(zoom.shape[0] * resize_ratio)) + # drop_resized = drop_open.resize((new_w, new_h)) + new_x = int(np.round(((box[2] - new_w) / 2) + box[0])) + new_y = box[1] + + rows, cols, _ = zoom.shape + + # xscale = b_w + # # height of box found = new y height for zoom picture + # yscale = b_h + + xscale = new_w + yscale = new_h + + if (xscale, yscale) > (0, 0): + drop = cv2.resize(zoom, (xscale, yscale), interpolation=cv2.INTER_AREA) + drop_mask = cv2.resize(b_w_mask_2, (xscale, yscale), interpolation=cv2.INTER_AREA) + else: + drop = cv2.resize(zoom, (cols // 2, rows // 2), interpolation=cv2.INTER_AREA) + drop_mask = cv2.resize(b_w_mask_2, (cols // 2, rows // 2), interpolation=cv2.INTER_AREA) + + # print(new_x,new_y,new_w,new_h) + # print(overview_ef.shape) + + if (drop.shape[0] + new_y, drop.shape[1] + new_x) <= (overview_ef.shape[0], overview_ef.shape[1]): + drop_grey = cv2.cvtColor(drop, cv2.COLOR_BGR2GRAY) + if len(overview_ef.shape) == 3: + overview_ef_grey = cv2.cvtColor(overview_ef, cv2.COLOR_BGR2GRAY) + else: + overview_ef_grey = overview_ef - mask = cv2.inRange(overview_dl, np.array([0, 2, 57]), np.array([69, 92, 255])) - # all pixels in our image that have a R >= 100, B >= 15, and G >= 17 along with R <= 200, B <= 56, and G <= 50 will be considered red. - # https://www.pyimagesearch.com/2014/08/04/opencv-python-color-detection/ + overview_mask = np.zeros((overview_ef_grey.shape[0], overview_ef.shape[1]), np.uint8) + zoom_overview_size = np.zeros((overview_ef_grey.shape[0], overview_ef.shape[1]), np.uint8) + + # overview_mask = large mask + overview_mask[new_y:new_y + drop.shape[0], new_x:new_x + drop.shape[1]] = drop_mask + mask_inv = cv2.bitwise_not(overview_mask) + + # zoom picture on black bg + zoom_overview_size[new_y:new_y + drop.shape[0], new_x:new_x + drop.shape[1]] = drop_grey + + # Now black-out the area of drop in overview_img + img1_bg = cv2.bitwise_and(overview_ef_grey, overview_ef_grey, mask=mask_inv) + # Take only region of logo from logo image. 
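+        # i.e. keep the resized drop only where its mask is white; the cv2.add below
+        # then merges it into the overview, whose drop region was blacked out above
+        # with the inverted mask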
+ img2_fg = cv2.bitwise_and(zoom_overview_size, zoom_overview_size, mask=overview_mask) + + # Put logo in ROI and modify the main image + overlay = cv2.add(img1_bg, img2_fg) + # print('overlayed') + + return overlay + else: + return overview_ef + + +def find_contours(image, mask_color=True, mask_color_min=None, mask_color_max=None, percent_arc_length=0.1, + bilateral=False, CONTOUR_METHOD=cv2.CHAIN_APPROX_SIMPLE, RETREIVAL_METHOD=cv2.RETR_EXTERNAL, + gradient=True, blur_iterations=1, box=False): + if mask_color_max is None: + mask_color_max = np.array([255, 255, 255]) + if mask_color_min is None: + mask_color_min = np.array([0, 0, 0]) + if mask_color: + # mask = cv2.inRange(image, np.array([0, 2, 57]), np.array([69, 92, 255])) + mask = cv2.inRange(image, mask_color_min, mask_color_max) + output = cv2.bitwise_and(image, image, mask=mask) + else: + output = image - output = cv2.bitwise_and(overview_dl, overview_dl, mask=mask) _, output = cv2.threshold(output, 100, 255, cv2.THRESH_BINARY) - grey = cv2.cvtColor(output, cv2.COLOR_RGB2GRAY) - blurred = cv2.GaussianBlur(grey, (5, 5), 1) + if len(image.shape) == 3: + grey = cv2.cvtColor(output, cv2.COLOR_RGB2GRAY) + else: + grey = output + + if gradient: + if bilateral: + blurred = cv2.bilateralFilter(grey, 75, 1, 1) + else: + blurred = cv2.GaussianBlur(grey, (5, 5), 1) + else: + blurred = grey + + if blur_iterations > 1: + for i in range(blur_iterations): + blurred = cv2.GaussianBlur(blurred, (5, 5), 1) + _, thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY) - cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, - cv2.CHAIN_APPROX_SIMPLE) - cnts = imutils.grab_contours(cnts) - boundRect = [None] * len(cnts) + if RETREIVAL_METHOD == cv2.RETR_EXTERNAL: + cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, + CONTOUR_METHOD) + cnts = imutils.grab_contours(cnts) + hierarchy = [] + else: + cnts, hierarchy = cv2.findContours(thresh, RETREIVAL_METHOD, CONTOUR_METHOD) + boundRect = [None] * len(cnts) contours_poly = [None] * len(cnts) - # centers = [None] * len(cnts) - # radius = [None] * len(cnts) biggest_area = 0 box_with_biggest_area = 0 for i, c in enumerate(cnts): - contours_poly[i] = cv2.approxPolyDP(c, 3, True) + if box: + epsilon = 3 + else: + epsilon = percent_arc_length * cv2.arcLength(c, True) + + contours_poly[i] = cv2.approxPolyDP(c, epsilon, True) boundRect[i] = cv2.boundingRect(contours_poly[i]) - area = boundRect[i][2]*boundRect[i][3] + area = boundRect[i][2] * boundRect[i][3] if area > biggest_area: box_with_biggest_area = i biggest_area = area + if RETREIVAL_METHOD == cv2.RETR_EXTERNAL: + return cnts, boundRect, contours_poly, box_with_biggest_area + elif len(hierarchy) is not 0: + return cnts, hierarchy, boundRect, contours_poly, box_with_biggest_area + else: + return cnts, boundRect, contours_poly, box_with_biggest_area - # centers[i], radius[i] = cv2.minEnclosingCircle(contours_poly[i]) - - # for cases with blue boxes on overview drop location, this messes up. - # 1. try to take last contour - # 2. 
take largest area of bounding box - - # draw the contours - # c = c.astype("float") - # c = c.astype("int") - # cv2.drawContours(overview_dl, [c], -1, (0, 255, 0), 2) - - # cv2.rectangle(overview_dl, (int(boundRect[i][0]), int(boundRect[i][1])), - # (int(boundRect[i][0] + boundRect[i][2]), int(boundRect[i][1] + boundRect[i][3])), (255, 0, 0), - # thickness=-1) - rows, cols, _ = zoom.shape +def get_drop_location_box(overview_dl, mask_color_min, mask_color_max): + _, boundRect, _, box_with_biggest_area = find_contours(overview_dl, + mask_color_min=mask_color_min, + mask_color_max=mask_color_max, blur_iterations=0, box=True, + gradient=True) max_b = box_with_biggest_area - b_x, b_y, b_w, b_h = int(boundRect[max_b][0]), int(boundRect[max_b][1]), int(boundRect[max_b][2]), int(boundRect[max_b][3]) + b_x, b_y, b_w, b_h = int(boundRect[max_b][0]), int(boundRect[max_b][1]), int(boundRect[max_b][2]), int( + boundRect[max_b][3]) + + xoffset = -3 + yoffset = -3 + woffset = 3 + hoffset = 3 + b_x, b_y, b_w, b_h = b_x + xoffset, b_y + yoffset, b_w - xoffset + woffset, b_h - yoffset + hoffset + if b_w <= 500 and b_h <= 500: + # print(b_x, b_y, b_w, b_h) + return b_x, b_y, b_w, b_h, True + + # cv2.rectangle(overview_dl, (b_x, b_y), (b_x + b_w, b_y + b_h), (255, 0, 0), 3) + # cv2.imshow('dl', overview_dl) + # cv2.imshow('dl', overview_dl) + # cv2.waitKey(0) + # cv2.destroyAllWindows() + else: + # make these half x or half y whichever is bigger (above 500) + return b_x, b_y, b_w, b_h, False - # width of box found = new x width for zoom picture - xscale = b_w - # height of box found = new y height for zoom picture - yscale = b_h - xscale = max(xscale,yscale) - yscale = max(xscale,yscale) +def overlay_images(overview_dl_fh, overview_ef_fh, zoom_fh, output_fh, circle=True, box=False, convex=False): + ### This is the main function of the script - if (xscale, yscale) > (0, 0): - drop = cv2.resize(zoom, (xscale, yscale), interpolation=cv2.INTER_AREA) - else: - drop = cv2.resize(zoom, (cols//2, rows//2), interpolation=cv2.INTER_AREA) + overview_dl = cv2.imread(overview_dl_fh) + zoom = cv2.imread(zoom_fh) + overview_ef = cv2.imread(overview_ef_fh) + + b_x, b_y, b_w, b_h, img_is_normal_sized = get_drop_location_box(overview_dl, np.array([0, 2, 57]), + np.array([69, 92, 255])) + if img_is_normal_sized: + if circle or convex: + zoom_grey = cv2.cvtColor(zoom, cv2.COLOR_RGB2GRAY) + _, zoom_grey = cv2.threshold(zoom_grey, 175, 255, cv2.THRESH_BINARY_INV) + zoom_blur_grey = cv2.GaussianBlur(zoom_grey, (5, 5), 0) + _, zoom_sharp_grey = cv2.threshold(zoom_blur_grey, 0, 255, cv2.THRESH_BINARY_INV) + + edges = cv2.Canny(zoom_sharp_grey, 0, 255) + cnts, hierarchy, boundRect, contours_poly_zoom, box_with_biggest_area = find_contours(edges, + mask_color=False, + percent_arc_length=0.01, + bilateral=False, + CONTOUR_METHOD=cv2.CHAIN_APPROX_NONE, + RETREIVAL_METHOD=cv2.RETR_TREE) + + hull = [] + for c in cnts: + hull.append(cv2.convexHull(c, False)) + + # create blank image for masking drop image + b_w_mask = np.zeros((zoom_grey.shape[0], zoom_grey.shape[1]), np.uint8) + zoom_x, zoom_y, _ = zoom.shape + center_zoom_x, center_zoom_y = (zoom_x // 2, zoom_y // 2) + + if circle: + radius_zoom = max(center_zoom_x, center_zoom_y) + (x, y), radius = cv2.minEnclosingCircle(cnts[0]) + center = (int(x), int(y)) + radius = int(radius) + best_circle = 0 + best_circle_center = center + best_circle_radius = radius + for i in range(len(cnts)): + # cv2.drawContours(b_w_mask, cnts, i, color_contours, 1, 8, hierarchy) + if convex: + 
cv2.drawContours(b_w_mask, hull, i, white, -1, 8) + elif circle: + (x, y), radius = cv2.minEnclosingCircle(cnts[i]) + center = (int(x), int(y)) + radius = int(radius) + if radius_zoom - 50 < radius < radius_zoom: + best_circle_radius = radius + best_circle_center = center + + if convex: + cnts_mask, hierarchy_mask, _, _, _ = find_contours(b_w_mask, mask_color=False, percent_arc_length=1, + RETREIVAL_METHOD=cv2.RETR_TREE, + bilateral=False, gradient=True, blur_iterations=20) + hull = [] + for c in cnts_mask: + hull.append(cv2.convexHull(c, False)) + + b_w_mask_2 = np.zeros((zoom_grey.shape[0], zoom_grey.shape[1]), np.uint8) + + for i in range(len(cnts_mask)): + cv2.drawContours(b_w_mask_2, hull, i, white, -1, 8) + + overview_ef = align_drop_to_box_opencv(b_x, b_y, b_w, b_h, zoom, overview_ef, b_w_mask_2) + + elif circle: # circle + circle_mask = np.zeros((zoom_grey.shape[0], zoom_grey.shape[1]), np.uint8) + cv2.circle(circle_mask, best_circle_center, best_circle_radius, white, -1) + overview_ef = align_drop_to_box_opencv(b_x, b_y, b_w, b_h, zoom, overview_ef, circle_mask) + + + elif box or not img_is_normal_sized: + overview_ef = align_drop_to_box_opencv(b_x, b_y, b_w, b_h, zoom, overview_ef) - if (drop.shape[0]+b_y,drop.shape[1]+b_x) <= (overview_ef.shape[0], overview_ef.shape[1]): - overview_ef[b_y:b_y + drop.shape[0], b_x:b_x + drop.shape[1]] = drop cv2.imwrite(output_fh, overview_ef) return overview_ef -def red_box_subt(w_box_fh, scaling_factor): - ### Funciton to read in overview image with red drop location box and convert to grey scale image of red box signal - ### Open images - # ef_open = Im.open(wo_box_fh) - dl_open = Im.open(w_box_fh) - - ### Check and get size - # assert ef_open.size == dl_open.size - dl_im_width, dl_im_height = dl_open.size - search_w = int(dl_im_width / scaling_factor) - search_h = int(dl_im_height / scaling_factor) - # print(search_w, search_h) - - ### Resize image for speed - dl_thumb = dl_open.resize((search_w, search_h), resample=Im.BICUBIC) - # ef_thumb = ef_open.resize((search_w,search_h),resample=Im.BICUBIC) - - ### Create new image object - new_dl_box_img = Im.new("L", (search_w, search_h)) - new_pixels = new_dl_box_img.load() - - ### Transform to custom greyscale and subtract - threshold_val = 50 - # print("time_to_start_loop: %s"%(time.time()-t0)) - for i in range(0, search_w): - for j in range(0, search_h): - ### Get pixel are recalculate signal for red box as greyscale - # pixel_ef = ef_thumb.getpixel((i, j)) - pixel_dl = dl_thumb.getpixel((i, j)) - - ### This is an old way of calculating the signal - # average_ef_bkgd = np.average([pixel_ef[1],pixel_ef[2]]) - # average_dl_bkgd = np.average([pixel_dl[1],pixel_dl[2]]) - # complex_r = np.round(max(pixel_dl[0]-average_dl_bkgd-(pixel_ef[0]-average_ef_bkgd),0)) - - complex_r = pixel_dl[0] - (pixel_dl[1] + pixel_dl[2]) / 2 - - ### This is an old way of calculating the signal - # complex_r = max(int(np.round((pixel_dl[0]-pixel_ef[0]+pixel_ef[1]-pixel_dl[1]+pixel_ef[2]-pixel_dl[2])/4.)),0) - # complex_r = min(255,np.round((pixel_dl[0]/255.)*(abs(pixel_ef[1]-pixel_dl[0])+abs(pixel_dl[1]-pixel_ef[1])+abs(pixel_dl[2]-pixel_ef[2])-50))) - - ### Threshold the new pixel value (complex_r) - if complex_r < threshold_val: - complex_r = 0 - ### Set pixel value in new image - new_pixels[i, j] = (int(complex_r)) - # new_dl_box_img.show() - dl_open.close() - ### return new image with calculated pixel values - return new_dl_box_img +# +# +# def red_box_subt(w_box_fh, scaling_factor): +# ### Funciton to read in overview 
image with red drop location box and convert to grey scale image of red box signal +# ### Open images +# # ef_open = Im.open(wo_box_fh) +# dl_open = Im.open(w_box_fh) +# +# ### Check and get size +# # assert ef_open.size == dl_open.size +# dl_im_width, dl_im_height = dl_open.size +# search_w = int(dl_im_width / scaling_factor) +# search_h = int(dl_im_height / scaling_factor) +# # print(search_w, search_h) +# +# ### Resize image for speed +# dl_thumb = dl_open.resize((search_w, search_h), resample=Im.BICUBIC) +# # ef_thumb = ef_open.resize((search_w,search_h),resample=Im.BICUBIC) +# +# ### Create new image object +# new_dl_box_img = Im.new("L", (search_w, search_h)) +# new_pixels = new_dl_box_img.load() +# +# ### Transform to custom greyscale and subtract +# threshold_val = 50 +# # print("time_to_start_loop: %s"%(time.time()-t0)) +# for i in range(0, search_w): +# for j in range(0, search_h): +# ### Get pixel are recalculate signal for red box as greyscale +# # pixel_ef = ef_thumb.getpixel((i, j)) +# pixel_dl = dl_thumb.getpixel((i, j)) +# +# ### This is an old way of calculating the signal +# # average_ef_bkgd = np.average([pixel_ef[1],pixel_ef[2]]) +# # average_dl_bkgd = np.average([pixel_dl[1],pixel_dl[2]]) +# # complex_r = np.round(max(pixel_dl[0]-average_dl_bkgd-(pixel_ef[0]-average_ef_bkgd),0)) +# +# complex_r = pixel_dl[0] - (pixel_dl[1] + pixel_dl[2]) / 2 +# +# ### This is an old way of calculating the signal +# # complex_r = max(int(np.round((pixel_dl[0]-pixel_ef[0]+pixel_ef[1]-pixel_dl[1]+pixel_ef[2]-pixel_dl[2])/4.)),0) +# # complex_r = min(255,np.round((pixel_dl[0]/255.)*(abs(pixel_ef[1]-pixel_dl[0])+abs(pixel_dl[1]-pixel_ef[1])+abs(pixel_dl[2]-pixel_ef[2])-50))) +# +# ### Threshold the new pixel value (complex_r) +# if complex_r < threshold_val: +# complex_r = 0 +# ### Set pixel value in new image +# new_pixels[i, j] = (int(complex_r)) +# # new_dl_box_img.show() +# dl_open.close() +# ### return new image with calculated pixel values +# return new_dl_box_img def find_bounding_box(box_open, scaling_factor): From 2d17bf903773a5203e822440802806c6c1ba5a59 Mon Sep 17 00:00:00 2001 From: Liam McKay Date: Mon, 30 Mar 2020 15:11:55 -0700 Subject: [PATCH 8/9] readme, re-organization, testing, requirements --- bounding_box_overlay_2.py | 352 ++++++++++++++++---------------------- echo_pregui_run.py | 18 +- organizeImages.py | 2 - pregui_img_analysis_3.py | 9 +- readme.md | 88 ++++++---- requirements.txt | 24 +-- transfer_imgs_1.py | 82 +++++---- 7 files changed, 280 insertions(+), 295 deletions(-) diff --git a/bounding_box_overlay_2.py b/bounding_box_overlay_2.py index 9fcd0b5..48826dc 100644 --- a/bounding_box_overlay_2.py +++ b/bounding_box_overlay_2.py @@ -1,17 +1,13 @@ import subprocess import imutils -from PIL import Image as Im import numpy as np import time, sys import glob import os from cv2 import cv2 - -import organizeImages as oI from tqdm import tqdm - from Plate import Plate plate = Plate(r=8, c=12, subwell_num=1) # don't need to worry about subwell (that's specified in img path) @@ -34,16 +30,14 @@ def save_overview_img(original_fp, imageDirectory): return well_id + "_" + subwell_number -def align_drop_to_box_opencv(b_x, b_y, b_w, b_h, zoom, overview_ef, b_w_mask_2=None): - if b_w_mask_2 is None: - b_w_mask_2 = cv2.bitwise_not(np.zeros((zoom.shape[0], zoom.shape[1]), np.uint8)) # make white box zoom size +def align_drop_to_overview(b_x, b_y, b_w, b_h, zoom, overview_ef, black_white_mask_2=None): + if black_white_mask_2 is None: + black_white_mask_2 = cv2.bitwise_not( + 
np.zeros((zoom.shape[0], zoom.shape[1]), np.uint8)) # make white box zoom size box = [b_x, b_y, b_w, b_h] drop_ratio = zoom.shape[0] / float(zoom.shape[1]) - # print("drop_ratio: {}".format(drop_ratio)) - # print("box: {}".format(box)) box_ratio = b_w / float(b_h) - # print("box_ratio: {}".format(box_ratio)) ### The calcualtion for the alignment of the images is different depending on the ratio of the aspect ratios if drop_ratio <= box_ratio: @@ -67,23 +61,18 @@ def align_drop_to_box_opencv(b_x, b_y, b_w, b_h, zoom, overview_ef, b_w_mask_2=N rows, cols, _ = zoom.shape - # xscale = b_w - # # height of box found = new y height for zoom picture - # yscale = b_h - xscale = new_w yscale = new_h + # resize drop image and its mask (mask- for convex or circle overlay) if (xscale, yscale) > (0, 0): drop = cv2.resize(zoom, (xscale, yscale), interpolation=cv2.INTER_AREA) - drop_mask = cv2.resize(b_w_mask_2, (xscale, yscale), interpolation=cv2.INTER_AREA) + drop_mask = cv2.resize(black_white_mask_2, (xscale, yscale), interpolation=cv2.INTER_AREA) else: drop = cv2.resize(zoom, (cols // 2, rows // 2), interpolation=cv2.INTER_AREA) - drop_mask = cv2.resize(b_w_mask_2, (cols // 2, rows // 2), interpolation=cv2.INTER_AREA) - - # print(new_x,new_y,new_w,new_h) - # print(overview_ef.shape) + drop_mask = cv2.resize(black_white_mask_2, (cols // 2, rows // 2), interpolation=cv2.INTER_AREA) + # if drop is a reasonable size if (drop.shape[0] + new_y, drop.shape[1] + new_x) <= (overview_ef.shape[0], overview_ef.shape[1]): drop_grey = cv2.cvtColor(drop, cv2.COLOR_BGR2GRAY) if len(overview_ef.shape) == 3: @@ -103,60 +92,61 @@ def align_drop_to_box_opencv(b_x, b_y, b_w, b_h, zoom, overview_ef, b_w_mask_2=N # Now black-out the area of drop in overview_img img1_bg = cv2.bitwise_and(overview_ef_grey, overview_ef_grey, mask=mask_inv) + # Take only region of logo from logo image. 
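+        # note: both inputs were converted to greyscale above, so the composited
+        # overlay returned here is a single-channel image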
img2_fg = cv2.bitwise_and(zoom_overview_size, zoom_overview_size, mask=overview_mask) # Put logo in ROI and modify the main image overlay = cv2.add(img1_bg, img2_fg) - # print('overlayed') return overlay else: + print("not overlaying an image, drop location is the entire well (not accurate)") return overview_ef -def find_contours(image, mask_color=True, mask_color_min=None, mask_color_max=None, percent_arc_length=0.1, - bilateral=False, CONTOUR_METHOD=cv2.CHAIN_APPROX_SIMPLE, RETREIVAL_METHOD=cv2.RETR_EXTERNAL, - gradient=True, blur_iterations=1, box=False): +def find_image_features(image, mask_color=True, mask_color_min=None, mask_color_max=None, percent_arc_length=0.1, + bilateral=False, CONTOUR_METHOD=cv2.CHAIN_APPROX_SIMPLE, RETREIVAL_METHOD=cv2.RETR_EXTERNAL, + blur_image=True, blur_iterations=1, box=False): if mask_color_max is None: mask_color_max = np.array([255, 255, 255]) if mask_color_min is None: mask_color_min = np.array([0, 0, 0]) if mask_color: - # mask = cv2.inRange(image, np.array([0, 2, 57]), np.array([69, 92, 255])) + # mask = low red [0, 2, 57], light red [69, 92, 255] mask = cv2.inRange(image, mask_color_min, mask_color_max) output = cv2.bitwise_and(image, image, mask=mask) else: output = image - _, output = cv2.threshold(output, 100, 255, cv2.THRESH_BINARY) + _, thresh = cv2.threshold(output, 100, 255, cv2.THRESH_BINARY) if len(image.shape) == 3: - grey = cv2.cvtColor(output, cv2.COLOR_RGB2GRAY) + grey_thresh = cv2.cvtColor(thresh, cv2.COLOR_RGB2GRAY) else: - grey = output + grey_thresh = thresh - if gradient: + if blur_image: if bilateral: - blurred = cv2.bilateralFilter(grey, 75, 1, 1) + blurred = cv2.bilateralFilter(grey_thresh, 75, 1, 1) else: - blurred = cv2.GaussianBlur(grey, (5, 5), 1) + blurred = cv2.GaussianBlur(grey_thresh, (5, 5), 1) else: - blurred = grey + blurred = grey_thresh if blur_iterations > 1: for i in range(blur_iterations): blurred = cv2.GaussianBlur(blurred, (5, 5), 1) - _, thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY) + _, thresh_blur_grey_thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY) if RETREIVAL_METHOD == cv2.RETR_EXTERNAL: - cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, + cnts = cv2.findContours(thresh_blur_grey_thresh, cv2.RETR_EXTERNAL, CONTOUR_METHOD) cnts = imutils.grab_contours(cnts) hierarchy = [] else: - cnts, hierarchy = cv2.findContours(thresh, RETREIVAL_METHOD, CONTOUR_METHOD) + cnts, hierarchy = cv2.findContours(thresh_blur_grey_thresh, RETREIVAL_METHOD, CONTOUR_METHOD) boundRect = [None] * len(cnts) contours_poly = [None] * len(cnts) @@ -182,217 +172,166 @@ def find_contours(image, mask_color=True, mask_color_min=None, mask_color_max=No return cnts, boundRect, contours_poly, box_with_biggest_area -def get_drop_location_box(overview_dl, mask_color_min, mask_color_max): - _, boundRect, _, box_with_biggest_area = find_contours(overview_dl, - mask_color_min=mask_color_min, - mask_color_max=mask_color_max, blur_iterations=0, box=True, - gradient=True) +def get_drop_location_box(overview_dl, mask_color_min, mask_color_max, debug=False): + _, boundRect, _, box_with_biggest_area = find_image_features(overview_dl, + mask_color_min=mask_color_min, + mask_color_max=mask_color_max, blur_iterations=0, + box=True, + blur_image=True) max_b = box_with_biggest_area b_x, b_y, b_w, b_h = int(boundRect[max_b][0]), int(boundRect[max_b][1]), int(boundRect[max_b][2]), int( boundRect[max_b][3]) - xoffset = -3 - yoffset = -3 - woffset = 3 - hoffset = 3 + xoffset = 0 + yoffset = 0 + woffset = 6 + hoffset = 6 b_x, b_y, b_w, 
b_h = b_x + xoffset, b_y + yoffset, b_w - xoffset + woffset, b_h - yoffset + hoffset if b_w <= 500 and b_h <= 500: - # print(b_x, b_y, b_w, b_h) + if debug: + cv2.rectangle(overview_dl, (b_x, b_y), (b_x + b_w, b_y + b_h), (255, 0, 0), 3) + cv2.imshow('dl', overview_dl) + cv2.imshow('dl', overview_dl) + cv2.waitKey(0) + cv2.destroyAllWindows() return b_x, b_y, b_w, b_h, True - - # cv2.rectangle(overview_dl, (b_x, b_y), (b_x + b_w, b_y + b_h), (255, 0, 0), 3) - # cv2.imshow('dl', overview_dl) - # cv2.imshow('dl', overview_dl) - # cv2.waitKey(0) - # cv2.destroyAllWindows() else: # make these half x or half y whichever is bigger (above 500) return b_x, b_y, b_w, b_h, False -def overlay_images(overview_dl_fh, overview_ef_fh, zoom_fh, output_fh, circle=True, box=False, convex=False): +def find_biggest_contour(image, contours, max_area=None, min_area=100 ** 2, max_border=(100, 100, 100, 100)): + if max_area is None: + max_area = max(image.shape) ** 2 + + ''' Contour searching algorithm ''' + # find initial contour data + M = cv2.moments(contours[-1]) + area = cv2.contourArea(contours[-1]) + cx = int(M['m10'] / M['m00']) + cy = int(M['m01'] / M['m00']) + image_x, image_y, _ = image.shape + center_image_x, center_image_y = (image_x // 2, image_y // 2) + best_area = area + best_center = (cx, cy) + best_contour = contours[-1] + border_x_min = max_border[0] + border_x_max = image_x + max_border[2] + border_y_min = max_border[1] + border_y_max = image_y + max_border[3] + for i in range(len(contours)): + # find area of contour + area = cv2.contourArea(contours[i]) + + # find moments of contour + M = cv2.moments(contours[i]) + + # find center of mass of contour + cx = int(M['m10'] / M['m00']) + cy = int(M['m01'] / M['m00']) + center = (cx, cy) + + if min_area < area < max_area: + if border_x_min < cx < border_x_max and border_y_min < cy < border_y_max: + best_area = area + best_center = center + best_contour = contours[i] + + return best_contour + + +def overlay_images(overview_dl_fh, overview_ef_fh, zoom_fh, output_fh, circle=False, box=True, convex=False, + debug=False): ### This is the main function of the script overview_dl = cv2.imread(overview_dl_fh) zoom = cv2.imread(zoom_fh) overview_ef = cv2.imread(overview_ef_fh) - b_x, b_y, b_w, b_h, img_is_normal_sized = get_drop_location_box(overview_dl, np.array([0, 2, 57]), - np.array([69, 92, 255])) + if debug: + cv2.imshow("dl, ef, zoom Press Any Key to Close", np.concatenate([overview_dl, overview_ef, zoom])) + cv2.imshow("dl, ef, zoom Press Any Key to Close", np.concatenate([overview_dl, overview_ef, zoom])) + cv2.waitKey(0) + + dark_red = np.array([0, 2, 57]) + light_red = np.array([69, 92, 255]) + b_x, b_y, b_w, b_h, img_is_normal_sized = get_drop_location_box(overview_dl, dark_red, light_red, debug=debug) + if img_is_normal_sized: if circle or convex: + # convert drop image to grey image zoom_grey = cv2.cvtColor(zoom, cv2.COLOR_RGB2GRAY) + + # invert image make dark pixels brighter (edge of drop) _, zoom_grey = cv2.threshold(zoom_grey, 175, 255, cv2.THRESH_BINARY_INV) + + # blur the image (fill gaps, reduce noise) zoom_blur_grey = cv2.GaussianBlur(zoom_grey, (5, 5), 0) + + # (make mask) high contrast dark black and bright white color _, zoom_sharp_grey = cv2.threshold(zoom_blur_grey, 0, 255, cv2.THRESH_BINARY_INV) + # find edges of mask edges = cv2.Canny(zoom_sharp_grey, 0, 255) - cnts, hierarchy, boundRect, contours_poly_zoom, box_with_biggest_area = find_contours(edges, - mask_color=False, - percent_arc_length=0.01, - bilateral=False, - 
CONTOUR_METHOD=cv2.CHAIN_APPROX_NONE, - RETREIVAL_METHOD=cv2.RETR_TREE) - hull = [] - for c in cnts: - hull.append(cv2.convexHull(c, False)) + # find contour points of mask + cnts, hierarchy, _, _, _ = find_image_features(edges, mask_color=False, percent_arc_length=0.01, + bilateral=False, + CONTOUR_METHOD=cv2.CHAIN_APPROX_NONE, + RETREIVAL_METHOD=cv2.RETR_TREE) # create blank image for masking drop image - b_w_mask = np.zeros((zoom_grey.shape[0], zoom_grey.shape[1]), np.uint8) - zoom_x, zoom_y, _ = zoom.shape - center_zoom_x, center_zoom_y = (zoom_x // 2, zoom_y // 2) + black_white_mask = np.zeros((zoom_grey.shape[0], zoom_grey.shape[1]), np.uint8) if circle: - radius_zoom = max(center_zoom_x, center_zoom_y) - (x, y), radius = cv2.minEnclosingCircle(cnts[0]) - center = (int(x), int(y)) - radius = int(radius) - best_circle = 0 - best_circle_center = center - best_circle_radius = radius - for i in range(len(cnts)): - # cv2.drawContours(b_w_mask, cnts, i, color_contours, 1, 8, hierarchy) - if convex: - cv2.drawContours(b_w_mask, hull, i, white, -1, 8) - elif circle: - (x, y), radius = cv2.minEnclosingCircle(cnts[i]) - center = (int(x), int(y)) - radius = int(radius) - if radius_zoom - 50 < radius < radius_zoom: - best_circle_radius = radius - best_circle_center = center - - if convex: - cnts_mask, hierarchy_mask, _, _, _ = find_contours(b_w_mask, mask_color=False, percent_arc_length=1, - RETREIVAL_METHOD=cv2.RETR_TREE, - bilateral=False, gradient=True, blur_iterations=20) + cnt = find_biggest_contour(image=zoom, contours=cnts, max_area=zoom.shape[0] * zoom.shape[1]) + (circle_x, circle_y), radius = cv2.minEnclosingCircle(cnt) + (circle_x, circle_y, radius) = (int(circle_x), int(circle_y), int(radius)) + circle_mask = np.zeros((zoom_grey.shape[0], zoom_grey.shape[1]), np.uint8) + cv2.circle(circle_mask, (circle_x, circle_y), radius, white, -1) + overview_ef = align_drop_to_overview(b_x, b_y, b_w, b_h, zoom, overview_ef, circle_mask) + + elif convex: + # make convex shapes that fit biggest contour point set hull = [] - for c in cnts_mask: - hull.append(cv2.convexHull(c, False)) + for i in range(len(cnts)): + hull.append(cv2.convexHull(cnts[i], False)) - b_w_mask_2 = np.zeros((zoom_grey.shape[0], zoom_grey.shape[1]), np.uint8) + # draw them on a mask + for i in range(len(hull)): + cv2.drawContours(black_white_mask, hull, i, white, -1, 8) - for i in range(len(cnts_mask)): - cv2.drawContours(b_w_mask_2, hull, i, white, -1, 8) + # find contours of that mask (adds some smoothing) + cnts_mask, hierarchy_mask, _, _, _ = find_image_features(black_white_mask, mask_color=False, + percent_arc_length=3, + RETREIVAL_METHOD=cv2.RETR_TREE, + bilateral=False, blur_image=True, + blur_iterations=30) - overview_ef = align_drop_to_box_opencv(b_x, b_y, b_w, b_h, zoom, overview_ef, b_w_mask_2) + # create final convex shape mask + black_white_mask_2 = np.zeros((zoom_grey.shape[0], zoom_grey.shape[1]), np.uint8) - elif circle: # circle - circle_mask = np.zeros((zoom_grey.shape[0], zoom_grey.shape[1]), np.uint8) - cv2.circle(circle_mask, best_circle_center, best_circle_radius, white, -1) - overview_ef = align_drop_to_box_opencv(b_x, b_y, b_w, b_h, zoom, overview_ef, circle_mask) + # make convex shapes that fit biggest contour point set + hull = [] + for i in range(len(cnts_mask)): + hull.append(cv2.convexHull(cnts_mask[i], False)) + # draw them on a mask + for i in range(len(hull)): + cv2.drawContours(black_white_mask_2, hull, i, white, -1, 8) - elif box or not img_is_normal_sized: - overview_ef = 
align_drop_to_box_opencv(b_x, b_y, b_w, b_h, zoom, overview_ef) + overview_ef = align_drop_to_overview(b_x, b_y, b_w, b_h, zoom, overview_ef, black_white_mask_2) + elif box: + overview_ef = align_drop_to_overview(b_x, b_y, b_w, b_h, zoom, overview_ef) + else: + overview_ef = overview_ef cv2.imwrite(output_fh, overview_ef) return overview_ef -# -# -# def red_box_subt(w_box_fh, scaling_factor): -# ### Funciton to read in overview image with red drop location box and convert to grey scale image of red box signal -# ### Open images -# # ef_open = Im.open(wo_box_fh) -# dl_open = Im.open(w_box_fh) -# -# ### Check and get size -# # assert ef_open.size == dl_open.size -# dl_im_width, dl_im_height = dl_open.size -# search_w = int(dl_im_width / scaling_factor) -# search_h = int(dl_im_height / scaling_factor) -# # print(search_w, search_h) -# -# ### Resize image for speed -# dl_thumb = dl_open.resize((search_w, search_h), resample=Im.BICUBIC) -# # ef_thumb = ef_open.resize((search_w,search_h),resample=Im.BICUBIC) -# -# ### Create new image object -# new_dl_box_img = Im.new("L", (search_w, search_h)) -# new_pixels = new_dl_box_img.load() -# -# ### Transform to custom greyscale and subtract -# threshold_val = 50 -# # print("time_to_start_loop: %s"%(time.time()-t0)) -# for i in range(0, search_w): -# for j in range(0, search_h): -# ### Get pixel are recalculate signal for red box as greyscale -# # pixel_ef = ef_thumb.getpixel((i, j)) -# pixel_dl = dl_thumb.getpixel((i, j)) -# -# ### This is an old way of calculating the signal -# # average_ef_bkgd = np.average([pixel_ef[1],pixel_ef[2]]) -# # average_dl_bkgd = np.average([pixel_dl[1],pixel_dl[2]]) -# # complex_r = np.round(max(pixel_dl[0]-average_dl_bkgd-(pixel_ef[0]-average_ef_bkgd),0)) -# -# complex_r = pixel_dl[0] - (pixel_dl[1] + pixel_dl[2]) / 2 -# -# ### This is an old way of calculating the signal -# # complex_r = max(int(np.round((pixel_dl[0]-pixel_ef[0]+pixel_ef[1]-pixel_dl[1]+pixel_ef[2]-pixel_dl[2])/4.)),0) -# # complex_r = min(255,np.round((pixel_dl[0]/255.)*(abs(pixel_ef[1]-pixel_dl[0])+abs(pixel_dl[1]-pixel_ef[1])+abs(pixel_dl[2]-pixel_ef[2])-50))) -# -# ### Threshold the new pixel value (complex_r) -# if complex_r < threshold_val: -# complex_r = 0 -# ### Set pixel value in new image -# new_pixels[i, j] = (int(complex_r)) -# # new_dl_box_img.show() -# dl_open.close() -# ### return new image with calculated pixel values -# return new_dl_box_img - - -def find_bounding_box(box_open, scaling_factor): - ### Function finds the oringal size of the red box signal - x0, y0, x1, y1 = box_open.getbbox() - - return (x0 * scaling_factor, y0 * scaling_factor, scaling_factor * (x1 - x0), scaling_factor * (y1 - y0)) - - -def align_drop_to_box(over_ef, drop_fh, box): - ### This funciton figures out the correct alignment of the drop to the overview and overlays the images - - ### open the image and compare aspect ratios of the drop location to the drop image - drop_open = Im.open(drop_fh) - # print("drop_f_size: {}".format(drop_open.size)) - drop_ratio = drop_open.size[0] / float(drop_open.size[1]) - ##=print("drop_ratio: {}".format(drop_ratio)) - # print("box: {}".format(box)) - box_ratio = box[2] / float(box[3]) - # print("box_ratio: {}".format(box_ratio)) - - ### The calcualtion for the alignment of the images is different depending on the ratio of the aspect ratios - if drop_ratio <= box_ratio: - ### X-axis based scaling - ### resize the drop image and calculate the alignemnt - resize_ratio = box[2] / float(drop_open.size[0]) - new_w = 
int(np.round(drop_open.size[0] * resize_ratio)) - new_h = int(np.round(drop_open.size[1] * resize_ratio)) - drop_resized = drop_open.resize((new_w, new_h)) - new_x = box[0] - new_y = int(np.round(((box[3] - new_h) / 2) + box[1])) - else: - ### Y-axis based scaling - ### resize the drop image and calculate the alignemnt - resize_ratio = box[3] / float(drop_open.size[1]) - new_w = int(np.round(drop_open.size[0] * resize_ratio)) - new_h = int(np.round(drop_open.size[1] * resize_ratio)) - drop_resized = drop_open.resize((new_w, new_h)) - new_x = int(np.round(((box[2] - new_w) / 2) + box[0])) - new_y = box[1] - - ### open overview image and do the overlay - overview_open = Im.open(over_ef) - overview_open.paste(drop_resized, box=(new_x, new_y)) - # overview_open.show() - return overview_open - - -def run(imageDirectory): +def run(imageDirectory, circle=False, box=True, convex=False, debug=False): if not os.path.exists(imageDirectory): # case 2: directory doesn't exist print("Error: cannot find directory " + imageDirectory) else: @@ -400,10 +339,11 @@ def run(imageDirectory): os.mkdir(os.path.join(imageDirectory, "overlayed")) print("overlaying images.\n") completedWells = 0 - - for i in tqdm(range(1, 97)): + well_folders = glob.glob(os.path.join(imageDirectory, 'organizedWells', 'wellNum_*')) + for i in tqdm(range(1, len(well_folders) + 1)): filepaths = sorted( - glob.glob(os.path.join(imageDirectory, 'organizedWells', 'wellNum_' + str(i), '*'))) # find all images + glob.glob(os.path.join(imageDirectory, 'organizedWells', 'wellNum_' + str(i), + '*'))) # find all images in this well if len(filepaths) % 3 == 0: for j in range(0, len(filepaths), 3): zoom_ef_fh = filepaths[0 + j] @@ -414,8 +354,10 @@ def run(imageDirectory): well_name = save_overview_img(ef_fh, imageDirectory) output_fp = os.path.join(imageDirectory, "overlayed", well_name + ".jpg") + try: - overlayed_img = overlay_images(dl_fh, ef_fh, zoom_ef_fh, output_fp) + overlayed_img = overlay_images(dl_fh, ef_fh, zoom_ef_fh, output_fp, circle=circle, box=box, + convex=convex, debug=debug) completedWells += 1 except TypeError: try: diff --git a/echo_pregui_run.py b/echo_pregui_run.py index 268d9a3..307c119 100644 --- a/echo_pregui_run.py +++ b/echo_pregui_run.py @@ -1,4 +1,5 @@ import argparse +import os from transfer_imgs_1 import run as transfer_imgs from bounding_box_overlay_2 import run as bounding_box_overlay @@ -13,6 +14,14 @@ def argparse_reader_main(): parser.add_argument('-dir', '--output_plate_folder', type=str, help='Output folder for images and json', required=True) parser.add_argument('-temp', '--plate_temp', type=int, help='Temperature of plate stored at', required=True) + parser.add_argument('-box', '--box_overlay', action='store_true', default=True, + help='Fits original drop images to drop location') + parser.add_argument('-convex', '--convex_overlay', action='store_true', default=False, + help='Fits cookie cutter of original drop images to drop location') + parser.add_argument('-circle', '--circle_overlay', action='store_true', default=False, + help='Fits hole-punch circle of original drop images to drop location') + parser.add_argument('-debug', '--debug', action='store_true', default=False, + help='Show images during process') return parser @@ -24,11 +33,18 @@ def main(): temperature = args.plate_temp rock_drive_ip = "169.230.29.134" + if not os.path.exists(output_dir): + os.mkdir(output_dir) + for plateID in plateID_list: + output_dir = os.path.join(args.output_plate_folder,str(plateID)) transfer_imgs(plateID, 
output_dir, rock_drive_ip) + + for plateID in plateID_list: + output_dir = os.path.join(args.output_plate_folder,str(plateID)) organizeImages(output_dir) rename_overview_images_well_id(output_dir) - bounding_box_overlay(output_dir) + bounding_box_overlay(output_dir, box=args.box_overlay, circle=args.circle_overlay, convex=args.convex_overlay, debug=args.debug) img_well_dict = get_dict_image_to_well(output_dir) createJson(plate_dir=output_dir, plate_id=plateID, plate_temperature=temperature, dict_image_path_subwells=img_well_dict) diff --git a/organizeImages.py b/organizeImages.py index 9db99f1..b448710 100644 --- a/organizeImages.py +++ b/organizeImages.py @@ -8,8 +8,6 @@ def rename_overview_images_well_id(imageDirectory): - # prereq: have run delete drop images first - image_path_list = sorted(glob.glob(os.path.join(imageDirectory, "overview", "*", "*")), key=lambda x: x[-18:]) # if (len(image_path_list)) % 96 == 0: # if divisible by 96, make a 96 well plate diff --git a/pregui_img_analysis_3.py b/pregui_img_analysis_3.py index 32475c3..2485d1b 100644 --- a/pregui_img_analysis_3.py +++ b/pregui_img_analysis_3.py @@ -4,12 +4,7 @@ import glob import numpy as np -from skimage.transform import hough_circle, hough_circle_peaks -from skimage.draw import circle_perimeter -from skimage.util import img_as_ubyte -from skimage import io import os -from classes_only import Plate from datetime import datetime import time as ti @@ -61,8 +56,8 @@ def process_found_circles(circles): def save_canny_save_fit(path, temp, debug=False): accum_d, cx_d, cy_d, radii_d, cx_w, cy_w, radii_w = [0, 0, 0, 0, 0, 0, 0] # initialize variables - zstack = io.imread(path) - image = img_as_ubyte(zstack[:, :, 0]) # finds the top x-y pixels in the z-stack + image = cv2.imread(path) + image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) ret, thresh = cv2.threshold(image, 30, 74, cv2.THRESH_TOZERO_INV) ret, thresh = cv2.threshold(thresh, 0, 255, cv2.THRESH_BINARY) # brightens grey to white TO ZERO threshold image edged = cv2.Canny(thresh, 101, 112) diff --git a/readme.md b/readme.md index 50721e7..8670a2c 100644 --- a/readme.md +++ b/readme.md @@ -1,46 +1,74 @@ -# Echo_pregui_script -`usage: echo_pregui_run.py [-h] -ids PLATEID [PLATEID ...] -dir - OUTPUT_PLATE_FOLDER -temp PLATE_TEMP` - - -## Help: - - `-h, --help` - - show this help message and exit - - `-ids PLATEID [PLATEID ...], --plateID PLATEID [PLATEID ...]` - - RockMaker Plate ID (4 or 5 digit code on barcode or - 2nd number on RockMaker screen experiment file - - `-dir OUTPUT_PLATE_FOLDER, --output_plate_folder OUTPUT_PLATE_FOLDER` - - Output folder for images and json - - `-temp PLATE_TEMP, --plate_temp PLATE_TEMP` - - Temperature of plate where it is stored at +# Echo Pregui Script +For local use with hitsDB web application. -#### SAMPLE OUTPUT +Stand-alone drop-image overlay and analysis tool for Formulatrix RockImager 1000 machine plate well images. -```▶ bash run.sh output_test 20 10962 +Uses OpenCV, Numpy, Pandas -xray@169.230.29.115's password: +## Installation +1. Make a virtual environment +2. `pip3 install -r requirements.txt` -selected IDs: 108228 105546 +## Note +When using `-convex` or `-circle` do not expect every image to be overlayed with a convex or circle drop image. +If it is not possible/reasonable to overlay a circle or convex shape, it will overlay a rectangular image. 
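As a rough illustration of the same step driven from Python instead of the CLI (the plate folder path below is hypothetical; any folder already populated by the transfer/organize steps works), a minimal sketch:

```
from bounding_box_overlay_2 import run as bounding_box_overlay

# hypothetical plate folder containing organizedWells/ from the earlier steps;
# overlays are written to <folder>/overlayed/. Wells where a convex fit is not
# reasonable fall back to the rectangular box overlay, as noted above.
bounding_box_overlay("images/10818", convex=True, box=True, circle=False, debug=False)
```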
-After entering password, should only take ~30 sec to download all images on UCSFwpa -xray@169.230.29.115's password: + + ## Example +Example command: `python3 echo_pregui_run.py -ids 10818 -temp 20 -convex -dir echo_pregui_example` + + ## Usage + Use -h flag for help on command-line arguments +`[-h] ` help +`-ids PLATEID [PLATEID ...] ` Rockimager plate ID (looks like 10930 or 9580) -organizing images. +`-dir OUTPUT_PLATE_FOLDER` Output location of analysis and images from this script -('output_test/organizedWells', 'already exists. continuing') +`-temp PLATE_TEMP +` Temperature plate is stored at (must be '4' or "20" - not 4c or 20c) -overlaying images. +`[-box] +` (default) Overlay full rectangular drop image onto overview image -() +`[-convex] +` Overlay a convex shaped cutout of the drop image onto the overview image + +`[-circle] +` Overlay a circular cutout of the drop image onto the overview image + +`[-debug]` Show images during finding the drop location bounding box + + +#### Sample Output + +``` +▶ python3 echo_pregui_run.py -ids 10818 -temp 20 -convex -dir images/march30_readme_outout + +rsync -nmav --include */ --exclude *_th.jpg --include *.jpg -e ssh xray@169.230.29.134:/volume1/RockMakerStorage/WellImages/818/plateID_10818/ images/march30_readme_outout/10818/ +xray@169.230.29.134's password: +batch IDs selected: droplocation, dropimg: 103969 106812 +drop image: 96 images +overview ef: 96 images +overview drop location: 96 images +100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 288/288 [00:00<00:00, 396702.64it/s] + +rsync -mav -P --files-from=images/march30_readme_outout/10818/files_to_transfer.txt -e ssh xray@169.230.29.134:/volume1/RockMakerStorage/WellImages/818/plateID_10818 images/march30_readme_outout/10818/ +xray@169.230.29.134's password: +Downloaded Files = 288 (should be 288 = 96*3) +organizing images. +overlaying images. -53%|█████▎ | 51/96 [00:09<00:08, 5.46it/s]^C + 75%|███████████████████████████████████████████████████████████████████████████████████████████████████▊ | 72/96 [00:10<00:03, 6.09it/s]not overlaying an image, drop location is the entire well (not accurate) +100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 96/96 [00:12<00:00, 7.75it/s] +Completed images (should be 96 for 96 well): 96 +overviewimgs = 96 +Finding pixel location of wells. 
+100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 96/96 [00:05<00:00, 18.82it/s] +created: /Users/liam_msg/Documents/echo_pregui_scripts/echo_pregui_scripts/images/march30_readme_outout/10818/imagesmarch30_readme_outout10818.json +wrote to json diff --git a/requirements.txt b/requirements.txt index 17e62a5..9e0e8f7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,15 +1,9 @@ -cycler==0.10.0 -decorator==4.4.0 -imageio==2.5.0 -kiwisolver==1.1.0 -matplotlib==3.1.1 -networkx==2.3 -numpy==1.17.2 -Pillow==6.1.0 -pyparsing==2.4.2 -python-dateutil==2.8.0 -PyWavelets==1.0.3 -scikit-image==0.15.0 -scipy==1.3.1 -six==1.12.0 -tqdm==4.36.1 +imutils==0.5.3 +numpy==1.18.2 +opencv-python==4.2.0.32 +opencv-python-headless==4.2.0.32 +pandas==1.0.3 +python-dateutil==2.8.1 +pytz==2019.3 +six==1.14.0 +tqdm==4.44.1 diff --git a/transfer_imgs_1.py b/transfer_imgs_1.py index 75b2955..a32a222 100644 --- a/transfer_imgs_1.py +++ b/transfer_imgs_1.py @@ -3,10 +3,8 @@ import argparse import os -from glob import glob from os.path import join, exists import subprocess # runs bash commands in python -import sys from tqdm import tqdm @@ -20,6 +18,42 @@ def argparse_reader(): return parser +def get_path_names_necessary(rsync_out, selected_batches=None): + image_names = set() + unique_paths = [] + for line in sorted(rsync_out.split('\n'), reverse=True): + if ".jpg" in line: + batch = int( + line.split('batchID_')[1].split('wellNum')[0].replace(os.sep, '').replace("\\", '').replace('n', + '')) + jpg = line.split(os.sep)[-1] + if jpg not in image_names: + unique_paths.append(line) + image_names.add(jpg) + if selected_batches is not None: + if batch in selected_batches: + unique_paths.append(line) + return unique_paths + + +def sort_image_path_names(paths): + # sort by image name + drop_images_paths = [] + overview_drop_location_paths = [] + overview_extended_focus_paths = [] + i = 1 + for path in list(sorted([line for line in paths], key=lambda line: line.split(os.sep)[-1])): + if "ef.jpg" in path and i == 1: + drop_images_paths.append(path) + i += 1 + elif "dl.jpg" in path: + overview_drop_location_paths.append(path) + elif "ef.jpg" in path and i == 2: + overview_extended_focus_paths.append(path) + i = 1 + return drop_images_paths, overview_drop_location_paths, overview_extended_focus_paths + + def run(plateID, output_dir, rock_drive_ip): if not exists(join(output_dir)): os.mkdir(join(output_dir)) @@ -29,6 +63,7 @@ def run(plateID, output_dir, rock_drive_ip): 2:] + '/plateID_' + str( plateID) + '/', str(output_dir) + '/'] + print() print(*rsync_log) rsync = subprocess.run(rsync_log, capture_output=True) rsync_out = rsync.stdout.decode("utf-8") @@ -41,45 +76,21 @@ def run(plateID, output_dir, rock_drive_ip): for file in sorted(log_rsync_file.readlines()): if 'batchID_' in file: batches.add(int( - file.split('batchID_')[1].split('wellNum')[0].replace("/", '').replace("\\", '').replace('n', ''))) + file.split('batchID_')[1].split('wellNum')[0].replace("/", '').replace("\\", '').replace('n', + ''))) batches = sorted(list(batches)) # batchID_overview = batches[-1] # last in list # batchID_drop = batches[0] # first in list - print("batch IDs selected: ", *batches) + print("batch IDs selected: droplocation, dropimg: ", batches[0], batches[-1]) + selected_batches = (batches[0], batches[-1]) # ### Create a list of files to transfer in a text file for rsync to transfer using the --files-from option - # get unique image names - # Tries 
to grab all the images in the same batch first, because doing two batches is two different batches of images - """ - i think batch id are different times taking the pictures so if you try to match a extended focus to a drop - image from two different imaging times(aka batches) it will not match exactly and could be the problem with - the well not matching - """ - image_names = set() - path_names_only_necessary = list() - for line in rsync_out.split('\n'): - if ".jpg" in line: - image_name = line.split('/')[-1] - if image_name not in image_names: - path_names_only_necessary.append(line) - image_names.add(line.split('/')[-1]) - print() + # get unique image names starting from the last image taken. Most recent images will be used. + path_names_only_necessary = get_path_names_necessary(rsync_out) - drop_images_paths = [] - overview_drop_location_paths = [] - overview_extended_focus_paths = [] - # sort by image name - i = 1 - for path in list(sorted([line for line in path_names_only_necessary], key=lambda line: line.split(os.sep)[-1])): - if "ef.jpg" in path and i == 1: - drop_images_paths.append(path) - i += 1 - elif "dl.jpg" in path: - overview_drop_location_paths.append(path) - elif "ef.jpg" in path and i == 2: - overview_extended_focus_paths.append(path) - i = 1 + drop_images_paths, overview_drop_location_paths, overview_extended_focus_paths = sort_image_path_names( + path_names_only_necessary) print("drop image:", len(drop_images_paths), " images \noverview ef:", len(overview_extended_focus_paths), " images \noverview drop location:", len(overview_drop_location_paths), "images") @@ -96,6 +107,7 @@ def run(plateID, output_dir, rock_drive_ip): 'plateID_' + str(plateID)), join(str(output_dir), "") ] + print() print(*rsync_download) rsync_stdout_download = subprocess.run(rsync_download, capture_output=True).stdout.decode("utf-8") downloaded_files = 0 @@ -105,7 +117,7 @@ def run(plateID, output_dir, rock_drive_ip): print("Downloaded Files = ", downloaded_files, "(should be 288 = 96*3)") else: try: - raise RuntimeWarning("Using files from previous download in "+output_dir) + raise RuntimeWarning("Using files from previous download in " + output_dir) except RuntimeWarning as e: print(e) pass From 35dea8aa1f1163288ac7d5427c9ec2011e1dfec7 Mon Sep 17 00:00:00 2001 From: Liam McKay Date: Tue, 31 Mar 2020 16:59:41 -0700 Subject: [PATCH 9/9] commenting --- bounding_box_overlay_2.py | 113 +++++++++++++++++++++++++++++++------- echo_pregui_run.py | 6 +- pregui_img_analysis_3.py | 89 ++++++++++++++++++------------ transfer_imgs_1.py | 24 +++++++- 4 files changed, 172 insertions(+), 60 deletions(-) diff --git a/bounding_box_overlay_2.py b/bounding_box_overlay_2.py index 0064b9b..1ea8204 100644 --- a/bounding_box_overlay_2.py +++ b/bounding_box_overlay_2.py @@ -5,25 +5,38 @@ import time, sys import glob import os +import argparse from cv2 import cv2 from tqdm import tqdm from Plate import Plate -plate = Plate(r=8, c=12, subwell_num=1) # don't need to worry about subwell (that's specified in img path) -white = (255, 255, 255) # white - color +PLATE = Plate(r=8, c=12, subwell_num=1) # don't need to worry about subwell (that's specified in img path) +COLOR_WHITE = (255, 255, 255) # white - color +def argparse_reader(): + parser = argparse.ArgumentParser() + parser.add_argument('output_plate_folder', type=str, help='Parent folder of batchID folders') + parser.add_argument('-debug', '--debug', action='store_true', + help='Shows images that well/drop were not found in analysis') + return parser def 
save_overview_img(original_fp, imageDirectory): + """ + Saves original overview image to its own folder + @param original_fp: Overview image downloaded location + @param imageDirectory: output directory + @return: Well_subwell ID (A01_2) + """ original_fp = os.path.abspath(original_fp) fp = original_fp.split(os.path.sep) well_num = "".join([(fp[x] if c == 0 else '') for x, c in - enumerate([s.find('well') for s in fp])]) # just gets the wellNum_## folder name + enumerate([s.find("well") for s in fp])]) # just gets the wellNum_## folder name subwell_number = fp[-1][1] - well_id = plate.get_number_to_well_id(int(well_num.split("_")[1])).split("_")[0] + well_id = PLATE.get_number_to_well_id(int(well_num.split("_")[1])).split("_")[0] new_fp = os.path.join(imageDirectory, "overview", well_id + "_" + subwell_number + ".jpg") subprocess.run(["cp", original_fp, new_fp]) @@ -31,6 +44,17 @@ def save_overview_img(original_fp, imageDirectory): def align_drop_to_overview(b_x, b_y, b_w, b_h, zoom, overview_ef, black_white_mask_2=None): + """ + Overlay zoom image (with its mask) onto overview image at the location of the red bounding-box drop location + @param b_x: top left X of box + @param b_y: top left Y of box + @param b_w: width of box + @param b_h: height of box + @param zoom: zoom/drop image (in cv2 array format) + @param overview_ef: overview image (in cv2 array format) + @param black_white_mask_2: mask for zoom image (in cv2 array format) + @return: overlayed image (in cv2 array format) + """ if black_white_mask_2 is None: black_white_mask_2 = cv2.bitwise_not( np.zeros((zoom.shape[0], zoom.shape[1]), np.uint8)) # make white box zoom size @@ -105,9 +129,26 @@ def align_drop_to_overview(b_x, b_y, b_w, b_h, zoom, overview_ef, black_white_ma return overview_ef -def find_image_features(image, mask_color=True, mask_color_min=None, mask_color_max=None, percent_arc_length=0.1, - bilateral=False, CONTOUR_METHOD=cv2.CHAIN_APPROX_SIMPLE, RETREIVAL_METHOD=cv2.RETR_EXTERNAL, - blur_image=True, blur_iterations=1, box=False): +def find_image_features(image: np.ndarray, mask_color: bool = True, mask_color_min: np.ndarray = None, mask_color_max: np.ndarray = None, + percent_arc_length: float = 0.1, + bilateral: bool = False, CONTOUR_METHOD: int = cv2.CHAIN_APPROX_SIMPLE, + RETREIVAL_METHOD: int = cv2.RETR_EXTERNAL, + blur_image: bool = True, blur_iterations: int = 1, box: bool = False): + """ + Analyze image for shapes and colors + @param image: cv2 numpy array image + @param mask_color: bool look for a color + @param mask_color_min: Min BGR values (blue, green, red) + @param mask_color_max: Max BGR values (blue, green, red) + @param percent_arc_length: Contour sensitivity to hard turns + @param bilateral: bool Use bilateral image filtering (very slow) + @param CONTOUR_METHOD: Use simple chain approximation or return all contours found (slower) + @param RETREIVAL_METHOD: Return a hierarchy of contours + @param blur_image: bool to blur image during contour finding (Gaussian) + @param blur_iterations: Times to blur the image over on itself (Gaussian) + @param box: bool return best fit box + @return: (all arrays of points on image) contours, hierarchy, polygon shape contours, box with biggest area + """ if mask_color_max is None: mask_color_max = np.array([255, 255, 255]) if mask_color_min is None: @@ -173,6 +214,14 @@ def find_image_features(image, mask_color=True, mask_color_min=None, mask_color_ def get_drop_location_box(overview_dl, mask_color_min, mask_color_max, debug=False): + """ + Get bounding box coordinates
for drop location + @param overview_dl: Drop location image + @param mask_color_min: BGR color minimum + @param mask_color_max: BGR color maximum + @param debug: shows box found outlined in blue color + @return: + """ _, boundRect, _, box_with_biggest_area = find_image_features(overview_dl, mask_color_min=mask_color_min, mask_color_max=mask_color_max, blur_iterations=0, @@ -202,6 +251,15 @@ def get_drop_location_box(overview_dl, mask_color_min, mask_color_max, debug=Fal def find_biggest_contour(image, contours, max_area=None, min_area=100 ** 2, max_border=(100, 100, 100, 100)): + """ + Get biggest area contour in list of contours + @param image: image contours processed on + @param contours: list of contours (from cv2.findContours()) + @param max_area: Upper limit of contour area + @param min_area: Lower limit of contour area + @param max_border: Border around image where contour can exist + @return: contour + """ if max_area is None: max_area = max(image.shape) ** 2 @@ -243,6 +301,18 @@ def find_biggest_contour(image, contours, max_area=None, min_area=100 ** 2, max_ def overlay_images(overview_dl_fh, overview_ef_fh, zoom_fh, output_fh, circle=False, box=True, convex=False, debug=False): + """ + Overlay drop image in convex, circle or box shape on overview image from Rockimager + @param overview_dl_fh: Overview drop location file path + @param overview_ef_fh: Overview file path + @param zoom_fh: Drop file path + @param output_fh: Overlay output file path + @param circle: bool, Shape circle cut out + @param box: bool, Shape box overlay + @param convex: bool, Shape convex cut out + @param debug: Show images during processing + @return: overlayed image + """ ### This is the main function of the script overview_dl = cv2.imread(overview_dl_fh) @@ -289,7 +359,7 @@ def overlay_images(overview_dl_fh, overview_ef_fh, zoom_fh, output_fh, circle=Fa (circle_x, circle_y), radius = cv2.minEnclosingCircle(cnt) (circle_x, circle_y, radius) = (int(circle_x), int(circle_y), int(radius)) circle_mask = np.zeros((zoom_grey.shape[0], zoom_grey.shape[1]), np.uint8) - cv2.circle(circle_mask, (circle_x, circle_y), radius, white, -1) + cv2.circle(circle_mask, (circle_x, circle_y), radius, COLOR_WHITE, -1) overview_ef = align_drop_to_overview(b_x, b_y, b_w, b_h, zoom, overview_ef, circle_mask) elif convex: @@ -300,7 +370,7 @@ def overlay_images(overview_dl_fh, overview_ef_fh, zoom_fh, output_fh, circle=Fa # draw them on a mask for i in range(len(hull)): - cv2.drawContours(black_white_mask, hull, i, white, -1, 8) + cv2.drawContours(black_white_mask, hull, i, COLOR_WHITE, -1, 8) # find contours of that mask (adds some smoothing) cnts_mask, hierarchy_mask, _, _, _ = find_image_features(black_white_mask, mask_color=False, @@ -319,7 +389,7 @@ def overlay_images(overview_dl_fh, overview_ef_fh, zoom_fh, output_fh, circle=Fa # draw them on a mask for i in range(len(hull)): - cv2.drawContours(black_white_mask_2, hull, i, white, -1, 8) + cv2.drawContours(black_white_mask_2, hull, i, COLOR_WHITE, -1, 8) overview_ef = align_drop_to_overview(b_x, b_y, b_w, b_h, zoom, overview_ef, black_white_mask_2) elif box: @@ -332,6 +402,14 @@ def overlay_images(overview_dl_fh, overview_ef_fh, zoom_fh, output_fh, circle=Fa def run(imageDirectory, circle=False, box=True, convex=False, debug=False): + """ + Overlay a directory of images + @param imageDirectory: output dir + @param circle: bool Shape circle cut out + @param box: bool Shape box on zoom image + @param convex: bool Shape convex cut out + @param debug: Shows images during processing 
+ """ if not os.path.exists(imageDirectory): # case 2: directory doesn't exist print("Error: cannot find directory " + imageDirectory) else: @@ -384,17 +462,14 @@ def run(imageDirectory, circle=False, box=True, convex=False, debug=False): def main(): + """ + Run only overlay step. Overlay images in a directory with timing + """ ### save the time to later see how long script took t0 = time.time() - # save usage to a string to save space - usage = "Usage: python bounding_box_overlay.py [parent image directory]" - imageDirectory = '' - try: # case 1: catches if there is no argv 1 - # not the greatest, but works - imageDirectory = sys.argv[1] - except IndexError: # if there is, leave the program - print(usage) - exit(1) + + args = argparse_reader().parse_args() + imageDirectory = args.output_plate_folder run(imageDirectory) ### print the time it took to run the script print("time to run: %s" % (time.time() - t0)) diff --git a/echo_pregui_run.py b/echo_pregui_run.py index 307c119..9ef9458 100644 --- a/echo_pregui_run.py +++ b/echo_pregui_run.py @@ -3,7 +3,7 @@ from transfer_imgs_1 import run as transfer_imgs from bounding_box_overlay_2 import run as bounding_box_overlay -from pregui_img_analysis_3 import get_dict_image_to_well, createJson +from pregui_img_analysis_3 import get_dict_image_to_well, create_json from organizeImages import organizeImages, rename_overview_images_well_id def argparse_reader_main(): @@ -46,8 +46,8 @@ def main(): rename_overview_images_well_id(output_dir) bounding_box_overlay(output_dir, box=args.box_overlay, circle=args.circle_overlay, convex=args.convex_overlay, debug=args.debug) img_well_dict = get_dict_image_to_well(output_dir) - createJson(plate_dir=output_dir, plate_id=plateID, plate_temperature=temperature, - dict_image_path_subwells=img_well_dict) + create_json(plate_dir=output_dir, plate_id=plateID, plate_temperature=temperature, + dict_image_path_subwells=img_well_dict) if __name__ == '__main__': diff --git a/pregui_img_analysis_3.py b/pregui_img_analysis_3.py index 752d17c..3b0e35a 100644 --- a/pregui_img_analysis_3.py +++ b/pregui_img_analysis_3.py @@ -14,11 +14,6 @@ import argparse -# https://stackoverflow.com/questions/11686720/is-there-a-numpy-builtin-to-reject-outliers-from-a-list -def reject_outliers(data, m=2): - return data[abs(data - np.mean(data)) <= m * np.std(data)] - - def argparse_reader(): parser = argparse.ArgumentParser() parser.add_argument('plateID', type=int, @@ -32,28 +27,36 @@ def argparse_reader(): return parser -# Function Definitions -# Params -# r1 lower radius bound -# r2 upper radius bound -# search step size between radii -# edge: a binary image which is the output of canny edge detection -# peak_num the # of circles searched for in hough space - -# Output: -# accums -# cx : circle center x -# cy : circle center y -# radii circle radii +# https://stackoverflow.com/questions/11686720/is-there-a-numpy-builtin-to-reject-outliers-from-a-list +def reject_outliers(data, m=2): + """ + Trim data in numpy array + @param data: numpy array + @param m: max standard deviations + @return: trimmed data + """ + return data[abs(data - np.mean(data)) <= m * np.std(data)] def process_found_circles(circles): + """ + Average trimmed data to get one circle x,y,radius + @param circles: numpy array of circles + @return: x position, y position, radius of circle + """ x, y, r = np.average(reject_outliers(circles[:, 0])).astype(int), np.average(reject_outliers(circles[:, 1])).astype( int), np.average(reject_outliers(circles[:, 2])).astype(int) return x, y, 
r def save_canny_save_fit(path, temp, debug=False): + """ + Find location of well center, drop center on overview image. Averages and trims circles found to be more accurate + @param path: overview file path + @param temp: int Temperature of storage + @param debug: Show wells and drop found + @return: + """ accum_d, cx_d, cy_d, radii_d, cx_w, cy_w, radii_w = [0, 0, 0, 0, 0, 0, 0] # initialize variables image = cv2.imread(path) @@ -76,6 +79,10 @@ def save_canny_save_fit(path, temp, debug=False): if circles is not None: circles = np.round(circles[0, :]).astype("int") cx_w, cy_w, radii_w = process_found_circles(circles) + if debug: + cv2.circle(image, (cx_w, cy_w), radii_w, color=(255,0,0)) # blue + cv2.imshow("found well, press key to continue", image) + cv2.waitKey(0) else: if debug: cv2.imshow("could not find well, press key to continue", np.concatenate([image, edged], axis=1)) @@ -84,6 +91,10 @@ if drop_circles is not None: drop_circles = np.round(drop_circles[0, :]).astype("int") cx_d, cy_d, radii_d = process_found_circles(drop_circles) + if debug: + cv2.circle(image, (cx_d, cy_d), radii_d, color=(0,102,204)) # orange + cv2.imshow("found drop, press key to continue", image) + cv2.waitKey(0) else: if debug: cv2.imshow("could not find drop, press key to continue", image) @@ -93,24 +104,21 @@ def get_dict_image_to_well(plate_dir): + """ + Create data file in json format for relating well id to image path + @param plate_dir: str output directory + @return: + """ current_directory = os.getcwd() image_list = glob.glob(os.path.join(current_directory, plate_dir, "overlayed", "*")) overview_list = list( sorted(glob.glob(os.path.join(current_directory, plate_dir, "overview", "*")), key=lambda x: x)) print("overviewimgs = ", len(overview_list)) - well_overlay_subwell_format = True - # try: - # image_list = sorted(image_list,key=lambda x: (int(x.split('well_')[1].split('_overlay')[0].split("_subwell")[0]))) - # except IndexError: - # if not image_list: image_list = sorted(image_list, key=lambda x: x) dict_image_path_subwells = {} for p, o in zip(image_list, overview_list): - # if well_overlay_subwell_format: - # well_subwell = p.split('well_')[1].split('_overlay')[0].replace("subwell", "") - # else: well_subwell = p.split("overlayed")[1].replace(".jpg", "").replace(os.sep, '') well, subwell = well_subwell.split("_") @@ -119,8 +127,16 @@ return dict_image_path_subwells - -def createJson(plate_dir: str, plate_id: int, plate_temperature: int, dict_image_path_subwells: dict, debug=False) -> None: +def create_json(plate_dir: str, plate_id: int, plate_temperature: int, dict_image_path_subwells: dict, + debug=False) -> None: + """ + Create pregui script json file + @param plate_dir: output dir + @param plate_id: Rockimager plate id + @param plate_temperature: storage temperature + @param dict_image_path_subwells: well id to image path relationship (get_dict_image_to_well(plate_dir)) + @param debug: Show images that are being processed + """ current_directory = os.getcwd() plateKeys = ["date_time", "temperature"] @@ -146,7 +162,8 @@ im_path, im_overview = im_paths if im_path: cx_d, cy_d, radii_d, cx_w, cy_w, radii_w = save_canny_save_fit(im_overview, - plate_temperature, debug) ### calling this function for 4c or 20c temp + plate_temperature, + debug) ### calling this
function for 4c or 20c temp else: try: raise FileNotFoundError("Well x,y,r will be zeros " + im_idx) @@ -166,8 +183,6 @@ def createJson(plate_dir: str, plate_id: int, plate_temperature: int, dict_image letter_number_new_image_path = im_path.split('well')[0] + well + "_" + subwell + ".jpg" os.rename(im_path, letter_number_new_image_path) - # print(cx_w,cy_w,radii_w,cx_d,cy_d,radii_d,cx_w,cy_w,radii_d,name,im_path,0,0,0) - str_currentWell = "{0}_{1}".format(well, subwell) a[plate_id]["subwells"][str_currentWell] = {key: 0 for key in wellKeys} a[plate_id]["subwells"][str_currentWell]["image_path"] = letter_number_new_image_path @@ -182,7 +197,6 @@ def createJson(plate_dir: str, plate_id: int, plate_temperature: int, dict_image a[plate_id]["subwells"][str_currentWell]["offset_x"] = int(offset_x) a[plate_id]["subwells"][str_currentWell]["subwell"] = int(subwell) - print("created:", os.path.join(current_directory, plate_dir, plate_dir.replace(os.path.join("a", "").replace("a", ""), '')) + '.json') with open(os.path.join(current_directory, plate_dir, @@ -193,6 +207,9 @@ def createJson(plate_dir: str, plate_id: int, plate_temperature: int, dict_image def main(): + """ + Run image analysis on a directory of images containing overlayed, overview, and batchID_#### folders + """ current_directory = os.getcwd() args = argparse_reader().parse_args() @@ -208,8 +225,8 @@ def main(): with open(os.path.join(current_directory, plate_dir, "dict_image_path_subwells.json"), 'r') as images_to_subwell_json: d = dict(json.load(images_to_subwell_json)) - createJson(plate_dir=plate_dir, plate_id=plate_id, plate_temperature=plate_temperature, - dict_image_path_subwells=d, debug=args.debug) + create_json(plate_dir=plate_dir, plate_id=plate_id, plate_temperature=plate_temperature, + dict_image_path_subwells=d, debug=args.debug) exit(1) except FileNotFoundError as e: print(e) @@ -220,8 +237,8 @@ def main(): 'w') as images_to_subwell_json: json.dump(dict_image_path_subwells, images_to_subwell_json) - createJson(plate_dir=plate_dir, plate_id=plate_id, plate_temperature=plate_temperature, - dict_image_path_subwells=dict_image_path_subwells, debug=args.debug) + create_json(plate_dir=plate_dir, plate_id=plate_id, plate_temperature=plate_temperature, + dict_image_path_subwells=dict_image_path_subwells, debug=args.debug) print("time to run: %s minutes" % str(int(ti.time() - t0) / 60)) diff --git a/transfer_imgs_1.py b/transfer_imgs_1.py index a32a222..228bcee 100644 --- a/transfer_imgs_1.py +++ b/transfer_imgs_1.py @@ -10,6 +10,10 @@ def argparse_reader(): + """ + Parse arguments for the image transfer script + @return: parse object with arguments + """ parser = argparse.ArgumentParser() parser.add_argument('plateID', type=int, help='RockMaker Plate ID (4 or 5 digit code on barcode or 2nd number on RockMaker screen experiment file') @@ -19,6 +23,12 @@ def argparse_reader(): def get_path_names_necessary(rsync_out, selected_batches=None): + """ + Get unique list of path names from rsync file output + @param rsync_out: rsync files matched output + @param selected_batches: Batches to use for downloading images (if not specified, find images starting at the last batch) + @return: + """ image_names = set() unique_paths = [] for line in sorted(rsync_out.split('\n'), reverse=True): @@ -37,11 +47,16 @@ def get_path_names_necessary(rsync_out, selected_batches=None): def sort_image_path_names(paths): - # sort by image name + """ + Put paths into 3 bins: drop location, zoom, overview + @param paths: list of unique image paths from rsync + 
@return: zoom, drop location, overview + """ drop_images_paths = [] overview_drop_location_paths = [] overview_extended_focus_paths = [] i = 1 + # sort by image name for path in list(sorted([line for line in paths], key=lambda line: line.split(os.sep)[-1])): if "ef.jpg" in path and i == 1: drop_images_paths.append(path) @@ -55,6 +70,12 @@ def sort_image_path_names(paths): def run(plateID, output_dir, rock_drive_ip): + """ + Transfer Rockimager images from NAS server using rsync + @param plateID: Rockimager plate ID + @param output_dir: local output directory + @param rock_drive_ip: IP address of NAS server + """ if not exists(join(output_dir)): os.mkdir(join(output_dir)) @@ -121,7 +142,6 @@ def run(plateID, output_dir, rock_drive_ip): except RuntimeWarning as e: print(e) pass - return def main():
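To make the transfer helpers above concrete, a minimal sketch of how they chain together; the rsync listing lines are hypothetical and are chosen so that, for a given well, the drop ef image sorts ahead of the overview ef image, which is what sort_image_path_names assumes:

```
from transfer_imgs_1 import get_path_names_necessary, sort_image_path_names

# hypothetical rsync dry-run listing for one well
rsync_out = "\n".join([
    "batchID_103969/wellNum_1/profileID_1/d1_ef.jpg",  # drop (zoom) image
    "batchID_106812/wellNum_1/profileID_2/dl.jpg",     # overview drop-location image
    "batchID_106812/wellNum_1/profileID_2/ef.jpg",     # overview extended-focus image
])

# keep one path per unique image name, most recent batches first
paths = get_path_names_necessary(rsync_out)

# bin into the three lists used to build files_to_transfer.txt for the real rsync download
drop_paths, drop_location_paths, overview_ef_paths = sort_image_path_names(paths)
```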