Update morphocut.py

Author: tpollina (committed by GitHub)
Date:   2020-01-30 21:52:12 -08:00
Commit: 70bf0a9680
Parent: 9d72a1e8bb

morphocut.py

@@ -22,25 +22,56 @@ from morphocut.stat import RunningMedian
 from morphocut.str import Format
 from morphocut.stream import TQDM, Enumerate
-# import_path = "/data-ssd/mschroeder/Datasets/Pyrocystis_noctiluca/RAW"
-import_path = "/home/pi/Desktop/PlanktonScope_acquisition/01_16_2020/afternoon/14_1"
-export_path = "/home/pi/Desktop/PlanktonScope_acquisition/01_16_2020/14_1_export"
-archive_fn = os.path.join(export_path, "14_1_morphocut_processed.zip")
+from skimage.feature import canny
+from skimage.color import rgb2gray, label2rgb
+from skimage.morphology import disk
+from skimage.morphology import erosion, dilation, closing
+from skimage.measure import label, regionprops
+import_path = "/media/tpollina/rootfs/home/pi/Desktop/PlanktonScope_acquisition/01_17_2020/RAW"
+export_path = "/media/tpollina/rootfs/home/pi/Desktop/PlanktonScope_acquisition/01_17_2020/"
+CLEAN = os.path.join(export_path, "CLEAN")
+os.makedirs(CLEAN, exist_ok=True)
+OBJECTS = os.path.join(export_path, "OBJECTS")
+os.makedirs(OBJECTS, exist_ok=True)
+archive_fn = os.path.join(export_path, "ecotaxa_export.zip")
 # Meta data that is added to every object
 global_metadata = {
     "acq_instrument": "Planktoscope",
     "process_datetime": datetime.datetime.now(),
+    "sample_project": "PlanktonScope Villefranche",
+    "sample_ship": "Kayak de Fabien",
+    "sample_operator": "Thibaut Pollina",
+    "sample_id": "Flowcam_PlanktonScope_comparison",
+    "sample_sampling_gear": "net",
+    "sample_time":150000,
+    "sample_date":16112020,
+    "object_lat": 43.696146,
+    "object_lon": 7.308359,
+    "acq_fnumber_objective": 16,
+    "acq_celltype": 200,
+    "process_pixel": 1.19,
+    "acq_camera": "Pi Camera V2.1",
+    "acq_instrument": "PlanktonScope V2.1",
+    "acq_software": "Node-RED Dashboard and raw python",
+    "acq_instrument_ID": "copepode",
+    "acq_volume": 24,
+    "acq_flowrate": "Unknown",
+    "acq_camera.resolution" : "(3280, 2464)",
+    "acq_camera.iso" : 60,
+    "acq_camera.shutter_speed" : 100,
+    "acq_camera.exposure_mode" : 'off',
+    "acq_camera.awb_mode" : 'off',
+    "acq_nb_frames" : 1000
 }
-if __name__ == "__main__":
-    print("Processing images under {}...".format(import_path))
-    # Create export_path in case it doesn't exist
-    os.makedirs(export_path, exist_ok=True)
-    # Define processing pipeline
-    with Pipeline() as p:
+# Define processing pipeline
+with Pipeline() as p:
     # Recursively find .jpg files in import_path.
     # Sort to get consective frames.
     abs_path = Find(import_path, [".jpg"], sort=True, verbose=True)
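Two things are worth noting about the expanded global_metadata block in this hunk: the dict literal now contains "acq_instrument" twice, so Python keeps only the later "PlanktonScope V2.1" value, and, per the comment further down ("The calculated features are added to the global_metadata"), each object ends up with this shared dict merged with its own computed fields. A minimal sketch of both behaviours, not part of the commit, using a trimmed dict and hypothetical per-object values:

global_metadata = {
    "acq_instrument": "Planktoscope",        # earlier duplicate key...
    "acq_instrument": "PlanktonScope V2.1",  # ...is silently overridden: the last value wins
    "object_lat": 43.696146,
    "object_lon": 7.308359,
}
assert global_metadata["acq_instrument"] == "PlanktonScope V2.1"

# Hypothetical per-object values standing in for what the pipeline computes downstream
object_features = {"object_id": "frame_0001_1", "object_area": 1234}

# One metadata record per object: shared acquisition fields merged with per-object fields
meta = {**global_metadata, **object_features}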
@@ -60,33 +91,49 @@ if __name__ == "__main__":
     # Rescale intensities and convert to uint8 to speed up calculations
     img = RescaleIntensity(img, in_range=(0, 1.1), dtype="uint8")
+    # Convert image to uint8 gray
+    img_gray = RGB2Gray(img)
+    img_gray = Call(img_as_ubyte, img_gray)
+    img_canny = Call(canny, img_gray, sigma=0.3)
+    img_dilate = Call(dilation, img_canny)
+    img_closing = Call(closing, img_dilate)
+    mask = Call(erosion, img_closing)
     # Show progress bar for frames
     TQDM(Format("Frame {name}", name=name))
-    # Convert image to uint8 gray
-    img_gray = RGB2Gray(img)
-    img_gray = Call(img_as_ubyte, img_gray)
     # Apply threshold find objects
-    threshold = 204 # Call(skimage.filters.threshold_otsu, img_gray)
-    mask = img_gray < threshold
+    #threshold = 204 # Call(skimage.filters.threshold_otsu, img_gray)
+    #mask = img_gray < threshold
     # Write corrected frames
-    frame_fn = Format(os.path.join(export_path, "{name}.jpg"), name=name)
+    frame_fn = Format(os.path.join(CLEAN, "{name}.jpg"), name=name)
     ImageWriter(frame_fn, img)
     # Find objects
     regionprops = FindRegions(
-        mask, img_gray, min_area=100, padding=10, warn_empty=name
+        mask, img_gray, min_area=1000, padding=10, warn_empty=name
     )
     # For an object, extract a vignette/ROI from the image
     roi_orig = ExtractROI(img, regionprops, bg_color=255)
-    roi_gray = ExtractROI(img_gray, regionprops, bg_color=255)
+    roi_orig
     # Generate an object identifier
     i = Enumerate()
+    #Call(print,i)
     object_id = Format("{name}_{i:d}", name=name, i=i)
+    #Call(print,object_id)
+    object_fn = Format(os.path.join(OBJECTS, "{name}.jpg"), name=object_id)
+    ImageWriter(object_fn, roi_orig)
     # Calculate features. The calculated features are added to the global_metadata.
     # Returns a Variable representing a dict for every object in the stream.
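The main change in this hunk is the segmentation: the fixed grey-level threshold (threshold = 204) is commented out and the mask is instead built from Canny edges that are dilated, closed and eroded, each step wrapped in a MorphoCut Call() node. A standalone sketch of the equivalent scikit-image sequence on a single frame, not part of the commit; the file name is a placeholder, and label/regionprops stand in for what FindRegions does inside the pipeline:

from skimage import io
from skimage.color import rgb2gray
from skimage.util import img_as_ubyte
from skimage.feature import canny
from skimage.morphology import dilation, closing, erosion
from skimage.measure import label, regionprops

# Placeholder path: any RGB frame written by the acquisition step
img = io.imread("frame_0001.jpg")

# Mirror the pipeline's RGB2Gray / img_as_ubyte conversion
img_gray = img_as_ubyte(rgb2gray(img))

# Edge-based mask: Canny edges, then dilation, closing and erosion
edges = canny(img_gray, sigma=0.3)
mask = erosion(closing(dilation(edges)))

# Keep only sufficiently large connected components (cf. min_area=1000 above)
regions = [r for r in regionprops(label(mask)) if r.area >= 1000]
print(len(regions), "candidate objects")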
@@ -102,15 +149,21 @@ if __name__ == "__main__":
     # Generate object filenames
     orig_fn = Format("{object_id}.jpg", object_id=object_id)
-    gray_fn = Format("{object_id}-gray.jpg", object_id=object_id)
     # Write objects to an EcoTaxa archive:
-    # roi image in original color, roi image in grayscale, metadata associated with each object
-    EcotaxaWriter(archive_fn, [(orig_fn, roi_orig), (gray_fn, roi_gray)], meta)
+    EcotaxaWriter(archive_fn, (orig_fn, roi_orig), meta)
     # Progress bar for objects
     TQDM(Format("Object {object_id}", object_id=object_id))
-    # Execute pipeline
-    p.run()
+import datetime
+BEGIN = datetime.datetime.now()
+# Execute pipeline
+p.run()
+END = datetime.datetime.now()
+print("MORPHOCUT :"+str(END-BEGIN))