Update mqtt_pump_focus_image.py

Author: tpollina, 2020-02-02 04:20:55 +01:00 (committed by GitHub)
Parent: 04e6853b81
Commit: 9f1609ded4


@@ -4,11 +4,10 @@ from datetime import datetime, timedelta
 from adafruit_motor import stepper
 from adafruit_motorkit import MotorKit
 from time import sleep
-import shutil
 import json
 import os
+import subprocess
 from skimage.util import img_as_ubyte
@@ -25,16 +24,21 @@ from morphocut.image import (
     RescaleIntensity,
     RGB2Gray,
 )
 from morphocut.stat import RunningMedian
 from morphocut.str import Format
-from morphocut.stream import TQDM, Enumerate
+from morphocut.stream import TQDM, Enumerate, FilterVariables
 from skimage.feature import canny
 from skimage.color import rgb2gray, label2rgb
 from skimage.morphology import disk
 from skimage.morphology import erosion, dilation, closing
 from skimage.measure import label, regionprops
+import cv2, shutil
+import smbus
+#fan
+bus = smbus.SMBus(1)
 ################################################################################
 kit = MotorKit()
 pump_stepper = kit.stepper1
@@ -47,12 +51,8 @@ focus_stepper.release()
 camera = PiCamera()
 camera.resolution = (3280, 2464)
 camera.iso = 60
-sleep(3)
 camera.shutter_speed = 500
-camera.exposure_mode = 'off'
-g = camera.awb_gains
-camera.awb_mode = 'off'
-camera.awb_gains = g
+camera.exposure_mode = 'fixedfps'
 ################################################################################
 message = ''
@@ -62,9 +62,9 @@ count=''
 ################################################################################
 def on_connect(client, userdata, flags, rc):
     print("Connected! - " + str(rc))
     client.subscribe("actuator/#")
+    rgb(0,255,0)
 def on_subscribe(client, obj, mid, granted_qos):
     print("Subscribed! - "+str(mid)+" "+str(granted_qos))
@@ -80,6 +80,24 @@ def on_message(client, userdata, msg):
 def on_log(client, obj, level, string):
     print(string)
+def rgb(R,G,B):
+    bus.write_byte_data(0x0d, 0x00, 0)
+    bus.write_byte_data(0x0d, 0x01, R)
+    bus.write_byte_data(0x0d, 0x02, G)
+    bus.write_byte_data(0x0d, 0x03, B)
+    bus.write_byte_data(0x0d, 0x00, 1)
+    bus.write_byte_data(0x0d, 0x01, R)
+    bus.write_byte_data(0x0d, 0x02, G)
+    bus.write_byte_data(0x0d, 0x03, B)
+    bus.write_byte_data(0x0d, 0x00, 2)
+    bus.write_byte_data(0x0d, 0x01, R)
+    bus.write_byte_data(0x0d, 0x02, G)
+    bus.write_byte_data(0x0d, 0x03, B)
+    cmd="i2cdetect -y 1"
+    subprocess.Popen(cmd.split(),stdout=subprocess.PIPE)
 ################################################################################
 client = mqtt.Client()
 client.connect("127.0.0.1",1883,60)
@@ -94,22 +112,23 @@ client.loop_start()
 while True:
     ################################################################################
     if (topic=="pump"):
+        rgb(0,0,255)
         direction=message.split(" ")[0]
         delay=float(message.split(" ")[1])
         nb_step=int(message.split(" ")[2])
         client.publish("receiver/pump", "Start");
-        if direction == "BACKWARD":
-            direction=stepper.direction
-        if direction == "FORWARD":
-            direction=stepper.FORWARD
         while True:
+            if direction == "BACKWARD":
+                direction=stepper.BACKWARD
+            if direction == "FORWARD":
+                direction=stepper.FORWARD
             count+=1
-            print(count,nb_step)
+            # print(count,nb_step)
             pump_stepper.onestep(direction=direction, style=stepper.DOUBLE)
             sleep(delay)
@@ -117,6 +136,7 @@ while True:
                 pump_stepper.release()
                 print("The pump has been interrompted.")
                 client.publish("receiver/pump", "Interrompted");
+                rgb(0,255,0)
                 break
             if count>nb_step:
@@ -124,31 +144,34 @@ while True:
                 print("The pumping is done.")
                 topic="wait"
                 client.publish("receiver/pump", "Done");
+                rgb(0,255,0)
                 break
     ################################################################################
     elif (topic=="focus"):
+        rgb(255,255,0)
         direction=message.split(" ")[0]
         nb_step=int(message.split(" ")[1])
         client.publish("receiver/focus", "Start");
-        if direction == "FORWARD":
-            direction=stepper.FORWARD
-        if direction == "BACKWARD":
-            direction=stepper.BACKWARD
         while True:
-            count+=1
-            print(count,nb_step)
-            focus_stepper.onestep(direction=direction, style=stepper.MICROSTEP)
+            if direction == "FORWARD":
+                direction=stepper.FORWARD
+            if direction == "BACKWARD":
+                direction=stepper.BACKWARD
+            count+=1
+            # print(count,nb_step)
+            focus_stepper.onestep(direction=direction, style=stepper.MICROSTEP)
             if topic!="focus":
                 focus_stepper.release()
                 print("The stage has been interrompted.")
                 client.publish("receiver/focus", "Interrompted");
+                rgb(0,255,0)
                 break
             if count>nb_step:
@@ -156,12 +179,15 @@ while True:
                 print("The focusing is done.")
                 topic="wait"
                 client.publish("receiver/focus", "Done");
+                rgb(0,255,0)
                 break
     ################################################################################
     elif (topic=="image"):
+        camera.start_preview(fullscreen=False, window = (160, 0, 640, 480))
         sleep_before=int(message.split(" ")[0])
         nb_step=int(message.split(" ")[1])
@@ -179,37 +205,51 @@ while True:
         #flushing before to begin
+        rgb(0,0,255)
         for i in range(nb_step):
             pump_stepper.onestep(direction=stepper.FORWARD, style=stepper.DOUBLE)
             sleep(0.01)
+        rgb(0,255,0)
         directory = os.path.join(path, "PlanktonScope")
         os.makedirs(directory, exist_ok=True)
+        export = os.path.join(directory, "export")
+        os.makedirs(export, exist_ok=True)
         date=datetime.now().strftime("%m_%d_%Y")
         time=datetime.now().strftime("%H_%M")
         path_date = os.path.join(directory, date)
         os.makedirs(path_date, exist_ok=True)
         path_time = os.path.join(path_date,time)
         os.makedirs(path_time, exist_ok=True)
         while True:
             count+=1
-            print(count,nb_frame)
+            # print(count,nb_frame)
             filename = os.path.join(path_time,datetime.now().strftime("%M_%S_%f")+".jpg")
+            rgb(0,255,255)
             camera.capture(filename)
+            rgb(0,255,0)
             client.publish("receiver/image", datetime.now().strftime("%M_%S_%f")+".jpg has been imaged.");
+            rgb(0,0,255)
             for i in range(10):
                 pump_stepper.onestep(direction=stepper.FORWARD, style=stepper.DOUBLE)
                 sleep(0.01)
             sleep(0.5)
+            rgb(0,255,0)
             if(count>nb_frame):
+                camera.stop_preview()
+                client.publish("receiver/image", "Completed");
                 # Meta data that is added to every object
                 local_metadata = {
                     "process_datetime": datetime.now(),
@@ -217,106 +257,154 @@ while True:
                     "acq_camera_iso" : camera.iso,
                     "acq_camera_shutter_speed" : camera.shutter_speed
                 }
+                global_metadata = None
+                config_txt = None
+                RAW = None
+                CLEAN = None
+                ANNOTATED = None
+                OBJECTS = None
+                archive_fn = None
                 config_txt = open('/home/pi/PlanktonScope/config.txt','r')
                 node_red_metadata = json.loads(config_txt.read())
                 global_metadata = {**local_metadata, **node_red_metadata}
-                import_path = path_time
-                archive_fn = os.path.join(directory, str(date)+"_"+str(time)+"_ecotaxa_export.zip")
+                RAW = os.path.join(path_time, "RAW")
+                os.makedirs(RAW, exist_ok=True)
+                os.system("mv "+str(path_time)+"/*.jpg "+str(RAW))
+                CLEAN = os.path.join(path_time, "CLEAN")
+                os.makedirs(CLEAN, exist_ok=True)
+                ANNOTATED = os.path.join(path_time, "ANNOTATED")
+                os.makedirs(ANNOTATED, exist_ok=True)
+                OBJECTS = os.path.join(path_time, "OBJECTS")
+                os.makedirs(OBJECTS, exist_ok=True)
+                archive_fn = os.path.join(directory,"export", str(date)+"_"+str(time)+"_ecotaxa_export.zip")
+                client.publish("receiver/segmentation", "Start");
                 # Define processing pipeline
                 with Pipeline() as p:
                     # Recursively find .jpg files in import_path.
                     # Sort to get consective frames.
-                    abs_path = Find(import_path, [".jpg"], sort=True, verbose=True)
+                    abs_path = Find(RAW, [".jpg"], sort=True, verbose=True)
+                    FilterVariables(abs_path)
                     # Extract name from abs_path
                     name = Call(lambda p: os.path.splitext(os.path.basename(p))[0], abs_path)
+                    Call(rgb, 0,255,0)
                     # Read image
                     img = ImageReader(abs_path)
+                    # Show progress bar for frames
+                    #TQDM(Format("Frame {name}", name=name))
                     # Apply running median to approximate the background image
                     flat_field = RunningMedian(img, 5)
                     # Correct image
                     img = img / flat_field
+                    FilterVariables(name,img)
                     # Rescale intensities and convert to uint8 to speed up calculations
                     img = RescaleIntensity(img, in_range=(0, 1.1), dtype="uint8")
+                    frame_fn = Format(os.path.join(CLEAN, "{name}.jpg"), name=name)
+                    ImageWriter(frame_fn, img)
+                    FilterVariables(name,img)
                     # Convert image to uint8 gray
                     img_gray = RGB2Gray(img)
-                    # ?
                     img_gray = Call(img_as_ubyte, img_gray)
-                    img_canny = Call(canny, img_gray, sigma=0.3)
-                    img_dilate = Call(dilation, img_canny)
-                    img_closing = Call(closing, img_dilate)
-                    mask = Call(erosion, img_closing)
-                    # Show progress bar for frames
-                    TQDM(Format("Frame {name}", name=name))
-                    # Apply threshold find objects
-                    #threshold = 204 # Call(skimage.filters.threshold_otsu, img_gray)
-                    #mask = img_gray < threshold
+                    #Canny edge detection
+                    img_canny = Call(cv2.Canny, img_gray, 50,100)
+                    #Dilate
+                    kernel = Call(cv2.getStructuringElement, cv2.MORPH_ELLIPSE, (15, 15))
+                    img_dilate = Call(cv2.dilate, img_canny, kernel, iterations=2)
+                    #Close
+                    kernel = Call(cv2.getStructuringElement, cv2.MORPH_ELLIPSE, (5, 5))
+                    img_close = Call(cv2.morphologyEx, img_dilate, cv2.MORPH_CLOSE, kernel, iterations=1)
+                    #Erode
+                    kernel = Call(cv2.getStructuringElement, cv2.MORPH_ELLIPSE, (15, 15))
+                    mask = Call(cv2.erode, img_close, kernel, iterations=2)
+                    FilterVariables(name,img,img_gray,mask)
                     # Find objects
                     regionprops = FindRegions(
                         mask, img_gray, min_area=1000, padding=10, warn_empty=name
                     )
+                    Call(rgb, 255,0,255)
                     # For an object, extract a vignette/ROI from the image
                     roi_orig = ExtractROI(img, regionprops, bg_color=255)
+                    roi_orig
                     # Generate an object identifier
                     i = Enumerate()
                     #Call(print,i)
                     object_id = Format("{name}_{i:d}", name=name, i=i)
                     #Call(print,object_id)
+                    object_fn = Format(os.path.join(OBJECTS, "{name}.jpg"), name=object_id)
+                    ImageWriter(object_fn, roi_orig)
                     # Calculate features. The calculated features are added to the global_metadata.
                     # Returns a Variable representing a dict for every object in the stream.
                     meta = CalculateZooProcessFeatures(
                         regionprops, prefix="object_", meta=global_metadata
                     )
-                    # If CalculateZooProcessFeatures is not used, we need to copy global_metadata into the stream:
-                    # meta = Call(lambda: global_metadata.copy())
-                    # https://github.com/morphocut/morphocut/issues/51
+                    json_meta = Call(json.dumps,meta, sort_keys=True, default=str)
+                    Call(client.publish, "receiver/segmentation/metric", json_meta)
                     # Add object_id to the metadata dictionary
                     meta["object_id"] = object_id
                     # Generate object filenames
                     orig_fn = Format("{object_id}.jpg", object_id=object_id)
+                    FilterVariables(orig_fn,roi_orig,meta,object_id)
                     # Write objects to an EcoTaxa archive:
                     # roi image in original color, roi image in grayscale, metadata associated with each object
                     EcotaxaWriter(archive_fn, (orig_fn, roi_orig), meta)
                     # Progress bar for objects
                     TQDM(Format("Object {object_id}", object_id=object_id))
+                    Call(client.publish, "receiver/segmentation/object_id", object_id)
+                    meta=None
+                    FilterVariables(meta)
+                    Call(client.publish, "receiver/image", object_id)
                 p.run()
-                #remove directory
-                shutil.rmtree(import_path)
-                sleep(sleep_during)
-                for i in range(nb_step):
-                    pump_stepper.onestep(direction=stepper.FORWARD, style=stepper.DOUBLE)
-                    sleep(0.01)
-                count=0
+                #remove directory
+                #shutil.rmtree(import_path)
+                client.publish("receiver/segmentation", "Completed");
+                rgb(255,255,255)
+                sleep(sleep_during)
+                rgb(0,255,0)
                 date=datetime.now().strftime("%m_%d_%Y")
                 time=datetime.now().strftime("%H_%M")
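For readers following the segmentation change above: inside the pipeline every cv2 call is wrapped in MorphoCut's Call, so it runs lazily once per frame. Stripped of those wrappers, the new mask construction corresponds roughly to the following eager OpenCV sequence (a sketch only; gray is assumed to be the uint8 grayscale frame produced by RGB2Gray and img_as_ubyte):

import cv2

def build_mask(gray):
    # Canny edge detection with the thresholds used in the diff (50, 100)
    edges = cv2.Canny(gray, 50, 100)
    # Dilate with a 15x15 elliptical kernel to connect nearby edges
    kernel_big = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
    dilated = cv2.dilate(edges, kernel_big, iterations=2)
    # Close small holes with a 5x5 elliptical kernel
    kernel_small = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    closed = cv2.morphologyEx(dilated, cv2.MORPH_CLOSE, kernel_small, iterations=1)
    # Erode back with the 15x15 kernel to shrink the mask toward object size
    mask = cv2.erode(closed, kernel_big, iterations=2)
    return mask

FindRegions then extracts connected regions from this mask, keeping those above min_area=1000 pixels with 10 pixels of padding, exactly as configured in the pipeline.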
@@ -324,18 +412,28 @@ while True:
                 path_date = os.path.join(directory, date)
                 os.makedirs(path_date, exist_ok=True)
                 path_time = os.path.join(path_date,time)
-                os.makedirs(path_time, exist_ok=True)
+                rgb(0,0,255)
+                for i in range(nb_step):
+                    pump_stepper.onestep(direction=stepper.FORWARD, style=stepper.DOUBLE)
+                    sleep(0.01)
+                rgb(0,255,0)
+                os.makedirs(path_time, exist_ok=True)
+                count=0
             if topic!="image":
-                pump_focus.release()
+                pump_stepper.release()
                 print("The imaging has been interrompted.")
                 client.publish("receiver/image", "Interrompted");
+                rgb(0,255,0)
+                count=0
                 break
     else:
-        print("Waiting")
+        # print("Waiting")
        sleep(1)
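The command payloads parsed in the pump and focus branches are plain space-separated strings. As a usage sketch, another client can drive the actuators with paho-mqtt; this assumes, as the actuator/# subscription and the topic checks suggest, that on_message (outside this diff) keeps the last topic segment in topic and the payload in message:

import paho.mqtt.publish as publish

# Pump branch expects "<direction> <delay_seconds> <nb_step>"
publish.single("actuator/pump", "FORWARD 0.01 100", hostname="127.0.0.1")

# Focus branch expects "<direction> <nb_step>"
publish.single("actuator/focus", "BACKWARD 300", hostname="127.0.0.1")

Progress and completion are reported back on the receiver/pump, receiver/focus, receiver/image and receiver/segmentation topics, so a monitoring client can subscribe to receiver/# to follow each run.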