Update mqtt_pump_focus_image.py

tpollina 2020-01-31 08:49:32 +01:00 committed by GitHub
parent c237534368
commit af90680da7


@@ -1,11 +1,39 @@
import paho.mqtt.client as mqtt
from picamera import PiCamera
from datetime import datetime, timedelta
import os
from adafruit_motor import stepper
from adafruit_motorkit import MotorKit
from time import sleep
import json
import os
from skimage.util import img_as_ubyte
from morphocut import Call
from morphocut.contrib.ecotaxa import EcotaxaWriter
from morphocut.contrib.zooprocess import CalculateZooProcessFeatures
from morphocut.core import Pipeline
from morphocut.file import Find
from morphocut.image import (
ExtractROI,
FindRegions,
ImageReader,
ImageWriter,
RescaleIntensity,
RGB2Gray,
)
from morphocut.stat import RunningMedian
from morphocut.str import Format
from morphocut.stream import TQDM, Enumerate
from skimage.feature import canny
from skimage.color import rgb2gray, label2rgb
from skimage.morphology import disk
from skimage.morphology import erosion, dilation, closing
from skimage.measure import label, regionprops
################################################################################
kit = MotorKit()
pump_stepper = kit.stepper1
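
For reference, the stepper calls used throughout the rest of the file follow the Adafruit MotorKit API imported above. A minimal sketch of the driving pattern, assuming a single Motor HAT at its default I2C address:

    from adafruit_motor import stepper
    from adafruit_motorkit import MotorKit
    from time import sleep

    kit = MotorKit()
    pump_stepper = kit.stepper1
    # stepper.DOUBLE energizes both coils for extra torque; stepper.MICROSTEP moves in finer increments.
    for _ in range(10):
        pump_stepper.onestep(direction=stepper.FORWARD, style=stepper.DOUBLE)
        sleep(0.01)
    # release() de-energizes the coils so the motor does not hold position (or heat up) while idle.
    pump_stepper.release()
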
@@ -61,6 +89,7 @@ client.on_message = on_message
client.on_log = on_log
client.loop_start()
################################################################################
while True:
@@ -68,17 +97,20 @@ while True:
if (topic=="pump"):
direction=message.split(" ")[0]
flowrate=message.split(" ")[1]
volume=message.split(" ")[2]
nb_step=int(volume)*507
duration=(int(volume)*60)/float(flowrate)
delay=(duration/nb_step)-0.005
delay=float(message.split(" ")[1])
nb_step=int(message.split(" ")[2])
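# Example payload (hypothetical values; only the trailing "pump" topic segment is visible here):
# "FORWARD 0.01 300" -> pump forward with 0.01 s between steps, for 300 steps.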
client.publish("receiver/pump", "Start");
if direction == "BACKWARD":
direction=stepper.BACKWARD
if direction == "FORWARD":
direction=stepper.FORWARD
while True:
count+=1
print(count,nb_step)
print("pump_stepper.onestep(direction=+action+, style=stepper.DOUBLE")
pump_stepper.onestep(direction=direction, style=stepper.DOUBLE)
sleep(delay)
if topic!="pump":
@@ -102,20 +134,25 @@ while True:
nb_step=int(message.split(" ")[1])
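# Example payload (hypothetical values): "FORWARD 250" -> move the focus stage forward by 250 microsteps.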
client.publish("receiver/focus", "Start");
if direction == "FORWARD":
direction=stepper.FORWARD
if direction == "BACKWARD":
direction=stepper.BACKWARD
while True:
count+=1
print(count,nb_step)
print("stage.onestep(direction=+action+, style=stepper.microstep")
sleep(0.001)
focus_stepper.onestep(direction=direction, style=stepper.MICROSTEP)
if topic!="focus":
pump_stepper.release()
focus_stepper.release()
print("The stage has been interrompted.")
client.publish("receiver/focus", "Interrompted");
break
if count>nb_step:
pump_stepper.release()
focus_stepper.release()
print("The focusing is done.")
topic="wait"
client.publish("receiver/focus", "Done");
@@ -125,41 +162,161 @@ while True:
elif (topic=="image"):
delay=int(message.split(" ")[0])
volume_before=int(message.split(" ")[1])
nb_frame=int(message.split(" ")[2])
wait_duration=int(message.split(" ")[3])
path=str(message.split(" ")[4])
sleep_before=int(message.split(" ")[0])
nb_step=int(message.split(" ")[1])
path=str(message.split(" ")[2])
nb_frame=int(message.split(" ")[3])
sleep_during=int(message.split(" ")[4])
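# Example payload (hypothetical values): "5 100 /home/pi/data 10 60" ->
# sleep 5 s before starting, flush 100 pump steps, save frames under /home/pi/data,
# capture a batch of 10 frames, and pause 60 s between batches.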
# Sleep for the requested duration before starting
sleep(delay)
sleep(sleep_before)
client.publish("receiver/image", "Start");
# Flush by running the pump before beginning
nb_step=int(volume)*507
for i in range(nb_step):
print("pump_stepper.onestep(direction=+action+, style=stepper.DOUBLE")
time.sleep(0.01)
pump_stepper.onestep(direction=stepper.FORWARD, style=stepper.DOUBLE)
sleep(0.01)
directory = os.path.join(path, "PlanktonScope")
os.makedirs(directory, exist_ok=True)
date=datetime.now().strftime("%m_%d_%Y")
time=datetime.now().strftime("%H_%M")
path_date = os.path.join(directory, datetime.now().strftime("%m_%d_%Y"))
path_date = os.path.join(directory, date)
os.makedirs(path_date, exist_ok=True)
path_hour = os.path.join(path_date,datetime.now().strftime("%H"))
os.makedirs(path_hour, exist_ok=True)
path_time = os.path.join(path_date,time)
os.makedirs(path_time, exist_ok=True)
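# Resulting layout: <path>/PlanktonScope/<MM_DD_YYYY>/<HH_MM>/<MM_SS_microseconds>.jpg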
while True:
count+=1
print(count,nb_frame)
filename = os.path.join(path_hour,datetime.now().strftime("%M_%S_%f")+".jpg")
filename = os.path.join(path_time,datetime.now().strftime("%M_%S_%f")+".jpg")
camera.capture(filename)
print("pump_stepper.onestep(direction=+action+, style=stepper.DOUBLE")
client.publish("receiver/image", datetime.now().strftime("%M_%S_%f")+".jpg has been imaged.");
for i in range(10):
pump_stepper.onestep(direction=stepper.FORWARD, style=stepper.DOUBLE)
sleep(0.01)
sleep(0.5)
if(count>nb_frame):
# Metadata that is added to every object
local_metadata = {
"process_datetime": datetime.now(),
"acq_camera_resolution" : camera.resolution,
"acq_camera_iso" : camera.iso,
"acq_camera_shutter_speed" : camera.shutter_speed
}
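# config.txt is expected to contain a JSON object of additional EcoTaxa-style fields set from
# Node-RED, e.g. (hypothetical keys): {"sample_project": "PlanktonScope", "sample_operator": "..."}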
config_txt = open('/home/pi/PlanktonScope/config.txt','r')
node_red_metadata = json.loads(config_txt.read())
global_metadata = {**local_metadata, **node_red_metadata}
import_path = path_time
archive_fn = os.path.join(directory, str(date)+"_"+str(time)+"_ecotaxa_export.zip")
# Define processing pipeline
with Pipeline() as p:
# Recursively find .jpg files in import_path.
# Sort to get consecutive frames.
abs_path = Find(import_path, [".jpg"], sort=True, verbose=True)
# Extract name from abs_path
name = Call(lambda p: os.path.splitext(os.path.basename(p))[0], abs_path)
# Read image
img = ImageReader(abs_path)
# Apply running median to approximate the background image
flat_field = RunningMedian(img, 5)
# Correct image
img = img / flat_field
# Rescale intensities and convert to uint8 to speed up calculations
img = RescaleIntensity(img, in_range=(0, 1.1), dtype="uint8")
# Convert image to uint8 gray
img_gray = RGB2Gray(img)
img_gray = Call(img_as_ubyte, img_gray)
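# Build a binary mask: Canny edge detection, then dilation and closing to join the edges into
# solid regions, followed by an erosion to shrink the result back toward the object outline.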
img_canny = Call(canny, img_gray, sigma=0.3)
img_dilate = Call(dilation, img_canny)
img_closing = Call(closing, img_dilate)
mask = Call(erosion, img_closing)
# Show progress bar for frames
TQDM(Format("Frame {name}", name=name))
# Apply a threshold to find objects
#threshold = 204 # Call(skimage.filters.threshold_otsu, img_gray)
#mask = img_gray < threshold
# Find objects
regionprops = FindRegions(
mask, img_gray, min_area=1000, padding=10, warn_empty=name
)
# For an object, extract a vignette/ROI from the image
roi_orig = ExtractROI(img, regionprops, bg_color=255)
roi_orig
# Generate an object identifier
i = Enumerate()
#Call(print,i)
object_id = Format("{name}_{i:d}", name=name, i=i)
#Call(print,object_id)
# Calculate features. The calculated features are added to the global_metadata.
# Returns a Variable representing a dict for every object in the stream.
meta = CalculateZooProcessFeatures(
regionprops, prefix="object_", meta=global_metadata
)
# If CalculateZooProcessFeatures is not used, we need to copy global_metadata into the stream:
# meta = Call(lambda: global_metadata.copy())
# https://github.com/morphocut/morphocut/issues/51
# Add object_id to the metadata dictionary
meta["object_id"] = object_id
# Generate object filenames
orig_fn = Format("{object_id}.jpg", object_id=object_id)
# Write objects to an EcoTaxa archive:
# roi image in original color, roi image in grayscale, metadata associated with each object
EcotaxaWriter(archive_fn, (orig_fn, roi_orig), meta)
# Progress bar for objects
TQDM(Format("Object {object_id}", object_id=object_id))
Call(client.publish, "receiver/image", object_id)
p.run()
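# Running the pipeline writes archive_fn: a zip holding the per-object vignettes plus a TSV of
# their metadata, in the layout expected by an EcoTaxa import.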
sleep(sleep_during)
count=0
for i in range(nb_step):
pump_stepper.onestep(direction=stepper.FORWARD, style=stepper.DOUBLE)
sleep(0.01)
if topic!="image":
pump_stepper.release()
@@ -167,12 +324,8 @@ while True:
client.publish("receiver/image", "Interrompted");
break
if count>nb_frame:
print("The imaging is done.")
topic="wait"
client.publish("receiver/image", "Done");
break
else:
print("Waiting")
sleep(1)
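
The handlers above rely on global topic and message values that are set in callback code outside the hunks shown in this diff. A minimal sketch of typical paho-mqtt wiring that would populate them; the "actuator/#" topic filter and broker address are placeholders, not taken from the source:

    import paho.mqtt.client as mqtt

    topic = "wait"
    message = ""

    def on_connect(client, userdata, flags, rc):
        client.subscribe("actuator/#")  # placeholder topic filter

    def on_message(client, userdata, msg):
        # Keep the last topic segment ("pump", "focus", "image") and the payload text.
        global topic, message
        topic = msg.topic.split("/")[-1]
        message = msg.payload.decode()

    def on_log(client, userdata, level, buf):
        print(buf)

    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_log = on_log
    client.connect("127.0.0.1", 1883, 60)
    client.loop_start()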