diff --git a/scripts/planktoscope/imager.py b/scripts/planktoscope/imager.py
index d791b95..a6415f0 100644
--- a/scripts/planktoscope/imager.py
+++ b/scripts/planktoscope/imager.py
@@ -264,7 +264,9 @@ class ImagerProcess(multiprocessing.Process):
         nodered_metadata = last_message["config"]
         # Definition of the few important metadata
         local_metadata = {
-            "process_datetime": datetime.datetime.now().isoformat(),
+            "process_datetime": datetime.datetime.now()
+            .isoformat()
+            .split(".")[0],
             "acq_camera_resolution": self.__resolution,
             "acq_camera_iso": self.__iso,
             "acq_camera_shutter_speed": self.__shutter_speed,
@@ -350,6 +352,7 @@ class ImagerProcess(multiprocessing.Process):
             # We only keep the date '2020-09-25T15:25:21.079769'
             self.__global_metadata["process_datetime"].split("T")[0],
             str(self.__global_metadata["sample_id"]),
+            str(self.__global_metadata["acq_id"]),
         )
         if not os.path.exists(self.__export_path):
             # create the path!
@@ -374,7 +377,7 @@ class ImagerProcess(multiprocessing.Process):
             json.dumps(
                 {
                     "action": "move",
-                    "direction": "BACKWARD",
+                    "direction": "FORWARD",
                     "volume": self.__pump_volume,
                     "flowrate": 2,
                 }
@@ -399,6 +402,7 @@ class ImagerProcess(multiprocessing.Process):
         filename_path = os.path.join(self.__export_path, filename)
         logger.info(f"Capturing an image to {filename_path}")
 
+        # TODO Insert here a delay to stabilize the flow before we image
         # Capture an image with the proper filename
         self.__camera.capture(filename_path)
 
@@ -409,7 +413,7 @@ class ImagerProcess(multiprocessing.Process):
         # Publish the name of the image to via MQTT to Node-RED
         self.imager_client.client.publish(
             "status/imager",
-            f'{{"status":"{filename} has been imaged."}}',
+            f'{{"status":"{self.__img_done + 1}/{self.__img_goal} has been imaged to {filename}."}}',
         )
 
         # Increment the counter
diff --git a/scripts/planktoscope/segmenter.py b/scripts/planktoscope/segmenter.py
index 9088898..c0ac437 100644
--- a/scripts/planktoscope/segmenter.py
+++ b/scripts/planktoscope/segmenter.py
@@ -174,7 +174,7 @@ class SegmenterProcess(multiprocessing.Process):
 
         # Define the name of each object
         object_fn = morphocut.str.Format(
-            os.path.join("/home/pi/PlanktonScope/", "OBJECTS", "{name}.jpg"),
+            os.path.join(self.__working_path, "objects", "{name}.jpg"),
             name=object_id,
         )
 
@@ -242,31 +242,51 @@ class SegmenterProcess(multiprocessing.Process):
         self.segmenter_client.client.publish(
             "status/segmenter", '{"status":"Started"}'
         )
-
         img_paths = [x[0] for x in os.walk(self.__img_path)]
         logger.info(f"The pipeline will be run in {len(img_paths)} directories")
+        logger.debug(f"The pipeline will be run in these directories {img_paths}")
         for path in img_paths:
-            logger.info(f"Loading the metadata file for {path}")
-            with open(os.path.join(path, "metadata.json"), "r") as config_file:
-                self.__global_metadata = json.load(config_file)
-                logger.debug(f"Configuration loaded is {self.__global_metadata}")
+            logger.info("Checking for the presence of metadata.json")
+            if os.path.exists(os.path.join(path, "metadata.json")):
+                # The file exists, let's run the pipe!
+                logger.info(f"Loading the metadata file for {path}")
+                with open(os.path.join(path, "metadata.json"), "r") as config_file:
+                    self.__global_metadata = json.load(config_file)
+                    logger.debug(
+                        f"Configuration loaded is {self.__global_metadata}"
+                    )
 
-            # Define the name of the .zip file that will contain the images and the .tsv table for EcoTaxa
-            self.__archive_fn = os.path.join(
-                self.__ecotaxa_path,
-                # filename includes project name, timestamp and sample id
-                f"export_{self.__global_metadata['sample_project']}_{self.__global_metadata['process_datetime']}_{self.__global_metadata['sample_id']}.zip",
-            )
+                project = self.__global_metadata["sample_project"].replace(" ", "_")
+                date = self.__global_metadata["process_datetime"]
+                sample = self.__global_metadata["sample_id"]
+                # Define the name of the .zip file that will contain the images and the .tsv table for EcoTaxa
+                self.__archive_fn = os.path.join(
+                    self.__ecotaxa_path,
+                    # filename includes project name, timestamp and sample id
+                    f"export_{project}_{date}_{sample}.zip",
+                )
 
-            logger.info(f"Starting the pipeline in {path}")
-            # Start the MorphoCut Pipeline on the found path
-            self.__working_path = path
+                self.__working_path = path
 
-            try:
-                self.__pipe.run()
-            except Exception as e:
-                logger.exception(f"There was an error in the pipeline {e}")
-            logger.info(f"Pipeline has been run for {path}")
+                # Create the objects path
+                if not os.path.exists(os.path.join(self.__working_path, "objects")):
+                    # create the path!
+                    os.makedirs(os.path.join(self.__working_path, "objects"))
+
+                logger.debug(f"The archive folder is {self.__archive_fn}")
+
+                self.__create_morphocut_pipeline()
+
+                logger.info(f"Starting the pipeline in {path}")
+
+                # Start the MorphoCut Pipeline on the found path
+                try:
+                    self.__pipe.run()
+                except Exception as e:
+                    logger.exception(f"There was an error in the pipeline {e}")
+                logger.info(f"Pipeline has been run for {path}")
+            else:
+                logger.info("Moving to the next folder, this one's empty")
 
         # remove directory
         # shutil.rmtree(import_path)
@@ -320,7 +340,7 @@ class SegmenterProcess(multiprocessing.Process):
         )
 
         # Instantiate the morphocut pipeline
-        self.__create_morphocut_pipeline()
+        # self.__create_morphocut_pipeline()
 
         # Publish the status "Ready" to via MQTT to Node-RED
         self.segmenter_client.client.publish("status/segmenter", '{"status":"Ready"}')