diff --git a/SpeciesTrackerAndCounter/runtime-module.cfg b/SpeciesTrackerAndCounter/runtime-module.cfg
index e67e41a..f859706 100755
--- a/SpeciesTrackerAndCounter/runtime-module.cfg
+++ b/SpeciesTrackerAndCounter/runtime-module.cfg
@@ -4,7 +4,7 @@ module_enabled = True
 runtime.platforms = command
 
 [command]
-docker.image = speciestrackerandcounter:v1
+docker.image = speciestrackerandcounter:v2
 environments = Staged,Docker
 executable = python3 PythonScriptWrapper.py
 files = pydist, PythonScriptWrapper.py
diff --git a/SpeciesTrackerAndCounter/src/BQ_run_module.py b/SpeciesTrackerAndCounter/src/BQ_run_module.py
index 7472a06..f34129e 100755
--- a/SpeciesTrackerAndCounter/src/BQ_run_module.py
+++ b/SpeciesTrackerAndCounter/src/BQ_run_module.py
@@ -3,7 +3,8 @@
 from track import *
 from globox.src import globox
 from pathlib import Path
-
+from collections import defaultdict
+import argparse
 def convert_label(input_path, output_path):
     label_path = Path(input_path) # Where the .txt files are
     image_path = Path(input_path)
@@ -35,8 +36,62 @@ def convert_label(input_path, output_path):
     annotations.show_stats()
     annotations.save_coco(save_file, label_to_id =label_to_id, imageid_to_id=imageid_to_id, auto_ids=True, verbose=True)
 
+def convert_to_cvat_video(input_path, output_path):
+    id_to_label = {
+        0: 'fragile pink urchin',
+        1: 'gray gorgonian',
+        2: 'squat lobster',
+        3: 'basket star',
+        4: 'long legged sunflower star',
+        5: 'yellow gorgonian',
+        6: 'white slipper sea cucumber',
+        7: 'white spine sea cucumber',
+        8: 'red swiftia gorgonian',
+        9: 'UI laced sponge',
+    }
+
+    # input_path = '/home/bowen68/projects/bisque/Modules/SpeciesTrackerAndCounter/src/examples/output/example_more.txt'
+    # frame_num, class_id, tracker_id, *box_xyxy (x_min y_min x_max y_max), confidence
+    width = 1920
+    height = 1080
+    track_dict = defaultdict(list)
+    with open(input_path, 'r') as f:
+        yolo_annotations = f.readlines()
+
+    for line in yolo_annotations:
+
+        staff = line.split()
+        frame_id = int(staff[0])
+        class_id = int(staff[1])
+        tracker_id = int(staff[2])
+        xtl, ytl, xbr, ybr = int(staff[3]), int(staff[4]), int(staff[5]), int(staff[6])
+        track_dict[tracker_id].append([frame_id, class_id, xtl, ytl, xbr, ybr])
+
+    output_file = open(output_path, 'w')
+    output_file.write('<annotations>\n')
+    for track_id, item in track_dict.items():
+        if len(item) != 1:
+            label_name = id_to_label[item[0][1]]
+            string = f'<track id="{track_id}" label="{label_name}">\n'
+            output_file.write(string)
+
+            for i, (frame_id, class_id, xtl, ytl, xbr, ybr) in enumerate(item):
+                outside = 0
+                string = f'<box frame="{frame_id}" outside="{outside}" occluded="0" keyframe="1" xtl="{xtl}" ytl="{ytl}" xbr="{xbr}" ybr="{ybr}" z_order="0">\n</box>\n'
+
+                if i != len(item)-1:
+                    outside = 0
+                else:
+                    outside = 1
+                    string = f'<box frame="{frame_id}" outside="{outside}" occluded="0" keyframe="1" xtl="{xtl}" ytl="{ytl}" xbr="{xbr}" ybr="{ybr}" z_order="0">\n</box>\n'
+                output_file.write(string)
+
+            output_file.write('</track>\n')
+    output_file.write('</annotations>\n')
+    output_file.close()
+
 
 # input_path_dict will have input file paths with keys corresponding to the input names set in the cli.
 def run_module(input_path_dict, output_folder_path, min_hysteresis=100, max_hysteresis=200):
@@ -59,7 +114,8 @@ def run_module(input_path_dict, output_folder_path, min_hysteresis=100, max_hyst
     _, file_name = os.path.split(input_video_path)
 
     ##### Run algorithm #####
     video_output_path, hdf_path, anno_path = track(input_video_path)
-
+    xml_path = anno_path.split('.')[0] + '.xml'
+    convert_to_cvat_video(anno_path, xml_path)
     # ##### Save output #####
     # output_folder_path = '/module/src/runs/detect/detection/'
@@ -71,7 +127,8 @@ def run_module(input_path_dict, output_folder_path, min_hysteresis=100, max_hyst
     output_paths_dict['Output Video'] = video_output_path
     output_paths_dict['Output Counts'] = hdf_path
-    output_paths_dict['Annotation File'] = anno_path
+    output_paths_dict['YOLO Annotation File'] = anno_path
+    output_paths_dict['CVAT Annotation File'] = xml_path
 
     # merge labels to one file
     # output_label_folder = '/module/src/runs/detect/detection/labels/'
     # read_files = glob.glob(output_label_folder + "*.txt")
@@ -94,13 +151,19 @@ def run_module(input_path_dict, output_folder_path, min_hysteresis=100, max_hyst
     return output_paths_dict
 
 if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--input")
+    args = parser.parse_args()
+    print(f'Running on {args.input}')
+
     # Place some code to test implementation
 
     # Define input_path_dict and output_folder_path
     input_path_dict = {}
     current_directory = os.getcwd()
 
     # Place test image in current directory
-    input_path_dict['Input Video'] = os.path.join(current_directory,'examples/test_video.mp4') # KEY MUST MATCH INPUT NAME SET IN CLI
+    input_path_dict['Input Video'] = os.path.join(current_directory, args.input) # KEY MUST MATCH INPUT NAME SET IN CLI
+    # input_path_dict['Input Video'] = os.path.join(current_directory,'examples/example_more.mp4') # KEY MUST MATCH INPUT NAME SET IN CLI
     output_folder_path = current_directory
 
     # Run algorithm and return output_paths_dict
diff --git a/SpeciesTrackerAndCounter/src/supervision/tools/line_counter.py b/SpeciesTrackerAndCounter/src/supervision/tools/line_counter.py
index d0876ef..c7eea15 100755
--- a/SpeciesTrackerAndCounter/src/supervision/tools/line_counter.py
+++ b/SpeciesTrackerAndCounter/src/supervision/tools/line_counter.py
@@ -20,7 +20,7 @@ def __init__(self, start: Point, end: Point):
         self.tracker_state: Dict[str, bool] = {}
         self.in_count: int = 0
         self.out_count: int = 0
-        self.class_dict = dict(zip(range(0, 9), [0]*10))
+        self.class_dict = dict(zip(range(0, 10), [0]*10))
 
     def update(self, detections: Detections):
         """
diff --git a/SpeciesTrackerAndCounter/src/track.py b/SpeciesTrackerAndCounter/src/track.py
index 0073e4c..aa2ada2 100755
--- a/SpeciesTrackerAndCounter/src/track.py
+++ b/SpeciesTrackerAndCounter/src/track.py
@@ -31,7 +31,7 @@
 
 @dataclass(frozen=True)
 class BYTETrackerArgs:
-    track_thresh: float = 0.25
+    track_thresh: float = 0.1
     track_buffer: int = 30
     match_thresh: float = 0.8
     aspect_ratio_thresh: float = 3.0
@@ -106,11 +106,11 @@ def track(source_video_path):
     video_name = video_name_no_extension + '_output.mp4'
     target_video_path = os.path.join(target_folder_path, video_name)
-    CONF_THRES = 0.25
-    IOU_THRES = 0.45
+    CONF_THRES = 0.1
+    IOU_THRES = 0.2
     MAX_DET = 1000
-    LINE_START = Point(50, 600)
-    LINE_END = Point(1920-50, 600)
+    LINE_START = Point(5, 700)
+    LINE_END = Point(1920-5, 700)
 
     # create BYTETracker instance
     byte_tracker = BYTETracker(BYTETrackerArgs())
     # create VideoInfo instance
@@ -150,10 +150,10 @@ def track(source_video_path):
             xyxy = det[:,:4]
             confidence = det[:,4]
             class_id = det[:,5].astype(int)
-            for i in range(len(xyxy)):
-                line = (frame_num, class_id[i], *xyxy[i]) # label format
-                with open(txt_path, 'a') as f:
-                    f.write(('%g ' * len(line)).rstrip() % line + '\n')
+            # for i in range(len(xyxy)):
+            #     line = (frame_num, class_id[i], *xyxy[i]) # label format
+            #     with open(txt_path, 'a') as f:
+            #         f.write(('%g ' * len(line)).rstrip() % line + '\n')
 
             detections = Detections(
@@ -184,6 +184,15 @@ def track(source_video_path):
             line_annotator.annotate(frame=frame0, line_counter=line_counter)
             sink.write_frame(frame0)
             frame_num += 1
+
+            # save detection and tracking results to file, one line per box, formatted as:
+            # frame_num, class_id, tracker_id, *box_xyxy (x_min y_min x_max y_max), confidence
+            for box_xyxy, confidence, class_id, tracker_id in detections:
+                box_xyxy = box_xyxy.astype(int)
+                line = (frame_num, class_id, tracker_id, *box_xyxy, confidence)
+                line_str = ('%g ' * len(line)).rstrip() % line + '\n'
+                with open(txt_path, 'a') as f:
+                    f.write(line_str)
     # breakpoint()
     # print(line_counter.class_dict)
     name_count_dict = {}
@@ -199,4 +208,5 @@ def track(source_video_path):
     return target_video_path, hdf_path, txt_path
 
 if __name__ == "__main__":
-    track()
\ No newline at end of file
+    input_path = '/home/bowen68/projects/bisque/Modules/SpeciesTrackerAndCounter/src/examples/example_more.mp4'
+    track(input_path)
\ No newline at end of file
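
Note: with the argparse hook added to BQ_run_module.py, the module can be smoke-tested from the command line roughly as below (a sketch; the example clip name comes from the repo's examples folder and is assumed to still be present, and any video path relative to the working directory should work):

    cd SpeciesTrackerAndCounter/src
    python3 BQ_run_module.py --input examples/test_video.mp4

run_module then returns output_paths_dict with the 'Output Video', 'Output Counts', 'YOLO Annotation File' and 'CVAT Annotation File' paths.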