Commit
initial commit for the rwhe calibration script #939
Kazadhum committed May 3, 2024
1 parent 3a6fe60 commit 8c60981
Showing 1 changed file with 27 additions and 0 deletions.
27 changes: 27 additions & 0 deletions
atom_evaluation/scripts/other_calibrations/rwhe_calib_ali.py
@@ -0,0 +1,27 @@
#!/usr/bin/env python3

import argparse
import json

def main():

    # Parse command line arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-json", "--json_file", type=str, required=True,
                    help="Json file containing input dataset.")
    args = vars(ap.parse_args())

    json_file = args['json_file']

    # Read dataset file
    f = open(json_file, 'r')
    dataset = json.load(f)

    # Close dataset file
    f.close()

    print(dataset)


if __name__ == '__main__':
    main()

6 comments on commit 8c60981

@manuelgitgomes (Collaborator)

@Kazadhum not to be picky, but we have two functions to read the dataset already.
One to use if you want to load the images/pcd:

def loadResultsJSON(json_file, collection_selection_function=None):
    dataset = loadJSONFile(json_file)
    dataset_folder = os.path.dirname(json_file)
    bridge = CvBridge()

    # Load images from files into memory. Images in the json file are stored in separate png files and in their place
    # a field "data_file" is saved with the path to the file. We must load the images from the disk.
    # Do the same for point clouds saved in pcd files
    skipped_loading = []
    for collection_key, collection in dataset['collections'].items():

        # Check if collection is listed to be ignored by csf and do not load image and point cloud if it is
        if not collection_selection_function is None:
            if not collection_selection_function(collection_key):  # use the lambda expression csf
                skipped_loading.append(collection_key)
                continue

        for sensor_key, sensor in dataset['sensors'].items():
            if not (sensor['modality'] == 'rgb' or sensor['modality'] == 'lidar3d' or sensor['modality'] == 'lidar2d' or
                    sensor['modality'] == 'depth'):
                continue  # only process images or point clouds

            # Check if we really need to load the file.
            if 'data' in collection['data'][sensor_key]:
                load_file = False
            elif 'data_file' in collection['data'][sensor_key]:
                filename = dataset_folder + '/' + collection['data'][sensor_key]['data_file']
                if os.path.isfile(filename):
                    load_file = True
                else:
                    raise ValueError('Datafile points to ' + collection['data'][sensor_key]['data_file'] +
                                     ' but file ' + filename + ' does not exist.')
            else:
                raise ValueError('Dataset does not contain data nor data_file folders.')

            if load_file and (sensor['modality'] == 'rgb'):  # Load image.
                filename = os.path.dirname(json_file) + '/' + collection['data'][sensor_key]['data_file']
                cv_image = cv2.imread(filename)  # Load image from file
                dict_image = getDictionaryFromCvImage(cv_image)  # from opencv image to dictionary

                # Check if loaded image has the same properties as the dataset in collection['data'][sensor_key]
                assert collection['data'][sensor_key]['height'] == dict_image['height'], 'Image height must be the same'
                assert collection['data'][sensor_key]['width'] == dict_image['width'], 'Image width must be the same'

                collection['data'][sensor_key]['data'] = dict_image['data']  # set data field of collection
                collection['data'][sensor_key]['encoding'] = dict_image['encoding']
                collection['data'][sensor_key]['step'] = dict_image['step']

                # Previous code, did not preserve frame_id and other properties
                # collection['data'][sensor_key].update(getDictionaryFromCvImage(cv_image))

            elif load_file and sensor['modality'] == 'depth':
                filename = os.path.dirname(json_file) + '/' + collection['data'][sensor_key]['data_file']
                # print(collection['data'][sensor_key]['header']['frame_id'])
                cv_image_int16_tenths_of_millimeters = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
                cv_image_float32_meters = convertDepthImage16UC1to32FC1(cv_image_int16_tenths_of_millimeters,
                                                                        scale=10000.0)
                # collection['data'][sensor_key]['encoding'] = '32FC1'
                # cv2.imshow("cv_image_float32_meters", cv_image_int16_tenths_of_millimeters)
                # cv2.waitKey(0)
                # imageShowUInt16OrFloat32OrBool(cv_image_float32_meters, "float32_load_file")
                # cv2.waitKey(5)
                # printImageInfo(cv_image_int16_tenths_of_millimeters, text='cv_image_int16_tenths_of_millimeters')
                # printImageInfo(cv_image_float32_meters, text='cv_image_float32_meters')
                # collection['data'][sensor_key].update(getDictionaryFromDepthImage(cv_image_float32_meters))
                dict = getDictionaryFromDepthImage(cv_image_float32_meters)
                collection['data'][sensor_key]['data'] = dict['data']
                collection['data'][sensor_key]['encoding'] = dict['encoding']
                collection['data'][sensor_key]['step'] = dict['step']
                # del dict['data']
                # del collection['data'][sensor_key]['data']
                # print(dict)
                # print(collection['data'][sensor_key])
                # exit(0)
                # msg_33 = message_converter.convert_dictionary_to_ros_message('sensor_msgs/Image', dict)
                # image_33 = bridge.imgmsg_to_cv2(msg_33, desired_encoding='passthrough')
                # imageShowUInt16OrFloat32OrBool(image_33, "load_file_dic")
                # cv2.waitKey(5)
                # TODO eliminate data_file
                # TODO Why this is not needed for rgb? Should be done as well
                # print(collection['data'][sensor_key]['header']['frame_id'])
                # print(collection['data'][sensor_key].keys())
                # TODO verify if values in the dataset are ok
                # exit(0)

            # Load point cloud.
            elif load_file and (sensor['modality'] == 'lidar3d' or sensor['modality'] == 'lidar2d'):
                filename = os.path.dirname(json_file) + '/' + collection['data'][sensor_key]['data_file']
                frame_id = str(collection['data'][sensor_key]['header']['frame_id'])

                # setup header for point cloud from existing dictionary data
                header = Header()
                header.frame_id = frame_id
                time = rospy.Time()
                time.secs = collection['data'][sensor_key]['header']['stamp']['secs']
                time.nsecs = collection['data'][sensor_key]['header']['stamp']['nsecs']
                header.stamp = time
                header.seq = collection['data'][sensor_key]['header']['seq']

                # read point cloud from disk
                msg = read_pcd(filename, cloud_header=header)

                # convert to dictionary
                collection['data'][sensor_key].update(message_converter.convert_ros_message_to_dictionary(msg))

    if skipped_loading:  # list is not empty
        print('Skipped loading images and point clouds for collections: ' + str(skipped_loading) + '.')

    return dataset, json_file

The other if you do not want to load the images/pcd:

def loadJSONFile(json_file):
    """
    Loads the json file containing the dataset, without the data.
    :param json_file: json file to load.
    """
    json_file, _, _ = uriReader(json_file)
    f = open(json_file, 'r')
    dataset = json.load(f)
    return dataset

Please use them to keep consistency.
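
For reference, a minimal sketch of how the script could call these loaders instead of opening the json itself; the import path (atom_core.dataset_io) is an assumption here, the actual module layout may differ:

# Sketch only: the import path is an assumption, not confirmed in this thread.
from atom_core.dataset_io import loadResultsJSON, loadJSONFile

# With images/point clouds loaded into memory (returns the dataset and the json path):
dataset, json_file = loadResultsJSON(args['json_file'])

# Without loading the data fields (returns only the dataset dictionary):
dataset = loadJSONFile(args['json_file'])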

@Kazadhum (Collaborator Author) commented on 8c60981 May 3, 2024

Thank you @manuelgitgomes! I forgot about this. I'll fix it asap. I notice, though, that those functions don't close the files after creating the dataset object. Shouldn't they?

@manuelgitgomes (Collaborator)

Shouldn't they?

They should!
Could you please add it?
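
One possible shape for that fix, sketched here under the assumption that nothing else about loadJSONFile changes: a with statement closes the file even if json.load raises.

def loadJSONFile(json_file):
    """
    Loads the json file containing the dataset, without the data.
    :param json_file: json file to load.
    """
    json_file, _, _ = uriReader(json_file)
    with open(json_file, 'r') as f:  # file is closed when the block exits
        dataset = json.load(f)
    return dataset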

@Kazadhum (Collaborator Author)

Sure!

@Kazadhum (Collaborator Author)

Done!

@Kazadhum (Collaborator Author)

It took some time because I was battling with a "ModuleNotFoundError", but it's done. It now uses the loadResultsJSON() function.
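
A rough sketch of what the updated main() might look like after that change (the updated file is not shown in this thread, and the import path is an assumption):

#!/usr/bin/env python3

import argparse

# Assumed import path for the existing loader; the real module layout may differ.
from atom_core.dataset_io import loadResultsJSON


def main():

    # Parse command line arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-json", "--json_file", type=str, required=True,
                    help="Json file containing input dataset.")
    args = vars(ap.parse_args())

    # Read dataset file (also loads images/point clouds referenced by data_file fields)
    dataset, json_file = loadResultsJSON(args['json_file'])

    print(dataset)


if __name__ == '__main__':
    main()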
