 import viser
 
 from py123d.datatypes.scene.abstract_scene import AbstractScene
+from py123d.datatypes.sensors.fisheye_mei_camera import FisheyeMEICamera, FisheyeMEICameraMetadata, FisheyeMEICameraType
 from py123d.datatypes.sensors.lidar import LiDARType
 from py123d.datatypes.sensors.pinhole_camera import PinholeCamera, PinholeCameraType
 from py123d.datatypes.vehicle_state.ego_state import EgoStateSE3
@@ -50,7 +51,7 @@ def _add_camera_frustums_to_viser_server(camera_type: PinholeCameraType) -> None
                         f"camera_frustums/{camera_type.serialize()}",
                         fov=camera.metadata.fov_y,
                         aspect=camera.metadata.aspect_ratio,
-                        scale=viser_config.camera_frustum_frustum_scale,
+                        scale=viser_config.camera_frustum_scale,
                         image=camera_image,
                         position=camera_position,
                         wxyz=camera_quaternion,
@@ -74,6 +75,60 @@ def _add_camera_frustums_to_viser_server(camera_type: PinholeCameraType) -> None
     return None
 
 
+def add_fisheye_frustums_to_viser_server(
+    scene: AbstractScene,
+    scene_interation: int,
+    initial_ego_state: EgoStateSE3,
+    viser_server: viser.ViserServer,
+    viser_config: ViserConfig,
+    fisheye_frustum_handles: Dict[FisheyeMEICameraType, viser.CameraFrustumHandle],
+) -> None:
+    if viser_config.fisheye_frustum_visible:
+        scene_center_array = initial_ego_state.center.point_3d.array
+        ego_pose = scene.get_ego_state_at_iteration(scene_interation).rear_axle_se3.array
+        ego_pose[StateSE3Index.XYZ] -= scene_center_array
+
+        def _add_fisheye_frustums_to_viser_server(fisheye_camera_type: FisheyeMEICameraType) -> None:
+            camera = scene.get_fisheye_mei_camera_at_iteration(scene_interation, fisheye_camera_type)
+            if camera is not None:
+                fcam_position, fcam_quaternion, fcam_image = _get_fisheye_camera_values(
+                    camera,
+                    ego_pose.copy(),
+                    viser_config.fisheye_frustum_image_scale,
+                )
+                if fisheye_camera_type in fisheye_frustum_handles:
+                    fisheye_frustum_handles[fisheye_camera_type].position = fcam_position
+                    fisheye_frustum_handles[fisheye_camera_type].wxyz = fcam_quaternion
+                    fisheye_frustum_handles[fisheye_camera_type].image = fcam_image
+                else:
+                    # NOTE @DanielDauner: The FOV is just taken as a static value here.
+                    # The `calculate_fov` helper below could estimate it from the camera metadata instead.
+                    fisheye_frustum_handles[fisheye_camera_type] = viser_server.scene.add_camera_frustum(
+                        f"camera_frustums/{fisheye_camera_type.serialize()}",
+                        fov=np.radians(185.0),  # static vertical FOV; viser expects radians
+                        aspect=camera.metadata.aspect_ratio,
+                        scale=viser_config.fisheye_frustum_scale,
+                        image=fcam_image,
+                        position=fcam_position,
+                        wxyz=fcam_quaternion,
+                    )
+
+            return None
+
+        # NOTE: To speed up adding the camera frustums, we use multithreading and resize the images.
+        with concurrent.futures.ThreadPoolExecutor(
+            max_workers=len(viser_config.fisheye_mei_camera_frustum_types)
+        ) as executor:
+            future_to_camera = {
+                executor.submit(_add_fisheye_frustums_to_viser_server, fcam_type): fcam_type
+                for fcam_type in viser_config.fisheye_mei_camera_frustum_types
+            }
+            for future in concurrent.futures.as_completed(future_to_camera):
+                _ = future.result()
+
+    return None
+
+
 def add_camera_gui_to_viser_server(
     scene: AbstractScene,
     scene_interation: int,
@@ -183,10 +238,90 @@ def _get_camera_values(
     return camera_position, camera_rotation, camera_image
 
 
+def _get_fisheye_camera_values(
+    camera: FisheyeMEICamera,
+    ego_pose: npt.NDArray[np.float64],
+    resize_factor: Optional[float] = None,
+) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
+    assert ego_pose.ndim == 1 and len(ego_pose) == len(StateSE3Index)
+
+    rel_camera_pose = camera.extrinsic.array
+    abs_camera_pose = convert_relative_to_absolute_se3_array(origin=ego_pose, se3_array=rel_camera_pose)
+
+    camera_position = abs_camera_pose[StateSE3Index.XYZ]
+    camera_rotation = abs_camera_pose[StateSE3Index.QUATERNION]
+
+    camera_image = _rescale_image(camera.image, resize_factor)
+    return camera_position, camera_rotation, camera_image
+
+
 def _rescale_image(image: npt.NDArray[np.uint8], scale: float) -> npt.NDArray[np.uint8]:
     if scale == 1.0:
         return image
     new_width = int(image.shape[1] * scale)
     new_height = int(image.shape[0] * scale)
     downscaled_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_LINEAR)
     return downscaled_image
+
+
+def calculate_fov(metadata: FisheyeMEICameraMetadata) -> Tuple[float, float]:
+    """
+    Calculate horizontal and vertical FOV in degrees.
+
+    Returns:
+        (horizontal_fov, vertical_fov) in degrees
+    """
+    xi = metadata.mirror_parameter
+    gamma1 = metadata.projection.gamma1
+    gamma2 = metadata.projection.gamma2
+    u0 = metadata.projection.u0
+    v0 = metadata.projection.v0
+
+    width = metadata.width
+    height = metadata.height
+
+    # Corner positions (furthest from the principal point).
+    corners = np.array([[0, 0], [width, 0], [0, height], [width, height]])
+
+    # Convert to normalized coordinates.
+    x_norm = (corners[:, 0] - u0) / gamma1
+    y_norm = (corners[:, 1] - v0) / gamma2
+
+    # For the MEI model, the inverse projection (ignoring distortion for this
+    # FOV estimate) gives the incident angle
+    #   θ = arctan2(r, 1 - ξ·√(1 + r²)),  with r² = x² + y²,
+    # which reduces to θ = arctan(r) for a perspective camera (ξ = 0).
+    # The horizontal and vertical FOVs are computed per axis below.
+    x_max = np.max(np.abs(x_norm))
+    y_max = np.max(np.abs(y_norm))
+
+    if xi < 1e-6:  # Perspective camera
+        h_fov = 2 * np.arctan(x_max)
+        v_fov = 2 * np.arctan(y_max)
+    else:
+        denom_h = 1 - xi * np.sqrt(1 + x_max**2)
+        denom_v = 1 - xi * np.sqrt(1 + y_max**2)
+        # arctan2 keeps θ above 90° when the denominator turns negative (FOV > 180°).
+        h_fov = 2 * np.arctan2(x_max, denom_h)
+        v_fov = 2 * np.arctan2(y_max, denom_v)
+
+    # arctan/arctan2 return radians; convert to the documented degrees.
+    return float(np.degrees(h_fov)), float(np.degrees(v_fov))
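
A possible follow-up to the NOTE in `add_fisheye_frustums_to_viser_server`, sketched here rather than asserted: the static vertical FOV could be replaced by the estimate from `calculate_fov`. This is a minimal sketch, assuming viser's `add_camera_frustum` takes the FOV in radians and that `camera.metadata` is the `FisheyeMEICameraMetadata` used above; all other names come from this diff.

```python
import numpy as np

# Sketch: derive the frustum FOV from the camera calibration instead of a
# hard-coded value. calculate_fov returns degrees; viser expects radians.
_, v_fov_deg = calculate_fov(camera.metadata)
fisheye_frustum_handles[fisheye_camera_type] = viser_server.scene.add_camera_frustum(
    f"camera_frustums/{fisheye_camera_type.serialize()}",
    fov=np.radians(v_fov_deg),
    aspect=camera.metadata.aspect_ratio,
    scale=viser_config.fisheye_frustum_scale,
    image=fcam_image,
    position=fcam_position,
    wxyz=fcam_quaternion,
)
```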
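For intuition on why `calculate_fov` uses `arctan2` rather than a plain `arctan`, a quick numeric check with made-up calibration values (the mirror parameter and normalized corner offset below are hypothetical, chosen only so the denominator goes negative):

```python
import numpy as np

xi = 1.0     # hypothetical mirror parameter
x_max = 2.0  # hypothetical normalized corner offset

denom = 1 - xi * np.sqrt(1 + x_max**2)  # 1 - sqrt(5) ≈ -1.236
h_fov = 2 * np.arctan2(x_max, denom)    # arctan2 keeps the half-angle in (90°, 180°)
print(np.degrees(h_fov))                # ≈ 243.4, i.e. a view wider than 180°
```

A plain `arctan(x_max / denom)` would return a negative angle here, silently collapsing any field of view beyond 180 degrees.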