/*r2_t tf_distorted_sensor_coords_to_undistorted_sensor_coords (camera_parameters_t cpar, r2_t pd) { r2_t pu; double distortion_factor = 1 + cpar->kappa * r2_norm_sqr (&pd); pu.c[0] = pd.c[0] * distortion_factor; pu.c[1] = pd.c[1] * distortion_factor; return pu; }*/ /*r2_t tf_undistorted_sensor_coords_to_distorted_sensor_coords (camera_parameters_t cpar, r2_t pu) { double Xu = pu.c[0], Yu = pu.c[1]; double Ru, Rd, lambda, c, d; double Q, R, D, S, T, sinT, cosT; if (((Xu == 0) && (Yu == 0)) || (cpar->kappa == 0)) { return pu; } Ru = hypot (Xu, Yu); // sqrt(Xu*Xu+Yu*Yu) c = 1 / cpar->kappa; d = -c * Ru; Q = c / 3; R = -d / 2; D = CUB (Q) + SQR (R); if (D >= 0) { // one real root D = sqrt (D); S = cbrt (R + D); T = cbrt (R - D); Rd = S + T; if (Rd < 0) { Rd = sqrt (-1 / (3 * cpar->kappa)); fprintf (stderr, "\nWarning: undistorted image point to distorted image point mapping limited by\n"); fprintf (stderr, " maximum barrel distortion radius of %lf\n", Rd); fprintf (stderr, " (Xu = %lf, Yu = %lf) -> (Xd = %lf, Yd = %lf)\n\n", Xu, Yu, Xu * Rd / Ru, Yu * Rd / Ru); } } else { // three real roots D = sqrt (-D); S = cbrt (hypot (R, D)); T = atan2 (D, R) / 3; SINCOS (T, sinT, cosT); // the larger positive root is 2*S*cos(T) // the smaller positive root is -S*cos(T) + sqrt(3)*S*sin(T) // the negative root is -S*cos(T) - sqrt(3)*S*sin(T) Rd = -S * cosT + SQRT3 * S * sinT; // use the smaller positive root } lambda = Rd / Ru; r2_t pd; pd.c[0] = Xu * lambda; pd.c[1] = Yu * lambda; return pd; } */ r3_t tf_camera_compute_world_error_weng(camera_parameters_t cpar, r3_t p_w, r2_t p_i); /* This error metric is taken from "Camera Calibration with Distortion Models and Accuracy Evaluation" J. Weng, P. Cohen, and M. Herniou IEEE Transactions on PAMI, Vol. 14, No. 10, October 1992, pp965-980. Returns the vector difference between the 3D coordinates {p_w_pre} of a point, estimated from {cpar} and the given image coordinates {p_i}, and its known 3D coordinates {p_w}. 
The coordinates {p_w_pre} are computed by back-projecting {p_i} onto the plane perpendicular to the camera axis that passes through {p_w}. */ r3_t tf_camera_compute_world_error_weng(camera_parameters_t cpar, r3_t p_w, r2_t p_i) { double xc_est, yc_est, zc_est; double fu, fv; double squared_error; /* calculate the location of the data point in camera coordinates */ r3_t pc = tf_world_coords_to_camera_coords (cpar, p_w); /* convert from 2D image coordinates to distorted sensor coordinates */ r2_t pd = tf_image_coords_to_sensor_coords (cpar, p_i); /* convert from distorted sensor coordinates to undistorted sensor plane coordinates */ r2_t pu = tf_distorted_sensor_coords_to_undistorted_sensor_coords (cpar, pd); /* estimate the location of the data point by back projecting the image position */ zc_est = pc.c[2]; xc_est = zc_est * pu.c[0] / cpar->f; yc_est = zc_est * pu.c[1] / cpar->f; fu = cpar->sx * cpar->f / cpar->dpx; fv = cpar->f / cpar->dpy; squared_error = (SQR (xc_est - pc.c[0]) + SQR (yc_est - pc.c[1])) / (SQR (zc_est) * (1 / SQR (fu) + 1 / SQR (fv)) / 12); return squared_error; } /* * This routine calculates the mean, standard deviation, max, and sum-of-squared * * error of the magnitude of the error, in undistorted image coordinates, between* * the measured location of a feature point in the image plane and the image of * * the 3D feature point as projected through the calibrated model. * * The calculation is for all of the points in the calibration data set. * */ void tf_undistorted_image_plane_error ( calibration_data_t cdat, camera_parameters_t cpar, FILE *ferr ); /* * This routine performs an error measure proposed by Weng in IEEE PAMI, * * October 1992. 
* */ void tf_normalized_calibration_error ( calibration_data_t cdat, camera_parameters_t cpar, FILE *ferr ); void tf_norm_calibration_error_measures (calibration_data_t cdat, camera_parameters_t cpar, double *mean, double *stddev); double tf_norm_calibration_point_error ( int point, calibration_data_t cdat, camera_parameters_t cpar ); void tf_calib_summarize_undistorted_errors (calibration_data_t cdat, camera_parameters_t cpar, FILE *ferr) { int i; double x_pixel_error, y_pixel_error, error, squared_error; double max_error = 0, sum_error = 0, sum_squared_error = 0; double mean, stddev, max, sse; if (cdat->nmarks < 1) { mean = stddev = max = sse = 0; return; } for (i = 0; i < cdat->nmarks; i++) { /* calculate the ideal location of the image of the data point */ r3_t pc = tf_world_coords_to_camera_coords (cpar, cdat->world_coords[i]); /* convert from camera coordinates to undistorted sensor plane coordinates */ r2_t pu1 = tf_camera_coords_to_undistorted_sensor_coords (cpar, pc); /* convert from 2D image coordinates to distorted sensor coordinates */ r2_t pd = tf_image_coords_to_sensor_coords (cpar, cdat->image_coords[i]); r2_t pu2 = tf_distorted_sensor_coords_to_undistorted_sensor_coords (cpar, pd); /* determine the error between the ideal and actual location of the data point */ /* (in undistorted image coordinates) */ x_pixel_error = cpar->sx * (pu1.c[0] - pu2.c[0]) / cpar->dpx; y_pixel_error = (pu1.c[1] - pu2.c[1]) / cpar->dpy; squared_error = SQR (x_pixel_error) + SQR (y_pixel_error); error = sqrt (squared_error); sum_error += error; sum_squared_error += squared_error; max_error = MAX (max_error, error); } mean = sum_error / cdat->nmarks; max = max_error; sse = sum_squared_error; if (cdat->nmarks == 1) stddev = 0; else stddev = sqrt ((sum_squared_error - SQR (sum_error) / cdat->nmarks) / (cdat->nmarks - 1)); fprintf (ferr, "Undistorted image plane error:\n"); fprintf (ferr, "mean = %.6lf, stddev = %.6lf, max = %.6lf [pix], sse = %.6lf [pix^2]\n\n", mean, stddev, 
max, sse); } void tf_calib_print_normalized_error (calibration_data_t cdat, camera_parameters_t cpar, FILE *ferr) { double mean, stddev; tf_norm_calibration_error_measures (cdat, cpar, &mean, &stddev); fprintf (ferr, "Normalized calibration error:\n"); fprintf (ferr, "mean = %.6lf, stddev = %.6lf\n\n", mean, stddev); } void tf_norm_calibration_error_measures (calibration_data_t cdat, camera_parameters_t cpar, double *mean, double *stddev) { int i; double sum_error = 0, sum_squared_error = 0; if (cdat->nmarks < 1) { *mean = *stddev = 0; return; } for (i = 0; i < cdat->nmarks; i++) { double squared_error = tf_norm_calibration_point_error (i, cdat, cpar); sum_error += sqrt (squared_error); sum_squared_error += squared_error; } *mean = sum_error / cdat->nmarks; if (cdat->nmarks == 1) *stddev = 0; else *stddev = sqrt ((sum_squared_error - SQR (sum_error) / cdat->nmarks) / (cdat->nmarks - 1)); } /* ---------------------------------------------------------------------- */ /* calculate the undistorted image plane error statistics for the data set */ tf_calib_summarize_undistorted_errors (cdat, cpar, ferr); /* calculate the error statistics for the data set */ tf_calib_print_normalized_calibration_error (cdat, cpar, ferr); void tf_distorted_image_plane_error ( calibration_data_t cdat, camera_parameters_t cpar, FILE *ferr ); /* ---------------------------------------------------------------------- */ void tf_clip_cpar_to_cspec ( camera_specs_t cspec, camera_parameters_t cpar, tf_optimization_choice_t *which ) { /* tf_clip_to_range ("Tx", &(cpar->S.c[1][0]), which->Vx, &(cspec->Vx_w)); */ /* tf_clip_to_range ("Ty", &(cpar->S.c[2][0]), which->Vy, &(cspec->Vy_w)); */ /* tf_clip_to_range ("Tz", &(cpar->S.c[3][0]), which->Vz, &(cspec->Vz_w)); */ tf_clip_to_range ("f", &(cpar->f), which->f, &(cspec->f)); tf_clip_to_range ("kappa", &(cpar->kappa), which->kappa, &(cspec->kappa)); tf_clip_to_range ("sx", &(cpar->sx), which->sx, &(cspec->sx)); tf_clip_to_range ("Cx", &(cpar->Cx), 
which->Cx, &(cspec->Cx)); tf_clip_to_range ("Cy", &(cpar->Cy), which->Cy, &(cspec->Cy)); } void tf_calib_guess2_adjust_Tz_f_of_far_away_camera ( int n, r3_t p_w[], r3_t b_w, r2_t p_u[], r2_t b_u, double weight[], camera_specs_t cspec, tf_optimization_choice_t *which, bool_t debug, r4x4_t *S, double *f ); /* Move the camera closer to honor constraints on {f} and {V}: */ tf_calib_guess2_adjust_Tz_f_of_far_away_camera (n, p_w, b_w, p_u, b_u, cdat->weight, cspec, which, debug, &cpar->S, &cpar->f); /*VERIFICAR ESTA FUNCAO*/ void tf_calib_guess2_adjust_Tz_f_of_far_away_camera ( int n, r3_t p_w[], r3_t b_w, r2_t p_u[], r2_t b_u, double weight[], camera_specs_t cspec, tf_optimization_choice_t *which, bool_t debug, r4x4_t *S, double *f ) { // Guess {f} and {Tz} given {R}: interval_t f_range = (which->f ? cspec->f : (interval_t){{cpar->f, cpar->f}}); if (debug) { fprintf(stderr, " Initial f range = [ %f _ %f ]\n", LO(f_range), HI(f_range)); } // Compute {Tz} range from range of {Vx_w,Vy_w,Vz_w} and the current {R} matrix: interval_t Tz_range; Tz_range = tf_calib_guess2_compute_Tz_range_given_R (cspec, cpar->S); if (debug) { fprintf(stderr, " Initial Tz range = [ %f _ %f ]\n", LO(Tz_range), HI(Tz_range)); } int use_max_Tz = TRUE; if (use_max_Tz) { //Use max value of Tz and compute f cpar->S.c[3][0] = HI(Tz_range); if (debug) { fprintf(stderr, " Initial guess of Tz = %f\n", cpar->S.c[3][0]); } if (LO(f_range) < HI(f_range)) { tf_calib_guess2_compute_f_given_S (n, p_w, b_w, p_u, b_u, cdat->weight, f_range, cpar);} else { cpar->f = interval_mid(&f_range); } tf_calib_guess2_adjust_f (cdat, cspec->f, cpar); if (debug) { fprintf(stderr, " Initial guess of f = %f\n", cpar->f); } } else { //Compute Tz and f together if ((LO(f_range) < HI(f_range)) || (LO(Tz_range) < HI(Tz_range))) { tf_calib_guess2_compute_Tz_f_given_R_Tx_Ty(n, p_w, p_u, cdat->weight, f_range, Tz_range, cpar); } else { cpar->S.c[3][0] = interval_mid(&Tz_range); cpar->f = interval_mid(&f_range); } if (debug) { 
fprintf(stderr, " Initial guess of Tz = %f\n", cpar->S.c[3][0]); fprintf(stderr, " Initial guess of f = %f\n", cpar->f); } } } interval_t tf_calib_guess2_compute_Tz_range_given_R (camera_specs_t cspec, r4x4_t S); void tf_calib_guess2_compute_f_given_S ( int n, r3_t p_w[], r3_t b_w, r2_t p_u[], r2_t b_u, double weight[], interval_t f_range, camera_parameters_t cpar); void tf_calib_guess2_compute_Tz_f_given_R_Tx_Ty ( int n, r3_t p_w[], r2_t p_u[], double weight[], interval_t f_range, interval_t Tz_range, camera_parameters_t cpar); void tf_calib_guess2_adjust_f ( calibration_data_t cdat, interval_t f_range, camera_parameters_t cpar ); /* Adjusts {f} to lie in the range {f_range}, adjusting also {Tz} so that the size of the data point cloud is approximately preserved. */ interval_t tf_calib_guess2_compute_Tz_range_given_R (camera_specs_t cspec, r4x4_t S) { /* Compute {Tz} range from range of {Vx_w,Vy_w,Vz_w} and the current {R} matrix: */ int ix, iy, iz; r3_t t = {{S.c[3][1], S.c[3][2], S.c[3][3]}}; interval_t Tz_range = (interval_t){{+INFINITY, -INFINITY}}; /* Enumerate the corners of the camera position range: */ for (ix = 0; ix <= 1; ix++) { for (iy = 0; iy <= 1; iy++) { for (iz = 0; iz <= 1; iz++) { /* Get a corner {v_w} of the box: */ r3_t v_w = {{cspec->Vx_w.end[ix], cspec->Vx_w.end[iy], cspec->Vx_w.end[iz]}}; /* Compute the {Tz_c} for the camera placed at that corner: */ double Tz_c = -r3_dot (&v_w, &t); /* Update the range of {Tz}: */ Tz_range.end[0] = fmin(Tz_range.end[0], Tz_c); Tz_range.end[1] = fmax(Tz_range.end[1], Tz_c); } } } fprintf(stderr, "Tz_range [%f , %f]\n", Tz_range.end[0], Tz_range.end[1]); return Tz_range; } void tf_calib_guess2_compute_f_given_S ( int n, r3_t p_w[], r3_t b_w, r2_t p_u[], r2_t b_u, double weight[], interval_t f_range, camera_parameters_t cpar) { int i; r3_t r = {{cpar->S.c[1][1], cpar->S.c[1][2], cpar->S.c[1][3]}}; r3_t s = {{cpar->S.c[2][1], cpar->S.c[2][2], cpar->S.c[2][3]}}; r3_t t = {{cpar->S.c[3][1], cpar->S.c[3][2], 
    cpar->S.c[3][3]}};
    /* Camera-frame X and Y coordinates of the barycenter: */
    double b_w_dot_r = r3_dot (&b_w, &r);
    double b_w_dot_s = r3_dot (&b_w, &s);
    /* Weighted sums of squared image-side and camera-side spreads: */
    double snum = 0.0, sden = 0.0;
    for (i = 0; i < n; i++)
      {
        /* Offsets of world point {i} from the barycenter, along camera X/Y: */
        double dx_c = r3_dot (&p_w[i], &r) - b_w_dot_r;
        double dy_c = r3_dot (&p_w[i], &s) - b_w_dot_s;
        /* Depth of point {i} in camera coordinates (rotated Z plus {Tz}): */
        double z_c = r3_dot (&p_w[i], &t) + cpar->S.c[3][0];
        /* Offsets of the undistorted image point from its barycenter: */
        double dx_u = p_u[i].c[0] - b_u.c[0];
        double dy_u = p_u[i].c[1] - b_u.c[1];
        double numi = (dx_u*dx_u + dy_u*dy_u);
        double deni = (dx_c*dx_c + dy_c*dy_c)/(z_c*z_c);
        snum += numi*weight[i];
        sden += deni*weight[i];
      }
    /* Estimate of {f} as ratio of image spread to depth-normalized world spread: */
    double f = sqrt(snum/sden);
    /* NOTE(review): clipping of {f} to {f_range} is commented out below,
       so the {f_range} parameter is currently unused -- confirm intent. */
    /*bool_t clipped = FALSE;
    double f_raw = f;
    if (f < LO(f_range)) {f = LO(f_range); clipped = TRUE;}
    if (f > HI(f_range)) {f = HI(f_range); clipped = TRUE;}
    if (clipped)
      { fprintf(stderr, "warning: f clipped\n");
        fprintf(stderr, " f = %f -> %f\n", f_raw, f);
      }*/
    cpar->f = f;
  }

/* Estimates {Tz} and {f} jointly by weighted least squares, given the
   rotation and the translations {Tx,Ty} already stored in {cpar->S}.
   Builds a 2n x 2 system {A x = b} with unknowns {x = (Tz, f)}, two
   equations per mark, dumps it to stderr, solves it, and stores the
   results into {cpar->S.c[3][0]} and {cpar->f}. */
void tf_calib_guess2_compute_Tz_f_given_R_Tx_Ty
  ( int n, r3_t p_w[], r2_t p_u[],
    double weight[],
    interval_t f_range,
    interval_t Tz_range,
    camera_parameters_t cpar)
  {
    int i;
    mat_rm_t A = tf_alloc_mat_rm (2*n, 2);
    rm_t b = tf_alloc_rm (2*n);
    rm_t w = tf_alloc_rm (2*n);
    rm_t x = tf_alloc_rm (2);
    fprintf(stderr, "%s: System A,B:\n", __FUNCTION__);
    for (i = 0; i < n; i++)
      {
        /* Rows 1..3 of the rotation part of {S}: */
        r3_t r = {{cpar->S.c[1][1], cpar->S.c[1][2], cpar->S.c[1][3]}};
        r3_t s = {{cpar->S.c[2][1], cpar->S.c[2][2], cpar->S.c[2][3]}};
        r3_t t = {{cpar->S.c[3][1], cpar->S.c[3][2], cpar->S.c[3][3]}};
        /* Camera-frame X and Y of world point {i} (rotation plus {Tx,Ty}): */
        double x_c = r3_dot (&p_w[i], &r) + cpar->S.c[1][0];
        double y_c = r3_dot (&p_w[i], &s) + cpar->S.c[2][0];
        /* Rows {2i} and {2i+1} of the system matrix: */
        double *Ai0 = &(A->c[(2*i + 0)*2]);
        double *Ai1 = &(A->c[(2*i + 1)*2]);
        Ai0[0] = p_u[i].c[0]; Ai0[1] = -x_c;
        Ai1[0] = p_u[i].c[1]; Ai1[1] = -y_c;
        double *bi0 = &(b->c[(2*i + 0)]);
        double *bi1 = &(b->c[(2*i + 1)]);
        /* Minus the rotated depth of point {i} (the {Tz}-free part of its depth): */
        double dz_c = -r3_dot (&p_w[i], &t);
        *bi0 = dz_c*p_u[i].c[0];
        *bi1 = dz_c*p_u[i].c[1];
        /* Both equations of mark {i} get that mark's weight: */
        double *wi0 = &(w->c[(2*i + 0)]);
        double *wi1 = &(w->c[(2*i + 1)]);
        *wi0 = weight[i];
        *wi1 = weight[i];
        /* Debug dump of the two rows just appended: */
        int j;
        for (j = 0; j < 2; j++) fprintf(stderr, " %12.8f ", A->c[(2*i+0)*2+j]);
        fprintf (stderr, " = ");
        fprintf(stderr, " %12.8f (%12.8f) ", b->c[(2*i+0)], w->c[(2*i+0)]);
        fprintf(stderr, "\n");
        for (j = 0; j < 2; j++) fprintf(stderr, " %12.8f ", A->c[(2*i+1)*2+j]);
        fprintf(stderr, " = ");
        fprintf(stderr, " %12.8f (%12.8f)", b->c[(2*i+1)], w->c[(2*i+1)]);
        fprintf(stderr, "\n");
      }
    /* Compute {X} such that {A x = b} in the least squares sense: */
    tf_solve_system_mxn (A, x, b, w->c);
    double Tz = x->c[0];
    double f = x->c[1];
    if (f < 0)
      { /* A negative focal length means a mirrored solution; flip the signs
           of {f}, {Tz}, and the third rotation row to compensate: */
        fprintf(stderr, "warning: projection is mirrored\n");
        f = -f;
        Tz = -Tz;
        cpar->S.c[3][1] = -cpar->S.c[3][1];
        cpar->S.c[3][2] = -cpar->S.c[3][2];
        cpar->S.c[3][3] = -cpar->S.c[3][3];
      }
    cpar->S.c[3][0] = Tz;
    cpar->f = f;
    /* Release the scratch system: */
    tf_free_mat_rm_structure (A);
    tf_free_rm_structure (b);
    tf_free_rm_structure (x);
    tf_free_rm_structure (w);
  }

/* Moves {f} to the middle of {f_range}, adjusting {Tz} so that the
   depth-to-focal-length ratio {zc/f} of the barycenter of the world
   points of {cdat} -- and hence the apparent size of the point cloud --
   is preserved. */
void tf_calib_guess2_adjust_f ( calibration_data_t cdat, interval_t f_range, camera_parameters_t cpar )
  {
    int n = cdat->nmarks;
    r3_t *p_w = cdat->world_coords;
    r3_t b_w;
    /* Weighted barycenter of the world points: */
    tf_compute_world_barycenter(n, p_w, cdat->weight, &b_w);
    /* Rotated depth of the barycenter (third rotation row dotted with {b_w}): */
    double r = b_w.c[0] * cpar->S.c[3][1] + b_w.c[1] * cpar->S.c[3][2] + b_w.c[2] * cpar->S.c[3][3];
    double zc_star = cpar->S.c[3][0] + r /*+ 0*zc_hat*/;
    /* Scale factor to keep fixed while trading {Tz} against {f}: */
    double L = zc_star / cpar->f;
    double f_tmp = interval_mid (&f_range);
    if (f_tmp != cpar->f)
      { cpar->S.c[3][0] = L*f_tmp - r /*- 0*zc_hat*/;
        cpar->f = f_tmp;
      }
  }

#undef interval_ISFULL
/* ---------------------------------------------------------------------- */

/* Full Tsai-style calibration: guesses initial camera parameters from
   {cdat} and {cspec}, then (when {optimize}) refines them with the
   generic optimizer over the parameters that {cspec} leaves variable. */
void tf_calib2_tsai ( calibration_data_t cdat, camera_specs_t cspec, camera_parameters_t cpar, bool_t debug, bool_t optimize )
  {
    /*Decide which parameters are variable*/
    tf_optimization_choice_t which;
    tf_select_no_parameters(&which);
    tf_select_all_variable_parameters(cspec, &which);

    tf_calib2_guess_initial_camera_parameters (cdat, cspec, &which, cpar, debug);

    if (debug)
      { fprintf(stderr, "Guessed parameters:\n");
        tf_show_camera_parameters (cpar, stderr);
      }

    if (optimize)
      {
        tf_generic_optimization
          ( cdat, cspec, cpar, &which,
            tf_calib_refine2_gather_optimization_params,
            tf_calib_refine2_scatter_optimization_params,
            debug );
        if (debug)
          { fprintf(stderr, "Optimized parameters:\n");
            tf_show_camera_parameters (cpar, stderr);
          }
      }
  }

/* Tsai-style calibration for a variable-zoom camera: estimates {Tx,Ty},
   the rotation, and approximate {f,Tz} from {cdat}; retries with the
   mirrored orthonormal matrix if {f} comes out negative; then refines
   {f,Tz} exactly (lmdif) and runs a full optimization (minus the X scale
   correction and image center). */
void tf_tsai_calib_variable_zoom ( calibration_data_t cdat, camera_parameters_t cpar, double wgts[], FILE *ferr )
  {
    rm_t U;
    r3_t R; /*Rotation angles*/
    calibration_aux_data_t caux;
    caux = tf_create_auxiliary_calibration_aux_data_structure (cdat->ntargets);
    tf_compute_xd_yd_and_r_squared (cdat, cpar, caux);
    U = tf_compute_U (cdat, caux, wgts, ferr);
    tf_compute_tx_and_ty (cdat, cpar, caux, U);
    tf_compute_R_and_store_in_S (cpar, U, &R);
    tf_compute_approximate_f_and_Tz (cpar, cdat, caux, wgts);
    if (cpar->f < 0)
      { /*try the other solution for the orthonormal matrix*/
        cpar->S.c[1][3] = -cpar->S.c[1][3]; /*r3*/
        cpar->S.c[2][3] = -cpar->S.c[2][3]; /*r6*/
        cpar->S.c[3][1] = -cpar->S.c[3][1]; /*r7*/
        cpar->S.c[3][2] = -cpar->S.c[3][2]; /*r8*/
        tf_get_euler_angles_from_S_matrix(&(cpar->S), &R);
        tf_compute_approximate_f_and_Tz (cpar, cdat, caux, wgts);
        if (cpar->f < 0)
          { /* Both sign choices failed; likely a left-handed data set. */
            fprintf (stderr, "error - possible handedness problem with data\n");
            //exit (-1);
          }
      }
    /*try the exact solution by lmdif*/
    tf_compute_exact_f_and_Tz (cpar, cdat, caux, wgts);
    /* do a full optimization minus the X scale correction and image center */
    tf_ncc_variable_zoom_optimization (cdat, cpar);
    /*write the parameters calculated by Tsai*/
    //tf_print_error_stats (cdat, cpar, ferr);
    /*disallocating structures*/
    tf_free_rm_structure (U);
    tf_free_auxiliary_calibration_aux_data_structure (caux);
  }

/***********************************************************************
 * This routine takes the position of a point in world coordinates [mm] *
 * and determines the position of its image in image coordinates [pix].
 * ***********************************************************************/
void tf_world_coord_to_image_coord ( calibration_parameters_t cpar, calibration_data_t cdat, camera_constants_t camc, double *xf, double *yf, int i );

/***********************************************************************
 * This routine takes the position of a point {pw} in world coordinates
 * [mm], the calibration parameters {cpar} and camera constants {camc}
 * and returns the position {pi} of the point [pixels] on the image,
 * including the radial distortion effect.
 ***********************************************************************/
void tf_world_coord_to_distorted_image_coord ( r3_t pw, calibration_parameters_t cpar, camera_constants_t camc, r2_t *pi);

/***************************************************************************
 * This routine converts from undistorted to distorted sensor coordinates. *
 * The technique involves algebraically solving the cubic polynomial       *
 *      Ru = Rd * (1 + kappa * Rd**2)                                      *
 * using the Cardan method.                                                *
 *                                                                         *
 * Note: for kappa1 < 0 the distorted sensor plane extends out to a        *
 * maximum barrel distortion radius of Rd = sqrt (-1/(3 * kappa1)).        *
 *                                                                         *
 * To see the logic used in this routine try graphing the above            *
 * polynomial for positive and negative kappa's.                           *
 **************************************************************************/
void tf_undistorted_to_distorted_sensor_coord ( calibration_parameters_t cpar, double Xu, double Yu, double *Xd, double *Yd );

/***********************************************************************
 * This routine takes the position of a point in world coordinates [mm] *
 * and determines its position in camera coordinates [mm].
 * ***********************************************************************/
void tf_world_coord_to_camera_coord ( calibration_parameters_t cpar, calibration_data_t cdat, double *xc, double *yc, double *zc, int i );

/***********************************************************************\
 * This routine takes the position of a point in world coordinates [mm] *
 * and determines the position of its image in image coordinates [pix]. *
 ***********************************************************************/
void tf_world_coord_to_image_coord (calibration_parameters_t cpar, calibration_data_t cdat, camera_constants_t camc, double *xf, double *yf, int i)
  {
    double xc, yc, zc, Xu, Yu, Xd, Yd;

    /* convert from world coordinates to camera coordinates */
    /* (rows 1..3 of {cpar->S} hold the rotation; column 0 holds {Tx,Ty,Tz}) */
    xc = cpar->S.c[1][1] * cdat->world_coords[i].c[0] + cpar->S.c[1][2] * cdat->world_coords[i].c[1] + cpar->S.c[1][3] * cdat->world_coords[i].c[2] + cpar->S.c[1][0];
    yc = cpar->S.c[2][1] * cdat->world_coords[i].c[0] + cpar->S.c[2][2] * cdat->world_coords[i].c[1] + cpar->S.c[2][3] * cdat->world_coords[i].c[2] + cpar->S.c[2][0];
    zc = cpar->S.c[3][1] * cdat->world_coords[i].c[0] + cpar->S.c[3][2] * cdat->world_coords[i].c[1] + cpar->S.c[3][3] * cdat->world_coords[i].c[2] + cpar->S.c[3][0];

    /* NOTE(review): unconditional debug dumps to stdout below -- these
       probably should go to stderr or be removed for production use. */
    printf("\n\nxw %f yw %f zw %f\n", cdat->world_coords[i].c[0], cdat->world_coords[i].c[1], cdat->world_coords[i].c[2]);
    printf("\n\nr1 %f r2 %f r3 %f\n", cpar->S.c[1][1], cpar->S.c[1][2], cpar->S.c[1][3]);
    printf("r4 %f r5 %f r6 %f\n", cpar->S.c[2][1], cpar->S.c[2][2], cpar->S.c[2][3]);
    printf("r7 %f r8 %f r9 %f\n\n", cpar->S.c[3][1], cpar->S.c[3][2], cpar->S.c[3][3]);
    printf("Tx %f Ty %f Tz %f\n\n", cpar->S.c[1][0], cpar->S.c[2][0], cpar->S.c[3][0]);
    printf("xc %f yc %f zc %f\n", xc, yc, zc);

    /* convert from camera coordinates to undistorted sensor plane coordinates */
    Xu = cpar->f * xc / zc;
    Yu = cpar->f * yc / zc;

    /* convert from undistorted to distorted sensor plane coordinates */
    tf_undistorted_to_distorted_sensor_coord (cpar, Xu, Yu, &Xd, &Yd);
    printf("X_u = %f, Y_u = %f\n", Xu, Yu);
    printf("X_d = %f, Y_d = %f\n", Xd, Yd);

    /* convert from distorted sensor plane coordinates to image coordinates */
    *xf = (Xd * camc->sx) / camc->dpx + camc->Cx;
    *yf = Yd / camc->dpy + camc->Cy;

    printf("xf = %f, yf = %f\n", *xf, *yf);
  }

/* NOTE(review): this definition is incomplete/broken as found: the body
   refers to {xc}, {yc}, {zc}, which are never declared here, and the
   function's closing brace is missing before the end of the file (the
   final commented-out block below is lexically inside the function
   body).  Left as found for the author to repair. */
void tf_world_coord_to_distorted_image_coord (r3_t pw, calibration_parameters_t cpar, camera_constants_t camc, r2_t *pi)
  {
    printf("xc %f yc %f zc %f\n", xc, yc, zc);

/* Commented-out reference implementation of the Cardan-method conversion
   from undistorted to distorted sensor coordinates, kept for documentation:

void tf_undistorted_to_distorted_sensor_coord (calibration_parameters_t cpar, double Xu, double Yu, double *Xd, double *Yd)
  {
    double Ru, Rd, lambda, c, d;
    double Q, R, D, S, T, sinT, cosT;

    if (((Xu == 0) && (Yu == 0)) || (cpar->kappa == 0))
      { *Xd = Xu;
        *Yd = Yu;
        return;
      }

    Ru = hypot (Xu, Yu);
    c = 1 / cpar->kappa;
    d = -c * Ru;
    Q = c / 3;
    R = -d / 2;
    D = CUB (Q) + SQR (R);

    if (D >= 0)
      { D = SQRT (D);
        S = CBRT (R + D);
        T = CBRT (R - D);
        Rd = S + T;
        if (Rd < 0)
          { Rd = SQRT (-1 / (3 * cpar->kappa));
            fprintf (stderr, "\nWarning: undistorted image point to distorted image point mapping limited by\n");
            fprintf (stderr, " maximum barrel distortion radius of %lf\n", Rd);
            fprintf (stderr, " (Xu = %lf, Yu = %lf) -> (Xd = %lf, Yd = %lf)\n\n", Xu, Yu, Xu * Rd / Ru, Yu * Rd / Ru);
          }
      }
    else
      { D = SQRT (-D);
        S = CBRT (hypot (R, D));
        T = atan2 (D, R) / 3;
        SINCOS (T, sinT, cosT);
        Rd = -S * cosT + SQRT3 * S * sinT;
      }

    lambda = Rd / Ru;
    *Xd = Xu * lambda;
    *Yd = Yu * lambda;
  }

double CBRT (double x)
  {
    double pow ();
    if (x == 0)
      return (0);
    else if (x > 0)
      return (pow (x, (double) 1.0 / 3.0));
    else
      return (-pow (-x, (double) 1.0 / 3.0));
  }
*/