% Initialization
clear all;
%close all;
num_iterations = 50;    % number of gradient-descent iterations
sigma = 0.01;           % standard deviation of the se(2) noise
alpha = 0.00001;        % gradient-descent step size
L = sigma^2 * eye(3);   % isotropic noise covariance on se(2)
% P = (L+L')/2;
N_values = 100;         % number of measurements per run
Nr = 10;                % number of Monte Carlo repetitions
E = [0, -1; 1, 0];      % generator of so(2)
T_estim_history = zeros(3, 3, num_iterations);
% vee operator for so(2): extract the rotation angle from a skew-symmetric matrix
function a = vee_operator(wedge)
a = wedge(2, 1);
end
% Generate a random transformation matrix in SE(2)
function T = rand_transformation_matrix()
theta = 2 * pi * rand();
R = [cos(theta), -sin(theta); sin(theta), cos(theta)];
t = rand(2, 1);
T = [R, t; 0, 0, 1];
end
% V(theta) operator for SE(2) (translational part of the exponential map)
function V = V_operator(theta)
if abs(theta) < 1e-12
V = eye(2);
else
V = (sin(theta) / theta) * eye(2) + ((1 - cos(theta)) / theta) * [0, -1; 1, 0];
end
end
% Exponential map for SE(2): twist tau = [rho; theta] -> homogeneous transformation
function exp_W = exp_operator_se2(tau)
theta = tau(3);
R = [cos(theta), -sin(theta); sin(theta), cos(theta)];
rho = tau(1:2);
t = V_operator(theta) * rho;
exp_W = [R, t; 0, 0, 1];
end
% SVD-based pseudo-inverse (singular values below tolerance are dropped)
function inv_A = robust_inv(A)
[U, S, V] = svd(A);
s = diag(S);
tol = max(size(A)) * eps(max(s));
s_inv = zeros(size(s));
s_inv(s > tol) = 1 ./ s(s > tol);
inv_A = V * diag(s_inv) * U';
end
% Logarithm map for SE(2)
function log_tau = log_operator_se2(M)
R = M(1:2, 1:2);
t = M(1:2, 3);
theta = vee_operator(logm(R));
V_inv = robust_inv(V_operator(theta));
rho = V_inv * t;
log_tau = [rho; theta];
end
for n = 1:numel(N_values)
N = N_values(n);
mse_history = zeros(Nr, 1);
for nr = 1:Nr
errors_history = zeros(num_iterations, 1);
T = rand_transformation_matrix();   % ground-truth pose
% Draw N noisy measurements Z_i = T * exp(a_i), a_i ~ N(0, sigma^2 * I)
Z = zeros(3, 3, N);
for i = 1:N
a = sigma * randn(3, 1);
Z(:, :, i) = T * exp_operator_se2(a);
end
T_est = T;   % initialize the estimate at the ground truth
e = log_operator_se2(T \ T_est);
errors_history(1) = norm(e);
for iter = 2:num_iterations
grad = zeros(3,1);
% Accumulate the gradient of sum_i ||log(T_est^-1 * Z_i)||^2 weighted by inv(L)
for l = 1:size(Z, 3)
Zi = Z(:, :, l);
e_l = log_operator_se2(T_est \ Zi);   % residual in the Lie algebra
grad = grad - 2 * (L \ e_l);
end
% Gradient-descent update on the Lie group (right perturbation)
delta_T = -alpha * grad;
T_est = T_est * exp_operator_se2(delta_T);
% Record the estimation error against the ground truth
e = log_operator_se2(T \ T_est);
errors_history(iter) = norm(e);
T_estim_history(:, :, iter) = T_est;
end
mse_history(nr) = errors_history(end)^2;
end
end
% Plot the error evolution across iterations
figure;
plot(1:num_iterations, errors_history);
xlabel('Iteration');
ylabel('Error');
title(['Error evolution for N = ', num2str(N)]);
My algorithm diverges. What can I do? I have tried modifying the number of iterations, the step size, and the learning rate (α), but the issue persists.

Here are some additional steps you can take to address the problem:
Check for Correct Gradient Calculation: Ensure that the gradient is computed correctly; sign or scaling errors are a common cause of divergence. In your loop the gradient sums over all N residuals and is scaled by inv(L) = 1/sigma^2 = 10^4, so the effective update is roughly 2*alpha*N/sigma^2 = 20 times the mean residual per iteration, which overshoots and diverges. A finite-difference gradient check is sketched after this list.
Decrease the Learning Rate: Sometimes, a smaller learning rate (α) can stabilize the algorithm. If α is too large, the updates may overshoot the minimum, causing divergence. Rather than shrinking α by hand, average the residuals over N so the step no longer grows with the number of measurements, and add a simple backtracking rule; see the sketch after this list.
Normalize the Data: Ensure that the input data is normalized. Large values can cause instability in the algorithm.
Initialize Parameters Carefully: Use a good initialization strategy for your transformation matrices. Poor initialization can lead to divergence. Note that your script initializes T_est at the ground truth T, so initialization is not the culprit here; on real data, starting from one of the measurements (e.g. T_est = Z(:, :, 1)) is a reasonable default.
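
For the gradient check, here is a minimal sketch. It assumes the script's local functions log_operator_se2 and exp_operator_se2 and the variables Z, L, T_est, and grad; cost_fn and num_grad are names introduced here for illustration. It compares the analytic gradient with central finite differences along the three right-perturbation directions of se(2); because the analytic gradient neglects the right Jacobian of the logarithm, expect only approximate agreement near convergence.

% Weighted cost f(T) = sum_i e_i' * inv(L) * e_i, with e_i = log(T^-1 * Z_i)
cost_fn = @(Te) sum(arrayfun(@(l) ...
    log_operator_se2(Te \ Z(:, :, l))' * (L \ log_operator_se2(Te \ Z(:, :, l))), ...
    1:size(Z, 3)));

h = 1e-6;                            % finite-difference step
num_grad = zeros(3, 1);
for k = 1:3
    dk = zeros(3, 1); dk(k) = h;     % perturb along the k-th se(2) direction
    num_grad(k) = (cost_fn(T_est * exp_operator_se2(dk)) ...
                 - cost_fn(T_est * exp_operator_se2(-dk))) / (2 * h);
end
disp([num_grad, grad]);              % the two columns should roughly agree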
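
For the step size, here is a sketch of a stabilized loop under the same assumptions (it reuses the hypothetical cost_fn from the sketch above and the script's T, T_est, Z, and errors_history). The residuals are averaged over N, so the step no longer scales with the number of measurements or with 1/sigma^2, and the step is halved whenever the cost would otherwise increase:

alpha = 0.5;                         % step on the *mean* residual, chosen in (0, 1]
for iter = 2:num_iterations
    g = zeros(3, 1);
    for l = 1:size(Z, 3)
        g = g + log_operator_se2(T_est \ Z(:, :, l));
    end
    g = g / size(Z, 3);              % mean residual: independent of N and sigma
    step = alpha;
    f0 = cost_fn(T_est);             % cost_fn as defined in the gradient-check sketch
    while cost_fn(T_est * exp_operator_se2(step * g)) >= f0 && step > 1e-10
        step = step / 2;             % backtrack until the cost decreases
    end
    T_est = T_est * exp_operator_se2(step * g);
    errors_history(iter) = norm(log_operator_se2(T \ T_est));
end

With the averaged residual, the effective gain of about 20 in the original loop drops to at most alpha, and the backtracking rule keeps the cost non-increasing, so the iteration can no longer diverge (at worst it stalls at the step-size floor).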