1. Gauss-Newton Method

The residual function f(x) is nonlinear. Expanding it with a first-order Taylor approximation gives:

f(x + \Delta x) \approx f(x) + J \Delta x

Here J is the Jacobian matrix of the residual function f. Substituting this approximation into the loss function F(x) = \frac{1}{2}\|f(x)\|^2 gives:

F(x + \Delta x) \approx \frac{1}{2}\|f(x) + J \Delta x\|^2 = \frac{1}{2}\left(\|f(x)\|^2 + 2 f(x)^T J \Delta x + \Delta x^T J^T J \Delta x\right)

Setting its first derivative with respect to \Delta x to zero yields:

J^T J \, \Delta x = -J^T f(x)

This is the normal equation commonly seen in papers, often abbreviated as H \Delta x = b with H = J^T J and b = -J^T f(x).
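To make the normal equation concrete, here is a minimal, self-contained sketch of Gauss-Newton iterations for a toy curve-fitting problem y = exp(a x + b), written directly against Eigen. It is an illustration of the formula above, not code from the post; all names in it are made up.

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main() {
    // Toy data generated from y = exp(1.0 * x + 2.0)
    const int N = 8;
    Eigen::VectorXd xs(N), ys(N);
    for (int i = 0; i < N; ++i) {
        xs(i) = 0.1 * i;
        ys(i) = std::exp(1.0 * xs(i) + 2.0);
    }

    // Initial guess near the optimum (plain Gauss-Newton is only
    // locally convergent, which is what motivates LM below)
    Eigen::Vector2d ab(0.8, 1.8);
    for (int iter = 0; iter < 20; ++iter) {
        Eigen::VectorXd f(N);     // residuals f(x)
        Eigen::MatrixXd J(N, 2);  // Jacobian of the residuals
        for (int i = 0; i < N; ++i) {
            double e = std::exp(ab(0) * xs(i) + ab(1));
            f(i) = ys(i) - e;
            J(i, 0) = -xs(i) * e;  // d f_i / d a
            J(i, 1) = -e;          // d f_i / d b
        }
        // Normal equation: (J^T J) dx = -J^T f
        Eigen::Matrix2d H = J.transpose() * J;
        Eigen::Vector2d b = -J.transpose() * f;
        Eigen::Vector2d dx = H.ldlt().solve(b);
        ab += dx;
        if (dx.norm() < 1e-10) break;
    }
    std::cout << "estimated (a, b) = " << ab.transpose() << std::endl;
    return 0;
}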

2. LM

LM improves on the Gauss-Newton method by introducing a damping factor \lambda into the solve; the normal equation becomes:

(J^T J + \lambda I) \, \Delta x = -J^T f(x)

2.1 The role of the damping factor:

When \lambda is large, (H + \lambda I)\Delta x \approx \lambda \Delta x = b, so the update degenerates into a short gradient-descent step; when \lambda is small, the update approaches a pure Gauss-Newton step. The damping also keeps H + \lambda I positive definite, so the linear system always has a solution.

2.2 Choosing the initial value of the damping coefficient:

A simple strategy is to scale the largest diagonal entry of J^T J:

\lambda_0 = \tau \cdot \max_i \{ (J^T J)_{ii} \}

where \tau is a small user-chosen constant (e.g. 10^{-5}).
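A hedged sketch of this initialization, reusing the Hessian_ and currentLambda_ member names from the code in Section 3 (the helper name ComputeLambdaInitLM and the value of tau are assumptions, not given in the post):

void Problem::ComputeLambdaInitLM() {  // hypothetical helper name
    double maxDiagonal = 0.;
    ulong size = Hessian_.cols();
    // lambda_0 = tau * max_i (J^T J)_{ii}
    for (ulong i = 0; i < size; ++i) {
        maxDiagonal = std::max(std::fabs(Hessian_(i, i)), maxDiagonal);
    }
    double tau = 1e-5;  // a common choice; tune per problem
    currentLambda_ = tau * maxDiagonal;
}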

2.3 Update strategy for the damping factor

The update is driven by the gain ratio \rho = (F(x) - F(x + \Delta x)) / (L(0) - L(\Delta x)), i.e. the actual cost reduction divided by the reduction predicted by the linearized model. If \rho > 0 the step is accepted and \lambda is decreased, pushing the next iteration toward Gauss-Newton; otherwise the step is rejected and \lambda is multiplied by a growing factor \nu. This is exactly what the code in Section 3.3 implements.

3. Core Code Walkthrough

3.1 Constructing the H matrix

void Problem::MakeHessian() {
    TicToc t_h;
    // Build the big H matrix directly
    ulong size = ordering_generic_;
    MatXX H(MatXX::Zero(size, size));
    VecX b(VecX::Zero(size));

    // TODO: accelerate, accelerate, accelerate
    // #ifdef USE_OPENMP
    // #pragma omp parallel for
    // #endif

    // Iterate over every residual (edge), compute its Jacobians,
    // and accumulate the final H = J^T * J
    for (auto &edge : edges_) {
        edge.second->ComputeResidual();
        edge.second->ComputeJacobians();

        auto jacobians = edge.second->Jacobians();
        auto verticies = edge.second->Verticies();
        assert(jacobians.size() == verticies.size());

        for (size_t i = 0; i < verticies.size(); ++i) {
            auto v_i = verticies[i];
            // A fixed vertex contributes nothing to the Hessian,
            // i.e. its Jacobian is 0
            if (v_i->IsFixed()) continue;

            auto jacobian_i = jacobians[i];
            ulong index_i = v_i->OrderingId();
            ulong dim_i = v_i->LocalDimension();

            MatXX JtW = jacobian_i.transpose() * edge.second->Information();

            for (size_t j = i; j < verticies.size(); ++j) {
                auto v_j = verticies[j];
                if (v_j->IsFixed()) continue;

                auto jacobian_j = jacobians[j];
                ulong index_j = v_j->OrderingId();
                ulong dim_j = v_j->LocalDimension();

                assert(v_j->OrderingId() != -1);
                MatXX hessian = JtW * jacobian_j;
                // Accumulate the information from every edge
                H.block(index_i, index_j, dim_i, dim_j).noalias() += hessian;
                if (j != i) {
                    // Mirror into the symmetric lower triangle
                    H.block(index_j, index_i, dim_j, dim_i).noalias() += hessian.transpose();
                }
            }
            b.segment(index_i, dim_i).noalias() -= JtW * edge.second->Residual();
        }
    }
    Hessian_ = H;
    b_ = b;
    t_hessian_cost_ = t_h.toc();

    delta_x_ = VecX::Zero(size);  // initial delta_x = 0_n
}
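Two details are worth pointing out: the inner loop starts at j = i, so each pairwise block is computed only once and mirrored into the lower triangle, exploiting the symmetry of H; and b accumulates -J^T W f edge by edge. Once Hessian_ and b_ are assembled, the Gauss-Newton step itself is a single linear solve. A minimal sketch, assuming MatXX/VecX are Eigen dense typedefs (the helper name SolveLinearSystem is an assumption, not shown in the post):

void Problem::SolveLinearSystem() {  // hypothetical helper, for illustration
    // Solve H * delta_x = b with a dense LDLT factorization.
    // A real SLAM backend would exploit the sparsity of H
    // (e.g. via a Schur complement) rather than a dense solve.
    delta_x_ = Hessian_.ldlt().solve(b_);
}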

3.2 Adding the damping factor to the assembled H matrix

void Problem::AddLambdatoHessianLM() {
    ulong size = Hessian_.cols();
    assert(Hessian_.rows() == Hessian_.cols() && "Hessian is not square");
    for (ulong i = 0; i < size; ++i) {
        // Add the damping factor to every diagonal entry: H <- H + lambda * I
        Hessian_(i, i) += currentLambda_;
    }
}
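Because lambda is added to Hessian_ in place, it must be removed again before a different lambda can be tried against the same linearization. A sketch of the inverse operation (the name RemoveLambdaHessianLM is an assumption that simply mirrors the function above):

void Problem::RemoveLambdaHessianLM() {  // hypothetical counterpart
    ulong size = Hessian_.cols();
    for (ulong i = 0; i < size; ++i) {
        // Undo H <- H + lambda * I. Repeated add/subtract can accumulate
        // floating-point error; keeping a pristine copy of H is a more
        // robust alternative.
        Hessian_(i, i) -= currentLambda_;
    }
}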

3.3 After solving, check whether the step is acceptable (this code implements the damping-factor update strategy)

bool Problem::IsGoodStepInLM() {
    double scale = 0;
    // Predicted reduction of the linearized model: L(0) - L(delta_x)
    scale = delta_x_.transpose() * (currentLambda_ * delta_x_ + b_);
    scale += 1e-3;  // make sure it's non-zero :)

    // Recompute the residuals after the state update
    // and sum up the total chi2 error
    double tempChi = 0.0;
    for (auto edge : edges_) {
        edge.second->ComputeResidual();
        tempChi += edge.second->Chi2();
    }

    // Gain ratio: actual reduction / predicted reduction
    double rho = (currentChi_ - tempChi) / scale;
    if (rho > 0 && isfinite(tempChi)) {  // last step was good: the error went down
        double alpha = 1. - pow((2 * rho - 1), 3);
        alpha = std::min(alpha, 2. / 3.);
        double scaleFactor = (std::max)(1. / 3., alpha);
        currentLambda_ *= scaleFactor;  // shrink lambda, move toward Gauss-Newton
        ni_ = 2;
        currentChi_ = tempChi;
        return true;
    } else {
        // Reject the step: enlarge lambda, move toward gradient descent
        currentLambda_ *= ni_;
        ni_ *= 2;
        return false;
    }
}
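The accept branch implements Nielsen's rule: lambda is shrunk by max(1/3, 1 - (2*rho - 1)^3) and the growth factor ni_ is reset to 2, while a rejected step multiplies lambda by ni_ and then doubles ni_, backing off ever more aggressively. Putting the pieces together, one outer LM iteration looks roughly like the sketch below; SolveLinearSystem, RemoveLambdaHessianLM, UpdateStates, RollbackStates and OneLMIteration are assumed names, and only MakeHessian, AddLambdatoHessianLM and IsGoodStepInLM come from the code shown above.

void Problem::OneLMIteration() {  // hypothetical driver, for illustration
    MakeHessian();                 // build H = J^T W J and b = -J^T W f
    bool stepAccepted = false;
    while (!stepAccepted) {
        AddLambdatoHessianLM();    // H <- H + lambda * I
        SolveLinearSystem();       // delta_x_ = (H + lambda I)^{-1} b
        RemoveLambdaHessianLM();   // restore H so another lambda can be tried
        UpdateStates();            // x <- x (+) delta_x_
        stepAccepted = IsGoodStepInLM();      // gain-ratio test; updates lambda, ni_
        if (!stepAccepted) RollbackStates();  // rejected: revert the state update
    }
}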