Mirror of https://github.com/microsoft/vcpkg.git
synced 2024-12-06 00:29:02 +08:00
Commit dcedc95761
* [eigen3] Update to 3.4.0
* [theia] Update to support Eigen 3.4
* [shogun/openmvg] Patches to support Eigen 3.4 from @cenit
* Commit result of x-add-version
* [rtabmap] add bigobj
* x-add-version
* Update ports/eigen3/vcpkg.json
  Co-authored-by: Alexander Neumann <30894796+Neumann-A@users.noreply.github.com>
* Run x-add-version

Co-authored-by: Tobias Wood <tobias@mi3.com>
Co-authored-by: Robert Schumacher <roschuma@microsoft.com>
Co-authored-by: Amin Yahyaabadi <aminyahyaabadi74@gmail.com>
Co-authored-by: Alexander Neumann <30894796+Neumann-A@users.noreply.github.com>
66 lines · 3.0 KiB · Diff
--- a/src/shogun/machine/gp/MultiLaplaceInferenceMethod.cpp
+++ b/src/shogun/machine/gp/MultiLaplaceInferenceMethod.cpp
@@ -84,9 +84,9 @@ class CMultiPsiLine : public func_base
         float64_t result=0;
         for(index_t bl=0; bl<C; bl++)
         {
-            eigen_f.block(bl*n,0,n,1)=K*alpha->block(bl*n,0,n,1)*CMath::exp(log_scale*2.0);
-            result+=alpha->block(bl*n,0,n,1).dot(eigen_f.block(bl*n,0,n,1))/2.0;
-            eigen_f.block(bl*n,0,n,1)+=eigen_m;
+            eigen_f.segment(bl*n,n)=K*alpha->segment(bl*n,n)*CMath::exp(log_scale*2.0);
+            result+=alpha->segment(bl*n,n).dot(eigen_f.segment(bl*n,n))/2.0;
+            eigen_f.segment(bl*n,n)+=eigen_m;
         }

         // get first and second derivatives of log likelihood
@@ -272,7 +272,7 @@ void CMultiLaplaceInferenceMethod::update_alpha()
     {
         Map<VectorXd> alpha(m_alpha.vector, m_alpha.vlen);
         for(index_t bl=0; bl<C; bl++)
-            eigen_mu.block(bl*n,0,n,1)=eigen_ktrtr*CMath::exp(m_log_scale*2.0)*alpha.block(bl*n,0,n,1);
+            eigen_mu.segment(bl*n,n)=eigen_ktrtr*CMath::exp(m_log_scale*2.0)*alpha.segment(bl*n,n);

         //alpha'*(f-m)/2.0
         Psi_New=alpha.dot(eigen_mu)/2.0;
@@ -316,7 +316,7 @@ void CMultiLaplaceInferenceMethod::update_alpha()

         for(index_t bl=0; bl<C; bl++)
         {
-            VectorXd eigen_sD=eigen_dpi.block(bl*n,0,n,1).cwiseSqrt();
+            VectorXd eigen_sD=eigen_dpi.segment(bl*n,n).cwiseSqrt();
             LLT<MatrixXd> chol_tmp((eigen_sD*eigen_sD.transpose()).cwiseProduct(eigen_ktrtr*CMath::exp(m_log_scale*2.0))+
                 MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols));
             MatrixXd eigen_L_tmp=chol_tmp.matrixU();
@@ -341,11 +341,11 @@ void CMultiLaplaceInferenceMethod::update_alpha()
         VectorXd tmp2=m_tmp.array().rowwise().sum();

         for(index_t bl=0; bl<C; bl++)
-            eigen_b.block(bl*n,0,n,1)+=eigen_dpi.block(bl*n,0,n,1).cwiseProduct(eigen_mu.block(bl*n,0,n,1)-eigen_mean_bl-tmp2);
+            eigen_b.segment(bl*n,n)+=eigen_dpi.segment(bl*n,n).cwiseProduct(eigen_mu.segment(bl*n,n)-eigen_mean_bl-tmp2);

         Map<VectorXd> &eigen_c=eigen_W;
         for(index_t bl=0; bl<C; bl++)
-            eigen_c.block(bl*n,0,n,1)=eigen_E.block(0,bl*n,n,n)*(eigen_ktrtr*CMath::exp(m_log_scale*2.0)*eigen_b.block(bl*n,0,n,1));
+            eigen_c.segment(bl*n,n)=eigen_E.block(0,bl*n,n,n)*(eigen_ktrtr*CMath::exp(m_log_scale*2.0)*eigen_b.segment(bl*n,n));

         Map<MatrixXd> c_tmp(eigen_c.data(),n,C);

@@ -409,7 +409,7 @@ float64_t CMultiLaplaceInferenceMethod::get_derivative_helper(SGMatrix<float64_t
     {
         result+=((eigen_E.block(0,bl*n,n,n)-eigen_U.block(0,bl*n,n,n).transpose()*eigen_U.block(0,bl*n,n,n)).array()
             *eigen_dK.array()).sum();
-        result-=(eigen_dK*eigen_alpha.block(bl*n,0,n,1)).dot(eigen_alpha.block(bl*n,0,n,1));
+        result-=(eigen_dK*eigen_alpha.segment(bl*n,n)).dot(eigen_alpha.segment(bl*n,n));
     }

     return result/2.0;
@@ -489,7 +489,7 @@ SGVector<float64_t> CMultiLaplaceInferenceMethod::get_derivative_wrt_mean(
         result[i]=0;
         //currently only compute the explicit term
         for(index_t bl=0; bl<C; bl++)
-            result[i]-=eigen_alpha.block(bl*n,0,n,1).dot(eigen_dmu);
+            result[i]-=eigen_alpha.segment(bl*n,n).dot(eigen_dmu);
     }

     return result;
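For context, every hunk in this patch makes the same substitution: one-column block() views on Eigen vectors become the equivalent segment() views so shogun builds against the eigen3 3.4.0 port. Below is a minimal standalone sketch of that equivalence; it is my own illustration, not code from shogun or from this patch, and the names alpha, K, C, and n are hypothetical stand-ins for the quantities in the hunks above.

// Sketch only: vec.segment(i, n) addresses the same n entries as
// vec.block(i, 0, n, 1), while keeping the expression typed as a
// one-dimensional vector slice, which is the form the patched call sites use.
#include <Eigen/Dense>
#include <iostream>

int main()
{
    const int C = 3, n = 4;  // hypothetical number of classes / block length
    Eigen::VectorXd alpha = Eigen::VectorXd::LinSpaced(C * n, 0.0, 1.0);
    Eigen::MatrixXd K = Eigen::MatrixXd::Identity(n, n);

    Eigen::VectorXd f(C * n);
    for (int bl = 0; bl < C; ++bl)
        f.segment(bl * n, n) = K * alpha.segment(bl * n, n);  // was block(bl*n, 0, n, 1)

    // Both slicing forms read back the same values.
    std::cout << (f.segment(0, n) - f.block(0, 0, n, 1)).norm() << '\n';  // prints 0
}

segment() has been available in Eigen releases long before 3.4, so the rewritten call sites remain compatible with older eigen3 versions as well.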