Commit bc7fa547 authored by Carl Philipp Klemm

working minimization

parent 8ad62dca
#include <climits>
#include <sys/types.h>
#include <torch/torch.h>
#include <Eigen/Dense>
#include <torch/types.h>
#include <vector>
// Tensor options for a strided CPU tensor of element type V with gradient tracking disabled.
template <typename V>
inline torch::TensorOptions tensorOptCpuNg()
{
	static_assert(std::is_same<V, float>::value || std::is_same<V, double>::value,
		"This function can only be passed double or float types");
	torch::TensorOptions options;
	if constexpr(std::is_same<V, float>::value)
		options = options.dtype(torch::kFloat32);
	else
		options = options.dtype(torch::kFloat64);
	options = options.layout(torch::kStrided);
	options = options.device(torch::kCPU);
	options = options.requires_grad(false);
	return options;
}
// Checks that the tensor's dtype matches the template type V.
template <typename V>
bool checkTorchType(const torch::Tensor& tensor)
{
	static_assert(std::is_same<V, float>::value || std::is_same<V, double>::value ||
		std::is_same<V, int64_t>::value || std::is_same<V, int32_t>::value || std::is_same<V, int8_t>::value,
		"This function does not work with this type");
	if constexpr(std::is_same<V, float>::value)
		return tensor.dtype() == torch::kFloat32;
	else if constexpr(std::is_same<V, double>::value)
		return tensor.dtype() == torch::kFloat64;
	else if constexpr(std::is_same<V, int64_t>::value)
		return tensor.dtype() == torch::kInt64;
	else if constexpr(std::is_same<V, int32_t>::value)
		return tensor.dtype() == torch::kInt32;
	else if constexpr(std::is_same<V, int8_t>::value)
		return tensor.dtype() == torch::kInt8;
}
template <typename V>
using MatrixXrm = typename Eigen::Matrix<V, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
@@ -10,7 +47,7 @@ torch::Tensor eigen2libtorch(Eigen::MatrixX<V> &M)
{
	Eigen::Matrix<V, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> E(M);
	std::vector<int64_t> dims = {E.rows(), E.cols()};
	auto T = torch::from_blob(E.data(), dims, tensorOptCpuNg<V>()).clone();
	return T;
}
@@ -18,7 +55,7 @@ template <typename V>
torch::Tensor eigen2libtorch(MatrixXrm<V> &E, bool copydata = true)
{
	std::vector<int64_t> dims = {E.rows(), E.cols()};
	auto T = torch::from_blob(E.data(), dims, tensorOptCpuNg<V>());
	if (copydata)
		return T.clone();
	else
@@ -26,13 +63,24 @@ torch::Tensor eigen2libtorch(MatrixXrm<V> &E, bool copydata = true)
}
template<typename V>
Eigen::Matrix<V, Eigen::Dynamic, Eigen::Dynamic> libtorch2eigenMatrix(torch::Tensor &Tin)
{
	/*
	LibTorch is Row-major order and Eigen is Column-major order.
	MatrixXrm uses Eigen::RowMajor for compatibility.
	*/
	assert(checkTorchType<V>(Tin));
	auto T = Tin.to(torch::kCPU);
	Eigen::Map<MatrixXrm<V>> E(T.data_ptr<V>(), T.size(0), T.size(1));
	return E;
}
template<typename V>
Eigen::Vector<V, Eigen::Dynamic> libtorch2eigenVector(torch::Tensor &Tin)
{
	assert(Tin.sizes().size() == 1);
	assert(checkTorchType<V>(Tin));
	auto T = Tin.to(torch::kCPU);
	Eigen::Map<Eigen::Vector<V, Eigen::Dynamic>> E(T.data_ptr<V>(), T.numel());
	return E;
}
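For orientation, a minimal round trip through these conversion helpers might look as follows; the matrix size, dtype, and variable names are illustrative, not taken from the commit:

// Round-trip sketch: Eigen -> libtorch -> Eigen. Illustrative values only.
Eigen::MatrixX<float> m = Eigen::MatrixX<float>::Random(3, 4);
torch::Tensor t = eigen2libtorch(m);          // copies into a 3x4 kFloat32 CPU tensor
torch::Tensor flat = t.reshape({t.numel()});  // libtorch2eigenVector requires rank 1
Eigen::VectorX<float> v = libtorch2eigenVector<float>(flat);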
@@ -32,24 +32,11 @@ void printImpedance(const std::vector<eis::DataPoint>& data)
	std::cout<<"]\n";
}
torch::Tensor eisToTensor(const std::vector<eis::DataPoint>& data, torch::Tensor* freqs)
{
	torch::Tensor output = torch::empty({static_cast<long int>(data.size()*2)}, tensorOptCpuNg<fvalue>());
	if(freqs)
		*freqs = torch::empty({static_cast<long int>(data.size()*2)}, tensorOptCpuNg<fvalue>());
	// pointer type must match the fvalue dtype set above, or data_ptr will throw
	fvalue* tensorDataPtr = output.contiguous().data_ptr<fvalue>();
	fvalue* tensorFreqDataPtr = freqs ? freqs->contiguous().data_ptr<fvalue>() : nullptr;
@@ -70,12 +57,12 @@ torch::Tensor eisToTensor(const std::vector<eis::DataPoint>& data, torch::Tensor* freqs)
torch::Tensor fvalueVectorToTensor(std::vector<fvalue>& vect)
{
	return torch::from_blob(vect.data(), {static_cast<int64_t>(vect.size())}, tensorOptCpuNg<fvalue>());
}
torch::Tensor guessStartingPoint(torch::Tensor& omega, torch::Tensor& impedanceSpectra)
{
	torch::Tensor startingPoint = torch::zeros(omega.sizes(), tensorOptCpuNg<fvalue>());
	startingPoint[-1] = torch::abs(impedanceSpectra[-1]);
	return startingPoint;
}
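Per the Python reference further below, the last element of this vector carries R_inf, so the guess seeds it with the magnitude of the final impedance sample and leaves the gamma entries at zero.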
@@ -126,26 +113,33 @@ torch::Tensor aReal(torch::Tensor& omega)
	return out;
}
/*def S(gamma_R_inf, Z_exp_re, Z_exp_im, A_re, A_im, el):
	MSE_re = np.sum((gamma_R_inf[-1] + np.matmul(A_re, gamma_R_inf[:-1]) - Z_exp_re)**2)
	MSE_im = np.sum((np.matmul(A_im, gamma_R_inf[:-1]) - Z_exp_im)**2)
	reg_term = el/2*np.sum(gamma_R_inf[:-1]**2)
	obj = MSE_re + MSE_im + reg_term
	return obj

torch::Tensor tikhonovDrt(torch::Tensor& omega, torch::Tensor& impedanceSpectra, fvalue regularization = 1e-2)
{
	torch::Tensor aMatrixImag = aImag(omega);
	torch::Tensor aMatrixReal = aReal(omega);
	torch::Tensor startingPoint = guessStartingPoint(omega, impedanceSpectra);
	torch::Tensor bounds = torch::zeros({startingPoint.size(0), 1}, tensorOptCpuNg<fvalue>());
	bounds = torch::cat({bounds, torch::zeros({startingPoint.size(0), 1}, tensorOptCpuNg<fvalue>())*torch::max(torch::abs(impedanceSpectra))});
	std::cout<<"startingPoint:\n "<<startingPoint<<'\n';
	std::cout<<"bounds:\n "<<bounds<<'\n';
	result = minimize(S, x0, args=(Z_exp_re, Z_exp_im, A_re, A_im, el), method=method,
		bounds = bounds, options={'disp': True, 'ftol':1e-10, 'maxiter':200})
	gamma_R_inf = result.x
	R_inf = gamma_R_inf[-1]
	gamma = gamma_R_inf[:-1]
	return gamma, R_inf
	return bounds;
}*/
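The commented-out block above is the Tikhonov-regularized DRT objective the commit is working toward: squared residuals of the real and imaginary impedance reconstructions plus an l2 penalty on gamma. A direct libtorch transcription of S could look roughly like this (a sketch following the naming of the Python reference; objectiveS and its parameters are not code from the commit):

// Sketch of the objective S above in libtorch; gammaRInf stores [gamma..., R_inf],
// aRe/aIm are the DRT kernel matrices and el the regularization strength.
// Illustrative only, not part of this commit.
static fvalue objectiveS(const torch::Tensor& gammaRInf, const torch::Tensor& zExpRe, const torch::Tensor& zExpIm,
	const torch::Tensor& aRe, const torch::Tensor& aIm, fvalue el)
{
	using namespace torch::indexing;
	torch::Tensor gamma = gammaRInf.index({Slice(None, -1)});
	torch::Tensor rInf = gammaRInf.index({-1});
	torch::Tensor mseRe = torch::sum(torch::pow(rInf + torch::matmul(aRe, gamma) - zExpRe, 2));
	torch::Tensor mseIm = torch::sum(torch::pow(torch::matmul(aIm, gamma) - zExpIm, 2));
	torch::Tensor regTerm = (el/2)*torch::sum(torch::pow(gamma, 2));
	return (mseRe + mseIm + regTerm).item<fvalue>();
}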
class RtFunct
{
@@ -153,11 +147,11 @@ private:
	torch::Tensor impedanceSpectra;
	torch::Tensor aMatrixImag;
	torch::Tensor aMatrixReal;
	fvalue el;
	fvalue epsilon;

public:
	RtFunct(torch::Tensor impedanceSpectraI, torch::Tensor aMatrixImagI, torch::Tensor aMatrixRealI, fvalue elI, fvalue epsilonI):
		impedanceSpectra(impedanceSpectraI),
		aMatrixImag(aMatrixImagI),
		aMatrixReal(aMatrixRealI),
@@ -167,39 +161,42 @@ public:
	}
	// Test objective for the solver plumbing: sum of (x_i+3)^2 + 20 over all
	// elements, with its minimum at x_i = -3.
	static fvalue function(const torch::Tensor& x)
	{
		assert(checkTorchType<fvalue>(x));
		auto xAccessor = x.accessor<fvalue, 1>();
		fvalue accum = 0;
		for(int64_t i = 0; i < x.size(0); ++i)
			accum += (xAccessor[i]+3)*(xAccessor[i]+3)+20;
		return accum;
	}
	// Numerical gradient by central differences: grad_i = (fn(x+eps*e_i) - fn(x-eps*e_i))/(2*eps).
	static torch::Tensor getGrad(std::function<fvalue(const torch::Tensor& x)> fn, const torch::Tensor& xTensor, fvalue epsilon)
	{
		torch::Tensor out = torch::zeros(xTensor.sizes(), tensorOptCpuNg<fvalue>());
		auto outAccessor = out.accessor<fvalue, 1>();
		assert(checkTorchType<fvalue>(xTensor));
		auto xAccessor = xTensor.accessor<fvalue, 1>();
		for(int64_t i = 0; i < out.size(0); ++i)
		{
			xAccessor[i] -= epsilon;
			fvalue left = fn(xTensor);
			xAccessor[i] += 2*epsilon;
			fvalue right = fn(xTensor);
			xAccessor[i] -= epsilon;
			outAccessor[i] = (right-left)/(2*epsilon);
		}
		return out;
	}
	fvalue operator()(const Eigen::VectorX<fvalue>& x, Eigen::VectorX<fvalue>& grad)
	{
		Eigen::MatrixX<fvalue> xMatrix = x;
		torch::Tensor xTensor = eigen2libtorch(xMatrix);
		xTensor = xTensor.reshape({xTensor.numel()});
		std::cout<<"xTensor\n "<<xTensor<<'\n';
		torch::Tensor gradTensor = getGrad(&function, xTensor, epsilon);
		grad = libtorch2eigenVector<fvalue>(gradTensor);
		return function(xTensor);
	}
};
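As a sanity check on the pieces above: the test objective sums (x_i+3)^2 + 20 per element, so its analytic gradient is 2(x_i+3), which getGrad's central differences reproduce almost exactly. For the four-element start vector of threes used in main() below, the minimizer should therefore land at x_i = -3 with fx = 80. These expected values follow from the formula, not from output recorded in the commit.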
@@ -224,15 +221,19 @@ int main(int argc, char** argv)
	printImpedance(data);

	LBFGSpp::LBFGSParam<fvalue> fitParam;
	fitParam.epsilon = 1e-6;
	fitParam.max_iterations = 100;
	LBFGSpp::LBFGSSolver<fvalue> solver(fitParam);
	RtFunct funct(impedanceSpectra, aMatrixImag, aMatrixReal, 0.1, 0.001);

	Eigen::VectorX<fvalue> x = Eigen::VectorX<fvalue>::Ones(4)*3;
	fvalue fx;
	int iterations = solver.minimize(funct, x, fx);

	std::cout<<"Iterations: "<<iterations<<'\n';
	std::cout<<"fx "<<fx<<'\n';
	std::cout<<"xVect\n"<<x<<'\n';

	return 0;
}