diff --git a/CMakeLists.txt b/CMakeLists.txt
index f44866b168bff0b167380dbbf071da2561b99198..8b108cc95e487e8f9018b3770e60d08272f4566c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -17,9 +17,16 @@ function(dump_variables)
 	endforeach()
 endfunction()
 
-add_executable(${PROJECT_NAME} main.cpp )
+add_library(${PROJECT_NAME} SHARED eistotorch.cpp drt.cpp)
 target_link_libraries(${PROJECT_NAME} ${TORCH_LIBRARIES} ${EIGEN3_LIBRARIES} eisgenerator)
-target_include_directories(${PROJECT_NAME} PRIVATE ${TORCH_INCLUDE_DIRS} ${EIGEN3_INCLUDE_DIRS} ./LBFG)
-target_compile_options(${PROJECT_NAME} PRIVATE "-Wall" "-O2" "-g" "-fno-strict-aliasing" "-Wfatal-errors" "-Wno-reorder")
+target_include_directories(${PROJECT_NAME} PUBLIC ${TORCH_INCLUDE_DIRS} ${EIGEN3_INCLUDE_DIRS} ./LBFG)
+set_target_properties(${PROJECT_NAME} PROPERTIES COMPILE_FLAGS "-Wall -O2 -march=native -g" LINK_FLAGS "-flto")
+
+link_directories(${CMAKE_CURRENT_BINARY_DIR})
+add_executable(${PROJECT_NAME}_test main.cpp)
+add_dependencies(${PROJECT_NAME}_test ${PROJECT_NAME})
+target_link_libraries(${PROJECT_NAME}_test -l${PROJECT_NAME} ${TORCH_LIBRARIES} eisgenerator)
+target_include_directories(${PROJECT_NAME}_test PRIVATE . ${TORCH_INCLUDE_DIRS})
+set_target_properties(${PROJECT_NAME}_test PROPERTIES COMPILE_FLAGS "-Wall -O2 -march=native -g" LINK_FLAGS "-flto")
 
 install(TARGETS ${PROJECT_NAME} RUNTIME DESTINATION bin)
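Since ${PROJECT_NAME} is now a library target built in the same CMake project, the test executable could also link it by target name; CMake then handles the build order and link path itself, making the explicit link_directories(), add_dependencies() and raw -l flag unnecessary. A minimal sketch of that alternative, not what the patch above does:

add_executable(${PROJECT_NAME}_test main.cpp)
# linking the library target directly implies the dependency and the correct link path
target_link_libraries(${PROJECT_NAME}_test ${PROJECT_NAME} ${TORCH_LIBRARIES} eisgenerator)
target_include_directories(${PROJECT_NAME}_test PRIVATE . ${TORCH_INCLUDE_DIRS})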
diff --git a/LBFG/types.h b/LBFG/types.h
new file mode 100644
index 0000000000000000000000000000000000000000..3c92447f176aae463756faaf61a605105a2a6831
--- /dev/null
+++ b/LBFG/types.h
@@ -0,0 +1,11 @@
+#pragma once
+
+#include <torch/torch.h>
+
+namespace LBFGSpp {
+
+typedef torch::Tensor Vector;
+typedef torch::Tensor Matrix;
+typedef std::vector<int> IndexSet;
+
+}
diff --git a/drt.cpp b/drt.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..023b91e19091a2a4679aab6725bcd5247a759338
--- /dev/null
+++ b/drt.cpp
@@ -0,0 +1,167 @@
+#include "drt/drt.h"
+
+#include <Eigen/Core>
+#include <eisgenerator/eistype.h>
+
+
+#include "tensoroptions.h"
+#include "eigentorchconversions.h"
+#include "eistotorch.h"
+#include "LBFG/LBFGS.h"
+
+
+
+static torch::Tensor guesStartingPoint(torch::Tensor& omega, torch::Tensor& impedanceSpectra)
+{
+	std::vector<int64_t> size = omega.sizes().vec();
+	++size[0];
+	torch::Tensor startingPoint = torch::zeros(size, tensorOptCpu<fvalue>(false));
+	startingPoint[-1] = torch::abs(impedanceSpectra[-1]);
+	return startingPoint;
+}
+
+static torch::Tensor aImag(torch::Tensor& omega)
+{
+	torch::Tensor tau = 1.0/(omega/(2*M_PI));
+	torch::Tensor out = torch::zeros({omega.numel(), omega.numel()}, torch::TensorOptions().dtype(torch::kFloat32));
+	auto outAccessor = out.accessor<float, 2>();
+	auto omegaAccessor = omega.accessor<float, 1>();
+	auto tauAccessor = tau.accessor<float, 1>();
+	for(int32_t i = 0; i < out.size(0); ++i)
+	{
+		for(int32_t j = 0; j < out.size(1); ++j)
+		{
+			outAccessor[i][j] = 0.5*(omegaAccessor[i]*tauAccessor[j])/(1+std::pow(omegaAccessor[i]*tauAccessor[j], 2));
+			if(j == 0)
+				outAccessor[i][j] = outAccessor[i][j]*std::log(tauAccessor[j+1]/tauAccessor[j]);
+			else if(j == out.size(1)-1)
+				outAccessor[i][j] = outAccessor[i][j]*std::log(tauAccessor[j]/tauAccessor[j-1]);
+			else
+				outAccessor[i][j] = outAccessor[i][j]*std::log(tauAccessor[j+1]/tauAccessor[j-1]);
+		}
+	}
+	return out;
+}
+
+static torch::Tensor aReal(torch::Tensor& omega)
+{
+	torch::Tensor tau = 1.0/(omega/(2*M_PI));
+	torch::Tensor out = torch::zeros({omega.numel(), omega.numel()}, torch::TensorOptions().dtype(torch::kFloat32));
+	auto outAccessor = out.accessor<float, 2>();
+	auto omegaAccessor = omega.accessor<float, 1>();
+	auto tauAccessor = tau.accessor<float, 1>();
+	for(int32_t i = 0; i < out.size(0); ++i)
+	{
+		for(int32_t j = 0; j < out.size(1); ++j)
+		{
+			outAccessor[i][j] = -0.5/(1+std::pow(omegaAccessor[i]*tauAccessor[j], 2));
+			if(j == 0)
+				outAccessor[i][j] = outAccessor[i][j]*std::log(tauAccessor[j+1]/tauAccessor[j]);
+			else if(j == out.size(1)-1)
+				outAccessor[i][j] = outAccessor[i][j]*std::log(tauAccessor[j]/tauAccessor[j-1]);
+			else
+				outAccessor[i][j] = outAccessor[i][j]*std::log(tauAccessor[j+1]/tauAccessor[j-1]);
+		}
+	}
+	return out;
+}
+
+class RtFunct
+{
+private:
+	torch::Tensor impedanceSpectra;
+	torch::Tensor aMatrixImag;
+	torch::Tensor aMatrixReal;
+	fvalue el;
+	fvalue epsilon;
+
+public:
+	RtFunct(torch::Tensor impedanceSpectraI, torch::Tensor aMatrixImagI, torch::Tensor aMatrixRealI, fvalue elI, fvalue epsilonI):
+	impedanceSpectra(impedanceSpectraI),
+	aMatrixImag(aMatrixImagI),
+	aMatrixReal(aMatrixRealI),
+	el(elI),
+	epsilon(epsilonI)
+	{
+
+	}
+
+	fvalue function(const torch::Tensor& x)
+	{
+		assert(checkTorchType<fvalue>(x));
+		assert(x.sizes().size() == 1);
+		auto xAccessor = x.accessor<fvalue, 1>();
+		int64_t size = x.numel();
+		torch::Tensor xLeft = x.narrow(0, 0, x.numel()-1);
+
+		std::cout<<"x:\n"<<x<<'\n';
+		std::cout<<"xLeft:\n"<<xLeft<<'\n';
+		std::cout<<"real:\n"<<torch::real(impedanceSpectra)<<'\n';
+		std::cout<<"imag:\n"<<torch::imag(impedanceSpectra)<<'\n';
+		std::cout<<"aMatrixReal:\n"<<aMatrixReal<<'\n';
+		std::cout<<"aMatrixImag:\n"<<aMatrixImag<<'\n';
+
+
+		torch::Tensor MSE_re = torch::sum(torch::pow(xAccessor[size-1] + torch::matmul(aMatrixReal, xLeft) - torch::real(impedanceSpectra), 2));
+		torch::Tensor MSE_im = torch::sum(torch::pow(torch::matmul(aMatrixImag, xLeft) - torch::imag(impedanceSpectra), 2));
+		torch::Tensor reg_term = el/2*torch::sum(torch::pow(xLeft, 2));
+		torch::Tensor obj = MSE_re + MSE_im + reg_term;
+		return obj.item().to<fvalue>();
+	}
+
+	static torch::Tensor getGrad(std::function<fvalue(const torch::Tensor& x)> fn, const torch::Tensor& xTensor, fvalue epsilon)
+	{
+		torch::Tensor out = torch::zeros(xTensor.sizes(), tensorOptCpu<fvalue>(false));
+		auto outAccessor = out.accessor<fvalue, 1>();
+		assert(checkTorchType<fvalue>(xTensor));
+		auto xAccessor = xTensor.accessor<fvalue, 1>();
+		for(int64_t i = 0; i < out.size(0); ++i)
+		{
+			xAccessor[i] -= epsilon;
+			fvalue left = fn(xTensor);
+			xAccessor[i] += 2*epsilon;
+			fvalue right = fn(xTensor);
+			xAccessor[i] -= epsilon;
+			outAccessor[i] = (right-left)/(2*epsilon);
+		}
+		return out;
+	}
+
+	fvalue operator()(const Eigen::VectorX<fvalue>& x, Eigen::VectorX<fvalue>& grad)
+	{
+		Eigen::MatrixX<fvalue> xMatrix = x;
+		torch::Tensor xTensor = eigen2libtorch(xMatrix);
+		xTensor = xTensor.reshape({xTensor.numel()});
+		torch::Tensor gradTensor = getGrad(std::bind(&RtFunct::function, this, std::placeholders::_1), xTensor, epsilon);
+		grad = libtorch2eigenVector<fvalue>(gradTensor);
+		return function(xTensor);
+	}
+};
+
+torch::Tensor calcDrt(torch::Tensor& impedanceSpectra, torch::Tensor& omegaTensor, FitMetics& fm)
+{
+	torch::Tensor aMatrixImag = aImag(omegaTensor);
+	torch::Tensor aMatrixReal = aReal(omegaTensor);
+
+	LBFGSpp::LBFGSParam<fvalue> fitParam;
+	fitParam.epsilon = 1e-2;
+	fitParam.max_iterations = 100;
+	fitParam.max_linesearch = 1000;
+
+	LBFGSpp::LBFGSSolver<fvalue> solver(fitParam);
+	RtFunct funct(impedanceSpectra, aMatrixImag, aMatrixReal, 0.01, 0.001);
+
+	torch::Tensor startTensor = guesStartingPoint(omegaTensor, impedanceSpectra);
+	Eigen::VectorX<fvalue> x = libtorch2eigenVector<fvalue>(startTensor);
+
+	fm.iterations = solver.minimize(funct, x, fm.fx);
+
+	return eigenVector2libtorch<fvalue>(x);
+}
+
+torch::Tensor calcDrt(const std::vector<eis::DataPoint>& data, const std::vector<fvalue>& omegaVector, FitMetics& fm)
+{
+	torch::Tensor impedanceSpectra = eisToComplexTensor(data, nullptr);
+	torch::Tensor omegaTensor = fvalueVectorToTensor(const_cast<std::vector<fvalue>&>(omegaVector)).clone();
+	return calcDrt(impedanceSpectra, omegaTensor, fm);
+}
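For reference, RtFunct::function evaluates the same regularized least-squares objective as the Python prototype S() that is removed from main.cpp further down in this patch. With the last element of x playing the role of the R_inf offset from that prototype and the remaining elements forming the DRT vector gamma, it computes

    S(x) = sum((R_inf + A_re*gamma - Re(Z))^2) + sum((A_im*gamma - Im(Z))^2) + (el/2)*sum(gamma^2)

and getGrad() hands LBFGS++ a central finite-difference gradient, (S(x + eps*e_i) - S(x - eps*e_i)) / (2*eps), instead of an analytic one.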
diff --git a/drt/drt.h b/drt/drt.h
new file mode 100644
index 0000000000000000000000000000000000000000..192ee3e2cf750772b93406296b6ffee505f99d35
--- /dev/null
+++ b/drt/drt.h
@@ -0,0 +1,13 @@
+#pragma once
+#include <eisgenerator/eistype.h>
+#include <torch/torch.h>
+
+struct FitMetics
+{
+	int iterations;
+	fvalue fx;
+};
+
+torch::Tensor calcDrt(torch::Tensor& impedanceSpectra, torch::Tensor& omegaTensor, FitMetics& fm);
+
+torch::Tensor calcDrt(const std::vector<eis::DataPoint>& data, const std::vector<fvalue>& omegaVector, FitMetics& fm);
diff --git a/eigentorchconversions.h b/eigentorchconversions.h
index cedbf38d62eebab678e676bfc6e7afb87a496100..efd7a488b17305fe88cb2048616d8563cd9a255b 100644
--- a/eigentorchconversions.h
+++ b/eigentorchconversions.h
@@ -5,21 +5,7 @@
 #include <torch/types.h>
 #include <vector>
 
-template <typename V>
-inline torch::TensorOptions tensorOptCpuNg()
-{
-	static_assert(std::is_same<V, float>::value || std::is_same<V, double>::value,
-		"This function can only be passed double or float types");
-	torch::TensorOptions options;
-	if constexpr(std::is_same<V, float>::value)
-		options = options.dtype(torch::kFloat32);
-	else
-		options = options.dtype(torch::kFloat64);
-	options = options.layout(torch::kStrided);
-	options = options.device(torch::kCPU);
-	options = options.requires_grad(false);
-	return options;
-}
+#include "tensoroptions.h"
 
 template <typename V>
 bool checkTorchType(const torch::Tensor& tensor)
@@ -47,7 +33,7 @@ torch::Tensor eigen2libtorch(Eigen::MatrixX<V> &M)
 {
 	Eigen::Matrix<V, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> E(M);
 	std::vector<int64_t> dims = {E.rows(), E.cols()};
-	auto T = torch::from_blob(E.data(), dims, tensorOptCpuNg<V>()).clone();
+	auto T = torch::from_blob(E.data(), dims, tensorOptCpu<V>(false)).clone();
 	return T;
 }
 
@@ -55,7 +41,18 @@ template <typename V>
 torch::Tensor eigen2libtorch(MatrixXrm<V> &E, bool copydata = true)
 {
 	std::vector<int64_t> dims = {E.rows(), E.cols()};
-	auto T = torch::from_blob(E.data(), dims, tensorOptCpuNg<V>());
+	auto T = torch::from_blob(E.data(), dims, tensorOptCpu<V>(false));
+	if (copydata)
+		return T.clone();
+	else
+		return T;
+}
+
+template <typename V>
+torch::Tensor eigenVector2libtorch(Eigen::Vector<V, Eigen::Dynamic> &E, bool copydata = true)
+{
+	std::vector<int64_t> dims = {E.size()};
+	auto T = torch::from_blob(E.data(), dims, tensorOptCpu<V>(false));
 	if (copydata)
 		return T.clone();
 	else
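A quick round-trip sketch of how the conversion helpers fit together; libtorch2eigenVector is the helper already present in this header (it is what drt.cpp uses for the solver state), and fvalue comes from eisgenerator:

#include <Eigen/Dense>
#include <eisgenerator/eistype.h>
#include "eigentorchconversions.h"

bool roundTripWorks()
{
	Eigen::VectorX<fvalue> v(3);
	v << 1, 2, 3;
	// copies the Eigen vector into a freshly allocated 1-d CPU tensor (copydata defaults to true)
	torch::Tensor t = eigenVector2libtorch<fvalue>(v);
	// and back into an Eigen vector of the same length
	Eigen::VectorX<fvalue> back = libtorch2eigenVector<fvalue>(t);
	return back.size() == v.size() && back[0] == v[0];
}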
diff --git a/eistotorch.cpp b/eistotorch.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fc2d88238f9d1ed24d0ba6c386215afa9bc79023
--- /dev/null
+++ b/eistotorch.cpp
@@ -0,0 +1,90 @@
+#include "eistotorch.h"
+#include <cassert>
+#include <cmath>
+#include <cstdint>
+
+#include "tensoroptions.h"
+
+torch::Tensor eisToComplexTensor(const std::vector<eis::DataPoint>& data, torch::Tensor* freqs)
+{
+	torch::TensorOptions options = tensorOptCpu<fvalue>();
+
+	if constexpr(std::is_same<fvalue, float>::value)
+		options = options.dtype(torch::kComplexFloat);
+	else
+		options = options.dtype(torch::kComplexDouble);
+	torch::Tensor output = torch::empty({static_cast<long int>(data.size())}, options);
+	if(freqs)
+		*freqs = torch::empty({static_cast<long int>(data.size())}, tensorOptCpu<fvalue>());
+
+	torch::Tensor real = torch::real(output);
+	torch::Tensor imag = torch::imag(output);
+
+	auto realAccessor = real.accessor<fvalue, 1>();
+	auto imagAccessor = imag.accessor<fvalue, 1>();
+	float* tensorFreqDataPtr = freqs ? freqs->contiguous().data_ptr<float>() : nullptr;
+
+	for(size_t i = 0; i < data.size(); ++i)
+	{
+		fvalue real = data[i].im.real();
+		fvalue imag = data[i].im.imag();
+		if(std::isnan(real) || std::isinf(real))
+			real = 0;
+		if(std::isnan(imag) || std::isinf(imag))
+			imag = 0;
+
+		realAccessor[i] = real;
+		imagAccessor[i] = imag;
+		if(tensorFreqDataPtr)
+			tensorFreqDataPtr[i] = data[i % data.size()].omega;
+	}
+
+	return output;
+}
+
+torch::Tensor eisToTorch(const std::vector<eis::DataPoint>& data, torch::Tensor* freqs)
+{
+	torch::Tensor input = torch::empty({static_cast<long int>(data.size()*2)}, tensorOptCpu<fvalue>());
+	if(freqs)
+		*freqs = torch::empty({static_cast<long int>(data.size()*2)}, tensorOptCpu<fvalue>());
+
+	float* tensorDataPtr = input.contiguous().data_ptr<float>();
+	float* tensorFreqDataPtr = freqs ? freqs->contiguous().data_ptr<float>() : nullptr;
+
+	for(size_t i = 0; i < data.size()*2; ++i)
+	{
+		float datapoint = i < data.size() ? data[i].im.real() : data[i - data.size()].im.imag();
+		if(std::isnan(datapoint) || std::isinf(datapoint))
+			datapoint = 0;
+		tensorDataPtr[i] = datapoint;
+		if(tensorFreqDataPtr)
+			tensorFreqDataPtr[i] = data[i % data.size()].omega;
+	}
+
+	return input;
+}
+
+std::vector<eis::DataPoint> torchToEis(const torch::Tensor& input)
+{
+	assert(input.numel() % 2 == 0);
+	input.reshape({1, input.numel()});
+
+	std::vector<eis::DataPoint> output(input.numel()/2);
+
+	float* tensorDataPtr = input.contiguous().data_ptr<float>();
+
+	for(int64_t i = 0; i < input.numel()/2; ++i)
+	{
+		output[i].omega = i;
+		output[i].im.real(tensorDataPtr[i]);
+		output[i].im.imag(tensorDataPtr[i+input.numel()/2]);
+	}
+
+	return output;
+}
+
+
+torch::Tensor fvalueVectorToTensor(std::vector<fvalue>& vect)
+{
+	return torch::from_blob(vect.data(), {static_cast<int64_t>(vect.size())}, tensorOptCpu<fvalue>());
+}
diff --git a/eistotorch.h b/eistotorch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2474d74f2ef873b98dc831f9c4e549da7ce48390
--- /dev/null
+++ b/eistotorch.h
@@ -0,0 +1,11 @@
+#pragma once
+
+#include <eisgenerator/eistype.h>
+#include <torch/torch.h>
+
+#include <vector>
+
+torch::Tensor eisToComplexTensor(const std::vector<eis::DataPoint>& data, torch::Tensor* freqs);
+torch::Tensor eisToTorch(const std::vector<eis::DataPoint>& data, torch::Tensor* freqs = nullptr);
+std::vector<eis::DataPoint> torchToEis(const torch::Tensor& input);
+torch::Tensor fvalueVectorToTensor(std::vector<fvalue>& vect);
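The flat layout produced by eisToTorch puts the real parts of the spectrum in the first data.size() entries and the imaginary parts in the next data.size() entries; torchToEis expects the same layout when converting back (and fills omega with the element index, since the frequency is not stored in the tensor). A minimal sketch, reusing the eisgenerator model and range from main.cpp:

#include <eisgenerator/model.h>
#include <eisgenerator/eistype.h>
#include "eistotorch.h"

void layoutExample()
{
	eis::Range omega(1, 1e6, 4, true);
	eis::Model model("r{10}-r{50}p{0.02, 0.8}");
	std::vector<eis::DataPoint> data = model.executeSweep(omega);

	torch::Tensor flat = eisToTorch(data);                // shape {2*data.size()}: [Re..., Im...]
	std::vector<eis::DataPoint> back = torchToEis(flat);  // same impedances, omega replaced by the index
}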
diff --git a/main.cpp b/main.cpp
index 71c93de4c3345f8cb0368c95b9fc60ef0e8e566e..a17f0d3d7bd304cf509d3271a87c9da0d9a7e35a 100644
--- a/main.cpp
+++ b/main.cpp
@@ -1,21 +1,8 @@
-#include <ATen/core/ATen_fwd.h>
-#include <ATen/core/TensorBody.h>
-#include <ATen/ops/imag.h>
-#include <c10/core/ScalarType.h>
-#include <cstdint>
 #include <iostream>
 #include <eisgenerator/model.h>
 #include <eisgenerator/eistype.h>
 
-#include <math.h>
-#include <torch/csrc/autograd/generated/variable_factories.h>
-#include <torch/torch.h>
-#include <cmath>
-#include <functional>
-#include <Eigen/Dense>
-#include "Eigen/src/Core/Matrix.h"
-#include "LBFGS.h"
-#include "eigentorchconversions.h"
+#include "drt/drt.h"
 
 void printImpedance(const std::vector<eis::DataPoint>& data)
 {
@@ -34,247 +21,22 @@
 	std::cout<<"]\n";
 }
 
-torch::Tensor eisToTensor(const std::vector<eis::DataPoint>& data, torch::Tensor* freqs)
-{
-	torch::TensorOptions options = tensorOptCpuNg<fvalue>();
-
-	if constexpr(std::is_same<fvalue, float>::value)
-		options = options.dtype(torch::kComplexFloat);
-	else
-		options = options.dtype(torch::kComplexDouble);
-	torch::Tensor output = torch::empty({static_cast<long int>(data.size())}, options);
-	if(freqs)
-		*freqs = torch::empty({static_cast<long int>(data.size())}, tensorOptCpuNg<fvalue>());
-
-	torch::Tensor real = torch::real(output);
-	torch::Tensor imag = torch::imag(output);
-
-	auto realAccessor = real.accessor<fvalue, 1>();
-	auto imagAccessor = imag.accessor<fvalue, 1>();
-	float* tensorFreqDataPtr = freqs ? freqs->contiguous().data_ptr<float>() : nullptr;
-
-	for(size_t i = 0; i < data.size(); ++i)
-	{
-		fvalue real = data[i].im.real();
-		fvalue imag = data[i].im.imag();
-		if(std::isnan(real) || std::isinf(real))
-			real = 0;
-		if(std::isnan(imag) || std::isinf(imag))
-			real = 0;
-
-		realAccessor[i] = real;
-		imagAccessor[i] = imag;
-		if(tensorFreqDataPtr)
-			tensorFreqDataPtr[i] = data[i % data.size()].omega;
-	}
-
-	return output;
-}
-
-torch::Tensor fvalueVectorToTensor(std::vector<fvalue>& vect)
-{
-	return torch::from_blob(vect.data(), {static_cast<int64_t>(vect.size())}, tensorOptCpuNg<fvalue>());
-}
-
-torch::Tensor guesStartingPoint(torch::Tensor& omega, torch::Tensor& impedanceSpectra)
-{
-	std::vector<int64_t> size = omega.sizes().vec();
-	++size[0];
-	torch::Tensor startingPoint = torch::zeros(size, tensorOptCpuNg<fvalue>());
-	startingPoint[-1] = torch::abs(impedanceSpectra[-1]);
-	return startingPoint;
-}
-
-torch::Tensor aImag(torch::Tensor& omega)
-{
-	torch::Tensor tau = 1.0/(omega/(2*M_PI));
-	torch::Tensor out = torch::zeros({omega.numel(), omega.numel()}, torch::TensorOptions().dtype(torch::kFloat32));
-	auto outAccessor = out.accessor<float, 2>();
-	auto omegaAccessor = omega.accessor<float, 1>();
-	auto tauAccessor = tau.accessor<float, 1>();
-	for(int32_t i = 0; i < out.size(0); ++i)
-	{
-		for(int32_t j = 0; j < out.size(1); ++j)
-		{
-			outAccessor[i][j] = 0.5*(omegaAccessor[i]*tauAccessor[j])/(1+std::pow(omegaAccessor[i]*tauAccessor[j], 2));
-			if(j == 0)
-				outAccessor[i][j] = outAccessor[i][j]*std::log(tauAccessor[j+1]/tauAccessor[j]);
-			else if(j == out.size(1)-1)
-				outAccessor[i][j] = outAccessor[i][j]*std::log(tauAccessor[j]/tauAccessor[j-1]);
-			else
-				outAccessor[i][j] = outAccessor[i][j]*std::log(tauAccessor[j+1]/tauAccessor[j-1]);
-		}
-	}
-	return out;
-}
-
-torch::Tensor aReal(torch::Tensor& omega)
-{
-	torch::Tensor tau = 1.0/(omega/(2*M_PI));
-	torch::Tensor out = torch::zeros({omega.numel(), omega.numel()}, torch::TensorOptions().dtype(torch::kFloat32));
-	auto outAccessor = out.accessor<float, 2>();
-	auto omegaAccessor = omega.accessor<float, 1>();
-	auto tauAccessor = tau.accessor<float, 1>();
-	for(int32_t i = 0; i < out.size(0); ++i)
-	{
-		for(int32_t j = 0; j < out.size(1); ++j)
-		{
-			outAccessor[i][j] = -0.5/(1+std::pow(omegaAccessor[i]*tauAccessor[j], 2));
-			if(j == 0)
-				outAccessor[i][j] = outAccessor[i][j]*std::log(tauAccessor[j+1]/tauAccessor[j]);
-			else if(j == out.size(1)-1)
-				outAccessor[i][j] = outAccessor[i][j]*std::log(tauAccessor[j]/tauAccessor[j-1]);
-			else
-				outAccessor[i][j] = outAccessor[i][j]*std::log(tauAccessor[j+1]/tauAccessor[j-1]);
-		}
-	}
-	return out;
-}
-
-/*def S(gamma_R_inf, Z_exp_re, Z_exp_im, A_re, A_im, el):
-	MSE_re = np.sum((gamma_R_inf[-1] + np.matmul(A_re, gamma_R_inf[:-1]) - Z_exp_re)**2)
-	MSE_im = np.sum((np.matmul(A_im, gamma_R_inf[:-1]) - Z_exp_im)**2)
-	reg_term = el/2*np.sum(gamma_R_inf[:-1]**2)
-	obj = MSE_re + MSE_im + reg_term
-	return obj
-
-torch::Tensor tikhnovDrt(torch::Tensor& omega, torch::Tensor& impedanceSpectra, fvalue regularaziaion = 1e-2)
-{
-	torch::Tensor aMatrixImag = aImag(omega);
-	torch::Tensor aMatrixReal = aReal(omega);
-	torch::Tensor startingPoint = guesStartingPoint(omega, impedanceSpectra);
-
-	torch::Tensor bounds = torch::zeros({startingPoint.size(0), 1}, tensorOptCpuNg<fvalue>());
-	bounds = torch::cat({bounds, torch::zeros({startingPoint.size(0), 1}, tensorOptCpuNg<fvalue>())*torch::max(torch::abs(impedanceSpectra))});
-
-	std::cout<<"startingPoint:\n "<<startingPoint<<'\n';
-	std::cout<<"bounds:\n "<<bounds<<'\n';
-
-	result = minimize(S, x0, args=(Z_exp_re, Z_exp_im, A_re, A_im, el), method=method,
-		bounds = bounds, options={'disp': True, 'ftol':1e-10, 'maxiter':200})
-	gamma_R_inf = result.x
-	R_inf = gamma_R_inf[-1]
-	gamma = gamma_R_inf[:-1]
-	return gamma, R_inf
-	return bounds;
-}*/
-
-class RtFunct
-{
-private:
-	torch::Tensor impedanceSpectra;
-	torch::Tensor aMatrixImag;
-	torch::Tensor aMatrixReal;
-	fvalue el;
-	fvalue epsilon;
-
-public:
-	RtFunct(torch::Tensor impedanceSpectraI, torch::Tensor aMatrixImagI, torch::Tensor aMatrixRealI, fvalue elI, fvalue epsilonI):
-	impedanceSpectra(impedanceSpectraI),
-	aMatrixImag(aMatrixImagI),
-	aMatrixReal(aMatrixRealI),
-	el(elI),
-	epsilon(epsilonI)
-	{
-
-	}
-
-	fvalue function(const torch::Tensor& x)
-	{
-		assert(checkTorchType<fvalue>(x));
-		assert(x.sizes().size() == 1);
-		auto xAccessor = x.accessor<fvalue, 1>();
-		int64_t size = x.numel();
-		torch::Tensor xLeft = x.narrow(0, 0, x.numel()-1);
-
-		std::cout<<"x:\n"<<x<<'\n';
-		std::cout<<"xLeft:\n"<<xLeft<<'\n';
-		std::cout<<"real:\n"<<torch::real(impedanceSpectra)<<'\n';
-		std::cout<<"imag:\n"<<torch::imag(impedanceSpectra)<<'\n';
-		std::cout<<"aMatrixReal:\n"<<aMatrixReal<<'\n';
-		std::cout<<"aMatrixImag:\n"<<aMatrixImag<<'\n';
-
-
-		torch::Tensor MSE_re = torch::sum(torch::pow(xAccessor[size-1] + torch::matmul(aMatrixReal, xLeft) - torch::real(impedanceSpectra), 2));
-		torch::Tensor MSE_im = torch::sum(torch::pow(torch::matmul(aMatrixImag, xLeft) - torch::imag(impedanceSpectra), 2));
-		torch::Tensor reg_term = el/2*torch::sum(torch::pow(xLeft, 2));
-		torch::Tensor obj = MSE_re + MSE_im + reg_term;
-		return obj.item().to<fvalue>();
-	}
-
-	static torch::Tensor getGrad(std::function<fvalue(const torch::Tensor& x)> fn, const torch::Tensor& xTensor, fvalue epsilon)
-	{
-		torch::Tensor out = torch::zeros(xTensor.sizes(), tensorOptCpuNg<fvalue>());
-		auto outAccessor = out.accessor<fvalue, 1>();
-		assert(checkTorchType<fvalue>(xTensor));
-		auto xAccessor = xTensor.accessor<fvalue, 1>();
-		for(int64_t i = 0; i < out.size(0); ++i)
-		{
-			xAccessor[i] -= epsilon;
-			fvalue left = fn(xTensor);
-			xAccessor[i] += 2*epsilon;
-			fvalue right = fn(xTensor);
-			xAccessor[i] -= epsilon;
-			outAccessor[i] = (right-left)/(2*epsilon);
-		}
-		return out;
-	}
-
-	fvalue operator()(const Eigen::VectorX<fvalue>& x, Eigen::VectorX<fvalue>& grad)
-	{
-		Eigen::MatrixX<fvalue> xMatrix = x;
-		torch::Tensor xTensor = eigen2libtorch(xMatrix);
-		xTensor = xTensor.reshape({xTensor.numel()});
-		//std::cout<<"xTensor\n "<<xTensor<<'\n';
-		torch::Tensor gradTensor = getGrad(std::bind(&RtFunct::function, this, std::placeholders::_1), xTensor, epsilon);
-		grad = libtorch2eigenVector<fvalue>(gradTensor);
-		return function(xTensor);
-	}
-};
-
-static void testFunc(RtFunct& funct, torch::Tensor& omega)
-{
-	std::cout<<__func__<<'\n';
-	std::vector<int64_t> size = omega.sizes().vec();
-	++size[0];
-	torch::Tensor x = torch::zeros(size, tensorOptCpuNg<fvalue>());
-	x[-1] = 3;
-	x[0] = 0.5;
-	std::cout<<"RtFunct.function: "<<funct.function(x)<<std::endl;
-}
-
 int main(int argc, char** argv)
 {
 	std::cout<<std::scientific;
 
-	eis::Range omega(1, 1e6, 3, true);
+	eis::Range omega(1, 1e6, 4, true);
 	std::vector<fvalue> omegaVector = omega.getRangeVector();
-	torch::Tensor omegaTensor = fvalueVectorToTensor(omegaVector);
 
 	eis::Model model("r{10}-r{50}p{0.02, 0.8}");
 	std::vector<eis::DataPoint> data = model.executeSweep(omega);
-	torch::Tensor impedanceSpectra = eisToTensor(data, nullptr);
-
-	torch::Tensor aMatrixImag = aImag(omegaTensor);
-	torch::Tensor aMatrixReal = aReal(omegaTensor);
-
 	printImpedance(data);
 
-	LBFGSpp::LBFGSParam<fvalue> fitParam;
-	fitParam.epsilon = 1e-2;
-	fitParam.max_iterations = 100;
-	fitParam.max_linesearch = 1000;
-
-	LBFGSpp::LBFGSSolver<fvalue> solver(fitParam);
-	RtFunct funct(impedanceSpectra, aMatrixImag, aMatrixReal, 0.01, 0.001);
-
-	torch::Tensor startTensor = guesStartingPoint(omegaTensor, impedanceSpectra);
-	Eigen::VectorX<fvalue> x = libtorch2eigenVector<fvalue>(startTensor);
-	fvalue fx;
-	int iterations = solver.minimize(funct, x, fx);
+	FitMetics fm;
+	torch::Tensor x = calcDrt(data, omegaVector, fm);
 
-	std::cout<<"Iterations: "<<iterations<<'\n';
-	std::cout<<"fx "<<fx<<'\n';
+	std::cout<<"Iterations: "<<fm.iterations<<'\n';
+	std::cout<<"fx "<<fm.fx<<'\n';
 	std::cout<<"xVect\n"<<x<<'\n';
 
 	return 0;
diff --git a/tensoroptions.h b/tensoroptions.h
new file mode 100644
index 0000000000000000000000000000000000000000..7fc1701552df81b2c8ca61adefd9c33db9a2a531
--- /dev/null
+++ b/tensoroptions.h
@@ -0,0 +1,18 @@
+#pragma once
+#include <torch/torch.h>
+
+template <typename V>
+inline torch::TensorOptions tensorOptCpu(bool grad = true)
+{
+	static_assert(std::is_same<V, float>::value || std::is_same<V, double>::value,
+		"This function can only be passed double or float types");
+	torch::TensorOptions options;
+	if constexpr(std::is_same<V, float>::value)
+		options = options.dtype(torch::kFloat32);
+	else
+		options = options.dtype(torch::kFloat64);
+	options = options.layout(torch::kStrided);
+	options = options.device(torch::kCPU);
+	options = options.requires_grad(grad);
+	return options;
+}
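Taken together, the patch turns the DRT fitting code into a small shared library whose consumers only need drt/drt.h. A condensed sketch of the intended call sequence, mirroring the new main.cpp (the model string and frequency range are simply the values used there):

#include <iostream>
#include <eisgenerator/model.h>
#include <eisgenerator/eistype.h>
#include "drt/drt.h"

int main()
{
	// generate a synthetic impedance spectrum with eisgenerator
	eis::Range omega(1, 1e6, 4, true);
	eis::Model model("r{10}-r{50}p{0.02, 0.8}");
	std::vector<eis::DataPoint> data = model.executeSweep(omega);

	// fit the distribution of relaxation times; fm reports solver iterations and final cost
	FitMetics fm;
	torch::Tensor drt = calcDrt(data, omega.getRangeVector(), fm);

	std::cout<<"iterations: "<<fm.iterations<<" fx: "<<fm.fx<<'\n'<<drt<<'\n';
	return 0;
}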