libdrt / Commits / bc7fa547

Commit bc7fa547 authored 2 years ago by Carl Philipp Klemm

working minimization

Parent: 8ad62dca

Changes: 2 changed files, with 94 additions and 45 deletions
    eigentorchconversions.h  +51 −3
    main.cpp                 +43 −42
eigentorchconversions.h  +51 −3
#include <climits>
#include <sys/types.h>
#include <torch/torch.h>
#include <Eigen/Dense>
#include <torch/types.h>
#include <vector>

template <typename V>
inline torch::TensorOptions tensorOptCpuNg()
{
	static_assert(std::is_same<V, float>::value || std::is_same<V, double>::value,
		"This function can only be passed double or float types");
	torch::TensorOptions options;
	if constexpr(std::is_same<V, float>::value)
		options = options.dtype(torch::kFloat32);
	else
		options = options.dtype(torch::kFloat64);
	options = options.layout(torch::kStrided);
	options = options.device(torch::kCPU);
	options = options.requires_grad(false);
	return options;
}
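The template selects the dtype at compile time from the scalar type, so every call site gets matching options without runtime sizeof checks. A minimal usage sketch (hypothetical, not part of the commit):

	// Hypothetical sketch: the dtype follows the template argument.
	torch::Tensor a = torch::empty({8}, tensorOptCpuNg<float>());   // kFloat32, CPU, strided, no grad
	torch::Tensor b = torch::empty({8}, tensorOptCpuNg<double>());  // kFloat64, CPU, strided, no grad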
template <typename V>
bool checkTorchType(const torch::Tensor& tensor)
{
	static_assert(std::is_same<V, float>::value || std::is_same<V, double>::value ||
		std::is_same<V, int64_t>::value || std::is_same<V, int32_t>::value ||
		std::is_same<V, int8_t>::value,
		"This function does not work with this type");
	if constexpr(std::is_same<V, float>::value)
		return tensor.dtype() == torch::kFloat32;
	else if constexpr(std::is_same<V, double>::value)
		return tensor.dtype() == torch::kFloat64;
	else if constexpr(std::is_same<V, int64_t>::value)
		return tensor.dtype() == torch::kInt64;
	else if constexpr(std::is_same<V, int32_t>::value)
		return tensor.dtype() == torch::kInt32;
	else if constexpr(std::is_same<V, int8_t>::value)
		return tensor.dtype() == torch::kInt8;
}
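checkTorchType is the runtime counterpart to the static_assert above: it verifies a tensor's dtype before raw, typed data access. A hypothetical guard sketch:

	// Hypothetical sketch: verify the dtype before taking a typed pointer.
	torch::Tensor t = torch::zeros({4}, tensorOptCpuNg<float>());
	assert(checkTorchType<float>(t));  // passes: dtype is kFloat32
	float* raw = t.data_ptr<float>();  // safe only after the dtype check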
template <typename V>
using MatrixXrm = typename Eigen::Matrix<V, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

...
@@ -10,7 +47,7 @@ torch::Tensor eigen2libtorch(Eigen::MatrixX<V> &M)
 {
 	Eigen::Matrix<V, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> E(M);
 	std::vector<int64_t> dims = {E.rows(), E.cols()};
-	auto T = torch::from_blob(E.data(), dims).clone(); //.to(torch::kCPU);
+	auto T = torch::from_blob(E.data(), dims, tensorOptCpuNg<V>()).clone();
 	return T;
 }
...
@@ -18,7 +55,7 @@ template <typename V>
 torch::Tensor eigen2libtorch(MatrixXrm<V> &E, bool copydata = true)
 {
 	std::vector<int64_t> dims = {E.rows(), E.cols()};
-	auto T = torch::from_blob(E.data(), dims);
+	auto T = torch::from_blob(E.data(), dims, tensorOptCpuNg<V>());
 	if (copydata)
 		return T.clone();
 	else
...
@@ -26,13 +63,24 @@ torch::Tensor eigen2libtorch(MatrixXrm<V> &E, bool copydata = true)
 }
 
 template <typename V>
-Eigen::Matrix<V, Eigen::Dynamic, Eigen::Dynamic> libtorch2eigen(torch::Tensor &Tin)
+Eigen::Matrix<V, Eigen::Dynamic, Eigen::Dynamic> libtorch2eigenMaxtrix(torch::Tensor &Tin)
 {
 	/*
 	LibTorch is Row-major order and Eigen is Column-major order.
 	MatrixXrm uses Eigen::RowMajor for compatibility.
 	*/
 	assert(checkTorchType<V>(Tin));
 	auto T = Tin.to(torch::kCPU);
 	Eigen::Map<MatrixXrm<V>> E(T.data_ptr<V>(), T.size(0), T.size(1));
 	return E;
 }
+
+template <typename V>
+Eigen::Vector<V, Eigen::Dynamic> libtorch2eigenVector(torch::Tensor &Tin)
+{
+	assert(Tin.sizes().size() == 1);
+	assert(checkTorchType<V>(Tin));
+	auto T = Tin.to(torch::kCPU);
+	Eigen::Map<Eigen::Vector<V, Eigen::Dynamic>> E(T.data_ptr<V>(), T.numel());
+	return E;
+}
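Taken together these helpers allow round-tripping data between Eigen and LibTorch. A hypothetical sketch of such a round trip (assuming float data, and keeping the identifier spelling from this commit):

	// Hypothetical round-trip sketch: Eigen -> torch -> Eigen.
	Eigen::MatrixX<float> m = Eigen::MatrixX<float>::Random(3, 4);
	torch::Tensor t = eigen2libtorch(m);                           // clones into a float32 tensor
	Eigen::MatrixX<float> back = libtorch2eigenMaxtrix<float>(t);  // maps back, row-major aware
	// back should now equal m element-wise.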
main.cpp  +43 −42

...
@@ -32,24 +32,11 @@ void printImpedance(const std::vector<eis::DataPoint>& data)
 	std::cout<<"]\n";
 }
 
-torch::TensorOptions getTensorOptions()
-{
-	torch::TensorOptions options;
-	if constexpr(sizeof(fvalue) == sizeof(float))
-		options = options.dtype(torch::kFloat32);
-	else if constexpr(sizeof(fvalue) == sizeof(double))
-		options = options.dtype(torch::kFloat64);
-	options = options.layout(torch::kStrided);
-	options = options.device(torch::kCPU);
-	options = options.requires_grad(false);
-	return options;
-}
-
 torch::Tensor eisToTensor(const std::vector<eis::DataPoint>& data, torch::Tensor* freqs)
 {
-	torch::Tensor output = torch::empty({static_cast<long int>(data.size()*2)}, getTensorOptions());
+	torch::Tensor output = torch::empty({static_cast<long int>(data.size()*2)}, tensorOptCpuNg<fvalue>());
 	if(freqs)
-		*freqs = torch::empty({static_cast<long int>(data.size()*2)}, getTensorOptions());
+		*freqs = torch::empty({static_cast<long int>(data.size()*2)}, tensorOptCpuNg<fvalue>());
 
 	float* tensorDataPtr = output.contiguous().data_ptr<float>();
 	float* tensorFreqDataPtr = freqs ? freqs->contiguous().data_ptr<float>() : nullptr;
...
@@ -70,12 +57,12 @@ torch::Tensor eisToTensor(const std::vector<eis::DataPoint>& data, torch::Tensor
 torch::Tensor fvalueVectorToTensor(std::vector<fvalue>& vect)
 {
-	return torch::from_blob(vect.data(), {static_cast<int64_t>(vect.size())}, getTensorOptions());
+	return torch::from_blob(vect.data(), {static_cast<int64_t>(vect.size())}, tensorOptCpuNg<fvalue>());
 }
 
 torch::Tensor guesStartingPoint(torch::Tensor& omega, torch::Tensor& impedanceSpectra)
 {
-	torch::Tensor startingPoint = torch::zeros(omega.sizes(), getTensorOptions());
+	torch::Tensor startingPoint = torch::zeros(omega.sizes(), tensorOptCpuNg<fvalue>());
 	startingPoint[-1] = torch::abs(impedanceSpectra[-1]);
 	return startingPoint;
 }
...
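Note that torch::from_blob does not copy, so the tensor returned by fvalueVectorToTensor aliases the vector's storage. A hypothetical sketch of the implication:

	// Hypothetical sketch: the tensor views, not owns, the vector's memory.
	std::vector<fvalue> v = {1, 2, 3};
	torch::Tensor t = fvalueVectorToTensor(v);
	v[0] = 42;  // the change is also visible through t
	// t must not outlive v, and v must not reallocate while t is alive.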
@@ -126,26 +113,33 @@ torch::Tensor aReal(torch::Tensor& omega)
 	return out;
 }
 
 /*def S(gamma_R_inf, Z_exp_re, Z_exp_im, A_re, A_im, el):
 	MSE_re = np.sum((gamma_R_inf[-1] + np.matmul(A_re, gamma_R_inf[:-1]) - Z_exp_re)**2)
 	MSE_im = np.sum((np.matmul(A_im, gamma_R_inf[:-1]) - Z_exp_im)**2)
 	reg_term = el/2*np.sum(gamma_R_inf[:-1]**2)
 	obj = MSE_re + MSE_im + reg_term
 	return obj
 
 torch::Tensor tikhnovDrt(torch::Tensor& omega, torch::Tensor& impedanceSpectra, fvalue regularaziaion = 1e-2)
 {
 	torch::Tensor aMatrixImag = aImag(omega);
 	torch::Tensor aMatrixReal = aReal(omega);
 	torch::Tensor startingPoint = guesStartingPoint(omega, impedanceSpectra);
-	torch::Tensor bounds = torch::zeros({startingPoint.size(0), 1}, getTensorOptions());
-	bounds = torch::cat({bounds, torch::zeros({startingPoint.size(0), 1}, getTensorOptions())*torch::max(torch::abs(impedanceSpectra))});
+	torch::Tensor bounds = torch::zeros({startingPoint.size(0), 1}, tensorOptCpuNg<fvalue>());
+	bounds = torch::cat({bounds, torch::zeros({startingPoint.size(0), 1}, tensorOptCpuNg<fvalue>())*torch::max(torch::abs(impedanceSpectra))});
 	std::cout<<"startingPoint:\n "<<startingPoint<<'\n';
 	std::cout<<"bounds:\n "<<bounds<<'\n';
 
 	/*
 	result = minimize(S, x0, args=(Z_exp_re, Z_exp_im, A_re, A_im, el), method=method,
 		bounds = bounds, options={'disp': True, 'ftol':1e-10, 'maxiter':200})
 	gamma_R_inf = result.x
 	R_inf = gamma_R_inf[-1]
 	gamma = gamma_R_inf[:-1]
 	return gamma, R_inf
 	*/
-	return gamma, R_inf
+	return bounds;
 }
 */
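For reference, the commented-out Python objective S above is a Tikhonov-regularized DRT cost. In the comment's notation (a transcription only, with λ standing in for el, γ for gamma_R_inf[:-1], and R∞ for the trailing entry):

	S(\gamma, R_\infty) = \sum_k \left( R_\infty + (A_{\mathrm{re}}\,\gamma)_k - Z^{\mathrm{exp}}_{\mathrm{re},k} \right)^2
	                    + \sum_k \left( (A_{\mathrm{im}}\,\gamma)_k - Z^{\mathrm{exp}}_{\mathrm{im},k} \right)^2
	                    + \frac{\lambda}{2} \sum_n \gamma_n^2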
class RtFunct
{
...
@@ -153,11 +147,11 @@ private:
 	torch::Tensor impedanceSpectra;
 	torch::Tensor aMatrixImag;
 	torch::Tensor aMatrixReal;
-	double el;
-	double epsilon;
+	fvalue el;
+	fvalue epsilon;
 
 public:
-	RtFunct(torch::Tensor impedanceSpectraI, torch::Tensor aMatrixImagI, torch::Tensor aMatrixRealI, double elI, double epsilonI):
+	RtFunct(torch::Tensor impedanceSpectraI, torch::Tensor aMatrixImagI, torch::Tensor aMatrixRealI, fvalue elI, fvalue epsilonI):
 	impedanceSpectra(impedanceSpectraI),
 	aMatrixImag(aMatrixImagI),
 	aMatrixReal(aMatrixRealI),
...
@@ -167,39 +161,42 @@ public:
 	}
 
-	static double function(const torch::Tensor& x)
+	static fvalue function(const torch::Tensor& x)
 	{
-		auto xAccessor = x.accessor<double, 1>();
-		double accum = 0;
+		assert(checkTorchType<fvalue>(x));
+		auto xAccessor = x.accessor<fvalue, 1>();
+		fvalue accum = 0;
 		for(int64_t i = 0; i < x.size(0); ++i)
-			accum += xAccessor[i]*xAccessor[i];
+			accum += (xAccessor[i]+3)*(xAccessor[i]+3)+20;
 		return accum;
 	}
-	static torch::Tensor getGrad(std::function<double(const torch::Tensor& x)> fn, const torch::Tensor& xTensor, double epsilon)
+	static torch::Tensor getGrad(std::function<fvalue(const torch::Tensor& x)> fn, const torch::Tensor& xTensor, fvalue epsilon)
 	{
-		torch::Tensor out = torch::zeros(xTensor.sizes(), getTensorOptions());
+		torch::Tensor out = torch::zeros(xTensor.sizes(), tensorOptCpuNg<fvalue>());
 		auto outAccessor = out.accessor<fvalue, 1>();
-		auto xAccessor = xTensor.accessor<double, 1>();
+		assert(checkTorchType<fvalue>(xTensor));
+		auto xAccessor = xTensor.accessor<fvalue, 1>();
 		for(int64_t i = 0; i < out.size(0); ++i)
 		{
 			xAccessor[i] -= epsilon;
-			double left = fn(xTensor);
+			fvalue left = fn(xTensor);
 			xAccessor[i] += 2*epsilon;
-			double right = fn(xTensor);
+			fvalue right = fn(xTensor);
 			xAccessor[i] -= epsilon;
 			outAccessor[i] = (right-left)/(2*epsilon);
 		}
 		return out;
 	}
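getGrad approximates the gradient by central differences: each coordinate is perturbed in place by ±epsilon and the secant slope is taken,

	\frac{\partial f}{\partial x_i} \approx \frac{f(x + \varepsilon e_i) - f(x - \varepsilon e_i)}{2\varepsilon}

which has O(ε²) truncation error at the cost of two function evaluations per coordinate.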
-	double operator()(const Eigen::VectorXd& x, Eigen::VectorXd& grad)
+	fvalue operator()(const Eigen::VectorX<fvalue>& x, Eigen::VectorX<fvalue>& grad)
 	{
-		Eigen::MatrixX<double> xMatrix = x;
-		torch::Tensor xTensor = eigen2libtorch(xMatrix).reshape({xTensor.numel()});
+		Eigen::MatrixX<fvalue> xMatrix = x;
+		torch::Tensor xTensor = eigen2libtorch(xMatrix);
+		xTensor = xTensor.reshape({xTensor.numel()});
 		std::cout<<"xTensor\n"<<xTensor<<'\n';
 		torch::Tensor gradTensor = getGrad(&function, xTensor, epsilon);
-		grad = libtorch2eigen<double>(gradTensor);
+		grad = libtorch2eigenVector<fvalue>(gradTensor);
 		return function(xTensor);
 	}
 };
...
@@ -224,15 +221,19 @@ int main(int argc, char** argv)
 	printImpedance(data);
 
-	LBFGSpp::LBFGSParam<double> fitParam;
+	LBFGSpp::LBFGSParam<fvalue> fitParam;
 	fitParam.epsilon = 1e-6;
 	fitParam.max_iterations = 100;
 
-	LBFGSpp::LBFGSSolver<double> solver(fitParam);
+	LBFGSpp::LBFGSSolver<fvalue> solver(fitParam);
 	RtFunct funct(impedanceSpectra, aMatrixImag, aMatrixReal, 0.1, 0.001);
 
-	Eigen::VectorXd x = Eigen::VectorXd::Ones(4)*3;
-	double fx;
+	Eigen::VectorX<fvalue> x = Eigen::VectorX<fvalue>::Ones(4)*3;
+	fvalue fx;
 	int iterations = solver.minimize(funct, x, fx);
 
 	std::cout<<"Iterations: "<<iterations<<'\n';
 	std::cout<<"fx "<<fx<<'\n';
 	std::cout<<"xVect\n"<<x<<'\n';
 
 	return 0;
 }
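Since RtFunct::function currently evaluates the shifted test objective rather than the DRT cost, a successful run starting from x = (3, 3, 3, 3) would be expected, assuming convergence, to approach

	f(x) = \sum_{i=1}^{4} \left( (x_i + 3)^2 + 20 \right), \qquad
	x^\ast = (-3, -3, -3, -3), \qquad f(x^\ast) = 80.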