This is an archived project. Repository and other project resources are read-only.
mbd / psimpy

Commit 10160bbe
Authored 2 years ago by Hu Zhao

feat: add check for duplicated iteration points, modify uncertainty indicator
Parent: a1ce0ead
1 changed file: src/psimpy/inference/active_learning.py (+29 additions, −20 deletions)
@@ -10,7 +10,6 @@ from psimpy.emulator.robustgasp import ScalarGaSP
 from psimpy.utility.util_funcs import check_bounds
 
 _min_float = 10**(sys.float_info.min_10_exp)
-_max_exp = sys.float_info.max_exp
 
 
 class ActiveLearning:
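
The dropped `_max_exp` guard becomes unnecessary because of the variance-indicator rewrite in the last hunk below; `_min_float` stays behind as a positive floor for logarithm arguments. A quick sketch, not part of the commit, of what these module constants evaluate to on IEEE-754 doubles:

    import sys

    _min_float = 10**(sys.float_info.min_10_exp)  # kept: 1e-307, a safe floor for np.log arguments
    _max_exp = sys.float_info.max_exp             # removed: 1024, the former exp() overflow guard
    print(_min_float, _max_exp)                   # 1e-307 1024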
@@ -139,18 +138,14 @@ class ActiveLearning:
         ranges = tuple(tuple(bounds[i]) for i in range(ndim))
         args_optimizer = [ranges]
         if kwgs_optimizer is not None:
-            allowed_keys = {"ranges", "args", "Ns", "full_output", "finish",
-                "disp", "workers"}
+            allowed_keys = {"Ns", "workers"}
             if not set(kwgs_optimizer.keys()).issubset(allowed_keys):
                 raise ValueError(
                     "unsupported keyword(s) in kwgs_optimizer "
-                    "for optimze.brute")
-            if "ranges" in kwgs_optimizer.keys():
-                del kwgs_optimizer["ranges"]
-            if "args" in kwgs_optimizer.keys():
-                del kwgs_optimizer["args"]
-            if "full_output" in kwgs_optimizer.keys():
-                kwgs_optimizer["full_output"] = 0
+                    "allowed keys are 'Ns' and 'workers' for "
+                    "optimize.brute")
+        else:
+            kwgs_optimizer = {"Ns": 50}
+        kwgs_optimizer.update({"finish": None})
         self.optimizer = optimizer
         self.args_prior = () if args_prior is None else args_prior
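
After this change only `Ns` and `workers` may be user-supplied; `ranges`, `args`, `full_output`, and `finish` are controlled by the class itself, with `finish=None` forced so that `scipy.optimize.brute` returns the raw grid minimizer without a local polishing step. A minimal sketch of how the resulting keyword arguments reach `optimize.brute`; the objective `neg_indicator` is a hypothetical stand-in, not the class's actual acquisition function:

    import numpy as np
    from scipy import optimize

    def neg_indicator(x, *args):
        # stand-in objective: brute() minimizes this over the grid
        return float(np.sum((x - 0.3)**2))

    ranges = ((0.0, 1.0), (0.0, 1.0))        # one (lower, upper) tuple per dimension
    kwgs_optimizer = {"Ns": 50}              # default set by the constructor above
    kwgs_optimizer.update({"finish": None})  # forced: skip the local refinement step

    # With full_output left at its default 0 and finish=None, brute() returns
    # only the best grid point, an array of shape (ndim,).
    next_var_sample = optimize.brute(neg_indicator, ranges, **kwgs_optimizer)
    print(next_var_sample)                   # approximately [0.3 0.3]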
@@ -253,6 +248,13 @@ class ActiveLearning:
             Natural logarithm values of the product of prior and likelihood
             at `ninit` and `niter` simulations.
             1d array of shape (ninit+niter,).
+
+        Notes
+        -----
+        If a duplicated iteration point is returned by the `optimizer`, the
+        iteration will be stopped right away. In that case, the first dimension
+        of returned `var_samples`, `sim_outputs`, `ln_pxl_values` is smaller
+        than ninit+niter.
         """
         if init_var_samples.shape != (ninit, self.ndim):
             raise ValueError("init_var_samples must be of shape (ninit, ndim)")
@@ -269,7 +271,7 @@ class ActiveLearning:
             raise ValueError("Each item of iter_prefixes must be unique")
 
         ln_pxl_values = [
-            self._compute_ln_pxl(init_var_samples[i,:], init_sim_outputs[i,:])
+            self._compute_ln_pxl(init_var_samples[i,:], init_sim_outputs[i])
             for i in range(ninit)
         ]
         var_samples = init_var_samples
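
Dropping the trailing `,:` appears to make the lookup robust when `init_sim_outputs` is a 1-D array (one scalar output per simulation): plain `a[i]` works for both 1-D and 2-D arrays, whereas `a[i,:]` raises on 1-D input. An illustrative check; this reading of the change is an assumption, not stated in the commit:

    import numpy as np

    out_2d = np.arange(6.0).reshape(3, 2)
    print(np.array_equal(out_2d[1, :], out_2d[1]))  # True: identical for 2-D arrays

    out_1d = np.arange(3.0)
    print(out_1d[1])                                # 1.0: plain indexing still works
    try:
        out_1d[1, :]
    except IndexError as err:
        print("IndexError:", err)                   # too many indices for array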
@@ -293,7 +295,16 @@ class ActiveLearning:
                     "solution or a OptimizeResult object having x attribute")
             next_var_sample = next_var_sample.reshape((1, self.ndim))
 
-            var_samples = np.vstack((var_samples, next_var_sample))
+            temp_var_samples = np.vstack((var_samples, next_var_sample))
+            if len(np.unique(temp_var_samples, axis=0)) != len(var_samples) + 1:
+                print(
+                    "Optimizer finds duplicated next_var_sample at "
+                    "iteration {i}. The active learning process will "
+                    "be terminated.")
+                break
+            var_samples = temp_var_samples
+
             self.run_sim_obj.serial_run(next_var_sample, [iter_prefixes[i]],
                 append=False)
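
The new check relies on `np.unique` with `axis=0`, which de-duplicates whole rows: if stacking the candidate onto `var_samples` does not increase the number of unique rows by one, the optimizer has revisited an existing point and the loop breaks, as documented in the Notes section added above. A self-contained illustration with made-up data:

    import numpy as np

    var_samples = np.array([[0.1, 0.2],
                            [0.3, 0.4]])
    next_var_sample = np.array([[0.3, 0.4]])  # repeats an existing row

    temp_var_samples = np.vstack((var_samples, next_var_sample))
    is_duplicate = len(np.unique(temp_var_samples, axis=0)) != len(var_samples) + 1
    print(is_duplicate)                       # True: the iteration would stop here

Note that, as committed, the message passed to `print` is a plain string, so `{i}` is printed literally rather than interpolated; an `f` prefix would be needed for that.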
@@ -437,14 +448,12 @@ class ActiveLearning:
         std = predict[:, 3]
 
         if self.indicator == "entropy":
-            neg_val = -(mean + 0.5*np.log(2*np.pi*np.e*std**2))
+            neg_val = -mean - 0.5*np.log(
+                np.maximum(2*np.pi*np.e*std**2, _min_float))
         elif self.indicator == "variance":
-            if std**2 < _max_exp:
-                exp_std2 = np.exp(std**2)
-            else:
-                exp_std2 = np.exp(_max_exp)
-            neg_val = -(2*mean + std**2) - np.log(
-                np.maximum(exp_std2 - 1, _min_float))
+            neg_val = -(2*mean + 2*std**2) - np.log(
+                np.maximum(1 - np.exp(-std**2), _min_float)
+            )
 
         return float(neg_val)
\ No newline at end of file
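
The variance branch treats the emulator's `mean` and `std` as the moments of a Gaussian in log space, so the quantity of interest behaves like a lognormal variable. The rewrite uses the identity

    Var = exp(2*mean + std**2) * (exp(std**2) - 1)
        = exp(2*mean + 2*std**2) * (1 - exp(-std**2)),

whose second form never exponentiates a large positive number, which is why the `_max_exp` overflow guard could be deleted in the first hunk. A quick numerical check of the two factorizations in log form; the lognormal reading is inferred from the code, not stated in the commit:

    import numpy as np

    mean, std = 0.7, 1.3
    old_log_var = (2*mean + std**2) + np.log(np.exp(std**2) - 1)
    new_log_var = (2*mean + 2*std**2) + np.log(1 - np.exp(-std**2))
    print(np.isclose(old_log_var, new_log_var))               # True

    # For large std the old form overflows (np.exp(900) -> inf) while the new
    # form stays finite; the np.maximum(..., _min_float) in the committed code
    # additionally guards the std -> 0 limit, where 1 - exp(-std**2) -> 0.
    std = 30.0
    print((2*mean + 2*std**2) + np.log(1 - np.exp(-std**2)))  # finite: 1801.4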