diff --git a/src/psimpy/inference/active_learning.py b/src/psimpy/inference/active_learning.py
index c178af67393fcae7a3ed8732784d704304b37e2b..5d58c7fd1694dc929babb8f32064e60ca458bab2 100644
--- a/src/psimpy/inference/active_learning.py
+++ b/src/psimpy/inference/active_learning.py
@@ -10,7 +10,6 @@ from psimpy.emulator.robustgasp import ScalarGaSP
 from psimpy.utility.util_funcs import check_bounds
 
 _min_float = 10**(sys.float_info.min_10_exp)
-_max_exp = sys.float_info.max_exp
 
 class ActiveLearning:
 
@@ -139,18 +138,14 @@ class ActiveLearning:
             ranges = tuple(tuple(bounds[i]) for i in range(ndim))
             args_optimizer = [ranges]
             if kwgs_optimizer is not None:
-                allowed_keys = {"ranges", "args", "Ns", "full_output", "finish",
-                    "disp", "workers"}
+                allowed_keys = {"Ns", "workers"}
                 if not set(kwgs_optimizer.keys()).issubset(allowed_keys):
                     raise ValueError(
-                        "unsupported keyword(s) in kwgs_optimizer"
-                        " for optimze.brute")
-                if "ranges" in kwgs_optimizer.keys():
-                    del kwgs_optimizer["ranges"]
-                if "args" in kwgs_optimizer.keys():
-                    del kwgs_optimizer["args"]
-                if "full_output" in kwgs_optimizer.keys():
-                    kwgs_optimizer["full_output"] = 0
+                        "allowed keys are 'Ns' and 'workers' for"
+                        " optimize.brute")
+            else:
+                kwgs_optimizer = {"Ns": 50}
+            kwgs_optimizer.update({"finish": None})
         self.optimizer = optimizer
 
         self.args_prior = () if args_prior is None else args_prior
@@ -253,6 +248,13 @@ class ActiveLearning:
             Natural logarithm values of the product of prior and likelihood 
             at `ninit` and `niter` simulations. 
             1d array of shape (ninit+niter,).
+        
+        Notes
+        -----
+        If the `optimizer` returns a duplicated iteration point, the iteration
+        is terminated immediately. In that case, the first dimension of the
+        returned `var_samples`, `sim_outputs`, and `ln_pxl_values` is smaller
+        than `ninit + niter`.
         """
         if init_var_samples.shape != (ninit, self.ndim):
             raise ValueError("init_var_samples must be of shape (ninit, ndim)")
@@ -269,7 +271,7 @@ class ActiveLearning:
             raise ValueError("Each item of iter_prefixes must be unique")
         
         ln_pxl_values = [
-            self._compute_ln_pxl(init_var_samples[i,:], init_sim_outputs[i,:])
+            self._compute_ln_pxl(init_var_samples[i,:], init_sim_outputs[i])
             for i in range(ninit)
             ]
         var_samples = init_var_samples
@@ -293,7 +295,16 @@ class ActiveLearning:
                     " solution or a OptimizeResult object having x attribute")
             
             next_var_sample = next_var_sample.reshape((1, self.ndim))
-            var_samples = np.vstack((var_samples, next_var_sample))
+            temp_var_samples = np.vstack((var_samples, next_var_sample))
+
+            if len(np.unique(temp_var_samples, axis=0)) != len(var_samples) + 1:
+                print(
+                    "Optimizer finds duplicated next_var_sample at"
+                    " iteration {i}. The active learning process will"
+                    " be terminated.")
+                break
+
+            var_samples = temp_var_samples
 
             self.run_sim_obj.serial_run(next_var_sample, [iter_prefixes[i]],
                 append=False)
@@ -437,14 +448,12 @@ class ActiveLearning:
         std = predict[:, 3]
 
         if self.indicator == "entropy":
-            neg_val = -(mean + 0.5*np.log(2*np.pi*np.e*std**2))
+            neg_val = -mean - 0.5*np.log(
+                np.maximum(2*np.pi*np.e*std**2, _min_float)
+                )
         elif self.indicator == "variance":
-            if std**2 < _max_exp:
-                exp_std2 = np.exp(std**2)
-            else:
-                exp_std2 = np.exp(_max_exp)
-            neg_val = -(2*mean + std**2) - np.log(
-                np.maximum(exp_std2-1, _min_float)
+            neg_val = -(2*mean + 2*std**2) - np.log(
+                np.maximum(1-np.exp(-std**2), _min_float)
                 )
         
         return float(neg_val)
\ No newline at end of file
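
For context, a minimal sketch (not part of the patch) of how the restricted `kwgs_optimizer` is meant to feed `scipy.optimize.brute`: only `Ns` and `workers` remain user-configurable, a default of `Ns=50` is used when nothing is passed, and `finish` is forced to `None` so the returned point stays on the evaluated grid inside `bounds`. The objective below is a stand-in, not the class's actual indicator function.

```python
# Illustrative sketch, assuming a toy objective in place of ActiveLearning's
# negative indicator function.
import numpy as np
from scipy import optimize

def neg_objective(x):
    # stand-in for the negative acquisition value at point x
    return float(np.sum((x - 0.3) ** 2))

bounds = np.array([[0.0, 1.0], [0.0, 1.0]])
ranges = tuple(tuple(bounds[i]) for i in range(len(bounds)))

kwgs_optimizer = {"Ns": 50}            # default when the user passes nothing
kwgs_optimizer.update({"finish": None})  # keep the optimum on the grid

next_var_sample = optimize.brute(neg_objective, ranges, **kwgs_optimizer)
print(next_var_sample)  # grid point closest to (0.3, 0.3)
```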
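The early-termination branch added in the iteration loop relies on `np.unique(..., axis=0)`: stacking a repeated sample does not increase the number of unique rows. A tiny sketch with made-up values:

```python
# Sketch of the duplicate-row check that stops the learning loop.
import numpy as np

var_samples = np.array([[0.1, 0.2], [0.3, 0.4]])
next_var_sample = np.array([[0.3, 0.4]])   # duplicated point
temp_var_samples = np.vstack((var_samples, next_var_sample))

is_duplicate = len(np.unique(temp_var_samples, axis=0)) != len(var_samples) + 1
print(is_duplicate)  # True -> the loop would break here
```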
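The rewritten "variance" branch uses the log-variance of a lognormal distribution whose underlying normal has parameters (mean, std): Var = (exp(std^2) - 1) * exp(2*mean + std^2), hence log Var = 2*mean + 2*std^2 + log(1 - exp(-std^2)). Writing it this way avoids evaluating exp(std^2) directly, which is why the `_max_exp` clamp could be dropped. A small, illustrative consistency check against `scipy.stats.lognorm`:

```python
# Sketch verifying the log-variance identity used by the "variance" indicator.
import numpy as np
from scipy import stats

mean, std = 1.2, 0.7
log_var_identity = 2*mean + 2*std**2 + np.log(1 - np.exp(-std**2))
log_var_scipy = np.log(stats.lognorm(s=std, scale=np.exp(mean)).var())

assert np.isclose(log_var_identity, log_var_scipy)
```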