diff --git a/CHANGELOG.md b/CHANGELOG.md
index bcea28d91de58ebff8f5291f1e59345d73366b1e..61e713504a65939f69220f3a667058df24bc8983 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,12 +1,12 @@
-# Changelog
-
-## v0.1.2
+v0.1.2
+------
 
 * Elaborate on installation. Use pip from conda-forge channel to install `PSimPy`
 
 * Add info about how tests can be run
 
-## v0.1.1
+v0.1.1
+------
 
 * Add info about installation
 
@@ -14,6 +14,7 @@
 
 * Update metadata
 
-## v0.1.0
+v0.1.0
+------
 
 * First release
diff --git a/docs/Makefile b/docs/Makefile
index 2326b821594b524d48cca73939646a2a845347f3..b881cfe07591d42f02b8f614837986d46509ecba 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -4,9 +4,9 @@
 # You can set these variables from the command line, and also
 # from the environment for the first two.
 SPHINXOPTS    =
-SPHINXBUILD   = python -msphinx
-SOURCEDIR     = .
-BUILDDIR      = _build
+SPHINXBUILD   = sphinx-build
+SOURCEDIR     = source
+BUILDDIR      = source/_build
 
 # Put it first so that "make" without argument is like "make help".
 help:
diff --git a/docs/changelog.md b/docs/changelog.md
deleted file mode 100644
index 8261b353e8cb01273fad0ba8573b66cfc72e8d5c..0000000000000000000000000000000000000000
--- a/docs/changelog.md
+++ /dev/null
@@ -1,2 +0,0 @@
-```{include} ../CHANGELOG.md
-```
\ No newline at end of file
diff --git a/docs/conduct.md b/docs/conduct.md
deleted file mode 100644
index 05687052c1d17a97077d3fa07cd209e8ac17b0de..0000000000000000000000000000000000000000
--- a/docs/conduct.md
+++ /dev/null
@@ -1,2 +0,0 @@
-```{include} ../CONDUCT.md
-```
\ No newline at end of file
diff --git a/docs/examples/emulator/README.rst b/docs/examples/emulator/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..44772270aa6f45e62a6ab9c39a011442c572c16e
--- /dev/null
+++ b/docs/examples/emulator/README.rst
@@ -0,0 +1,2 @@
+Emulator
+========
\ No newline at end of file
diff --git a/docs/examples/emulator/plot_ppgasp.py b/docs/examples/emulator/plot_ppgasp.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ae4059f5459d0f6b32d65d68c0772d36e07a856
--- /dev/null
+++ b/docs/examples/emulator/plot_ppgasp.py
@@ -0,0 +1,112 @@
+"""
+
+PPGaSP: GP emulation for multi-output functions
+===============================================
+"""
+
+
+# %% md
+# 
+# This example shows how to apply Gaussian process emulation to a multi-output
+# function using class :class:`.PPGaSP`.
+#
+# The multi-output function that we are going to look at is the `DIAMOND`
+# (diplomatic and military operations in a non-warfighting domain) computer model.
+# It is used as a testbed to illustrate the `PP GaSP` emulator in the `R` package
+# `RobustGaSP`, see :cite:t:`Gu2019` for more detail.
+#
+# The simulator has :math:`13` input parameters and :math:`5` outputs. Namely,
+# :math:`\mathbf{y}=f(\mathbf{x})` where :math:`\mathbf{x}=(x_1,\ldots,x_{13})^T`
+# and :math:`\mathbf{y}=(y_1,\ldots,y_5)^T`.
+#
+# The training and testing data are provided in the folder '.../tests/data/'.
+# We first load the training and testing data. 
+
+import numpy as np
+import os
+
+dir_data = os.path.abspath('../../../tests/data/')
+
+humanityX = np.genfromtxt(os.path.join(dir_data, 'humanityX.csv'), delimiter=',')
+humanityY = np.genfromtxt(os.path.join(dir_data, 'humanityY.csv'), delimiter=',')
+print(f"Number of training data points: ", humanityX.shape[0])
+print(f"Input dimension: ", humanityX.shape[1])
+print(f"Output dimension: ", humanityY.shape[1])
+
+humanityXt = np.genfromtxt(os.path.join(dir_data, 'humanityXt.csv'), delimiter=',')
+humanityYt = np.genfromtxt(os.path.join(dir_data, 'humanityYt.csv'), delimiter=',')
+print(f"Number of testing data points: ", humanityXt.shape[0])
+
+# %% md
+# .. note:: You may need to modify ``dir_data`` according to where you save them
+#    on your local machine.
+
+# %% md
+# ``humanityX`` and ``humanityY`` are the training data, corresponding to ``design``
+# and ``response`` respectively. ``humanityXt`` are testing input data, at which
+# we are going to make predictions once the emulator is trained. ``humanityYt``
+# are the true outputs at ``humanityXt``, which is then used to validate the
+# performance of the trained emulator.
+
+# %% md
+# 
+# To build a `PP GaSP` emulator for the above simulator, first import the class
+# :class:`.PPGaSP` by
+
+from psimpy.emulator import PPGaSP
+
+# %% md
+# 
+# Then, create an instance of :class:`.PPGaSP`. The parameter ``ndim``
+# (dimension of function input ``x``) must be specified. Optional parameters, such
+# as ``method``, ``kernel_type``, etc., can be set up if desired. Here, we leave
+# all the optional parameters at their default values.
+
+emulator = PPGaSP(ndim=humanityX.shape[1])
+
+
+# %% md
+# 
+# Next, we train the `PP GaSP` emulator based on the training data using
+# the :py:meth:`.PPGaSP.train` method.
+
+emulator.train(design=humanityX, response=humanityY)
+
+# %% md
+#
+# With the trained emulator, we can make predictions at any set of
+# input points using the :py:meth:`.PPGaSP.predict` method.
+# Here, we make predictions at the testing input points ``humanityXt``.
+
+predictions = emulator.predict(humanityXt)
+
+# %% md
+# 
+# We can validate the performance of the trained emulator based on the true outputs
+# ``humanityYt``.
+
+import matplotlib.pyplot as plt
+
+fig, ax = plt.subplots(5, 1, figsize=(6,15))
+
+for i in range(humanityY.shape[1]):
+
+    ax[i].set_xlabel(f'Actual $y_{i+1}$')
+    ax[i].set_ylabel(f'Emulator-predicted $y_{i+1}$')
+    ax[i].set_xlim(np.min(humanityYt[:,i]),np.max(humanityYt[:,i]))
+    ax[i].set_ylim(np.min(humanityYt[:,i]),np.max(humanityYt[:,i]))
+
+    _ = ax[i].plot([np.min(humanityYt[:,i]),np.max(humanityYt[:,i])], [np.min(humanityYt[:,i]),np.max(humanityYt[:,i])])
+    _ = ax[i].errorbar(humanityYt[:,i], predictions[:,i,0], predictions[:,i,3], fmt='.', linestyle='', label='prediction and std')
+    _ = ax[i].legend()
+
+plt.tight_layout()
+
+
+# %% md
+#
+# We can also draw any number of samples at the testing input ``humanityXt`` using
+# the :py:meth:`.PPGaSP.sample` method.
+
+samples = emulator.sample(humanityXt, nsamples=10)
+print("Shape of samples: ", samples.shape)
\ No newline at end of file
diff --git a/docs/examples/emulator/plot_scalargasp.py b/docs/examples/emulator/plot_scalargasp.py
new file mode 100644
index 0000000000000000000000000000000000000000..54a5cbdcca7b28807d5ed86a323255f3c22747c6
--- /dev/null
+++ b/docs/examples/emulator/plot_scalargasp.py
@@ -0,0 +1,125 @@
+"""
+
+ScalarGaSP: GP emulation for a single-output function
+=====================================================
+"""
+
+# %% md
+# 
+# This example shows how to apply Gaussian process emulation to a single-output
+# function using class :class:`.ScalarGaSP`.
+#
+# The task is to build a GP emulator for the function :math:`y = x \sin(x)`
+# based on a small number of training data points.
+
+# %% md
+# 
+# First, import the class :class:`.ScalarGaSP` by
+
+from psimpy.emulator import ScalarGaSP
+
+# %% md
+# 
+# Then, create an instance of :class:`.ScalarGaSP`. The parameter ``ndim``
+# (dimension of function input ``x``) must be specified. Optional parameters, such
+# as ``method``, ``kernel_type``, etc., can be set up if desired. Here, we leave
+# all the optional parameters at their default values.
+
+emulator = ScalarGaSP(ndim=1)
+
+# %% md
+# 
+# Given training input points ``design`` and corresponding output values ``response``,
+# the emulator can be trained using :py:meth:`.ScalarGaSP.train`. Below we train
+# an emulator using :math:`8` selected points.
+
+import numpy as np
+
+def f(x):
+    return x*np.sin(x)
+
+x = np.arange(2,10,1)
+y = f(x)
+
+emulator.train(design=x, response=y)
+
+# %% md
+# 
+# We can validate the performance of the trained emulator using the leave-one-out
+# cross-validation method :py:meth:`.ScalarGaSP.loo_validate`.
+
+validation = emulator.loo_validate()
+
+# %% md
+#
+# Let's plot emulator predictions vs. actual outputs. The error bars indicate the
+# standard deviation.
+
+import matplotlib.pyplot as plt
+
+fig , ax = plt.subplots(figsize=(4,4))
+
+ax.set_xlabel('Actual y')
+ax.set_ylabel('Emulator-predicted y')
+ax.set_xlim(np.min(y)-1,np.max(y)+1)
+ax.set_ylim(np.min(y)-1,np.max(y)+1)
+
+_ = ax.plot([np.min(y)-1,np.max(y)+1], [np.min(y)-1,np.max(y)+1])
+_ = ax.errorbar(y, validation[:,0], validation[:,1], fmt='.', linestyle='', label='prediction and std')
+_ = plt.legend()
+plt.tight_layout()
+
+
+# %% md
+#
+# With the trained emulator at our disposal, we can use the
+# :py:meth:`.ScalarGaSP.predict` method to make predictions at
+# any set of input points (``testing_input``). Note that
+# ``testing_trend`` should be set according to the ``trend`` used during emulator
+# training.
+
+testing_input = np.arange(0,10,0.1)
+predictions = emulator.predict(testing_input)
+
+plt.plot(testing_input, predictions[:, 0], 'r-', label= "mean")
+plt.scatter(x, y, s=15, c='k', label="training data", zorder=3)
+plt.plot(testing_input, f(testing_input), 'k:', zorder=2, label="true function")
+plt.fill_between(testing_input, predictions[:, 1], predictions[:, 2], alpha=0.3, label="95% CI")
+plt.xlabel('x')
+plt.ylabel('emulator-predicted y')
+plt.xlim(testing_input[0], testing_input[-1])
+_ = plt.legend()
+plt.tight_layout()
+
+# %% md
+#
+# We can also draw any number of samples at ``testing_input`` using
+# the :py:meth:`.ScalarGaSP.sample` method.
+
+nsamples = 5
+samples = emulator.sample(testing_input, nsamples=nsamples)
+
+# sphinx_gallery_thumbnail_number = 3
+for i in range(nsamples):
+    plt.plot(testing_input, samples[:,i], '--', label=f'sample{i+1}')
+
+plt.scatter(x, y, s=15, c='k', label="training data", zorder=2)
+plt.plot(testing_input, f(testing_input), 'k:', zorder=2, label="true function")
+plt.fill_between(testing_input, predictions[:, 1], predictions[:, 2], alpha=0.3, label="95% CI")
+plt.xlabel('x')
+plt.ylabel('emulator-predicted y')
+plt.xlim(testing_input[0], testing_input[-1])
+_ = plt.legend()
+plt.tight_layout()
+
+
+# %% md
+#
+# .. tip:: The above example shows how to train a GP emulator on noise-free training data,
+#    which is often the case when emulating a deterministic simulator. If you are dealing
+#    with noisy training data, you can
+#
+#     - set the parameter ``nugget`` to a desired value, or
+#     - set ``nugget`` to :math:`0` and ``nugget_est`` to `True`, meaning that ``nugget``
+#       is estimated from the noisy training data (a sketch follows below).
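+
+# %% md
+# A minimal sketch of the second option (assuming ``nugget`` and ``nugget_est``
+# are passed at instantiation; the noisy data below are made up for illustration):
+
+rng = np.random.default_rng(42)
+x_noisy = np.arange(2, 10, 0.5)
+y_noisy = f(x_noisy) + rng.normal(0, 0.5, len(x_noisy))
+
+# set nugget to 0 and nugget_est to True, so that the nugget is
+# estimated from the noisy training data
+noisy_emulator = ScalarGaSP(ndim=1, nugget=0, nugget_est=True)
+noisy_emulator.train(design=x_noisy, response=y_noisy)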
\ No newline at end of file
diff --git a/docs/examples/inference/README.rst b/docs/examples/inference/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3fe8003427b95ee442278fdd74a3ebd7116ce71c
--- /dev/null
+++ b/docs/examples/inference/README.rst
@@ -0,0 +1,2 @@
+Inference
+=========
\ No newline at end of file
diff --git a/docs/examples/inference/plot_active_learning.py b/docs/examples/inference/plot_active_learning.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f341c0271372194ccffe839a80a02211d703b2c
--- /dev/null
+++ b/docs/examples/inference/plot_active_learning.py
@@ -0,0 +1,127 @@
+"""
+
+Active learning
+===============
+"""
+
+# %% md
+# 
+# This example shows how to use the :class:`.ActiveLearning` class to iteratively
+# build a Gaussian process emulator for an unnormalized posterior involving a
+# simulator. It should be noted that this example is only for illustration
+# purposes, rather than a real case. For simplicity, required arguments for
+# :class:`.ActiveLearning` (including `simulator`, `likelihood`, `data`, etc.) are purely
+# made up. For a realistic case of active learning, one can refer to :cite:t:`Zhao2022`.
+#
+# First, we define the simulator, prior distribution of its variable parameters,
+# observed data, and likelihood function. They basically define the Bayesian
+# inference problem. 
+
+import numpy as np
+
+ndim = 2 # dimension of variable parameters of the simulator
+bounds = np.array([[-5,5],[-5,5]]) # bounds of variable parameters
+data = np.array([1,0])
+
+def simulator(x1, x2):
+    """Simulator y=f(x)."""
+    y1, y2 = x1, x2
+    return np.array([y1, y2])
+
+def prior(x):
+    """Uniform prior."""
+    return 1/(10*10)
+
+def likelihood(y, data):
+    """Likelihood function L(y,data)."""
+    return np.exp(-(y[0]-data[0])**2/100 - (y[0]**2-y[1]-data[1])**2)
+
+# %% md
+#
+# Imagine that the simulator is a complex solver. It is not computationally feasible
+# to compute the posterior distribution of the variable parameters using grid estimation
+# or Metropolis Hastings estimation, because they require evaluating the likelihood
+# many times, which essentially leads to many evaluations of the simulator. Therefore, we
+# resort to active learning to build a Gaussian process emulator for the unnormalized
+# posterior (prior times likelihood) based on a small number of evaluations of the simulator.
+# The posterior can then be estimated using the emulator.
+#
+# To do so, we need to pass arguments to the following parameters of the :class:`.ActiveLearning` class:
+#
+#   - run_sim_obj : instance of class :class:`.RunSimulator`. It carries information on how
+#     to run the simulator.
+#   - lhs_sampler : instance of class :class:`.LHS`. It is used to draw initial samples to run
+#     simulations in order to train an initial Gaussian process emulator.
+#   - scalar_gasp : instance of class :class:`.ScalarGaSP`. It sets up the emulator structure.
+#
+
+from psimpy.simulator import RunSimulator
+from psimpy.sampler import LHS
+from psimpy.emulator import ScalarGaSP
+
+run_simulator = RunSimulator(simulator, var_inp_parameter=['x1','x2'])
+lhs_sampler = LHS(ndim=ndim, bounds=bounds, seed=1)
+scalar_gasp = ScalarGaSP(ndim=ndim)
+
+# %% md
+#
+# Next, we create an object of the :class:`.ActiveLearning` class by
+
+from psimpy.inference import ActiveLearning
+
+active_learner = ActiveLearning(ndim, bounds, data, run_simulator, prior, likelihood,
+    lhs_sampler, scalar_gasp)
+
+# %% md
+#
+# Then we can call the :py:meth:`.ActiveLearning.initial_simulation` method to run initial
+# simulations and the :py:meth:`.ActiveLearning.iterative_emulation` method to
+# iteratively run new simulations and update the emulator. Here we allocate 40 simulations for
+# initial emulator training and 60 simulations for adaptive training.
+#
+n0 = 40
+niter = 60
+
+init_var_samples, init_sim_outputs = active_learner.initial_simulation(
+    n0, mode='parallel', max_workers=4)
+
+var_samples, _, _ = active_learner.iterative_emulation(
+    n0, init_var_samples, init_sim_outputs, niter=niter)
+
+# %% md
+#
+# Once the active learning process is finished, we obtain the final emulator for the
+# logarithm of the unnormalized posterior, which is given by the
+# :py:meth:`.ActiveLearning.approx_ln_pxl` method.
+# 
+# We can then estimate the posterior using grid estimation or Metropolis Hastings
+# estimation based on the emulator. An example is as follows. The contour plot shows
+# the estimated posterior. 
+#
+from psimpy.inference import GridEstimation
+import matplotlib.pyplot as plt
+
+grid_estimator = GridEstimation(ndim, bounds, ln_pxl=active_learner.approx_ln_pxl)
+posterior, x_ndim = grid_estimator.run(nbins=50)
+
+fig, ax = plt.subplots(1,1,figsize=(5,4))
+
+# initial training points
+ax.scatter(init_var_samples[:,0], init_var_samples[:,1], s=10, c='r', marker='o',
+    zorder=1, alpha=0.8, label='initial training points')
+# actively picked training points
+ax.scatter(var_samples[n0:,0], var_samples[n0:,1], s=15, c='k', marker='+',
+    zorder=2, alpha=0.8, label='iterative training points')
+
+# estimated posterior based on the final emulator
+posterior = np.where(posterior < 1e-10, None, posterior)
+contour = ax.contour(x_ndim[0], x_ndim[1], np.transpose(posterior), levels=10, zorder=0)
+plt.colorbar(contour, ax=ax)
+
+ax.legend()
+ax.set_title('Active learning')
+ax.set_xlabel('x1')
+ax.set_ylabel('x2')
+ax.set_xlim([-5,5])
+ax.set_ylim([-5,5])
+plt.tight_layout()
\ No newline at end of file
diff --git a/docs/examples/inference/plot_bayes_inference.py b/docs/examples/inference/plot_bayes_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..855d62b3b816c563958001cced557386b72e5085
--- /dev/null
+++ b/docs/examples/inference/plot_bayes_inference.py
@@ -0,0 +1,116 @@
+"""
+
+Bayesian inference
+==================
+"""
+
+# %% md
+# 
+# This example shows how to perform Bayesian inference given the uniform prior
+#
+# :math:`p(\mathbf{x})=p(x_1,x_2)=0.01`
+# 
+# where :math:`x_i \in [-5,5], i=1,2`, and likelihood
+#
+# :math:`L(\mathbf{x}|\mathbf{d})=\exp \left(-\frac{1}{100}\left(x_1-1\right)^2-\left(x_1^2-x_2\right)^2\right)`.
+#
+
+import numpy as np
+
+ndim = 2
+bounds = np.array([[-5,5],[-5,5]])
+
+def prior(x):
+    return 0.01
+
+def likelihood(x):
+    return np.exp(-(x[0]-1)**2/100 - (x[0]**2-x[1])**2)
+
+# %% md
+#
+# To estimate the posterior using grid estimation, we need to import the
+# :class:`.GridEstimation` class, create an instance, and call the 
+# :py:meth:`.GridEstimation.run` method.
+
+from psimpy.inference.bayes_inference import GridEstimation
+
+grid_estimator = GridEstimation(ndim, bounds, prior, likelihood)
+posterior, x_ndim = grid_estimator.run(nbins=[50,40])
+
+# %% md
+#
+# The following figure plots the estimated posterior.
+# 
+
+import matplotlib.pyplot as plt
+
+fig, ax = plt.subplots(1,1,figsize=(6,4))
+
+# mask insignificant values
+posterior = np.where(posterior < 1e-10, None, posterior)
+
+contour = ax.contour(x_ndim[0], x_ndim[1], np.transpose(posterior), levels=10)
+plt.colorbar(contour, ax=ax)
+ax.set_xlim(bounds[0,0], bounds[0,1])
+ax.set_ylim(bounds[1,0], bounds[1,1])
+ax.set_title('Grid estimation')
+ax.set_xlabel('x1')
+ax.set_ylabel('x2')
+plt.tight_layout()
+
+# %% md
+#
+# To estimate the posterior using Metropolis Hastings estimation, we need to import
+# the :class:`.MetropolisHastingsEstimation` class, create an instance, and call the 
+# :py:meth:`.MetropolisHastingsEstimation.run` method. The 
+# :py:meth:`.MetropolisHastingsEstimation.run` method has a parameter, ``mh_sampler``,
+# which takes an instance of :class:`.MetropolisHastings` as argument.
+
+from psimpy.inference.bayes_inference import MetropolisHastingsEstimation
+
+mh_estimator = MetropolisHastingsEstimation(ndim, bounds, prior, likelihood)
+
+# create a mh_sampler
+from psimpy.sampler.metropolis_hastings import MetropolisHastings
+from scipy.stats import multivariate_normal
+
+init_state = np.array([-4,-4])
+f_sample = multivariate_normal.rvs
+nburn = 100
+nthin = 10
+seed = 1
+kwgs_f_sample = {'random_state': np.random.default_rng(seed)}
+
+mh_sampler = MetropolisHastings(ndim=ndim, init_state=init_state,
+    f_sample=f_sample, bounds=bounds, nburn=nburn, nthin=nthin, seed=seed,
+    kwgs_f_sample=kwgs_f_sample)
+    
+nsamples = 5000
+mh_samples, mh_accept = mh_estimator.run(nsamples, mh_sampler)
+
+
+# %% md
+#
+# The following figure plots the samples drawn from the unnormalized posterior,
+# which can be used to estimate the posterior and its properties.
+# 
+import matplotlib.pyplot as plt
+
+fig, ax = plt.subplots(1,1,figsize=(5,4))
+
+ax.scatter(mh_samples[:,0], mh_samples[:,1], s=10, c='r', marker='o', alpha=0.1)
+ax.set_xlim(bounds[0,0], bounds[0,1])
+ax.set_ylim(bounds[1,0], bounds[1,1])
+ax.set_title('MH estimation')
+ax.set_xlabel('x1')
+ax.set_ylabel('x2')
+plt.tight_layout()
+
+
+# %% md
+# .. note:: Besides ``prior`` and ``likelihood``, one can also instantiate
+#     the :class:`.MetropolisHastingsEstimation` class with
+#
+#      - ``ln_prior`` and ``ln_likelihood``: Natural logarithm of ``prior`` and ``likelihood``.
+#      - ``ln_pxl``: Natural logarithm of the product of ``prior`` and ``likelihood``
+#        (a sketch follows below).
+#
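+# %% md
+# A minimal sketch of the ``ln_pxl`` option (assuming ``ln_pxl`` is accepted as
+# a keyword argument, as for :class:`.GridEstimation` in the active learning
+# example):
+
+def ln_pxl(x):
+    """Natural logarithm of prior(x) times likelihood(x)."""
+    return np.log(0.01) - (x[0]-1)**2/100 - (x[0]**2-x[1])**2
+
+mh_estimator_ln = MetropolisHastingsEstimation(ndim, bounds, ln_pxl=ln_pxl)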
\ No newline at end of file
diff --git a/docs/examples/sampler/README.rst b/docs/examples/sampler/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b69c0b3629a4a6631093fda7be22f22ff521edd5
--- /dev/null
+++ b/docs/examples/sampler/README.rst
@@ -0,0 +1,2 @@
+Sampler
+=======
\ No newline at end of file
diff --git a/docs/examples/sampler/plot_latin.py b/docs/examples/sampler/plot_latin.py
new file mode 100644
index 0000000000000000000000000000000000000000..1faa3cc14599583ca0c5fca117c7ce71f09a3604
--- /dev/null
+++ b/docs/examples/sampler/plot_latin.py
@@ -0,0 +1,91 @@
+"""
+
+Latin hypercube sampling
+========================
+"""
+
+# %% md
+# 
+# This example shows how to draw samples using Latin hypercube sampling.
+#
+# 
+
+# %% md
+# 
+# For illustration purposes, let's have a look at a two-dimensional example
+# where we have two random variables X and Y. Each is uniformly distributed in its
+# range.
+
+import numpy as np
+
+ndim = 2
+# range of X is 10 to 20, range of Y is -10 to 0
+bounds = np.array([[10,20], [-10,0]])
+
+# %% md
+#
+# Given this setting, we can import :class:`.LHS`, create an instance, and
+# call the :py:meth:`.LHS.sample` method to draw the required number of samples.
+
+from psimpy.sampler import LHS
+
+# setting a seed leads to the same samples every time the code is run
+lhs_sampler = LHS(ndim, bounds, seed=10)
+lhs_samples = lhs_sampler.sample(nsamples=5)
+
+# %% md
+#
+# The samples are plotted in the following figure.
+
+import matplotlib.pyplot as plt
+
+fig , ax = plt.subplots(figsize=(6,4))
+
+ax.scatter(lhs_samples[:,0], lhs_samples[:,1], s=10, c='blue', marker='o')
+
+ax.set_xlabel('X')
+ax.set_ylabel('Y')
+ax.set_xlim(bounds[0])
+ax.set_ylim(bounds[1])
+ax.set_title("Latin hypercube samples (criterion='random')")
+_ = ax.grid(visible=True, which='major', axis='both')
+
+
+# %% md
+#
+# There are different criteria to pick samples in each hypercube. The default
+# is `random`, as used above. Other options are `center` and `maximin`. For instance,
+# we can use the `center` criterion to draw :math:`5` samples as follows:
+
+lhs_sampler = LHS(ndim, bounds, criterion='center', seed=10)
+lhs_samples = lhs_sampler.sample(nsamples=5)
+
+fig , ax = plt.subplots(figsize=(6,4))
+
+ax.scatter(lhs_samples[:,0], lhs_samples[:,1], s=10, c='blue', marker='o')
+
+ax.set_xlabel('X')
+ax.set_ylabel('Y')
+ax.set_xlim(bounds[0])
+ax.set_ylim(bounds[1])
+ax.set_title("Latin hypercube samples (criterion='center')")
+_ = ax.grid(visible=True, which='major', axis='both')
+
+
+# %% md
+#
+# And we can use the `maximin` criterion as follows:
+
+lhs_sampler = LHS(ndim, bounds, criterion='maximin', seed=10, iteration=500)
+lhs_samples = lhs_sampler.sample(nsamples=5)
+
+fig , ax = plt.subplots(figsize=(6,4))
+
+ax.scatter(lhs_samples[:,0], lhs_samples[:,1], s=10, c='blue', marker='o')
+
+ax.set_xlabel('X')
+ax.set_ylabel('Y')
+ax.set_xlim(bounds[0])
+ax.set_ylim(bounds[1])
+ax.set_title("Latin hypercube samples (criterion='maximin')")
+_ = ax.grid(visible=True, which='major', axis='both')
\ No newline at end of file
diff --git a/docs/examples/sampler/plot_metropolis_hastings.py b/docs/examples/sampler/plot_metropolis_hastings.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3654ff6da7bfa1e56598b9baf7058c06f503489
--- /dev/null
+++ b/docs/examples/sampler/plot_metropolis_hastings.py
@@ -0,0 +1,116 @@
+"""
+
+Metropolis Hastings sampling
+============================
+"""
+
+# %% md
+# 
+# This example shows how to draw samples using Metropolis Hastings sampling.
+# The target probability distribution is
+#
+# :math:`p(\mathbf{x})=p(x_1, x_2) \propto \exp \left(-\frac{1}{100}\left(x_1-1\right)^2-\left(x_1^2-x_2\right)^2\right)`
+#
+# where :math:`x_1 \in [-5,5]` and :math:`x_2 \in [-5,5]`.
+#
+# It should be noted that the right hand side of the equation is an unnormalized
+# probability density function since its integral is not equal to :math:`1`.
+# This can happen, for example, when the normalization constant is unknown or difficult
+# to compute.
+# 
+# We can define the target probability distribution in Python as follows:
+# 
+
+import numpy as np
+
+def target_dist(x):
+    if (x[0]>=-5 and x[0]<=5) and (x[1]>=-5 and x[1]<=5):
+        return np.exp(-0.01*(x[0]-1)**2 - (x[0]**2-x[1])**2)
+    else:
+        return 0
+
+# %% md
+# 
+# The figure below shows what the target distribution looks like.
+
+import matplotlib.pyplot as plt
+import itertools
+
+x1_values = np.linspace(-5, 5, 100)
+x2_values = np.linspace(-5, 5, 100)
+
+target_values = np.zeros((100, 100))
+for i, j in itertools.product(range(100), range(100)):
+    x1 = x1_values[i]
+    x2 = x2_values[j]
+    target_values[i,j] = target_dist(np.array([x1, x2]))
+
+fig , ax = plt.subplots(figsize=(4,4))
+
+ax.contourf(x1_values, x2_values, np.transpose(target_values), levels=10, cmap='Blues')
+
+ax.set_title('(unnormalized) target distribution')
+ax.set_xlabel(r'$x_1$')
+ax.set_ylabel(r'$x_2$')
+ax.set_xlim(-5,5)
+ax.set_ylim(-5,5)
+plt.tight_layout()
+
+
+# %% md
+# 
+# To perform Metropolis Hastings sampling, we need to choose a proposal distribution
+# which determines a candidate new state ``x'`` given a current state ``x`` at
+# each iteration. This is defined by the parameter ``f_sample``, which should be a
+# function. A usual choice is to draw the new state from a Gaussian distribution
+# centered at the current state.
+#
+
+from scipy.stats import multivariate_normal
+
+f_sample = multivariate_normal.rvs
+# make the samples reproducible
+kwgs_f_sample = {'random_state': np.random.default_rng(seed=1)}
+
+# %% md
+# 
+# Then, we create an instance of the :class:`.MetropolisHastings` class.
+
+from psimpy.sampler.metropolis_hastings import MetropolisHastings
+
+mh_sampler = MetropolisHastings(ndim=2, init_state=np.array([-4,-4]),
+    f_sample=f_sample, target=target_dist, nburn=1000, nthin=5,
+    seed=1, kwgs_f_sample=kwgs_f_sample)
+
+# %% md
+# 
+# Next, we call the :py:meth:`.MetropolisHastings.sample` method to draw the
+# required number of samples.
+
+mh_samples, mh_accept = mh_sampler.sample(nsamples=5000)
+
+print("Acceptance ratio: ", np.mean(mh_accept))
+
+
+# %% md
+# 
+# The following figure shows what the samples look like.
+
+import matplotlib.pyplot as plt
+import itertools
+
+# sphinx_gallery_thumbnail_number = 2
+fig , ax = plt.subplots(figsize=(4,4))
+
+ax.contourf(x1_values, x2_values, np.transpose(target_values), levels=10, cmap='Blues')
+ax.scatter(mh_samples[:,0], mh_samples[:,1], s=5, c='r', marker='o',alpha=0.05)
+
+ax.set_title('Metropolis Hastings samples')
+ax.set_xlabel(r'$x_1$')
+ax.set_ylabel(r'$x_2$')
+ax.set_xlim(-5,5)
+ax.set_ylim(-5,5)
+plt.tight_layout()
+
+
+
diff --git a/docs/examples/sampler/plot_saltelli.py b/docs/examples/sampler/plot_saltelli.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcc50232d49b189a18e939db74836de87abdfcc5
--- /dev/null
+++ b/docs/examples/sampler/plot_saltelli.py
@@ -0,0 +1,82 @@
+"""
+
+Saltelli sampling
+=================
+"""
+
+# %% md
+# 
+# This example shows how to draw samples using Saltelli sampling. 
+# Assume that there is a three-dimensional problem where X, Y, and Z are the
+# three random variables.
+
+import numpy as np
+
+ndim = 3
+# range of X is 10 to 20, range of Y is 100 to 200, range of Z is 1000 to 2000
+bounds = np.array([[10,20], [100,200], [1000,2000]])
+
+# %% md
+#
+# Given this setting, we can import :class:`.Saltelli`, create an instance, and
+# call the :py:meth:`.Saltelli.sample` method to draw the required number of samples.
+
+from psimpy.sampler import Saltelli
+
+saltelli_sampler = Saltelli(ndim, bounds, calc_second_order=False)
+saltelli_samples = saltelli_sampler.sample(nbase=128)
+
+# %% md
+#
+# In the code above, we set ``calc_second_order`` to `False`. This means that the
+# samples can be used in a subsequent Sobol' analysis to compute first-order and
+# total-effect Sobol' indices, but not second-order Sobol' indices. It leads to
+# :math:`nbase \times (ndim+2)` samples, as shown below.
+
+import matplotlib.pyplot as plt
+
+fig = plt.figure()
+ax = fig.add_subplot(projection='3d')
+
+ax.scatter(saltelli_samples[:,0], saltelli_samples[:,1], saltelli_samples[:,2], marker='o')
+ax.set_xlabel('X')
+ax.set_ylabel('Y')
+ax.set_zlabel('Z')
+
+plt.tight_layout()
+
+print('Number of samples: ', f'{len(saltelli_samples)}')
+
+
+# %% md
+#
+# If we want to draw samples which can also be used to compute second-order
+# Sobol' indices, we need to set ``calc_second_order`` to `True`.
+# It leads to :math:`nbase \times (2 \cdot ndim + 2)` samples.
+
+saltelli_sampler = Saltelli(ndim, bounds, calc_second_order=True)
+saltelli_samples = saltelli_sampler.sample(nbase=128)
+
+fig = plt.figure()
+ax = fig.add_subplot(projection='3d')
+
+ax.scatter(saltelli_samples[:,0], saltelli_samples[:,1], saltelli_samples[:,2], marker='o')
+ax.set_xlabel('X')
+ax.set_ylabel('Y')
+ax.set_zlabel('Z')
+
+plt.tight_layout()
+
+print('Number of samples: ', f'{len(saltelli_samples)}')
+
+
+# %% md
+# .. note:: If one has a two-dimensional problem, there is no need to set
+#     ``calc_second_order`` to `True`. The reason is that the second-order Sobol'
+#     index can be directly computed from the first-order and total-effect
+#     Sobol' indices in that case, as shown below.
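+
+# %% md
+# To see why: for two (independent) random variables, the variance of the output
+# decomposes as :math:`V(y) = V_1 + V_2 + V_{12}`, so the total-effect index of
+# the first variable is :math:`S_{T_1} = S_1 + S_{12}` and hence
+#
+# :math:`S_{12} = S_{T_1} - S_1`.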
+
+
+
+
+
diff --git a/docs/examples/sensitivity/README.rst b/docs/examples/sensitivity/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7ee2e76e78ccc6fb0cfc13c29aaa1d903364ef8b
--- /dev/null
+++ b/docs/examples/sensitivity/README.rst
@@ -0,0 +1,2 @@
+Sensitivity
+===========
\ No newline at end of file
diff --git a/docs/examples/sensitivity/plot_sobol_analyze.py b/docs/examples/sensitivity/plot_sobol_analyze.py
new file mode 100644
index 0000000000000000000000000000000000000000..87bbad8e0828257c19bd75f31b31fd7040f4694a
--- /dev/null
+++ b/docs/examples/sensitivity/plot_sobol_analyze.py
@@ -0,0 +1,213 @@
+"""
+
+Sobol' analysis
+===============
+"""
+
+# %% md
+# 
+# This example shows how to compute Sobol' sensitivity indices using
+# the :class:`.SobolAnalyze` class. 
+#
+# Let us consider the Ishigami function :cite:p:`LeGratiet2014`
+# 
+# :math:`y=f\left(x_1, x_2, x_3\right)=\sin \left(x_1\right)+7 \sin \left(x_2\right)^2+0.1 x_3^4 \sin \left(x_1\right)`
+#
+# where :math:`x_i` is uniformly distributed on :math:`[-\pi, \pi]` for :math:`i = 1, 2, 3`. We define the function as follows:
+
+import numpy as np
+
+def f(x1,x2,x3):
+    return np.sin(x1) + 7*np.sin(x2)**2 + 0.1*x3**4*np.sin(x1)
+
+
+# %% md
+# 
+# The variation of each :math:`x_i` leads to the variation of :math:`y`. Sobol' analysis is used to quantify the contribution
+# of each :math:`x_i` and their interactions to the variation of :math:`y`. Steps of a Sobol' analysis are:
+#
+#  #. Draw samples of input.
+#  #. Evaluate the model at each input sample to obtain model outputs.
+#  #. Compute Sobol' indices based on model outputs.
+# 
+
+from psimpy.sampler import Saltelli
+from psimpy.sensitivity import SobolAnalyze
+
+ndim = 3
+bounds = np.array([[-np.pi, np.pi], [-np.pi, np.pi], [-np.pi, np.pi]])
+calc_second_order = False
+nbase = 1024
+
+# draw Saltelli samples
+saltelli_sampler = Saltelli(ndim, bounds, calc_second_order)
+saltelli_samples = saltelli_sampler.sample(nbase)
+
+# Evaluate the model to obtain model outputs
+Y = f(saltelli_samples[:,0], saltelli_samples[:,1], saltelli_samples[:, 2])
+
+# Compute Sobol' indices
+sobol_analyzer = SobolAnalyze(ndim, Y, calc_second_order, seed=2)
+S_res = sobol_analyzer.run()
+
+print("Estimated first-order Sobol' index: ", S_res['S1'][:,0])
+print("Analytical solution: [0.314, 0.442, 0]")
+
+
+# %% md
+# 
+# The figure below shows the results. The bar plots represent the estimated first-order
+# and total-effect Sobol' indices, as well as their 95% confidence intervals. The
+# red stars represent the true values of the first-order Sobol' indices.
+#
+
+import matplotlib.pyplot as plt
+
+name_vars = [r'$x_1$',r'$x_2$',r'$x_3$']
+x = {'S1':[1,4,7], 'ST':[2,5,8]}
+color = {'S1':'#69b3a2', 'ST':'#3399e6'}
+width=0.8
+capsize=3
+
+fig, ax1 = plt.subplots(figsize=(4,3))
+
+# estimated first-order Sobol' indices and their confidence interval
+ax1.bar(x['S1'], S_res['S1'][:,0], width=width, yerr=S_res['S1'][:,2], color=color['S1'], capsize=capsize)
+
+ax1.axis([0,9,0,0.7])
+ax1.set_xticks([1.5,4.5,7.5])
+ax1.set_xticklabels(name_vars)
+ax1.tick_params(axis='both',bottom=False)
+ax1.tick_params(axis="y", labelcolor=color['S1'])
+ax1.yaxis.grid(True, linestyle='--', which='major', color='grey', alpha=.25)
+ax1.set_ylabel('First-order index', color=color['S1'])
+
+# analytical solution for first-order indices
+ax1.scatter(x['S1'], [0.314, 0.442, 0], s=30, c='red', marker='*')
+
+
+# estimated total-effect Sobol' indices and their confidence interval
+ax2 = ax1.twinx()
+ax2.bar(x['ST'], S_res['ST'][:,0], width=width, yerr=S_res['ST'][:,2], color=color['ST'], capsize=capsize)
+
+ax2.axis([0,9,0,0.7])
+ax2.set_xticks([1.5,4.5,7.5])
+ax2.tick_params(axis='both',bottom=False)
+ax2.tick_params(axis="y", labelcolor=color['ST'])
+ax2.yaxis.grid(True, linestyle='--', which='major', color='grey', alpha=.25)
+ax2.set_ylabel('Total-effect index', color=color['ST'])
+
+plt.tight_layout()
+
+
+# %% md
+# .. note:: The interaction between one parameter and all other parameters can be
+#     quantified by the difference between its first-order and total-effect Sobol'
+#     indices, as computed below. If detailed information on the interaction between
+#     any two parameters is needed, one can set ``calc_second_order`` to `True`.
+#  
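+
+# %% md
+# For instance, using the ``S_res`` dictionary obtained above:
+
+# estimated interaction of each parameter with all the others
+interaction = S_res['ST'][:,0] - S_res['S1'][:,0]
+print("Estimated interaction effects: ", interaction)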
+
+
+# %% md
+#
+# **Many models of interest in real-world applications are very computationally
+# expensive. In that case, we resort to emulation techniques in order to efficiently
+# conduct Sobol' analysis.** See :cite:t:`Zhao2021a` for the detailed theory.
+# 
+# Steps of such an analysis are:
+# 
+#  #. Draw input samples for emulator training.
+#  #. Evaluate the model at each training input point to obtain training data.
+#  #. Train an emulator for the computationally expensive model of interest.
+#  #. Draw samples of input for Sobol' analysis.
+#  #. Evaluate the emulator at each input sample from step 4 to obtain approximated model outputs.
+#  #. Compute Sobol' indices based on approximated model outputs.
+#
+# Assuming that the above Ishigami function is computationally expensive, an emulator-based
+# Sobol' analysis can be conducted as follows:
+#
+
+import numpy as np
+from psimpy.sampler import LHS
+from psimpy.emulator import ScalarGaSP
+from psimpy.sampler import Saltelli
+from psimpy.sensitivity import SobolAnalyze
+
+# define the model
+def f(x1,x2,x3):
+    return np.sin(x1) + 7*np.sin(x2)**2 + 0.1*x3**4*np.sin(x1)
+
+ndim = 3
+bounds = np.array([[-np.pi, np.pi], [-np.pi, np.pi], [-np.pi, np.pi]])
+seed = 2
+
+# draw 100 input samples for emulator training using Latin hypercube sampling
+lhs_sampler = LHS(ndim, bounds, seed, criterion='maximin', iteration=200)
+design = lhs_sampler.sample(nsamples=100)
+
+# evaluate the model at each training input point
+response = f(design[:,0], design[:,1], design[:,2])
+
+# train an emulator
+scalar_gasp = ScalarGaSP(ndim)
+scalar_gasp.train(design, response)
+
+# the trained emulator needs to be validated before moving on
+# here we leave out this step for simplicity...
+
+# draw samples of input for Sobol' analysis
+calc_second_order = False
+nbase = 1024
+
+saltelli_sampler = Saltelli(ndim, bounds, calc_second_order)
+saltelli_samples = saltelli_sampler.sample(nbase)
+
+# evaluate the emulator to obtain approximated model output.
+# in order to take emulator-induced uncertainty into account, we draw 50 realizations
+# from the emulator
+Y = scalar_gasp.sample(saltelli_samples, 50)
+
+# Compute Sobol' indices based on approximated model outputs
+sobol_analyzer = SobolAnalyze(ndim, Y, calc_second_order, seed=seed)
+S_res = sobol_analyzer.run(mode='parallel', max_workers=5)
+
+print("\n")
+print("Emulator-based first-order Sobol' index: ", S_res['S1'][:,0])
+print("Analytical solution: [0.314, 0.442, 0]")
+
+# %% md
+# The following figure shows the results of the emulator-based Sobol' analysis.
+#
+
+import matplotlib.pyplot as plt
+
+name_vars = [r'$x_1$',r'$x_2$',r'$x_3$']
+x = {'S1':[1,4,7], 'ST':[2,5,8]}
+color = {'S1':'#69b3a2', 'ST':'#3399e6'}
+width=0.8
+capsize=3
+
+fig, ax1 = plt.subplots(figsize=(4,3))
+
+ax1.bar(x['S1'], S_res['S1'][:,0], width=width, yerr=S_res['S1'][:,2], color=color['S1'], capsize=capsize)
+ax1.axis([0,9,0,0.7])
+ax1.set_xticks([1.5,4.5,7.5])
+ax1.set_xticklabels(name_vars)
+ax1.tick_params(axis='both',bottom=False)
+ax1.tick_params(axis="y", labelcolor=color['S1'])
+ax1.yaxis.grid(True, linestyle='--', which='major', color='grey', alpha=.25)
+ax1.set_ylabel('First-order index', color=color['S1'])
+
+ax1.scatter(x['S1'], [0.314, 0.442, 0], s=30, c='red', marker='*')
+
+ax2 = ax1.twinx()
+ax2.bar(x['ST'], S_res['ST'][:,0], width=width, yerr=S_res['ST'][:,2], color=color['ST'], capsize=capsize)
+ax2.axis([0,9,0,0.7])
+ax2.set_xticks([1.5,4.5,7.5])
+ax2.tick_params(axis='both',bottom=False)
+ax2.tick_params(axis="y", labelcolor=color['ST'])
+ax2.yaxis.grid(True, linestyle='--', which='major', color='grey', alpha=.25)
+ax2.set_ylabel('Total-effect index', color=color['ST'])
+
+plt.tight_layout()
\ No newline at end of file
diff --git a/docs/examples/simulator/README.rst b/docs/examples/simulator/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..893145eb4329d72764a14a0e4245c7bfd80c2787
--- /dev/null
+++ b/docs/examples/simulator/README.rst
@@ -0,0 +1,2 @@
+Simulator
+=========
diff --git a/docs/examples/simulator/plot_mass_point_model.py b/docs/examples/simulator/plot_mass_point_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b942fec7d3e3f487812db6ddc6cee85478296bf
--- /dev/null
+++ b/docs/examples/simulator/plot_mass_point_model.py
@@ -0,0 +1,128 @@
+"""
+
+Mass Point Model
+================
+"""
+
+# %% md
+# 
+# This example shows how to simulate the movement of a masspoint on a topography
+# using :class:`.MassPointModel`.
+#
+# 
+
+# %% md
+# 
+# First, import the class :class:`.MassPointModel` and create an instance by
+
+from psimpy.simulator import MassPointModel
+
+mpm = MassPointModel()
+
+# %% md
+# 
+# Required inputs for a simulation using :class:`.MassPointModel` include:
+#     1. topographic data: digital elevation model (in `ESRI ascii` format)
+#     2. friction coefficients: coulomb friction and turbulent friction coefficients
+#     3. initial state: initial location and initial velocity of the masspoint
+#     4. computational parameters: such as time step, end time, etc.
+#
+# The synthetic topography ``synthetic_topo.asc`` is used here for illustration.
+# It is located in the `tests/data/` folder.
+
+import os
+import linecache
+import numpy as np
+
+dir_data = os.path.abspath('../../../tests/data/')
+elevation = os.path.join(dir_data, 'synthetic_topo.asc')
+
+
+# %% md
+# .. note:: You may need to modify ``dir_data`` according to where you save
+#    ``synthetic_topo.asc`` on your local machine.
+
+# %% md
+# We can load the elevation data and visualize it. The figure below shows the
+# topography, as well as the initial location of the masspoint
+# (marked by the red dot).
+
+header = [linecache.getline(elevation, i) for i in range(1,6)]
+header_values = [float(h.split()[-1].strip()) for h in header]
+ncols, nrows, xll, yll, cellsize = header_values
+ncols = int(ncols)
+nrows = int(nrows)
+
+x_values = np.arange(xll, xll+(cellsize*ncols), cellsize)
+y_values = np.arange(yll, yll+(cellsize*nrows), cellsize)
+        
+z_values = np.loadtxt(elevation, skiprows=6)
+z_values = np.rot90(np.transpose(z_values))
+
+# initial location
+x0 = 200
+y0 = 2000
+z0 = z_values[0, int(x0/cellsize)]
+
+import matplotlib.pyplot as plt
+
+fig, ax = plt.subplots(2, 1, figsize=(10,6), height_ratios=[3,2])
+
+fig0 = ax[0].contourf(x_values, y_values, z_values, levels=20)
+ax[0].set_xlabel('x')
+ax[0].set_ylabel('y')
+ax[0].set_title('synthetic topography')
+ax[0].scatter(x0, y0, s=10, c='r', marker='o')
+cbar = plt.colorbar(fig0, ax=ax[0], format='%d', orientation='horizontal',
+    fraction=0.1, pad=0.2)
+cbar.ax.set_ylabel('z')
+
+
+ax[1].plot(x_values, z_values[0, :])
+ax[1].scatter(x0, z0, s=10, c='r', marker='o')
+ax[1].set_xlabel('x')
+ax[1].set_ylabel('z')
+ax[1].set_xlim(0, 5000)
+ax[1].set_title('cross section at any y')
+
+plt.tight_layout()
+
+# %% md
+# We set the friction coefficients as
+
+mu = 0.15
+xi = 1000
+
+# %% md
+# Given the above topography, initial location, and friction coefficients, we can
+# call the :py:meth:`.MassPointModel.run` method to perform a simulation. Other
+# parameters are left at their default values. (We suppress warnings raised
+# during the simulation.)
+
+import warnings
+
+warnings.filterwarnings("ignore")
+output = mpm.run(elevation=elevation, coulomb_friction=mu, turbulent_friction=xi, x0=x0, y0=y0)
+
+# %% md
+# The simulation returns the time history of the mass point's location and velocity.
+# The following plots show the simulation results.
+
+fig, ax = plt.subplots(3, 1, figsize=(10,6))
+
+ax[0].set_xlabel('time (s)')
+ax[0].set_ylabel('x')
+ax[0].set_title("x-t plot")
+ax[0].plot(output[:,0], output[:,1])
+
+ax[1].set_xlabel('time (s)')
+ax[1].set_ylabel('velocity')
+ax[1].set_title("v-t plot")
+ax[1].plot(output[:,0], output[:,5])
+
+ax[2].set_xlabel('x')
+ax[2].set_ylabel('velocity')
+ax[2].set_title("x-v plot")
+ax[2].plot(output[:,1], output[:,5])
+
+plt.tight_layout()
diff --git a/docs/examples/simulator/plot_ravaflow24.py b/docs/examples/simulator/plot_ravaflow24.py
new file mode 100644
index 0000000000000000000000000000000000000000..b73fec3db422369063a360c7219b7075e070b035
--- /dev/null
+++ b/docs/examples/simulator/plot_ravaflow24.py
@@ -0,0 +1,150 @@
+"""
+
+Ravaflow24 Mixture Model
+========================
+"""
+
+# %% md
+# 
+# This example shows how to simulate mass flow on a topography using
+# :class:`.Ravaflow24Mixture`.
+#
+#
+
+# %% md
+# 
+# First, import the class :class:`.Ravaflow24Mixture` by
+
+from psimpy.simulator import Ravaflow24Mixture
+
+# %% md
+# 
+# To create an instance of this class, we must specify the parameter ``dir_sim``.
+# It represents the directory in which output files generated by r.avaflow will be
+# saved.
+
+import os
+
+# Here we create a folder called `temp_Ravaflow24Mixture_example` to save output
+# files generated by r.avaflow
+cwd = os.getcwd()
+if not os.path.exists('temp_Ravaflow24Mixture_example'):
+    os.mkdir('temp_Ravaflow24Mixture_example')
+dir_sim = os.path.join(cwd, 'temp_Ravaflow24Mixture_example')
+
+# %% md
+# 
+# Now, we can create an instance of :class:`.Ravaflow24Mixture` by providing
+# ``dir_sim`` and leaving other parameters at their default values.
+# (Other parameters include ``time_step``, ``time_end``, ``curvature_control``,
+# ``entrainment_control``, ``stopping_control``, etc.)
+
+voellmy_model = Ravaflow24Mixture(dir_sim)
+
+# %% md
+#
+# To run a simulation using the above `voellmy_model`, one needs to specify
+#     1. ``elevation`` – Name of elevation raster file (including its path).
+#     2. ``hrelease`` – Name of release height raster file (including its path).
+#     3. ``prefix`` – Prefix required by r.avaflow to name output files.
+#     4. If ``elevation`` is not a georeferenced file which can be used to create
+#        a `GRASS Location`, ``EPSG`` must be provided.
+#
+# Other optional parameters include ``internal_friction``, ``basal_friction``,
+# ``turbulent_friction``, ``entrainment_coef`` etc.
+#
+# The synthetic topography ``synthetic_topo.tif`` and release mass ``synthetic_rel.tif``
+# are used here for illustration. They are located in the `tests/data/` folder.
+
+dir_data = os.path.abspath('../../../tests/data/')
+elevation = os.path.join(dir_data, 'synthetic_topo.tif')
+hrelease = os.path.join(dir_data, 'synthetic_rel.tif')
+prefix = 'synthetic'
+
+# %% md
+#
+# .. note:: You may need to modify ``dir_data`` according to where you save them
+#    on your local machine.
+
+# %% md
+# 
+# Given above inputs, we can create a `GRASS Location` and a shell file by calling
+# :py:meth:`.Ravaflow24Mixture.preprocess` method, and run the simulation using
+# :py:meth:`.Ravaflow24Mixture.run` method
+
+grass_location, sh_file = voellmy_model.preprocess(
+    prefix=prefix, elevation=elevation, hrelease=hrelease, EPSG='2326')
+voellmy_model.run(grass_location, sh_file)
+
+# %% md
+# 
+# Once the simulation is finished, the results are located in the folder
+# `/dir_sim/your_prefix_results`. In this case, they are located in
+# `/temp_Ravaflow24Mixture_example/synthetic_results`. We can extract desired
+# outputs using the respective methods and visualize them.
+# For example, we can extract the overall impact area, maximum flow velocity, or
+# maximum flow velocity at specific locations:
+
+import numpy as np
+
+# overall impact area
+impact_area = voellmy_model.extract_impact_area(prefix)
+print(f"Impact_area is {impact_area} m^2")
+
+# overall maximum flow velocity
+v_mmax = voellmy_model.extract_qoi_max(prefix, 'v', aggregate=True)
+print(f"Maximum flow velocity is {v_mmax} m/s^2")
+
+# maximum flow velocity at specific locations
+loc = np.array([[500, 2000], [1500, 2000]])
+v_max_loc = voellmy_model.extract_qoi_max_loc(prefix, loc, 'v')
+print(f"Maximum flow velocity at location {loc[0]} is {v_max_loc[0]} m/s^2")
+print(f"Maximum flow velocity at location {loc[1]} is {v_max_loc[1]} m/s^2")
+
+# %% md
+# 
+# We can visualize the point-wise maximum flow height and velocity using heatmaps:
+
+import matplotlib.pyplot as plt
+import linecache
+
+# extract header information of result ascii files and compute x and y coordinates
+hmax_asc = os.path.join(dir_sim, f'{prefix}_results', f'{prefix}_ascii', f'{prefix}_hflow_max.asc')
+        
+header = [linecache.getline(hmax_asc, i) for i in range(1,6)]
+header_values = [float(h.split()[-1].strip()) for h in header]
+ncols, nrows, xll, yll, cellsize = header_values
+ncols = int(ncols)
+nrows = int(nrows)
+
+x = np.arange(xll, xll+(cellsize*ncols), cellsize)
+y = np.arange(yll, yll+(cellsize*nrows), cellsize)
+
+# point-wise maximum flow height
+h_max = voellmy_model.extract_qoi_max(prefix, 'h', aggregate=False)
+# point-wise maximum flow velocity
+v_max = voellmy_model.extract_qoi_max(prefix, 'v', aggregate=False)
+
+fig, ax = plt.subplots(2, 1, figsize=(10,6))
+
+fig0 = ax[0].contourf(x, y, h_max, levels=20)
+ax[0].set_xlabel('x')
+ax[0].set_ylabel('y')
+ax[0].set_title('maximum flow height' + r' $(m)$')
+cbar0 = plt.colorbar(fig0, ax=ax[0], format='%.1f', orientation='vertical', fraction=0.1)
+
+fig1 = ax[1].contourf(x, y, v_max, levels=20)
+ax[1].set_xlabel('x')
+ax[1].set_ylabel('y')
+ax[1].set_title('maximum flow velocity' + r' $(m/s)$')
+cbar1 = plt.colorbar(fig1, ax=ax[1], format='%.1f', orientation='vertical', fraction=0.1)
+
+plt.tight_layout()
+
+# %% md
+# 
+# Here we delete the folder `temp_Ravaflow24Mixture_example` and all files therein.
+
+import shutil
+
+shutil.rmtree(dir_sim)
diff --git a/docs/examples/simulator/plot_run_mass_point_model.py b/docs/examples/simulator/plot_run_mass_point_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b073cf51ddadad866c4a2c635f596ea486a25a8
--- /dev/null
+++ b/docs/examples/simulator/plot_run_mass_point_model.py
@@ -0,0 +1,184 @@
+"""
+
+RunSimulator: Mass Point Model
+==============================
+"""
+
+# %% md
+# 
+# This example shows how to run multiple MassPointModel simulations in serial
+# and in parallel using :class:`.RunSimulator`.
+#
+#
+
+
+# %% md
+# 
+# First, we import the class :class:`.MassPointModel` and define a `simulator`
+# based on :py:meth:`.MassPointModel.run` method.
+
+from psimpy.simulator import MassPointModel
+
+mpm = MassPointModel()
+mpm_simulator = mpm.run
+
+
+# %% md
+# 
+# This simulator takes required input data, solves the mass point model, and
+# returns time history of the mass point's location and velocity
+# (see :py:meth:`.MassPointModel.run`).
+#
+# Required inputs for `mpm_simulator` include:
+#     1. topographic data: digital elevation model (in `ESRI ascii` format)
+#     2. friction coefficients: coulomb friction and turbulent friction coefficients
+#     3. initial state: initial location and initial velocity of the masspoint
+#     4. computational parameters: such as time step, end time, etc.
+#
+# The synthetic topography ``synthetic_topo.asc`` is used here for illustration.
+# It is located in the `tests/data/` folder.
+
+import os
+
+dir_data = os.path.abspath('../../../tests/data/')
+elevation = os.path.join(dir_data, 'synthetic_topo.asc')
+
+# %% md
+# .. note:: You may need to modify ``dir_data`` according to where you save
+#    ``synthetic_topo.asc`` on your local machine.
+
+# %% md
+# 
+# For this example, we are going to run multiple simulations at different values
+# of the coulomb friction coefficient (``coulomb_friction``) and the turbulent friction
+# coefficient (``turbulent_friction``) while keeping other inputs fixed. Namely,
+# ``coulomb_friction`` and ``turbulent_friction`` are variable input parameters.
+# All other inputs are fixed input parameters.
+#
+
+import numpy as np
+import itertools
+
+# Variable input parameters are defined as a list of strings. Their values will be
+# passed as a numpy array (var_samples) when running simulations.
+var_inp_parameter = ['coulomb_friction', 'turbulent_friction']
+coulomb_friction = np.arange(0.1, 0.31, 0.1)
+turbulent_friction = np.arange(500, 2001, 400)
+var_samples = np.array(
+    [x for x in itertools.product(coulomb_friction, turbulent_friction)])
+print("Number of variable input samples are: ", len(var_samples))
+
+# Fixed input parameters are defined as a dictionary. Their values are given.
+fix_inp = {'elevation': elevation, 'x0': 200, 'y0': 2000, 'tend': 50}
+
+# %% md
+# .. note:: Parameters of `mpm_simulator` which are not included in
+#    ``var_inp_parameter`` and ``fix_inp``, such as ``ux0``, ``uy0`` and
+#    ``curvature``, will automatically take their default values.
+
+# %% md
+# 
+# We may want to save outputs returned by `mpm_simulator` at each simulation for
+# later inspection or processing. In that case, we need to define ``dir_out`` and
+# set ``save_out`` as `True`.
+
+import os
+
+# Here we create a folder called `temp_run_MassPointModel_example` to save outputs
+# returned at each simulation.
+cwd = os.getcwd()
+if not os.path.exists('temp_run_MassPointModel_example'):
+    os.mkdir('temp_run_MassPointModel_example')
+dir_out = os.path.join(cwd, 'temp_run_MassPointModel_example')
+
+
+# %% md
+# 
+# Now we can define an object of :class:`.RunSimulator` by
+
+from psimpy.simulator import RunSimulator
+
+run_mpm_simulator = RunSimulator(mpm_simulator, var_inp_parameter, fix_inp,
+    dir_out=dir_out, save_out=True)
+
+# %% md
+# .. note:: Since outputs of each simulation are saved in the same folder ``dir_out``,
+#    we need to give each file a unique name in order to avoid conflict. This is
+#    realized by defining ``prefixes``.
+
+serial_prefixes = ["serial"+str(i) for i in range(len(var_samples))]
+parallel_prefixes = ["parallel"+str(i) for i in range(len(var_samples))]
+
+
+# %% md
+# 
+# Using the :py:meth:`.RunSimulator.serial_run` method or the
+# :py:meth:`.RunSimulator.parallel_run` method, we can run the simulations
+# in serial or in parallel.
+
+import time
+
+start = time.time()
+run_mpm_simulator.serial_run(var_samples=var_samples, prefixes=serial_prefixes)
+serial_time = time.time() - start
+serial_output = run_mpm_simulator.outputs
+
+start = time.time()
+# max_workers controls maximum number of tasks running in parallel
+run_mpm_simulator.parallel_run(var_samples, prefixes=parallel_prefixes, max_workers=4)
+parallel_time = time.time() - start
+parallel_output = run_mpm_simulator.outputs
+    
+print("Serial run time: ", serial_time)
+print("Parallel run time: ", parallel_time)
+
+
+# %% md
+# 
+# Once a simulation is done, its output is saved to ``dir_out``. All output files
+# generated by above simulations are as follows: 
+
+os.listdir(dir_out)
+
+
+# %% md
+# 
+# Once all simulations are done, their outputs can also be accessed by
+# :py:attr:`.RunSimulator.outputs` attribute, which is a list.
+
+# Here we print the maximum velocity of each simulation in the parallel run.
+max_v = [np.max(output[:,5]) for output in parallel_output]
+print(
+    f"Maximum velocity of {parallel_prefixes[0]} to {parallel_prefixes[-1]} are: ",
+    '\n',
+    max_v)
+
+
+# %% md
+# .. warning:: If one simulation fails for whatever reason, the error message
+#    will be printed to the screen but other simulations will continue. In that
+#    case, the output file of the failed simulation will not be written to ``dir_out``.
+#    Also, the element of :py:attr:`.RunSimulator.outputs` corresponding to that
+#    simulation will be a string representing the error message, instead of a
+#    numpy array. Such failures can be checked as sketched below.
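+
+# %% md
+# A minimal sketch of such a check (no simulation fails in this example, so the
+# list is expected to be empty):
+
+failed = [i for i, out in enumerate(run_mpm_simulator.outputs)
+    if isinstance(out, str)]
+print("Indices of failed simulations: ", failed)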
+
+
+# %% md
+# 
+# Here we delete the folder `temp_run_MassPointModel_example` and all files therein.
+
+import shutil
+
+shutil.rmtree(dir_out)
diff --git a/docs/examples/simulator/plot_run_ravaflow24.py b/docs/examples/simulator/plot_run_ravaflow24.py
new file mode 100644
index 0000000000000000000000000000000000000000..67666519fd53a5b7ecb6b6565307935b8cf4d85c
--- /dev/null
+++ b/docs/examples/simulator/plot_run_ravaflow24.py
@@ -0,0 +1,212 @@
+"""
+
+RunSimulator: Ravaflow24 Mixture Model
+======================================
+"""
+
+# %% md
+# 
+# This example shows how to run multiple Ravaflow24Mixture simulations in serial
+# and in parallel using :class:`.RunSimulator`.
+#
+
+# %% md
+# 
+# First, we import the class :class:`.Ravaflow24Mixture`.
+
+from psimpy.simulator import Ravaflow24Mixture
+
+# %% md
+# 
+# To create an instance of this class, we must specify the parameter ``dir_sim``.
+# It represents the directory in which output files generated by r.avaflow will be
+# saved.
+
+import os
+
+# Here we create a folder called `temp1_run_Ravaflow24Mixture_example` to save
+# output files generated by r.avaflow
+cwd = os.getcwd()
+if not os.path.exists('temp1_run_Ravaflow24Mixture_example'):
+    os.mkdir('temp1_run_Ravaflow24Mixture_example')
+dir_sim = os.path.join(cwd, 'temp1_run_Ravaflow24Mixture_example')
+
+# %% md
+# 
+# Given ``dir_sim``,  we can create an instance of :class:`.Ravaflow24Mixture`.
+# To reduce simulation time, we set ``time_end`` to :math:`50`. Other parameters
+# are set to their default values.
+
+voellmy_model = Ravaflow24Mixture(dir_sim, time_end=50)
+
+# %% md
+# 
+# The `simulator` in this example is defined based on ``voellmy_model`` as follows.
+# It takes required inputs and returns the overall impact area.
+
+import numpy as np
+
+def simulator(prefix, elevation, hrelease, basal_friction, turbulent_friction, EPSG):
+    """Preprocess required inputs, run simulation, and return output as a numpy array."""
+    grass_location, sh_file = voellmy_model.preprocess(prefix=prefix, elevation=elevation,
+        hrelease=hrelease, basal_friction=basal_friction, turbulent_friction=turbulent_friction, EPSG=EPSG)
+    voellmy_model.run(grass_location, sh_file) # r.avaflow writes its output files to dir_sim
+    impact_area = voellmy_model.extract_impact_area(prefix)
+    
+    return np.array([impact_area]) # if dir_out is defined and save_out is True, this returned array will be saved to dir_out
+
+
+# %% md
+# 
+# To demonstrate how to run multiple simulations using :class:`.RunSimulator`, 
+# we choose ``basal_friction`` and ``turbulent_friction`` as variable input
+# parameters. Each has two different values, leading to four simulations.
+
+import itertools
+
+var_inp_parameter = ['basal_friction', 'turbulent_friction']
+basal_friction = [20, 30] 
+turbulent_friction = [3, 4]
+var_samples = np.array(
+    [x for x in itertools.product(basal_friction, turbulent_friction)])
+
+
+# %% md
+#
+# Other parameters of the `simulator`, including ``elevation``, ``hrelease``, and
+# ``EPSG`` are treated as fixed input. It means that their values are the same in
+# all simulations.
+
+dir_data = os.path.abspath('../../../tests/data/')
+elevation = os.path.join(dir_data, 'synthetic_topo.tif')
+hrelease = os.path.join(dir_data, 'synthetic_rel.tif')
+fix_inp = {'elevation': elevation, 'hrelease': hrelease, 'EPSG': '2326'}
+
+# %% md
+# 
+# The parameter ``prefix`` of the `simulator` is special. It is not involved in
+# the computational model, but only used to name output files generated by r.avaflow.
+# Such a parameter is not defined in ``var_inp_parameter`` or ``fix_inp`` of
+# :class:`.RunSimulator`. Instead, we use a separate parameter, called ``o_parameter``,
+# for this purpose.
+
+o_parameter = 'prefix'
+
+
+# %% md
+# 
+# We may want to save the outputs returned by the `simulator` at each simulation
+# for later inspection or processing. In that case, we need to define ``dir_out``
+# and set ``save_out`` to `True`.
+
+cwd = os.getcwd()
+if not os.path.exists('temp2_run_Ravaflow24Mixture_example'):
+    os.mkdir('temp2_run_Ravaflow24Mixture_example')
+dir_out = os.path.join(cwd, 'temp2_run_Ravaflow24Mixture_example')
+
+# %% md
+#
+# .. note:: ``dir_out`` and ``dir_sim`` are two different parameters serving
+#    different purposes.
+#
+#     - ``dir_out`` is a parameter of :class:`.RunSimulator`. The `simulator`
+#       returns the output of interest as a numpy array for each simulation. If
+#       we want to save this returned output of interest (here the impact area)
+#       to our local machine, we need to specify the value of ``dir_out``, which
+#       represents the directory in which the output of interest will be saved,
+#       and set ``save_out`` to `True`. Otherwise, we do not need ``dir_out``
+#       and can leave ``save_out`` as `False`.
+#     - ``dir_sim`` is a parameter of :class:`.Ravaflow24Mixture`. :class:`.Ravaflow24Mixture`
+#       relies on the third-party software r.avaflow 2.4. When we call
+#       :py:meth:`.Ravaflow24Mixture.run` in the function body of the above `simulator`,
+#       r.avaflow 2.4 will be run and generates output files. The value of
+#       ``dir_sim`` specifies the directory in which output files generated by
+#       r.avaflow 2.4 are going to be saved.
+#
+#    The values of ``dir_out`` and ``dir_sim`` can be the same if file names
+#    do not conflict. We recommend keeping them separate. In addition, if the
+#    function body of the `simulator` does not write files to disk, ``dir_sim``
+#    is not required; our :class:`.MassPointModel` is an example. Similarly, if
+#    we do not want to save the numpy array returned by the `simulator`,
+#    ``dir_out`` is not needed.
+
+# %% md
+#
+# Now we can define an object of :class:`.RunSimulator` by
+
+from psimpy.simulator import RunSimulator
+
+run_simulator = RunSimulator(simulator, var_inp_parameter, fix_inp, o_parameter,
+    dir_out, save_out=True)
+
+
+# %% md
+# 
+# Before running simulations, we need to specify ``prefixes``, which will be
+# used to name the files generated by each r.avaflow simulation and each numpy
+# array returned by the `simulator`.
+
+serial_prefixes = ["serial"+str(i) for i in range(len(var_samples))]
+parallel_prefixes = ["parallel"+str(i) for i in range(len(var_samples))]
+
+# %% md
+#
+# Now we can use the :py:meth:`.RunSimulator.serial_run` method or the
+# :py:meth:`.RunSimulator.parallel_run` method to run the simulations
+# in serial or in parallel.
+
+import time
+
+start = time.time()
+run_simulator.serial_run(var_samples=var_samples, prefixes=serial_prefixes)
+serial_time = time.time() - start
+serial_output = run_simulator.outputs
+print(f"serial_output: {serial_output}")
+
+start = time.time()
+# setting append to True means that simulation outputs of the parallel run will
+# be appended to the serial run outputs above
+run_simulator.parallel_run(var_samples, prefixes=parallel_prefixes,
+    max_workers=2, append=True)
+parallel_time = time.time() - start
+    
+parallel_output = run_simulator.outputs[len(var_samples):]
+print(f"parallel_output: {parallel_output}")
+
+    
+print("Serial run time: ", serial_time)
+print("Parallel run time: ", parallel_time)
+
+# %% md
+# 
+# All outputs returned by the `simulator` and saved to ``dir_out`` are
+
+os.listdir(dir_out)
+
+# %% md
+# 
+# All output files/directories generated by r.avaflow simulations 
+# (including files/directories generated during preprocessing) are
+
+os.listdir(dir_sim)
+
+
+# %% md
+# .. warning:: If a simulation fails for whatever reason, the error message
+#    will be printed to the screen but the other simulations will continue. In
+#    that case, the output file of the failed simulation will not be written to
+#    ``dir_out``. Also, the element of :py:attr:`.RunSimulator.outputs`
+#    corresponding to that simulation will be a string representing the error
+#    message instead of a numpy array.
+
+
+# %% md
+# 
+# In the end, we delete the folders `temp1_run_Ravaflow24Mixture_example` and
+# `temp2_run_Ravaflow24Mixture_example` and all files therein.
+
+import shutil
+
+shutil.rmtree(dir_sim)
+shutil.rmtree(dir_out)
\ No newline at end of file
diff --git a/docs/index.md b/docs/index.md
deleted file mode 100644
index e3acefaaf22aa3a1dc6e06d07d7cedcbd0418b19..0000000000000000000000000000000000000000
--- a/docs/index.md
+++ /dev/null
@@ -1,11 +0,0 @@
-```{include} ../README.md
-```
-
-```{toctree}
-:maxdepth: 1
-:hidden:
-
-changelog.md
-conduct.md
-autoapi/index
-```
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 7b5bd2a22bf3f8cfa686ff6a3c81ef2e8aacb6bf..3dadde5e70dbd2533b602588847641aee9d99604 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,3 +1,4 @@
-myst-nb
-sphinx-autoapi
-sphinx-rtd-theme
\ No newline at end of file
+sphinx_rtd_theme
+sphinx_autodoc_typehints
+sphinxcontrib_bibtex
+sphinx_gallery
\ No newline at end of file
diff --git a/docs/source/_static/coordinate_system.png b/docs/source/_static/coordinate_system.png
new file mode 100755
index 0000000000000000000000000000000000000000..25abc559dc73388d4eeb7262aeda4fff0c29b5fd
Binary files /dev/null and b/docs/source/_static/coordinate_system.png differ
diff --git a/docs/source/_static/css/custom.css b/docs/source/_static/css/custom.css
new file mode 100644
index 0000000000000000000000000000000000000000..c7d1eb111e0fd9ecb0d465b1219337d26c008637
--- /dev/null
+++ b/docs/source/_static/css/custom.css
@@ -0,0 +1,6 @@
+.math {
+    text-align: left;
+}
+.eqno {
+    float: right;
+}
\ No newline at end of file
diff --git a/docs/source/_static/latin_hypercube_sampling_sketch.png b/docs/source/_static/latin_hypercube_sampling_sketch.png
new file mode 100755
index 0000000000000000000000000000000000000000..481498c1069a31578654a0ed59fb382ca53bb4e8
Binary files /dev/null and b/docs/source/_static/latin_hypercube_sampling_sketch.png differ
diff --git a/docs/source/api.rst b/docs/source/api.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1dfc30850c515b40e237d110e014bdc82106b0ed
--- /dev/null
+++ b/docs/source/api.rst
@@ -0,0 +1,11 @@
+API & Theory
+============
+
+.. toctree::
+   :maxdepth: 2
+
+   /emulator/index
+   /inference/index
+   /sampler/index
+   /sensitivity/index
+   /simulator/index
\ No newline at end of file
diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
new file mode 100644
index 0000000000000000000000000000000000000000..198a10c6514af52d92bc2204c1df6d618cd0447f
--- /dev/null
+++ b/docs/source/changelog.rst
@@ -0,0 +1,6 @@
+:tocdepth: 1
+
+Changes
+=======
+
+.. include:: ../../CHANGELOG.md
diff --git a/docs/conf.py b/docs/source/conf.py
similarity index 53%
rename from docs/conf.py
rename to docs/source/conf.py
index db3af8c8e1ea82b4b9f62bf50de3a33050ec4613..e87c252b3fd89256cc8b806d372fe7074f45a187 100644
--- a/docs/conf.py
+++ b/docs/source/conf.py
@@ -10,30 +10,52 @@
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 #
-# import os
-# import sys
-# sys.path.insert(0, os.path.abspath('.'))
+from datetime import date
+import os
+import sys
+sys.path.insert(0, os.path.abspath('../../src/psimpy/'))
 
 
 # -- Project information -----------------------------------------------------
 
 project = 'psimpy'
-copyright = '2022, Hu Zhao'
+copyright = f'{date.today().year}, Hu Zhao'
 author = 'Hu Zhao'
 
+import psimpy
+version = psimpy.__version__
+release = version
+
 
 # -- General configuration ---------------------------------------------------
 
+source_suffix = {
+    '.rst': 'restructuredtext',
+    '.md': 'markdown',
+}
+
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = [
-    'myst_nb',
-    'autoapi.extension',
+    # 'myst_nb',
+    # 'autoapi.extension',
+    'sphinx.ext.autodoc',
     'sphinx.ext.napoleon',
     'sphinx.ext.viewcode',
+    'sphinx_autodoc_typehints',
+    'sphinx_gallery.gen_gallery',
+    'sphinxcontrib.bibtex',
 ]
-autoapi_dirs = ['../src']
+
+# autoapi_dirs = ['../src']
+autoclass_content = 'init'
+autodoc_member_order = 'bysource'
+add_module_names = False
+
+bibtex_bibfiles = ['refs.bib']
+bibtex_default_style = 'unsrt'
+bibtex_reference_style = 'author_year'
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
@@ -44,6 +66,25 @@ templates_path = ['_templates']
 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
 
 
+# configure Sphinx-Gallery
+sphinx_gallery_conf = {
+    "examples_dirs": [
+        "../examples/emulator",
+        "../examples/inference",
+        "../examples/sampler",
+        "../examples/sensitivity",
+        "../examples/simulator",
+    ],  # path to your example scripts,
+    "gallery_dirs": [
+        "auto_examples/emulator",
+        "auto_examples/inference",
+        "auto_examples/sampler",
+        "auto_examples/sensitivity",
+        "auto_examples/simulator",
+    ],  # path to where to save gallery generated output
+}
+
+
 # -- Options for HTML output -------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
@@ -51,7 +92,24 @@ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
 #
 html_theme = 'sphinx_rtd_theme'
 
+html_sidebars = {
+    "**": [
+        "about.html",
+        "navigation.html",
+        "relations.html",
+        "searchbox.html",
+    ]
+}
+
+html_title = "PSimPy's documentation"
+
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
\ No newline at end of file
+html_static_path = ['_static']
+
+# These paths are either relative to html_static_path
+# or fully qualified paths (eg. https://...)
+html_css_files = [
+    'css/custom.css',
+]
diff --git a/docs/source/emulator/index.rst b/docs/source/emulator/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1ffa6d7964ca6912ba27df50e5ad4d4c3e946898
--- /dev/null
+++ b/docs/source/emulator/index.rst
@@ -0,0 +1,27 @@
+********
+Emulator
+********
+
+This module hosts functionality for emulation methods.
+
+Emulation, also known as surrogate modeling or meta-modeling, is a class of
+methods used to build cheap-to-evaluate emulators (surrogate models) that
+approximate expensive-to-evaluate simulators. It can greatly reduce the
+computational cost of tasks in which a computationally intensive simulator
+needs to be run many times, such as global sensitivity analysis,
+uncertainty quantification, or parameter calibration.
+
+Currently implemented classes in this module are:
+
+* :class:`.ScalarGaSP`: Gaussian process emulation for simulators which return a
+  scalar value as output.
+
+* :class:`.PPGaSP`: Gaussian process emulation for simulators which return
+  multiple values as output. 
+
+.. toctree::
+   :maxdepth: 2
+   :hidden:
+   :caption: Emulator
+
+    RobustGaSP <robustgasp>
\ No newline at end of file
diff --git a/docs/source/emulator/robustgasp.rst b/docs/source/emulator/robustgasp.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d5c587f6053101966beb1ca7b94205e259b9c268
--- /dev/null
+++ b/docs/source/emulator/robustgasp.rst
@@ -0,0 +1,211 @@
+RobustGaSP
+==========
+
+`RobustGaSP`, standing for `Robust Gaussian Stochastic Process Emulation`, is an
+`R` package developed by :cite:t:`Gu2019`. It implements robust methods to
+estimate the unknown parameters, which can greatly improve the predictive
+performance of the built emulator. In addition, it implements the `PP GaSP`
+(parallel partial Gaussian stochastic process) emulator for simulators which
+return multiple values as output, see :cite:t:`Gu2016` for more detail.
+
+In :py:mod:`PSimPy.emulator.robustgasp`, we have implemented the classes
+:class:`.ScalarGaSP` and :class:`.PPGaSP` based on `RobustGaSP` and `rpy2`,
+which allows us to use `RobustGaSP` directly from within `Python`.
+
+Theory recap of GP emulation
+----------------------------
+
+A simulator represents a mapping from an input space to an output space.
+Gaussian process emulation treats a computationally expensive simulator as an
+unknown function from a Bayesian perspective: the prior belief of the simulator,
+namely a Gaussian process, is updated based on a modest number of simulation runs,
+leading to a posterior which can be evaluated much faster than the simulator and
+can then be used for computationally demanding analyses :cite:p:`Zhao2021a`. A recap
+of Gaussian process emulation is provided below, which is adapted from `Chapter 3`
+of :cite:t:`Zhao2021b`.
+
+GP prior
+^^^^^^^^
+
+Let :math:`f(\cdot)` denote a simulator that one wants to approximate. It
+defines a mapping from a :math:`p`-dimensional input
+:math:`\mathbf{x}=(x_1,\ldots,x_p)^T \in \mathcal{X} \subset{\mathbb{R}^p}`
+to a scalar output :math:`y \in \mathbb{R}`. 
+
+From the Bayesian perspective, the prior belief of the simulator is modeled by
+a Gaussian process, namely
+
+.. math:: f(\cdot) \sim \mathcal{GP}(m(\cdot), C(\cdot,\cdot)).
+    :label: GP
+
+The Gaussian process is fully specified by its mean function :math:`m(\cdot)`
+and covariance function :math:`C(\cdot,\cdot)`. The mean function is usually
+modeled by parametric regression
+
+.. math:: m(\mathbf{x})=\mathbf{h}^T(\mathbf{x}) \boldsymbol{\beta}=\sum_{t=1}^{q} h_{t}(\mathbf{x}) \beta_{t},
+    :label: GPmean
+
+with :math:`q`-dimensional vectors
+:math:`\mathbf{h}(\mathbf{x})=\left(h_{1}(\mathbf{x}), h_{2}(\mathbf{x}), \ldots, h_{q}(\mathbf{x})\right)^T`
+and :math:`\boldsymbol{\beta}=\left(\beta_{1}, \beta_{2}, \ldots, \beta_{q}\right)^T`
+denoting the chosen basis functions and unknown regression parameters respectively.
+The basis functions are commonly chosen as constant :math:`\mathbf{h}(\mathbf{x})=1`
+or some prescribed polynomials like :math:`\mathbf{h}(\mathbf{x})=(1,x_1,\ldots,x_p)^T`.
+
+The covariance function is often assumed to be stationary and typically has a
+separable form of
+
+.. math::  C\left(\mathbf{x}_{i}, \mathbf{x}_{j}\right)=\sigma^2 c\left(\mathbf{x}_{i}, \mathbf{x}_{j}\right)=\sigma^2 \prod_{l=1}^{p} c_{l}\left(x_{i l}, x_{j l}\right),
+    :label: GPcovariance
+
+where :math:`c\left(\mathbf{x}_{i}, \mathbf{x}_{j}\right)` is the correlation
+function, and :math:`c_{l}\left(x_{i l}, x_{j l}\right)` corresponds to the
+correlation between the :math:`l`-th component of :math:`\mathbf{x}_i` and
+the counterpart of :math:`\mathbf{x}_j`. Various correlation functions can be
+chosen for :math:`c_{l}\left(x_{i l}, x_{j l}\right)`. In what follows, a specific
+correlation function from the Matérn family is chosen to illustrate the theory;
+it can be replaced by any other correlation function without loss of generality.
+
+.. math:: c_{l}\left(x_{i l}, x_{j l}\right) = \left(1+\frac{\sqrt{5}||x_{il}-x_{jl}||}{\psi_l}+\frac{5||x_{il}-x_{jl}||^2}{3\psi_{l}^{2}} \right) \exp{\left(-\frac{\sqrt{5}||x_{il}-x_{jl}||}{\psi_l}\right)},
+    :label: GPcorrelation
+
+where :math:`\psi_l` is the correlation length parameter in the :math:`l`-th
+dimension. The unknowns in the covariance function
+:math:`C\left(\mathbf{x}_{i}, \mathbf{x}_{j}\right)` are the variance
+:math:`\sigma^2` and the :math:`p` correlation length parameters
+:math:`\boldsymbol{\psi}=(\psi_1,\ldots,\psi_p)^T`.
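+
+For concreteness, the correlation of equation :eq:`GPcorrelation` can be
+evaluated with a few lines of `NumPy` (a plain sketch for illustration; the
+function name is ours and not part of the package API)::
+
+    import numpy as np
+
+    def matern_52(d, psi):
+        """Matern 5/2 correlation for a distance d >= 0 and length psi > 0."""
+        a = np.sqrt(5) * d / psi
+        return (1 + a + 5 * d**2 / (3 * psi**2)) * np.exp(-a)
+
+    print(matern_52(np.abs(0.3 - 0.7), psi=0.5))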
+
+GP posterior
+^^^^^^^^^^^^
+
+Equations :eq:`GP` to :eq:`GPcorrelation` encode prior knowledge of the simulator
+:math:`f(\cdot)`. From the Bayesian viewpoint, this prior knowledge can be updated
+to a posterior based on a limited number of training data. Training data here refer
+to :math:`n^{tr}` pairs of input-output data of :math:`f(\cdot)`.
+
+Let :math:`\mathbf{x}^{tr}:=\{\mathbf{x}_i\}_{i=1}^{n^{tr}}` and 
+:math:`\mathbf{y}^{tr}:=\{f(\mathbf{x}_i)\}_{i=1}^{n^{tr}}` denote the training
+inputs and outputs respectively. Since any finite number of random variables
+from a Gaussian process are jointly Gaussian distributed, the joint distribution
+of the :math:`n^{tr}` outputs :math:`\mathbf{y}^{tr}` follow a
+:math:`n^{tr}`-dimensional Gaussian distribution
+
+.. math:: p(\mathbf{y}^{tr} | \boldsymbol{\beta}, \sigma^{2}, \boldsymbol{\psi})  \sim \mathcal{N}_{n^{tr}}\left(\mathbf{H}\boldsymbol{\beta}, \sigma^{2} \mathbf{R}\right),
+    :label: GPlikelihood1
+
+where :math:`\mathbf{H}=\left[\mathbf{h}(\mathbf{x}_{1}), \ldots, \mathbf{h}(\mathbf{x}_{n^{tr}}) \right]^T`
+is the :math:`n^{tr} \times q` basis design matrix and :math:`\mathbf{R}` is the
+:math:`n^{tr} \times n^{tr}` correlation matrix with :math:`(i,j)` element
+:math:`c(\mathbf{x}_{i}, \mathbf{x}_{j})`. Equation :eq:`GPlikelihood1` is known
+as the likelihood function.
+
+Similarly, the joint distribution of the function output :math:`y^*` at any
+untried input :math:`\mathbf{x}^*` and :math:`\mathbf{y}^{tr}` follows an
+:math:`(n^{tr}+1)`-dimensional Gaussian distribution
+
+.. math:: p(y^*, \mathbf{y}^{tr} \mid \boldsymbol{\beta}, \sigma^{2}, \boldsymbol{\psi}) \sim \mathcal{N}_{n^{tr}+1}\left(\left[\begin{array}{c}
+    \mathbf{h}^T\left(\mathbf{x}^*\right) \\
+    \mathbf{H}
+    \end{array}\right] \boldsymbol{\beta}, \sigma^{2}\left[\begin{array}{cc}
+    c\left(\mathbf{x}^*, \mathbf{x}^*\right) & \mathbf{r}^T\left(\mathbf{x}^*\right) \\
+    \mathbf{r}\left(\mathbf{x}^*\right) & \mathbf{R}
+    \end{array}\right]\right)
+    :label: jointystarytr
+
+where :math:`\mathbf{r}(\mathbf{x}^*)=\left(c(\mathbf{x}^*, \mathbf{x}_{1}), \ldots, c(\mathbf{x}^*, \mathbf{x}_{n^{tr}}) \right)^T`.
+According to the properties of the multivariate Gaussian distribution, the conditional
+distribution of :math:`y^*` conditioned on :math:`\mathbf{y}^{tr}` is again a
+Gaussian distribution, namely
+
+.. math:: p(y^* | \mathbf{y}^{tr}, \boldsymbol{\beta}, \sigma^{2}, \boldsymbol{\psi}) \sim \mathcal{N}
+    \left(m', \sigma^2 c' \right),
+    :label: conditionalGaussian
+
+.. math:: m'=\mathbf{h}^T(\mathbf{x}^*) \boldsymbol{\beta} + \mathbf{r}^T(\mathbf{x}^*) \mathbf{R}^{-1} (\mathbf{y}^{tr}-\mathbf{H}\boldsymbol{\beta})
+    :label: conditionalGaussianM
+
+.. math:: c'=c(\mathbf{x}^*, \mathbf{x}^*) - \mathbf{r}^T(\mathbf{x}^*) \mathbf{R}^{-1} \mathbf{r}(\mathbf{x}^*).
+    :label: conditionalGaussianC
+
+Equations :eq:`conditionalGaussian` to :eq:`conditionalGaussianC` can be easily
+extended to any dimensionality since the joint distribution of the function
+outputs at any number of untried inputs and :math:`\mathbf{y}^{tr}` also follows
+a multivariate Gaussian distribution. Equations :eq:`conditionalGaussian` to 
+:eq:`conditionalGaussianC` therefore essentially define the posterior which is an
+updated Gaussian process, given the unknown regression parameters
+:math:`\boldsymbol{\beta}`, variance :math:`\sigma^2`, and correlation length
+parameters :math:`\boldsymbol{\psi}`. 
+
+For any new input :math:`\mathbf{x}^*`, the output :math:`y^*` can be almost
+instantaneously predicted, given by the mean :math:`m'`, namely equation
+:eq:`conditionalGaussianM`. Moreover, the uncertainty associated with the prediction
+can be estimated by, for example, the variance :math:`\sigma^2 c'`. Depending on how
+the unknown parameters (:math:`\boldsymbol{\beta}`, :math:`\sigma^2`, and :math:`\boldsymbol{\psi}`)
+are learned based on the training data, the posterior can have various forms, see
+`Section 3.3` of :cite:t:`Zhao2021b`.
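+
+As a minimal numerical sketch (assuming :math:`\boldsymbol{\beta}`,
+:math:`\sigma^2`, and :math:`\boldsymbol{\psi}` have already been fixed, using
+a constant basis :math:`\mathbf{h}(\mathbf{x})=1`, and with helper names that
+are ours for illustration), equations :eq:`conditionalGaussianM` and
+:eq:`conditionalGaussianC` read in `NumPy` as::
+
+    import numpy as np
+
+    def predict(x_star, x_tr, y_tr, beta, sigma2, corr):
+        """Predictive mean m' and variance sigma^2 c' at one untried input."""
+        R = np.array([[corr(xi, xj) for xj in x_tr] for xi in x_tr])
+        r = np.array([corr(x_star, xi) for xi in x_tr])
+        h = np.ones(len(x_tr))   # constant basis h(x) = 1
+        m = beta + r @ np.linalg.solve(R, y_tr - h * beta)
+        c = corr(x_star, x_star) - r @ np.linalg.solve(R, r)
+        return m, sigma2 * c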
+
+Sample from GP
+^^^^^^^^^^^^^^
+A Gaussian process, either the prior or the posterior, defines a distribution over
+functions. Once its mean function :math:`m(\cdot)` and covariance function
+:math:`C(\cdot,\cdot)` are specified, the Gaussian process can be evaluated at a
+finite number :math:`s` of input points :math:`\{\mathbf{x}_i\}_{i=1}^{s}`.
+The joint distribution of :math:`\{y_i=f(\mathbf{x}_i)\}_{i=1}^{s}` follows an
+:math:`s`-variate Gaussian distribution. A sample of this :math:`s`-variate Gaussian
+distribution, denoted as :math:`\tilde{\mathbf{y}}:=(\tilde{y}_1,\ldots,\tilde{y}_s)^T`,
+can be viewed as a sample of the Gaussian process (evaluated at a finite number of points).
+To generate :math:`\tilde{\mathbf{y}}`, the :math:`s \times s` covariance matrix
+:math:`\boldsymbol{\Sigma}` needs to be decomposed into the product of a lower triangular
+matrix :math:`\mathbf{L}` and its transpose :math:`\mathbf{L}^T` using the Cholesky
+decomposition
+
+.. math:: \boldsymbol{\Sigma}=\left(\begin{array}{cccc}
+    C\left(\mathbf{x}_{1}, \mathbf{x}_{1}\right) & C\left(\mathbf{x}_{1}, \mathbf{x}_{2}\right) & \ldots & C\left(\mathbf{x}_{1}, \mathbf{x}_{s}\right) \\
+    C\left(\mathbf{x}_{2}, \mathbf{x}_{1}\right) & C\left(\mathbf{x}_{2}, \mathbf{x}_{2}\right) & \ldots & C\left(\mathbf{x}_{2}, \mathbf{x}_{s}\right) \\
+    \vdots & \vdots & \ddots & \vdots \\
+    C\left(\mathbf{x}_{s}, \mathbf{x}_{1}\right) & \ldots & \ldots & C\left(\mathbf{x}_{s}, \mathbf{x}_{s}\right)
+    \end{array}\right)=\mathbf{L}\mathbf{L}^T.
+    :label: Choleskydecomposition
+
+Then a sample :math:`\tilde{\mathbf{y}}` can be obtained as follows:
+
+.. math:: \tilde{\mathbf{y}} = (m(\mathbf{x}_1),\ldots,m(\mathbf{x}_s))^T + \mathbf{L}\mathbf{w},
+    :label: GPsample
+
+where :math:`\mathbf{w}:=(w_1,\dots,w_s)^T` denotes an :math:`s`-dimensional random vector
+consisting of :math:`s` independent standard normal random variables :math:`w_i,i \in \{1,\ldots,s\}`.
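+
+A minimal `NumPy` sketch of this sampling procedure (assuming, for illustration
+only, a zero mean function and a Matérn 5/2 covariance with unit variance)::
+
+    import numpy as np
+
+    rng = np.random.default_rng(42)
+    s = 50
+    x = np.linspace(0, 1, s)
+
+    # covariance matrix built from the chosen correlation function
+    a = np.sqrt(5) * np.abs(x[:, None] - x[None, :]) / 0.2
+    Sigma = (1 + a + a**2 / 3) * np.exp(-a)   # Matérn 5/2, psi = 0.2, sigma^2 = 1
+
+    # Cholesky decomposition; a small jitter is added for numerical stability
+    L = np.linalg.cholesky(Sigma + 1e-10 * np.eye(s))
+
+    w = rng.standard_normal(s)   # s independent standard normal variables
+    y_tilde = L @ w              # one sample; the mean vector is zero here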
+
+
+ScalarGaSP Class
+----------------
+
+Class :class:`.ScalarGaSP` provides Gaussian process emulation for simulators 
+which return a scalar value as output, namely :math:`y \in \mathbb{R}`. Please
+refer to :cite:t:`Gu2018` for the detailed theory.
+
+It is imported by::
+    
+    from psimpy.emulator.robustgasp import ScalarGaSP
+
+Methods
+^^^^^^^
+.. autoclass:: psimpy.emulator.robustgasp.ScalarGaSP
+    :members: train, predict, sample, loo_validate
+
+
+PPGaSP Class
+------------
+
+Class :class:`.PPGaSP` provides Gaussian process emulation for simulators 
+which return multiple values as output, namely :math:`y \in \mathbb{R}^k, k>1`.
+Please refer to :cite:t:`Gu2016` for the detailed theory.
+
+It is imported by::
+    
+    from psimpy.emulator.robustgasp import PPGaSP
+
+Methods
+^^^^^^^
+.. autoclass:: psimpy.emulator.robustgasp.PPGaSP
+    :members: train, predict, sample
diff --git a/docs/source/examples.rst b/docs/source/examples.rst
new file mode 100644
index 0000000000000000000000000000000000000000..123824f1c626f9938cbd03324c33d7faf8e1a456
--- /dev/null
+++ b/docs/source/examples.rst
@@ -0,0 +1,11 @@
+Example Gallery
+===============
+
+.. toctree::
+
+   Emulator <../auto_examples/emulator/index>
+   Inference <../auto_examples/inference/index>
+   Sampler <../auto_examples/sampler/index>
+   Sensitivity <../auto_examples/sensitivity/index>
+   Simulator <../auto_examples/simulator/index>
+   
\ No newline at end of file
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1909f0ee25d736b7526c6d11da55c555ad2f4b99
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,21 @@
+:gitlab_url: https://git-ce.rwth-aachen.de/mbd/psimpy
+
+Welcome to PSimPy's documentation!
+==================================
+
+:Release: |release|
+:Date: |today|
+
+
+Contents
+--------
+
+.. toctree::
+   :maxdepth: 2
+
+   Home <self>
+   quickstart
+   api
+   examples
+   changelog
+   refs
diff --git a/docs/source/inference/active_learning.rst b/docs/source/inference/active_learning.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1537528d97006952048465ac300b8ae7819963bf
--- /dev/null
+++ b/docs/source/inference/active_learning.rst
@@ -0,0 +1,27 @@
+Active Learning
+===============
+
+Active learning is a machine learning technique that involves selecting the most
+informative data points for the purpose of training an emulator. The idea behind
+active learning is to reduce the amount of training data required for a machine
+learning model to achieve a certain level of accuracy. This is achieved by iteratively
+choosing a new data point that is expected to be the most informative.
+
+In this module, the :class:`.ActiveLearning` class is implemented to actively build a
+Gaussian process emulator for the natural logarithm of the unnormalized posterior in
+Bayesian inference. It is intended to facilitate efficient parameter calibration of
+computationally expensive simulators. For the detailed theory, please refer to
+:cite:t:`Wang2018`, :cite:t:`Kandasamy2017`, and :cite:t:`Zhao2022`.
+
+
+ActiveLearning Class
+--------------------
+
+The :class:`.ActiveLearning` class is imported by::
+    
+    from psimpy.inference.active_learning import ActiveLearning
+
+Methods
+^^^^^^^
+.. autoclass:: psimpy.inference.active_learning.ActiveLearning
+    :members: initial_simulation, iterative_emulation, approx_ln_pxl
\ No newline at end of file
diff --git a/docs/source/inference/bayes_inference.rst b/docs/source/inference/bayes_inference.rst
new file mode 100644
index 0000000000000000000000000000000000000000..554ec0234f43ff328a90aebc1f7a5f51ff505b77
--- /dev/null
+++ b/docs/source/inference/bayes_inference.rst
@@ -0,0 +1,63 @@
+Bayes Inference
+===============
+
+Bayesian inference is a statistical method for making probabilistic inference about
+unknown parameters based on observed data and prior knowledge. The update from
+prior knowledge to posterior in light of observed data is based on Bayes' theorem:
+
+.. math:: p(\mathbf{x} \mid \mathbf{d}) = \frac{L(\mathbf{x} \mid \mathbf{d}) p(\mathbf{x})}
+    {\int L(\mathbf{x} \mid \mathbf{d}) p(\mathbf{x}) d \mathbf{x}}
+    :label: bayes
+
+where :math:`\mathbf{x}` represents the collection of unknown parameters and
+:math:`\mathbf{d}` represents the collection of observed data. The prior probability
+distribution, :math:`p(\mathbf{x})`, represents the degree of belief in the parameters before
+any data is observed. The likelihood function, :math:`L(\mathbf{x} \mid \mathbf{d})`, represents
+the probability of observing the data given the parameters. The posterior distribution,
+:math:`p(\mathbf{x} \mid \mathbf{d})`, is obtained by multiplying the prior probability
+distribution by the likelihood function and then normalizing the result. Bayesian inference
+allows for incorporating subjective prior beliefs, which can be updated as new data becomes available.
+
+For many real-world problems, it is hardly possible to compute the posterior analytically due to
+the complexity of the denominator in equation :eq:`bayes`, namely the normalizing constant. In this
+module, two numerical approximations are implemented: grid estimation and Metropolis Hastings
+estimation.
+
+In grid estimation, the denominator in equation :eq:`bayes` is approximated by numerical integration
+on a regular grid and the posterior value at each grid point is computed, as shown in equation
+:eq:`grid_estimation`. The number of grid points grows exponentially with the number of unknown
+parameters. Grid estimation is therefore limited to low-dimensional problems.
+
+.. math:: p(\mathbf{x} \mid \mathbf{d}) \approx \frac{L(\mathbf{x} \mid \mathbf{d}) p(\mathbf{x})}
+    {\sum_{i=1}^N L\left(\mathbf{x}_i \mid \mathbf{d}\right) p\left(\mathbf{x}_i\right) \Delta \mathbf{x}_i}
+    :label: grid_estimation
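+
+A minimal sketch of grid estimation for a one-dimensional parameter (the toy
+prior and likelihood below are illustrative only, not the interface of
+:class:`.GridEstimation`)::
+
+    import numpy as np
+
+    x = np.linspace(-5, 5, 201)               # regular grid over the parameter range
+    dx = x[1] - x[0]                          # grid spacing
+
+    prior = np.full_like(x, 1 / 10)           # uniform prior on [-5, 5]
+    likelihood = np.exp(-0.5 * (x - 1.0)**2)  # toy Gaussian likelihood
+
+    unnormalized = likelihood * prior
+    posterior = unnormalized / np.sum(unnormalized * dx)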
+
+Metropolis Hastings estimation directly draws samples from the unnormalized posterior distribution,
+namely the numerator of equation :eq:`bayes`. The samples are then used to estimate properties of
+the posterior distribution, like its mean and variance, or to approximate the distribution itself.
+
+
+GridEstimation Class
+--------------------
+
+The :class:`.GridEstimation` class is imported by::
+    
+    from psimpy.inference.bayes_inference import GridEstimation
+
+Methods
+^^^^^^^
+.. autoclass:: psimpy.inference.bayes_inference.GridEstimation
+    :members: run
+
+
+MetropolisHastingsEstimation Class
+----------------------------------
+
+The :class:`.MetropolisHastingsEstimation` class is imported by::
+    
+    from psimpy.inference.bayes_inference import MetropolisHastingsEstimation
+
+Methods
+^^^^^^^
+.. autoclass:: psimpy.inference.bayes_inference.MetropolisHastingsEstimation
+    :members: run
diff --git a/docs/source/inference/index.rst b/docs/source/inference/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e867adb388ff21cb455581e724b43e3715169f4a
--- /dev/null
+++ b/docs/source/inference/index.rst
@@ -0,0 +1,21 @@
+*********
+Inference
+*********
+
+This module hosts functionality for inference methods.
+
+Currently implemented classes are:
+
+* :class:`.ActiveLearning`: Construct a Gaussian process emulator for an unnormalized posterior. 
+
+* :class:`.GridEstimation`: Estimate a posterior distribution by grid estimation.
+
+* :class:`.MetropolisHastingsEstimation`: Estimate a posterior by Metropolis Hastings estimation.
+
+.. toctree::
+   :maxdepth: 1
+   :hidden:
+   :caption: Inference
+
+    Active Learning <active_learning>
+    Bayes Inference <bayes_inference>
\ No newline at end of file
diff --git a/docs/source/quickstart.rst b/docs/source/quickstart.rst
new file mode 100644
index 0000000000000000000000000000000000000000..717d65bc1ac4725ba15fd57ba00f80556e876242
--- /dev/null
+++ b/docs/source/quickstart.rst
@@ -0,0 +1,91 @@
+
+Getting Started
+===============
+
+Overview
+--------
+
+PSimPy (Predictive and probabilistic simulation with Python) implements
+a Gaussian process emulation-based framework that enables systematic and
+efficient uncertainty-related analyses of physics-based models, which are
+often computationally expensive. Examples are variance-based global
+sensitivity analysis, uncertainty quantification, and parameter calibration.
+
+Installation
+------------
+
+:py:mod:`PSimPy` is a pure Python package. All Python-related dependencies are
+automatically taken care of. It should be noted that some modules rely on
+non-Python packages and software. More specifically, the emulator module
+:py:mod:`robustgasp.py` relies on the R package
+`RobustGaSP <https://cran.r-project.org/web/packages/RobustGaSP/>`_;
+the simulator module :py:mod:`ravaflow24.py` relies on the open source software
+`r.avaflow 2.4 <https://www.landslidemodels.org/r.avaflow/>`_.
+
+If the simulator module :py:mod:`ravaflow24.py` is desired, you may follow the
+official `r.avaflow User manual <https://www.landslidemodels.org/r.avaflow/manual.php>`_
+to install it. The installation of the R package `RobustGaSP` is covered in
+the following steps.
+
+We recommend installing :py:mod:`PSimPy` in a virtual environment such as a
+`conda` environment. You may want to first install `Anaconda` or `Miniconda`
+if you haven't already. The steps afterwards are as follows.
+
+1. Create a conda environment with Python 3.9::
+    
+    conda create --name your_env_name python=3.9
+
+2. Install `R` if you don't have it on your machine (if you have `R`, you can
+   skip this step; alternatively, you can still follow this step to install `R` in
+   the conda environment)::
+    
+    conda activate your_env_name
+    conda install -c conda-forge r-base=3.6
+
+3. Install the R package `RobustGaSP` in the R terminal::
+    
+    R
+    install.packages("RobustGaSP",repos="https://cran.r-project.org",version="0.6.4")
+
+   Check that it is successfully installed by::
+    
+    library("RobustGaSP")
+
+4. Configure the environment variable `R_HOME` so that `rpy2` knows where to find
+   `R` packages. You can find your `R_HOME` by typing the following command in the
+   R terminal::
+    
+    R.home()
+    q()
+
+   It is a path like ".../lib/R". Set the environment variable `R_HOME` in your
+   conda environment by::
+    
+    conda env config vars set R_HOME=your_R_HOME
+
+   Afterwards reactivate your conda environment to make the change take effect by::
+    
+    conda deactivate
+    conda activate your_env_name
+
+5. Install :py:mod:`PSimPy` using `pip` in your conda environment by::
+    
+    conda install -c conda-forge pip
+    pip install psimpy
+
+Now you should have :py:mod:`PSimPy` and its dependencies successfully installed
+in your conda environment. You can use it in the Python terminal or in your
+Python IDE.
+
+If you would like to use it with Jupyter Notebook (IPython Notebook), one
+extra step is needed to make your conda environment available in the Notebook:
+
+6. Install `ipykernel` and register a kernel that points to your conda
+   environment by::
+
+    conda install -c conda-forge ipykernel
+    python -m ipykernel install --user --name=your_env_name
+
+Now you can start your Notebook, change the kernel to your conda environment, and
+use :py:mod:`PSimPy`.
\ No newline at end of file
diff --git a/docs/source/refs.bib b/docs/source/refs.bib
new file mode 100644
index 0000000000000000000000000000000000000000..c3bfd2283f9f29e61155b33ba47eef68e3f34c4d
--- /dev/null
+++ b/docs/source/refs.bib
@@ -0,0 +1,159 @@
+@article{Christen2010,
+  title = {{RAMMS}: Numerical simulation of dense snow avalanches in three-dimensional terrain},
+  journal = {Cold Regions Science and Technology},
+  volume = {63},
+  number = {1},
+  pages = {1--14},
+  year = {2010},
+  doi = {https://doi.org/10.1016/j.coldregions.2010.04.005},
+  author = {M. Christen and J. Kowalski and P. Bartelt}
+}
+
+@article{Fischer2012,
+  title = "Topographic curvature effects in applied avalanche modeling",
+  journal = "Cold Regions Science and Technology",
+  volume = "74-75",
+  pages = "21--30",
+  year = "2012",
+  doi = "https://doi.org/10.1016/j.coldregions.2012.01.005",
+  author = "Jan-Thomas Fischer and Julia Kowalski and Shiva P. Pudasaini"
+}
+
+@article{Gu2016,
+  title = "Parallel partial {Gaussian} process emulation for computer models with massive output",
+  journal = "Annals of Applied Statistics",
+  volume = "10",
+  number = "3",
+  pages = "1317--1347",
+  year = "2016",
+  doi = "10.1214/16-AOAS934",
+  author = "Gu, Meng Yang and Berger, James O."
+}
+
+@article{Gu2018,
+  title = "Robust {Gaussian} stochastic process emulation",
+  journal = "Annals of Statistics",
+  volume = "46",
+  number = "6A",
+  pages = "3038--3066",
+  year = "2018",
+  doi = "10.1214/17-AOS1648",
+  author = "Gu, Meng Yang and Wang, Xiao Jing and Berger, James O."
+}
+
+@article{Gu2019,
+  title = "RobustGaSP: Robust {Gaussian} stochastic process emulation in {R}",
+  journal = {{The R Journal}},
+  pages = {112--136},
+  volume = {11},
+  number = {1},
+  year = {2019},
+  doi = {10.32614/RJ-2019-011},
+  author = "Meng Yang Gu and Jesus Palomo and James O. Berger",
+}
+
+@article{Herman2017,
+  title = "{SALib}: An open-source {Python} library for Sensitivity Analysis",
+  journal = "The Journal of Open Source Software",
+  volume = "2",
+  number = "9",
+  doi = "10.21105/joss.00097",
+  year = "2017",
+  author = "Jon Herman and Will Usher"
+}
+
+@article{Kandasamy2017,
+  title = "Query efficient posterior estimation in scientific experiments via {Bayesian} active learning",
+  journal = "Artificial Intelligence",
+  volume = "243",
+  pages = "45--56",
+  year = "2017",
+  doi = "https://doi.org/10.1016/j.artint.2016.11.002",
+  author = "Kirthevasan Kandasamy and Jeff Schneider and Barnab{\'a}s P{\'o}czos",
+}
+
+@article{LeGratiet2014,
+  title = "A {Bayesian} approach for global sensitivity analysis of (multifidelity) computer codes",
+  journal = "SIAM/ASA Journal on Uncertainty Quantification",
+  volume = "2",
+  number = "1",
+  pages = "336--363",
+  year = "2014",
+  doi = "10.1137/130926869",
+  author = "Le Gratiet, Loic and Cannamela, Claire and Iooss, Bertrand"
+}
+
+@article{Mergili2017,
+  AUTHOR = {Mergili, M. and Fischer, J. T. and Krenn, J. and Pudasaini, S. P.},
+  TITLE = {r.avaflow v1, an advanced open-source computational framework for the propagation and interaction of two-phase mass flows},
+  JOURNAL = {Geoscientific Model Development},
+  VOLUME = {10},
+  YEAR = {2017},
+  NUMBER = {2},
+  PAGES = {553--569},
+  DOI = {10.5194/gmd-10-553-2017}
+}
+
+@article{Saltelli2002,
+  title = "Making best use of model evaluations to compute sensitivity indices",
+  journal = "Computer Physics Communications",
+  volume = "145",
+  number = "2",
+  pages = "280--297",
+  year = "2002",
+  doi = "https://doi.org/10.1016/S0010-4655(02)00280-1",
+  author = "Andrea Saltelli"
+}
+
+@article{Saltelli2010,
+  title = "Variance based sensitivity analysis of model output. {Design} and estimator for the total sensitivity index",
+  journal = "Computer Physics Communications",
+  volume = "181",
+  number = "2",
+  pages = "259--270",
+  year = "2010",
+  doi = "https://doi.org/10.1016/j.cpc.2009.09.018",
+  author = "Andrea Saltelli and Paola Annoni and Ivano Azzini and Francesca Campolongo and Marco Ratto and Stefano Tarantola"
+}
+
+@article{Wang2018,
+  title   = "Adaptive {Gaussian} process approximation for {Bayesian} inference with expensive likelihood functions",
+  journal = "Neural Computation",
+  volume  = "30",
+  number  = "11",
+  pages   = "3072--3094",
+  year    = "2018",
+  doi     = "10.1162/neco_a_01127",
+  author  = "Hong Qiao Wang and Jing Lai Li"
+}
+
+@article{Zhao2021a,
+  title   = "Emulator-based global sensitivity analysis for flow-like landslide run-out models",
+  journal = "Landslides",
+  year    = "2021",
+  doi     = "https://doi.org/10.1007/s10346-021-01690-w",
+  author  = "Zhao, H. and Amann, F. and Kowalski, J."
+}
+
+@PhdThesis{Zhao2021b,
+  author       = {Zhao, Hu},
+  school       = {RWTH Aachen University},
+  title        = {Gaussian processes for sensitivity analysis, {Bayesian} inference, and uncertainty quantification in landslide research},
+  year         = {2021},
+  doi          = {10.18154/RWTH-2021-11693}
+}
+
+@article{Zhao2022,
+  title = {Bayesian active learning for parameter calibration of landslide run-out models},
+  journal = {Landslides},
+  volume = {19},
+  pages = {2033--2045},
+  year = {2022},
+  doi = {https://doi.org/10.1007/s10346-022-01857-z},
+  author = {Zhao, Hu and Kowalski, Julia}
+}
+
diff --git a/docs/source/refs.rst b/docs/source/refs.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7a02493ba76def4e201cfa0f160c059a6fe5b3b0
--- /dev/null
+++ b/docs/source/refs.rst
@@ -0,0 +1,4 @@
+References
+==========
+
+.. bibliography::
\ No newline at end of file
diff --git a/docs/source/sampler/index.rst b/docs/source/sampler/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3348c312ed1ef3677b3d72de287599c4241833a8
--- /dev/null
+++ b/docs/source/sampler/index.rst
@@ -0,0 +1,22 @@
+*******
+Sampler
+*******
+
+This module hosts functionality for sampling methods.
+
+Currently implemented classes are:
+
+* :class:`.LHS`: Latin hypercube sampling.
+
+* :class:`.MetropolisHastings`: Metropolis Hastings sampling.
+
+* :class:`.Saltelli`: Saltelli's version of Sobol' sampling.
+
+.. toctree::
+   :maxdepth: 1
+   :hidden:
+   :caption: Sampler
+
+    Latin Hypercube <latin>
+    Metropolis Hastings <metropolis_hastings>
+    Saltelli <saltelli>
\ No newline at end of file
diff --git a/docs/source/sampler/latin.rst b/docs/source/sampler/latin.rst
new file mode 100644
index 0000000000000000000000000000000000000000..736cc39ea00fb10d6a181d5134ba407a3732498e
--- /dev/null
+++ b/docs/source/sampler/latin.rst
@@ -0,0 +1,40 @@
+Latin Hypercube Sampling
+========================
+
+Latin hypercube sampling is one type of space-filling sampling method. It is often
+used to draw input points for emulator training. The following figure gives a
+three-dimensional example.
+
+.. image:: ../_static/latin_hypercube_sampling_sketch.png
+   :width: 800
+   :alt: an illustration of Latin hypercube sampling
+   :align: center
+
+Three random variables :math:`X`, :math:`Y`, and :math:`Z` span
+a three-dimensional space. If we want to draw :math:`6` samples from this space
+using Latin hypercube sampling, we first divide the range of each variable into
+:math:`6` equally probable intervals as shown on the left. In each interval,
+we pick a value for the corresponding variable. Then we shuffle the picked values
+of each variable (e.g. :math:`x_1` to :math:`x_6`) as shown in the middle. Last,
+each combination gives us one sample of the three variables as shown on the right.
+It should be noted that no two samples share an interval in any dimension. This
+procedure can be straightforwardly extended to draw :math:`M` samples from an
+:math:`N`-dimensional space spanned by :math:`N` random variables.
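+
+The idea can be illustrated with `SciPy` (shown for orientation only; the
+:class:`.LHS` class has its own interface)::
+
+    from scipy.stats import qmc
+
+    sampler = qmc.LatinHypercube(d=3, seed=42)   # three random variables
+    samples = sampler.random(n=6)                # six samples in the unit cube
+
+    # scale to the actual ranges of X, Y, and Z, e.g. [0, 10] each
+    scaled = qmc.scale(samples, l_bounds=[0, 0, 0], u_bounds=[10, 10, 10])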
+  
+
+LHS Class
+---------
+
+The :class:`.LHS` class is imported by::
+    
+    from psimpy.sampler.latin import LHS
+
+Methods
+^^^^^^^
+.. autoclass:: psimpy.sampler.latin.LHS
+    :members: sample
+
+
+.. warning:: The :class:`.LHS` class considers each random variable being uniformly
+    distributed in its range. If this is not the case, one needs to transform picked
+    samples accordingly. 
\ No newline at end of file
diff --git a/docs/source/sampler/metropolis_hastings.rst b/docs/source/sampler/metropolis_hastings.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cedb0b529569f0f5ac249ebac1ba5b45d1808491
--- /dev/null
+++ b/docs/source/sampler/metropolis_hastings.rst
@@ -0,0 +1,35 @@
+Metropolis Hastings Sampling
+============================
+
+Metropolis Hastings sampling is a widely used Markov Chain Monte Carlo (MCMC)
+algorithm for generating a sequence of samples from a target probability
+distribution where direct sampling is difficult. The samples can be used to
+estimate properties of the target distribution, like its mean, variance, and higher
+moments. The samples can also be used to approximate the target distribution,
+for example via a histogram. The algorithm works by constructing a Markov chain
+with a stationary distribution equal to the target distribution. 
+
+Steps of the algorithm are as follows:
+
+  1. Choose an initial state for the Markov chain, e.g. a random point in the parameter space.
+  2. At each iteration, propose a new state by sampling from a proposal distribution.
+  3. Calculate the acceptance probability.
+  4. Accept or reject the proposed state based on the acceptance probability.
+  5. Repeat steps 2-4 for a large number of iterations to obtain a sequence of samples.
+  6. Apply "burn-in" and "thinning".
+
+Final samples from the Markov chain can then be used to estimate the target
+distribution or its properties.
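+
+A minimal sketch of these steps for a one-dimensional target (a standard normal
+with a Gaussian random-walk proposal; all names and tuning values are
+illustrative, not the interface of :class:`.MetropolisHastings`)::
+
+    import numpy as np
+
+    rng = np.random.default_rng(0)
+
+    def ln_target(x):
+        return -0.5 * x**2   # unnormalized log density of a standard normal
+
+    n_iter, step = 10000, 1.0
+    chain = np.empty(n_iter)
+    x = 0.0                                          # step 1: initial state
+    for i in range(n_iter):
+        x_new = x + step * rng.standard_normal()     # step 2: propose a new state
+        ln_alpha = ln_target(x_new) - ln_target(x)   # step 3: acceptance probability
+        if np.log(rng.random()) < ln_alpha:          # step 4: accept or reject
+            x = x_new
+        chain[i] = x                                 # step 5: record the chain
+
+    samples = chain[2000::5]                         # step 6: burn-in and thinning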
+       
+
+MetropolisHastings Class
+------------------------
+
+The :class:`.MetropolisHastings` class is imported by::
+    
+    from psimpy.sampler.metropolis_hastings import MetropolisHastings
+
+Methods
+^^^^^^^
+.. autoclass:: psimpy.sampler.metropolis_hastings.MetropolisHastings
+    :members: sample
\ No newline at end of file
diff --git a/docs/source/sampler/saltelli.rst b/docs/source/sampler/saltelli.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d875a0012bf89b5c2e3c05b07ea82a1d78d543a4
--- /dev/null
+++ b/docs/source/sampler/saltelli.rst
@@ -0,0 +1,19 @@
+Saltelli Sampling
+=================
+
+Saltelli sampling is a method to draw samples for the purpose of Sobol'
+sensitivity analysis (variance-based sensitivity analysis). A detailed description
+of the theory can be found in :cite:t:`Saltelli2002` and :cite:t:`Saltelli2010`.
+The :class:`.Saltelli` class relies on the Python package `SALib` :cite:p:`Herman2017`.
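+
+To give an impression of the underlying routine, the corresponding `SALib` call
+looks as follows (a sketch of `SALib` usage, not of the :class:`.Saltelli`
+class itself)::
+
+    from SALib.sample import saltelli
+
+    problem = {
+        'num_vars': 2,
+        'names': ['x1', 'x2'],
+        'bounds': [[0, 1], [0, 1]],
+    }
+    X = saltelli.sample(problem, 1024)   # N*(2D+2) rows when second-order indices are computed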
+
+Saltelli Class
+--------------
+
+The :class:`.Saltelli` class is imported by::
+    
+    from psimpy.sampler.saltelli import Saltelli
+
+Methods
+^^^^^^^
+.. autoclass:: psimpy.sampler.saltelli.Saltelli
+    :members: sample
\ No newline at end of file
diff --git a/docs/source/sensitivity/index.rst b/docs/source/sensitivity/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..75d67ede6d30e45824606740bddec4ec629b418e
--- /dev/null
+++ b/docs/source/sensitivity/index.rst
@@ -0,0 +1,16 @@
+***********
+Sensitivity
+***********
+
+This module hosts functionality for sensitivity analysis methods.
+
+Currently implemented classes are:
+
+* :class:`.SobolAnalyze`: Compute Sobol' sensitivity indices.
+
+.. toctree::
+   :maxdepth: 1
+   :hidden:
+   :caption: Sensitivity
+
+    Sobol Analyze <sobol>
\ No newline at end of file
diff --git a/docs/source/sensitivity/sobol.rst b/docs/source/sensitivity/sobol.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b140e4d1f0df7f11e4570db3b8d664fcf09af393
--- /dev/null
+++ b/docs/source/sensitivity/sobol.rst
@@ -0,0 +1,19 @@
+Sobol' Sensitivity Analysis
+===========================
+
+This module is used to compute Sobol' indices given model (e.g. simulator or emulator)
+outputs evaluated at chosen Saltelli samples (see the :class:`.Saltelli` class).
+A detailed description of the theory can be found in :cite:t:`Saltelli2002` and :cite:t:`Saltelli2010`.
+The :class:`.SobolAnalyze` class relies on the Python package `SALib` :cite:p:`Herman2017`.
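+
+For orientation, the underlying `SALib` computation looks as follows (a sketch
+of `SALib` usage with a toy model, not of the :class:`.SobolAnalyze` class
+itself)::
+
+    import numpy as np
+    from SALib.sample import saltelli
+    from SALib.analyze import sobol
+
+    problem = {'num_vars': 2, 'names': ['x1', 'x2'], 'bounds': [[0, 1], [0, 1]]}
+    X = saltelli.sample(problem, 1024)
+    Y = np.sum(X, axis=1)        # toy model evaluated at the Saltelli samples
+    Si = sobol.analyze(problem, Y)
+    print(Si['S1'], Si['ST'])    # first-order and total-order indices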
+
+SobolAnalyze Class
+------------------
+
+The :class:`.SobolAnalyze` class is imported by::
+    
+    from psimpy.sensitivity.sobol import SobolAnalyze
+
+Methods
+^^^^^^^
+.. autoclass:: psimpy.sensitivity.sobol.SobolAnalyze
+    :members: run
\ No newline at end of file
diff --git a/docs/source/simulator/index.rst b/docs/source/simulator/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..66c9249046a14e4806adb5b0a54202755a303285
--- /dev/null
+++ b/docs/source/simulator/index.rst
@@ -0,0 +1,38 @@
+*********
+Simulator
+*********
+
+Computer simulations are widely used to study real-world systems in many fields.
+Such a simulation model, a so-called `simulator`, essentially defines a mapping from
+an input space :math:`\mathcal{X}` to an output space :math:`\mathcal{Y}`.
+More specifically, given a simulator :math:`\mathbf{y}=\mathbf{f}(\mathbf{x})`,
+it maps a :math:`p`-dimensional input :math:`\mathbf{x} \in \mathcal{X} \subset{\mathbb{R}^p}`
+to a :math:`k`-dimensional output :math:`\mathbf{y} \in \mathcal{Y} \subset{\mathbb{R}^k}`.
+The mapping :math:`\mathbf{f}` can vary from simple linear equations which can be solved
+analytically to complex partial differential equations which require numerical schemes
+such as the finite element method.
+
+This module hosts simulators and functionality for running simulators. Currently
+implemented classes are:
+
+* :class:`.RunSimulator`: Serial and parallel execution of simulators.
+
+* :class:`.MassPointModel`: Mass point model for landslide run-out simulation.
+
+* :class:`.Ravaflow24Mixture`: Voellmy-type shallow flow model for landslide run-out simulation.
+
+.. toctree::
+   :maxdepth: 1
+   :hidden:
+   :caption: Simulator
+
+    Run Simulator <run_simulator>
+    Mass Point Model <mass_point_model>
+    Ravaflow Mixture Model <ravaflow24>
+
+.. note::
+   :class:`.MassPointModel` and :class:`.Ravaflow24Mixture` are only relevant if
+   the user wants to perform run-out simulation. :class:`.MassPointModel` is purely
+   Python-based and can be used right away. :class:`.Ravaflow24Mixture` depends on 
+   `r.avaflow 2.4 <https://www.landslidemodels.org/r.avaflow/>`_, which needs to be
+   installed by the user.
\ No newline at end of file
diff --git a/docs/source/simulator/mass_point_model.rst b/docs/source/simulator/mass_point_model.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e56f07ddf74b9ec09ae6d59316c70be30b45e41b
--- /dev/null
+++ b/docs/source/simulator/mass_point_model.rst
@@ -0,0 +1,86 @@
+Mass Point Model
+================
+
+Theory of mass point model
+--------------------------
+
+A `mass point model` is an extremely simplified model to simulate mass movement
+on a given topography. It assumes that the flow mass is condensed to a single point.
+
+Let :math:`Z(x,y)` define a topography in a Cartesian
+coordinate system :math:`\{x, y, z\}`. It induces a local non-orthogonal
+coordinate system :math:`\{T_x, T_y, T_n\}`, where :math:`T_x` and :math:`T_y`
+denote the surface tangent directions and :math:`T_n` denotes the surface
+normal direction, as shown by the following figure :cite:p:`Zhao2021b`:
+
+.. image:: ../_static/coordinate_system.png
+   :width: 300
+   :alt: a surface-induced coordinate system
+   :align: center
+
+The governing equations describing the movement of a mass point on the surface
+are given by
+
+.. math:: \frac{d x}{d t} = U_{T_x} \frac{1}{\sqrt{1+\left(\partial_x Z(x, y)\right)^2}}
+    :label: dxdt
+
+.. math:: \frac{d y}{d t} = U_{T_y} \frac{1}{\sqrt{1+\left(\partial_y Z(x, y)\right)^2}}
+    :label: dydt
+    
+.. math:: \frac{d U_{T_x}}{d t}=g_{T_x} - \frac{U_{T_x}}{\|\boldsymbol{U}\|} 
+    \cdot \left(\mu\left(g_N+\boldsymbol{U}^T \boldsymbol{K} \boldsymbol{U}\right) + 
+    \frac{g}{\xi}\|\boldsymbol{U}\|^2\right)
+    :label: dUTxdt
+    
+.. math:: \frac{d U_{T_y}}{d t} = g_{T_y} - \frac{U_{T_y}}{\|\boldsymbol{U}\|} 
+    \cdot\left(\mu\left(g_N+\boldsymbol{U}^T \boldsymbol{K} \boldsymbol{U}\right) + 
+    \frac{g}{\xi}\|\boldsymbol{U}\|^2\right)
+    :label: dUTydt
+
+where :math:`\mu` and :math:`\xi` are the dry-Coulomb and turbulent friction
+coefficients respectively (the Voellmy friction model is used here).
+:math:`\boldsymbol{K}` is the curvature tensor :cite:p:`Fischer2012`.
+:math:`\boldsymbol{U}` represents the mass point's velocity. :math:`U_{T_x}`
+and :math:`U_{T_y}` are the velocity components along the :math:`T_x` and
+:math:`T_y` directions respectively.
+
+Equations :eq:`dxdt` to :eq:`dUTydt` can be rewritten in vector format as
+
+.. math:: \frac{d \boldsymbol{\alpha}}{d t}=\boldsymbol{f}(t, \boldsymbol{\alpha})
+    :label: dalphadt
+
+.. math:: \boldsymbol{\alpha}=\left[\begin{array}{c}
+    x(t) \\
+    y(t) \\
+    U_{T_x}(t) \\
+    U_{T_y}(t)
+    \end{array}\right]
+    :label: alpha
+    
+.. math:: \boldsymbol{f}=\left[\begin{array}{c}
+    U_{T_x} \frac{1}{\sqrt{1+\left(\partial_x Z(x, y)\right)^2}} \\
+    U_{T_y} \frac{1}{\sqrt{1+\left(\partial_y Z(x, y)\right)^2}} \\
+    g_{T_x}-\frac{U_{T_x}}{\|\boldsymbol{U}\|} \cdot\left(\mu\left(g_N+\boldsymbol{U}^T 
+    \boldsymbol{K} \boldsymbol{U}\right)+\frac{g}{\xi}\|\boldsymbol{U}\|^2\right) \\
+    g_{T_y}-\frac{U_{T_y}}{\|\boldsymbol{U}\|} \cdot\left(\mu\left(g_N+\boldsymbol{U}^T 
+    \boldsymbol{K} \boldsymbol{U}\right)+\frac{g}{\xi}\|\boldsymbol{U}\|^2\right)
+    \end{array}\right]
+    :label: vector-f
+
+Equation :eq:`dalphadt` defines an initial value problem. Given the initial
+value :math:`\boldsymbol{\alpha}_0`, the system can be solved forward in time
+using numerical schemes such as Runge-Kutta methods. Class :class:`.MassPointModel`
+utilizes the explicit Runge-Kutta method ``dopri5`` provided by
+:class:`scipy.integrate.ode`.
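+
+A minimal sketch of solving such an initial value problem with ``dopri5``
+(using a toy right-hand side standing in for equation :eq:`vector-f`)::
+
+    from scipy.integrate import ode
+
+    def f(t, alpha):
+        # toy linear decay instead of the actual right-hand side
+        return [-0.5 * a for a in alpha]
+
+    solver = ode(f).set_integrator('dopri5')
+    solver.set_initial_value([1.0, 0.0, 0.1, 0.1], 0.0)   # alpha_0 at t = 0
+
+    while solver.successful() and solver.t < 10.0:
+        solver.integrate(solver.t + 0.1)   # advance the solution by dt = 0.1
+
+    print(solver.t, solver.y)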
+
+
+MassPointModel Class
+--------------------
+
+The :class:`.MassPointModel` class is imported by::
+    
+    from psimpy.simulator.mass_point_model import MassPointModel
+
+Methods
+^^^^^^^
+.. autoclass:: psimpy.simulator.mass_point_model.MassPointModel
+    :members: run
\ No newline at end of file
diff --git a/docs/source/simulator/ravaflow24.rst b/docs/source/simulator/ravaflow24.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fc5aa6df8a09e68c130602fb12c3c7be95eae34a
--- /dev/null
+++ b/docs/source/simulator/ravaflow24.rst
@@ -0,0 +1,24 @@
+Ravaflow24 Mixture Model
+========================
+
+`r.avaflow 2.4` is GIS-supported open-source software for mass flow modeling
+developed by :cite:t:`Mergili2017`. For more information, see its official
+user manual `here <https://www.landslidemodels.org/r.avaflow/>`_.
+
+In :py:mod:`PSimPy.simulator.ravaflow24`, we have implemented the class
+:class:`.Ravaflow24Mixture`. It provides a Python interface to directly run
+the `Voellmy-type shallow flow model` of `r.avaflow 2.4` from within Python.
+For the detailed theory of the `Voellmy-type shallow flow model`, one can refer
+to :cite:t:`Christen2010` and :cite:t:`Fischer2012`.
+
+Ravaflow24Mixture Class
+-----------------------
+
+The :class:`.Ravaflow24Mixture` class is imported by::
+    
+    from psimpy.simulator.ravaflow24 import Ravaflow24Mixture
+
+Methods
+^^^^^^^
+.. autoclass:: psimpy.simulator.ravaflow24.Ravaflow24Mixture
+    :members: preprocess, run, extract_impact_area, extract_qoi_max, extract_qoi_max_loc
\ No newline at end of file
diff --git a/docs/source/simulator/run_simulator.rst b/docs/source/simulator/run_simulator.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5a24b32af9a193e35ee1bddbeb60e8a74995be4f
--- /dev/null
+++ b/docs/source/simulator/run_simulator.rst
@@ -0,0 +1,30 @@
+Run Simulator
+=============
+
+A `simulator` which describes a certain real-world system is often subject to
+uncertainties. Uncertainty-related analyses require running the `simulator`
+multiple times at different variable input points. Class :class:`.RunSimulator`
+implemented in this module provides functionality to execute a `simulator`
+sequentially or in parallel at a number of variable input points.
+
+The user needs to define their `simulator`, implement an interface (essentially
+a Python function) to call the `simulator` from within Python and return the
+output of interest, and inform :class:`.RunSimulator` of the signature of this
+interface, as sketched below.
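+
+In sketch form, such an interface is an ordinary Python function whose
+parameters are the variable inputs, the fixed inputs, and optionally a naming
+parameter (all names below are illustrative)::
+
+    import numpy as np
+
+    def simulator(prefix, var_a, var_b, fixed_c):
+        """Run the underlying model and return the output of interest.
+
+        prefix could be used to name any files the model writes.
+        """
+        result = var_a + var_b * fixed_c   # placeholder for the actual model call
+        return np.array([result])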
+
+
+RunSimulator Class
+------------------
+
+The :class:`.RunSimulator` class is imported by::
+    
+    from psimpy.simulator.run_simulator import RunSimulator
+
+Methods
+^^^^^^^
+.. autoclass:: psimpy.simulator.run_simulator.RunSimulator
+    :members: serial_run, parallel_run
+
+Attributes
+^^^^^^^^^^
+.. autoattribute:: psimpy.simulator.run_simulator.RunSimulator.var_samples
+.. autoattribute:: psimpy.simulator.run_simulator.RunSimulator.outputs
\ No newline at end of file
diff --git a/poetry.lock b/poetry.lock
index 6c6dd5fe799e87dca2715bac26b32fae26aeba75..24985a07bf1908aaeb4aa30921c48c6a64db6f5e 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,3 +1,11 @@
+[[package]]
+name = "alabaster"
+version = "0.7.12"
+description = "A configurable sidebar-enabled Sphinx theme"
+category = "dev"
+optional = false
+python-versions = "*"
+
 [[package]]
 name = "atomicwrites"
 version = "1.4.0"
@@ -20,6 +28,17 @@ docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"]
 tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"]
 tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"]
 
+[[package]]
+name = "babel"
+version = "2.11.0"
+description = "Internationalization utilities"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+pytz = ">=2015.7"
+
 [[package]]
 name = "beartype"
 version = "0.11.0"
@@ -35,6 +54,14 @@ doc-rtd = ["sphinx-rtd-theme (==0.5.1)", "sphinx (==4.1.0)"]
 dev = ["numpy", "typing-extensions", "mypy (>=0.800)", "sphinx (>=4.1.0)", "tox (>=3.20.1)", "pytest (>=4.0.0)", "sphinx", "coverage (>=5.5)"]
 all = ["typing-extensions (>=3.10.0.0)"]
 
+[[package]]
+name = "certifi"
+version = "2022.9.24"
+description = "Python package for providing Mozilla's CA Bundle."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
 [[package]]
 name = "cffi"
 version = "1.15.0"
@@ -46,13 +73,24 @@ python-versions = "*"
 [package.dependencies]
 pycparser = "*"
 
+[[package]]
+name = "charset-normalizer"
+version = "2.1.1"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+category = "dev"
+optional = false
+python-versions = ">=3.6.0"
+
+[package.extras]
+unicode_backport = ["unicodedata2"]
+
 [[package]]
 name = "colorama"
-version = "0.4.4"
+version = "0.4.6"
 description = "Cross-platform colored terminal text."
 category = "dev"
 optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
 
 [[package]]
 name = "contourpy"
@@ -105,6 +143,14 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*,
 [package.extras]
 graph = ["objgraph (>=1.7.2)"]
 
+[[package]]
+name = "docutils"
+version = "0.17.1"
+description = "Docutils -- Python Documentation Utilities"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
 [[package]]
 name = "fonttools"
 version = "4.33.3"
@@ -127,6 +173,38 @@ ufo = ["fs (>=2.2.0,<3)"]
 unicode = ["unicodedata2 (>=14.0.0)"]
 woff = ["zopfli (>=0.1.4)", "brotlicffi (>=0.8.0)", "brotli (>=1.0.1)"]
 
+[[package]]
+name = "idna"
+version = "3.4"
+description = "Internationalized Domain Names in Applications (IDNA)"
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+
+[[package]]
+name = "imagesize"
+version = "1.4.1"
+description = "Getting image size from png/jpeg/jpeg2000/gif file"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[[package]]
+name = "importlib-metadata"
+version = "5.1.0"
+description = "Read metadata from Python packages"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+zipp = ">=0.5"
+
+[package.extras]
+docs = ["sphinx (>=3.5)", "jaraco.packaging (>=9)", "rst.linker (>=1.9)", "furo", "jaraco.tidelift (>=1.4)"]
+perf = ["ipython"]
+testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "flake8 (<5)", "pytest-cov", "pytest-enabler (>=1.3)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "pytest-flake8", "importlib-resources (>=1.3)"]
+
 [[package]]
 name = "iniconfig"
 version = "1.1.1"
@@ -157,6 +235,17 @@ category = "main"
 optional = false
 python-versions = ">=3.7"
 
+[[package]]
+name = "latexcodec"
+version = "2.0.1"
+description = "A lexer and codec to work with LaTeX code in Python."
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[package.dependencies]
+six = ">=1.4.1"
+
 [[package]]
 name = "markupsafe"
 version = "2.1.1"
@@ -304,6 +393,34 @@ category = "dev"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
 
+[[package]]
+name = "pybtex"
+version = "0.24.0"
+description = "A BibTeX-compatible bibliography processor in Python"
+category = "dev"
+optional = false
+python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*"
+
+[package.dependencies]
+latexcodec = ">=1.0.4"
+PyYAML = ">=3.01"
+six = "*"
+
+[package.extras]
+test = ["pytest"]
+
+[[package]]
+name = "pybtex-docutils"
+version = "1.0.2"
+description = "A docutils backend for pybtex."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+docutils = ">=0.8"
+pybtex = ">=0.16"
+
 [[package]]
 name = "pycparser"
 version = "2.21"
@@ -312,6 +429,17 @@ category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
 
+[[package]]
+name = "pygments"
+version = "2.13.0"
+description = "Pygments is a syntax highlighting package written in Python."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+plugins = ["importlib-metadata"]
+
 [[package]]
 name = "pyparsing"
 version = "3.0.9"
@@ -403,6 +531,32 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
 [package.dependencies]
 tzdata = {version = "*", markers = "python_version >= \"3.6\""}
 
+[[package]]
+name = "pyyaml"
+version = "6.0"
+description = "YAML parser and emitter for Python"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "requests"
+version = "2.28.1"
+description = "Python HTTP for Humans."
+category = "dev"
+optional = false
+python-versions = ">=3.7, <4"
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = ">=2,<3"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<1.27"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use_chardet_on_py3 = ["chardet (>=3.0.2,<6)"]
+
 [[package]]
 name = "rpy2"
 version = "3.5.2"
@@ -479,6 +633,174 @@ category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
 
+[[package]]
+name = "snowballstemmer"
+version = "2.2.0"
+description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms."
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "sphinx"
+version = "5.3.0"
+description = "Python documentation generator"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+alabaster = ">=0.7,<0.8"
+babel = ">=2.9"
+colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""}
+docutils = ">=0.14,<0.20"
+imagesize = ">=1.3"
+importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""}
+Jinja2 = ">=3.0"
+packaging = ">=21.0"
+Pygments = ">=2.12"
+requests = ">=2.5.0"
+snowballstemmer = ">=2.0"
+sphinxcontrib-applehelp = "*"
+sphinxcontrib-devhelp = "*"
+sphinxcontrib-htmlhelp = ">=2.0.0"
+sphinxcontrib-jsmath = "*"
+sphinxcontrib-qthelp = "*"
+sphinxcontrib-serializinghtml = ">=1.1.5"
+
+[package.extras]
+docs = ["sphinxcontrib-websupport"]
+lint = ["flake8 (>=3.5.0)", "flake8-comprehensions", "flake8-bugbear", "flake8-simplify", "isort", "mypy (>=0.981)", "sphinx-lint", "docutils-stubs", "types-typed-ast", "types-requests"]
+test = ["pytest (>=4.6)", "html5lib", "typed-ast", "cython"]
+
+[[package]]
+name = "sphinx-autodoc-typehints"
+version = "1.19.5"
+description = "Type hints (PEP 484) support for the Sphinx autodoc extension"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+sphinx = ">=5.3"
+
+[package.extras]
+docs = ["furo (>=2022.9.29)", "sphinx-autodoc-typehints (>=1.19.4)", "sphinx (>=5.3)"]
+testing = ["covdefaults (>=2.2)", "coverage (>=6.5)", "diff-cover (>=7.0.1)", "nptyping (>=2.3.1)", "pytest-cov (>=4)", "pytest (>=7.2)", "sphobjinv (>=2.2.2)", "typing-extensions (>=4.4)"]
+type-comment = ["typed-ast (>=1.5.4)"]
+
+[[package]]
+name = "sphinx-gallery"
+version = "0.11.1"
+description = "A Sphinx extension that builds an HTML version of any Python script and puts it into an examples gallery."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+sphinx = ">=3"
+
+[[package]]
+name = "sphinx-rtd-theme"
+version = "1.1.1"
+description = "Read the Docs theme for Sphinx"
+category = "dev"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
+
+[package.dependencies]
+docutils = "<0.18"
+sphinx = ">=1.6,<6"
+
+[package.extras]
+dev = ["transifex-client", "sphinxcontrib-httpdomain", "bump2version", "wheel"]
+
+[[package]]
+name = "sphinxcontrib-applehelp"
+version = "1.0.2"
+description = "sphinxcontrib-applehelp is a sphinx extension which outputs Apple help books"
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+
+[package.extras]
+lint = ["flake8", "mypy", "docutils-stubs"]
+test = ["pytest"]
+
+[[package]]
+name = "sphinxcontrib-bibtex"
+version = "2.5.0"
+description = "Sphinx extension for BibTeX style citations."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+docutils = ">=0.8"
+importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""}
+pybtex = ">=0.24"
+pybtex-docutils = ">=1.0.0"
+Sphinx = ">=2.1"
+
+[[package]]
+name = "sphinxcontrib-devhelp"
+version = "1.0.2"
+description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document."
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+
+[package.extras]
+lint = ["flake8", "mypy", "docutils-stubs"]
+test = ["pytest"]
+
+[[package]]
+name = "sphinxcontrib-htmlhelp"
+version = "2.0.0"
+description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+lint = ["flake8", "mypy", "docutils-stubs"]
+test = ["pytest", "html5lib"]
+
+[[package]]
+name = "sphinxcontrib-jsmath"
+version = "1.0.1"
+description = "A sphinx extension which renders display math in HTML via JavaScript"
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+
+[package.extras]
+test = ["pytest", "flake8", "mypy"]
+
+[[package]]
+name = "sphinxcontrib-qthelp"
+version = "1.0.3"
+description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document."
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+
+[package.extras]
+lint = ["flake8", "mypy", "docutils-stubs"]
+test = ["pytest"]
+
+[[package]]
+name = "sphinxcontrib-serializinghtml"
+version = "1.1.5"
+description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)."
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+
+[package.extras]
+lint = ["flake8", "mypy", "docutils-stubs"]
+test = ["pytest"]
+
 [[package]]
 name = "tomli"
 version = "2.0.1"
@@ -519,12 +841,41 @@ tzdata = {version = "*", markers = "platform_system == \"Windows\""}
 devenv = ["black", "pyroma", "pytest-cov", "zest.releaser"]
 test = ["pytest-mock (>=3.3)", "pytest (>=4.3)"]
 
+[[package]]
+name = "urllib3"
+version = "1.26.13"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+
+[package.extras]
+brotli = ["brotlicffi (>=0.8.0)", "brotli (>=1.0.9)", "brotlipy (>=0.6.0)"]
+secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "urllib3-secure-extra", "ipaddress"]
+socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
+
+[[package]]
+name = "zipp"
+version = "3.11.0"
+description = "Backport of pathlib-compatible object wrapper for zip files"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.extras]
+docs = ["sphinx (>=3.5)", "jaraco.packaging (>=9)", "rst.linker (>=1.9)", "furo", "jaraco.tidelift (>=1.4)"]
+testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "flake8 (<5)", "pytest-cov", "pytest-enabler (>=1.3)", "jaraco.itertools", "func-timeout", "jaraco.functools", "more-itertools", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "pytest-flake8"]
+
 [metadata]
 lock-version = "1.1"
 python-versions = ">=3.9,<3.11"
-content-hash = "0299c6f2e2bd407fac9d3aeaed4424405b2cfd542add8beb86b89440403ff622"
+content-hash = "cdf3daaa7e2621481dec8eab36dad474fafda9823c8982a6b42f1e7d10004ca5"
 
 [metadata.files]
+alabaster = [
+    {file = "alabaster-0.7.12-py2.py3-none-any.whl", hash = "sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359"},
+    {file = "alabaster-0.7.12.tar.gz", hash = "sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02"},
+]
 atomicwrites = [
     {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"},
     {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"},
@@ -533,7 +884,9 @@ attrs = [
     {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"},
     {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"},
 ]
+babel = []
 beartype = []
+certifi = []
 cffi = [
     {file = "cffi-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962"},
     {file = "cffi-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:23cfe892bd5dd8941608f93348c0737e369e51c100d03718f108bf1add7bd6d0"},
@@ -586,10 +939,8 @@ cffi = [
     {file = "cffi-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:3773c4d81e6e818df2efbc7dd77325ca0dcb688116050fb2b3011218eda36139"},
     {file = "cffi-1.15.0.tar.gz", hash = "sha256:920f0d66a896c2d99f0adbb391f990a84091179542c205fa53ce5787aff87954"},
 ]
-colorama = [
-    {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"},
-    {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"},
-]
+charset-normalizer = []
+colorama = []
 contourpy = []
 coverage = []
 cycler = [
@@ -600,10 +951,17 @@ dill = [
     {file = "dill-0.3.5.1-py2.py3-none-any.whl", hash = "sha256:33501d03270bbe410c72639b350e941882a8b0fd55357580fbc873fba0c59302"},
     {file = "dill-0.3.5.1.tar.gz", hash = "sha256:d75e41f3eff1eee599d738e76ba8f4ad98ea229db8b085318aa2b3333a208c86"},
 ]
+docutils = [
+    {file = "docutils-0.17.1-py2.py3-none-any.whl", hash = "sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61"},
+    {file = "docutils-0.17.1.tar.gz", hash = "sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125"},
+]
 fonttools = [
     {file = "fonttools-4.33.3-py3-none-any.whl", hash = "sha256:f829c579a8678fa939a1d9e9894d01941db869de44390adb49ce67055a06cc2a"},
     {file = "fonttools-4.33.3.zip", hash = "sha256:c0fdcfa8ceebd7c1b2021240bd46ef77aa8e7408cf10434be55df52384865f8e"},
 ]
+idna = []
+imagesize = []
+importlib-metadata = []
 iniconfig = [
     {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"},
     {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"},
@@ -657,6 +1015,7 @@ kiwisolver = [
     {file = "kiwisolver-1.4.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:42f6ef9b640deb6f7d438e0a371aedd8bef6ddfde30683491b2e6f568b4e884e"},
     {file = "kiwisolver-1.4.2.tar.gz", hash = "sha256:7f606d91b8a8816be476513a77fd30abe66227039bd6f8b406c348cb0247dcc9"},
 ]
+latexcodec = []
 markupsafe = [
     {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812"},
     {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a"},
@@ -826,10 +1185,13 @@ py = [
     {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"},
     {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"},
 ]
+pybtex = []
+pybtex-docutils = []
 pycparser = [
     {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
     {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
 ]
+pygments = []
 pyparsing = [
     {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"},
     {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"},
@@ -855,6 +1217,42 @@ pytz-deprecation-shim = [
     {file = "pytz_deprecation_shim-0.1.0.post0-py2.py3-none-any.whl", hash = "sha256:8314c9692a636c8eb3bda879b9f119e350e93223ae83e70e80c31675a0fdc1a6"},
     {file = "pytz_deprecation_shim-0.1.0.post0.tar.gz", hash = "sha256:af097bae1b616dde5c5744441e2ddc69e74dfdcb0c263129610d85b87445a59d"},
 ]
+pyyaml = [
+    {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"},
+    {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"},
+    {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"},
+    {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"},
+    {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"},
+    {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"},
+    {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"},
+    {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"},
+    {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"},
+    {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"},
+    {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"},
+    {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"},
+    {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"},
+    {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"},
+    {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"},
+    {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"},
+    {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"},
+    {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"},
+    {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"},
+    {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"},
+    {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"},
+    {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"},
+    {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"},
+    {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"},
+    {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"},
+    {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"},
+    {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"},
+    {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"},
+    {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"},
+    {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"},
+    {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"},
+    {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"},
+    {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
+]
+requests = []
 rpy2 = [
     {file = "rpy2-3.5.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:76af1c3d93438017ffbba856e20e1703c2be2ea502a52f408dc750f4ffff143f"},
     {file = "rpy2-3.5.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:4e5ea7d571674837da2dc3c2a76eef7937fd292b387ab12ef16a149c4d47e24f"},
@@ -896,6 +1294,39 @@ six = [
     {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
     {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
 ]
+snowballstemmer = [
+    {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"},
+    {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"},
+]
+sphinx = []
+sphinx-autodoc-typehints = []
+sphinx-gallery = []
+sphinx-rtd-theme = []
+sphinxcontrib-applehelp = [
+    {file = "sphinxcontrib-applehelp-1.0.2.tar.gz", hash = "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"},
+    {file = "sphinxcontrib_applehelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a"},
+]
+sphinxcontrib-bibtex = []
+sphinxcontrib-devhelp = [
+    {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"},
+    {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"},
+]
+sphinxcontrib-htmlhelp = [
+    {file = "sphinxcontrib-htmlhelp-2.0.0.tar.gz", hash = "sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2"},
+    {file = "sphinxcontrib_htmlhelp-2.0.0-py2.py3-none-any.whl", hash = "sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07"},
+]
+sphinxcontrib-jsmath = [
+    {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"},
+    {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"},
+]
+sphinxcontrib-qthelp = [
+    {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"},
+    {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"},
+]
+sphinxcontrib-serializinghtml = [
+    {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"},
+    {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"},
+]
 tomli = [
     {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
     {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
@@ -909,3 +1340,5 @@ tzlocal = [
     {file = "tzlocal-4.2-py3-none-any.whl", hash = "sha256:89885494684c929d9191c57aa27502afc87a579be5cdd3225c77c463ea043745"},
     {file = "tzlocal-4.2.tar.gz", hash = "sha256:ee5842fa3a795f023514ac2d801c4a81d1743bbe642e3940143326b3a00addd7"},
 ]
+urllib3 = []
+zipp = []
diff --git a/pyproject.toml b/pyproject.toml
index 38ec7ba9e690ce5d8a1012f8b600029b1ab6e583..ef43343d92abdf1fac75a7058fbc99e927a62476 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,6 +21,10 @@ pytest = "^7.1.1"
 pytest-cov = "^3.0.0"
 pytest-order = "^1.0.1"
 matplotlib = "^3.6.1"
+sphinx-rtd-theme = "^1.1.1"
+sphinx-autodoc-typehints = "^1.19.5"
+sphinx-gallery = "^0.11.1"
+sphinxcontrib-bibtex = "^2.5.0"
 
 [build-system]
 requires = ["poetry-core>=1.0.0"]
diff --git a/src/psimpy/emulator/robustgasp.py b/src/psimpy/emulator/robustgasp.py
index 8a811ed5a03ca11cd56927610bfcbecbcf642c40..8532604379246a8464f1b695c85f76f2d44eb3cb 100644
--- a/src/psimpy/emulator/robustgasp.py
+++ b/src/psimpy/emulator/robustgasp.py
@@ -34,74 +34,71 @@ class RobustGaSPBase:
         ndim : int
             Input parameter dimension.
         zero_mean : str, optional 
-            `No` - the mean is not zero. It is then specified by `trend`.
-            `Yes` - the mean is zero.
-            Default is 'No'.
+            If `'Yes'`, the mean is set to zero. If `'No'`, the mean is
+            specified by ``trend``. Default is `'No'`.
         nugget : float, optional
             Nugget variance ratio. 
-            If `nugget` is 0, there is no nugget or the nugget is estimated.
-            If `nugget` is not 0, it means a fixed nugget.
-            Default is 0.
+            If :math:`0`, there is no nugget or the nugget is estimated.
+            If not :math:`0`, the nugget is fixed to the given value.
+            Default is :math:`0`.
         nugget_est : bool, optional
-            True means the nugget should be estimated.
-            False means the nugget is fixed or not estimated.
-            Default is False.
+            `True` means ``nugget`` should be estimated.
+            `False` means ``nugget`` is fixed or not estimated.
+            Default is `False`.
         range_par : numpy array, optional
-            Range parameters. One dimension array of length `ndim`. If given,
-            range parameters are set to the given values rather than estimated
-            based on training data.
+            Range parameters. One-dimensional :class:`numpy.ndarray` of length
+            ``ndim``. If given, range parameters are set to the given values
+            rather than estimated based on training data.
             By default `NA`, meaning that range parameters are estimated based
             on training data.
         method : str, optional
             Method used for parameter estimation.
-            'post_mode' - marginal posterior mode estimation. Resulted emulator
+            `'post_mode'`: marginal posterior mode estimation; the resulting emulator
             is a Student's t-process.
-            'mmle' - maximum marginal likelihood estimation. Resulted emulator
+            `'mmle'`: maximum marginal likelihood estimation; the resulting emulator
             is a Student's t-process.
-            'mle' - maximum likelihood estimation. Resulted emulator is a
+            `'mle'`: maximum likelihood estimation; the resulting emulator is a
             Gaussian process.
-            Default is 'post_mode'.
+            Default is `'post_mode'`.
         prior_choice : str, optional
-            'ref_xi', 'ref_gamma', or 'ref_approx'.
-            Default is 'ref_approx'.
+            `'ref_xi'`, `'ref_gamma'`, or `'ref_approx'`.
+            Default is `'ref_approx'`.
         a : float, optional
-            Prior parameters in the jointly robust prior. Default 0.2.
+            Prior parameters in the jointly robust prior. Default is :math:`0.2`.
         b : float, optional
             Prior parameters in the jointly robust prior.
-            Default value is :math:`ntrain^{-1/ndim}(a+ndim)`, where `ntrain` is
-            the number of training input points and `ndim` is input parameter
-            dimension.
+            Default value is :math:`ntrain^{-1/ndim}(a+ndim)`, where ``ntrain``
+            is the number of training input points and ``ndim`` is input
+            parameter dimension.
         kernel_type : str, optional
-            'matern_3_2' - Matern correlation with roughness parameter 3/2.
-            'matern_5_2' - Matern correlation with roughness parameter 5/2.
-            'pow_exp' - power exponential correlation with roughness parameter
-                        'alpha'.
-            Default is 'matern_5_2'.
+            `'matern_3_2'`: Matern correlation with roughness parameter :math:`3/2`.
+            `'matern_5_2'`: Matern correlation with roughness parameter :math:`5/2`.
+            `'pow_exp'`: power exponential correlation with roughness parameter
+            ``alpha``.
+            Default is `'matern_5_2'`.
         isotropic : bool, optional
-            True - isotropic kernel.
-            False - separable kernel.
-            Default is False, namely separable kernel.
+            `True`: isotropic kernel. `False`: separable kernel.
+            Default is `False`.
         optimization : str, optional
             Optimization method for kernel parameters.
-            'lbfgs' - low storage version of the Broyden-Fletcher-Goldarb-Shanno
-                      method.
-            'nelder-mead' - Nelder and Mead method.
-            'brent' - Brent method for one-dimensional problems.
+            `'lbfgs'`: low-storage version of the Broyden-Fletcher-Goldfarb-Shanno
+            method.
+            `'nelder-mead'`: Nelder and Mead method.
+            `'brent'`: Brent method for one-dimensional problems.
         alpha : numpy array, optional
-            Roughness parameters in the 'pow_exp' correlation functions. One
-            dimension array of length `ndim`.
-            Default is a one dimension array with each entry being 1.9.
+            Roughness parameters in the `'pow_exp'` correlation functions.
+            One-dimensional :class:`numpy.ndarray` of length ``ndim``.
+            Default is a one-dimensional array with each entry being :math:`1.9`.
         lower_bound : bool
-            True - the default lower bounds of the inverse range parameters are
-                   used to constrain the optimization.
-            False - the optimization is unconstrained.
-            Default is True.
+            `True`: the default lower bounds of the inverse range parameters are
+            used to constrain the optimization.
+            `False`: the optimization is unconstrained.
+            Default is `True`.
         max_eval : int, optional
             Maximum number of steps to estimate the range and nugget parameters.
             Default is :math:`max(30,20+5*ndim)`.
         num_initial_values : int, optional
             Number of initial values of the kernel parameters in optimization.
-            Default is 2.
+            Default is :math:`2`.
         """
         self.ndim = ndim
 
@@ -169,20 +166,21 @@ class RobustGaSPBase:
         Parameters
         ----------
         design : numpy array
-            Training input points. Shape (ntrain, ndim).
-            `ntrain` is the number of training points, `ndim` is the input
-            dimension. If ndim=1, both (ntrain, 1) and (ntrain,) np.ndarray are
-            valid.
+            Training input points. Shape :code:`(ntrain, ndim)`.
+            ``ntrain`` is the number of training points, ``ndim`` is the input
+            dimension. If :math:`ndim=1`, both :code:`(ntrain, 1)` and
+            :code:`(ntrain,)` :class:`numpy.ndarray` are valid.
         response : numpy array
-            Training output points. Shape (ntrain, nresponse). `nresponse` is
-            the number of QoIs of a single simulation.
-            `ntrain` output points corresponding to `design`.
-            For child class `ScalarGaSP`, :math:`nresponse=1`
-            For child class `PPGaSP`, :math:`nresponse>1`
+            Training output points. Shape :code:`(ntrain, nresponse)`.
+            ``nresponse`` is the number of QoIs of a single simulation.
+            The ``ntrain`` output points correspond to ``design``.
+            For child class :class:`.ScalarGaSP`, :math:`nresponse=1`.
+            For child class :class:`.PPGaSP`, :math:`nresponse>1`.
         trend : numpy array
-            Basis function matrix. Shape (ntrain, q).
-            If None, a (ntrain, 1) matrix of ones is used.
-            If q=1, both (ntrain, 1) and (ntrain, ) np.ndarray are valid.
+            Basis function matrix. Shape :code:`(ntrain, q)`.
+            If `None`, a :code:`(ntrain, 1)` matrix of ones is used.
+            If :math:`q=1`, both :code:`(ntrain, 1)` and :code:`(ntrain,)`
+            :class:`numpy.ndarray` are valid.
         """
         if not (design.ndim == 1 or design.ndim == 2):
             raise ValueError("design must be 1d or 2d numpy array.")
@@ -232,13 +230,15 @@ class RobustGaSPBase:
         ----------
         testing_input : numpy array
             Input matrix to make prediction or draw samples.
-            Shape (ntest, ndim).
-            If ndim=1, both (ntest, 1) and (ntest,) np.ndarray are valid.
+            Shape :code:`(ntest, ndim)`.
+            If :math:`ndim=1`, both :code:`(ntest, 1)` and :code:`(ntest,)`
+            :class:`numpy.ndarray` are valid.
         testing_trend : numpy array
-            Basis function matrix corresponds to `testing_input`.
-            Shape (ntest, q).
-            By default (ntest, 1) matrix of ones.
-            If q=1, both (ntest, 1) and (ntest,) np.ndarray are valid.
+            Basis function matrix corresponding to ``testing_input``.
+            Shape :code:`(ntest, q)`.
+            By default :code:`(ntest, 1)` matrix of ones.
+            If :math:`q=1`, both :code:`(ntest, 1)` and :code:`(ntest,)`
+            :class:`numpy.ndarray` are valid.
         """
         if not (testing_input.ndim == 1 or testing_input.ndim == 2):
             raise ValueError("testing_input must be 1d or 2d numpy array.")
@@ -282,16 +282,18 @@ class ScalarGaSP(RobustGaSPBase):
         Parameters
         ----------
         design : numpy array
-            Training input points. Shape (ntrain, ndim).
-            `ntrain` is the number of training points, `ndim` is the input
-            dimension. If ndim=1, both (ntrain, 1) and (ntrain,) np.ndarray are
-            valid.
+            Training input points. Shape :code:`(ntrain, ndim)`.
+            ``ntrain`` is the number of training points, ``ndim`` is the input
+            dimension. If :math:`ndim=1`, both :code:`(ntrain, 1)` and
+            :code:`(ntrain,)` :class:`numpy.ndarray` are valid.
         response : numpy array
-            Training output points. Shape (ntrain, 1) or (ntrain,).
+            Training output points. Shape :code:`(ntrain, 1)` or
+            :code:`(ntrain,)`.
         trend : numpy array, optional
-            Basis function matrix. Shape (ntrain, q).
-            If None, a (ntrain, 1) matrix of ones is used.
-            If q=1, both (ntrain, 1) and (ntrain,) np.ndarray are valid.
+            Basis function matrix. Shape :code:`(ntrain, q)`.
+            If `None`, a :code:`(ntrain, 1)` matrix of ones is used.
+            If :math:`q=1`, both :code:`(ntrain, 1)` and :code:`(ntrain,)`
+            :class:`numpy.ndarray` are valid.
         """
         self._preprocess_design_response_trend(design, response, trend)
 
@@ -327,23 +329,24 @@ class ScalarGaSP(RobustGaSPBase):
         Parameters
         ----------
         testing_input : numpy array
-            Input points at which to make prediction. Shape (ntest, ndim).
-            `ntest` - number of input configurations.
-            `ndim` - input dimension.
-            If ndim=1, both (ntest, 1) and (ntest,) np.ndarray are valid.
+            Input points at which to make prediction. Shape :code:`(ntest, ndim)`.
+            ``ntest`` is the number of input points and ``ndim`` is the
+            input dimension. If :math:`ndim=1`, both :code:`(ntest, 1)` and
+            :code:`(ntest,)` :class:`numpy.ndarray` are valid.
         testing_trend : numpy array, optional
-            Basis function matrix corresponds to `testing_input`.
-            Shape (ntest, q). By default (ntest, 1) matrix of ones.
-            If q=1, both (ntest, 1) and (ntest,) np.ndarray are valid.
+            Basis function matrix corresponding to ``testing_input``.
+            Shape :code:`(ntest, q)`. By default :code:`(ntest, 1)` matrix of ones.
+            If :math:`q=1`, both :code:`(ntest, 1)` and :code:`(ntest,)`
+            :class:`numpy.ndarray` are valid.
 
         Returns
         -------
         predictions : numpy array
-            Emulator-prediction at `testing_input`. Shape (ntest, 4).
-            predictions[:, 0] - mean
-            predictions[:, 1] - low95
-            predictions[:, 2] - upper95
-            predictions[:, 3] - sd
+            Emulator predictions at ``testing_input``. Shape :code:`(ntest, 4)`.
+            `predictions[:, 0]` - mean,
+            `predictions[:, 1]` - low95,
+            `predictions[:, 2]` - upper95,
+            `predictions[:, 3]` - sd.
         """
         if self.emulator is None:
             raise RuntimeError("Emulator has not been trained!")
@@ -369,23 +372,23 @@ class ScalarGaSP(RobustGaSPBase):
         Parameters
         ----------
         testing_input : numpy array
-            Input points at which to draw samples. Shape (ntest, ndim).
-            `ntest` - number of input points.
-            `ndim` - input dimension.
-            If ndim=1, both (ntest, 1) and (ntest,) np.ndarray are valid.
+            Input points at which to draw samples. Shape :code:`(ntest, ndim)`.
+            ``ntest`` is the number of input points and ``ndim`` is the input
+            dimension. If :math:`ndim=1`, both :code:`(ntest, 1)` and
+            :code:`(ntest,)` :class:`numpy.ndarray` are valid.
         nsamples : int, optional
             Number of samples to be drawn.
         testing_trend : numpy array, optional
-            Basis function matrix corresponds to `testing_input`.
-            Shape (ntest, q).
-            By default (ntest, 1) matrix of ones.
-            If q=1, both (ntest, 1) and (ntest,) np.ndarray are valid.
+            Basis function matrix corresponding to ``testing_input``.
+            Shape :code:`(ntest, q)`. By default :code:`(ntest, 1)` matrix of ones.
+            If :math:`q=1`, both :code:`(ntest, 1)` and :code:`(ntest,)`
+            :class:`numpy.ndarray` are valid.
 
         Returns
         -------
         samples : numpy array
-            Shape (ntest, nsamples). Each column contains one realization of the
-            trained emulator at `testing_input`.
+            Shape :code:`(ntest, nsamples)`. Each column contains one realization
+            of the trained emulator at ``testing_input``.
         """
         if self.emulator is None:
             raise RuntimeError("Emulator has not been trained!")
@@ -408,8 +411,8 @@ class ScalarGaSP(RobustGaSPBase):
         Returns
         -------
         validation : numpy array
-            Shape (n, 2).
-            validation[:, 0] - leave one out fitted values
+            Shape :code:`(n, 2)`.
+            `validation[:, 0]` - leave-one-out fitted values,
+            `validation[:, 1]` - standard deviation of each prediction.
         """
         if self.emulator is None:
@@ -432,17 +435,18 @@ class PPGaSP(RobustGaSPBase):
         Parameters
         ----------
         design : numpy array 
-            Training input points. Shape (ntrain, ndim).
-            `ntrain` is the number of training points, `ndim` is the input
-            dimension. If ndim=1, both (ndim, 1) and (ndim,) np.ndarray are
-            valid.
+            Training input points. Shape :code:`(ntrain, ndim)`.
+            ``ntrain`` is the number of training points, ``ndim`` is the input
+            dimension. If :math:`ndim=1`, both :code:`(ntrain, 1)` and
+            :code:`(ntrain,)` :class:`numpy.ndarray` are valid.
         response : numpy array
-            Training output points. Shape (ntrain, nresponse),
+            Training output points. Shape :code:`(ntrain, nresponse)`,
             :math:`nresponse>1`.
         trend : numpy array, optional
-            Basis function matrix. Shape (ntrain, q).
-            If None, a (ntrain, 1) matrix of ones is used.
-            If q=1, both (ntrain, 1) and (ntrain,) np.ndarray are valid.
+            Basis function matrix. Shape :code:`(ntrain, q)`.
+            If `None`, a :code:`(ntrain, 1)` matrix of ones is used.
+            If :math:`q=1`, both :code:`(ntrain, 1)` and :code:`(ntrain,)`
+            :class:`numpy.ndarray` are valid.
         """
         self._preprocess_design_response_trend(design, response, trend)
 
@@ -479,24 +483,26 @@ class PPGaSP(RobustGaSPBase):
 
         Parameters
         ----------
-        testing_input : numpy array, 
-            Input points at which to make prediction. Shape (ntest, ndim).
-            `ntest` - number of input points.
-            `ndim` - input dimension.
-            If ndim=1, both (ntest, 1) and (ntest,) np.ndarray are valid.
+        testing_input : numpy array
+            Input points at which to make prediction. Shape :code:`(ntest, ndim)`.
+            ``ntest`` is the number of input points and ``ndim`` is the input
+            dimension. If :math:`ndim=1`, both :code:`(ntest, 1)` and
+            :code:`(ntest,)` :class:`numpy.ndarray` are valid.
         testing_trend : numpy array, optional
-            Basis function matrix corresponds to `testing_input`.
-            Shape (ntest, q). By default (ntest, 1) matrix of ones.
-            If q=1, both (ntest, 1) and (ntest,) np.ndarray are valid.
+            Basis function matrix corresponding to ``testing_input``.
+            Shape :code:`(ntest, q)`. By default :code:`(ntest, 1)` matrix of ones.
+            If :math:`q=1`, both :code:`(ntest, 1)` and :code:`(ntest,)`
+            :class:`numpy.ndarray` are valid.
 
         Returns
         -------
-        predictions : numpy array, 
-            Emulator-prediction at `testing_input`. Shape (ntest, nresponse, 4).
-            predictions[:,:,0] - mean
-            predictions[:,:,1] - low95
-            predictions[:,:,2] - upper95
-            predictions[:,:,3] - sd
+        predictions : numpy array
+            Emulator predictions at ``testing_input``.
+            Shape :code:`(ntest, nresponse, 4)`.
+            `predictions[:, :, 0]` - mean,
+            `predictions[:, :, 1]` - low95,
+            `predictions[:, :, 2]` - upper95,
+            `predictions[:, :, 3]` - sd.
         """
         if self.emulator is None:
             raise RuntimeError("Emulator has not been trained!")
@@ -528,23 +534,24 @@ class PPGaSP(RobustGaSPBase):
         Parameters
         ----------
         testing_input : numpy array
-            Input point at which to draw samples. Shape (ntest, ndim).
-            `ntest` - number of input points.
-            `ndim` - input dimension.
-            If ndim=1, both (ntest, 1) and (ntest,) numpy.ndarray are valid.
+            Input points at which to draw samples. Shape :code:`(ntest, ndim)`.
+            ``ntest`` is the number of input points and ``ndim`` is the input
+            dimension. If :math:`ndim=1`, both :code:`(ntest, 1)` and
+            :code:`(ntest,)` :class:`numpy.ndarray` are valid.
         nsamples : int, optional
             Number of samples to be drawn.
         testing_trend : numpy array, optional
-            Basis function matrix corresponds to `testing_input`.
-            Shape (ntest, q). By default (ntest, 1) matrix of ones.
-            If q=1, both (ntest, 1) and (ntest,) np.ndarray are valid.
+            Basis function matrix corresponding to ``testing_input``.
+            Shape :code:`(ntest, q)`. By default :code:`(ntest, 1)` matrix of ones.
+            If :math:`q=1`, both :code:`(ntest, 1)` and :code:`(ntest,)`
+            :class:`numpy.ndarray` are valid.
 
         Returns
         -------
         samples : numpy array
-            Shape=(ntest, nresponse, nsamples). 
-            `samples[:,:,i]` corresponds to i-th realization of the trained
-            emulator at `testing_input`, i=1,...,nsamples.
+            Shape :code:`(ntest, nresponse, nsamples)`.
+            `samples[:, :, i]` corresponds to the i-th realization of the
+            trained emulator at ``testing_input``, :math:`i=1,\ldots,nsamples`.
         """
         if self.emulator is None:
             raise RuntimeError("Emulator has not been trained!")
diff --git a/src/psimpy/inference/active_learning.py b/src/psimpy/inference/active_learning.py
index c9893447239fc108c8b1a19b02ac4de4c2bbd4ec..1950803ed4392440c840910d33c38a90c155c9dc 100644
--- a/src/psimpy/inference/active_learning.py
+++ b/src/psimpy/inference/active_learning.py
@@ -24,8 +24,7 @@ class ActiveLearning:
         likelihood: Callable, 
         lhs_sampler: LHS,
         scalar_gasp: ScalarGaSP,
-        scalar_gasp_trend: Union[str,
-            Callable[[np.ndarray], np.ndarray]] = 'constant',
+        scalar_gasp_trend: Union[str, Callable[[np.ndarray], np.ndarray]] = 'constant',
         indicator: str = 'entropy',
         optimizer: Callable = optimize.brute,
         args_prior: Union[list, None] = None,
@@ -35,68 +34,75 @@ class ActiveLearning:
         args_optimizer: Union[list, None] = None,
         kwgs_optimizer: Union[dict, None] = None) -> None:
         """
-        Active learning to contruct a scalar GP emulator for natural logarithm 
-        of the product of prior and likelihood (i.e. unnormalized posterior). 
+        Construct a scalar GP emulator for the natural logarithm of the product
+        of prior and likelihood (i.e. the unnormalized posterior), via active
+        learning.
 
         Parameters
         ----------
         ndim : int
-            Dimension of parameter `x`.
+            Dimension of parameter ``x``.
         bounds : numpy array
-            Upper and lower boundaries of each parameter. Shape (ndim, 2).
-            bounds[:, 0] - lower boundaries of each parameter.
-            bounds[:, 1] - upper boundaries of each parameter. 
+            Upper and lower boundaries of each parameter. Shape :code:`(ndim, 2)`.
+            `bounds[:, 0]` corresponds to lower boundaries of each parameter and
+            `bounds[:, 1]` to upper boundaries of each parameter. 
         data : numpy array
             Observed data for parameter calibration.
-        run_sim_obj : instance of class `RunSimulator`
-            It has an attribute `simulator` and two methods to run `simulator`,
-            namely `serial_run` and `parrel_run`. For each simulation,
-            `simulator` must return outputs `y` as a numpy array.
+        run_sim_obj : instance of class :class:`.RunSimulator`
+            It has an attribute :py:attr:`simulator` and two methods to run 
+            :py:attr:`simulator`, namely :meth:`.serial_run` and
+            :meth:`.parallel_run`. For each simulation, :py:attr:`simulator`
+            must return outputs ``y`` as a numpy array.
         prior : Callable
             Prior probability density function.
-            Call with `prior(x, *args_prior, **kwgs_prior)` and return the prior
-            probability density value at x.
+            Call with :code:`prior(x, *args_prior, **kwgs_prior)` and return
+            the value of prior probability density at ``x``.
         likelihood : Callable
-            Likelihood function constructed based on `data` and `simulator`
-            outputs `y` evaluated at `x`.
-            Call with `likelihood(y, data, *args_likelihood, *kwgs_likelihood)`
-            and return the likelihood value at x.
-        lhs_sampler : instance of class LHS
-            Used to draw initial samples of `x`.
-        scalar_gasp : instance of class ScalarGaSP
-            Used to build emulators and make predictions.  
-        scalar_gasp_trend : str, optional
-            Mean function of `scalar_gasp` emulator to determine the trend at
-            given training / testing_input.
-            'zero' - trend is set to zero
-            'constant' - trend is set to a constant
-            'linear' - trend is linear to training / testing_input
-            Callable - a function takes training / testing_input as parameter
-                and returns the trend.
+            Likelihood function constructed based on ``data`` and simulation
+            outputs ``y`` evaluated at ``x``. Call with
+            :code:`likelihood(y, data, *args_likelihood, **kwgs_likelihood)`
+            and return the likelihood value at ``x``.
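+            For instance, a Gaussian likelihood that additionally takes a
+            covariance matrix ``cov`` (a hypothetical extra argument) would be
+            called as :code:`likelihood(y, data, cov)`, with ``cov`` supplied
+            via :code:`args_likelihood=[cov]`.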
+        lhs_sampler : instance of class :class:`.LHS`
+            Latin hypercube sampler used to draw initial samples of ``x``.
+            These initial samples are used to run initial simulations and build
+            initial emulator.
+        scalar_gasp : instance of class :class:`.ScalarGaSP`
+            An object which sets up the emulator structure. Given training
+            data, the emulator can be trained and then used to make predictions.
+        scalar_gasp_trend : str or Callable, optional
+            Mean function of the ``scalar_gasp`` emulator, which is used to
+            determine the ``trend`` or ``testing_trend`` at given ``design`` or
+            ``testing_input``.
+            `'zero'` - trend is set to zero.
+            `'constant'` - trend is set to a constant.
+            `'linear'` - trend is linear in ``design`` or ``testing_input``.
+            Callable - a function that takes ``design`` or ``testing_input`` as
+            its argument and returns the trend (see the example below).
+            Default is `'constant'`.
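+            For instance, assuming ``numpy`` is imported as ``np``, a callable
+            producing a linear trend with an intercept column could be
+            :code:`lambda x: np.hstack((np.ones((len(x), 1)), x))` (an
+            illustrative sketch, not a required form).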
         indicator : str, optional
-            Indicator of uncertainty. 'entropy' or 'variance'.
+            Indicator of uncertainty. `'entropy'` or `'variance'`. Default is
+            `'entropy'`.
         optimizer : Callable, optional
-            Find input point that minimizes the uncertainty indicator at each
-            iteration step.
-            Call with `optimizer(func, *args_optimizer, **kwgs_optimizer)`. 
-            The objective function `func` is defined by the class method
-            `_uncertainty_indicator` which have only one argument `x`.
-            Return either the solution array, or a OptimizeResult object which
-            has the attribute x denoting the solution array.
-            By default is set to the brute force method of scipy, namely
-            `scipy.optimize.brute`.
+            A function which finds the input point ``x`` that minimizes the
+            uncertainty ``indicator`` at each iteration step.
+            Call with :code:`optimizer(func, *args_optimizer, **kwgs_optimizer)`. 
+            The objective function ``func`` is defined by the class method
+            :meth:`_uncertainty_indicator`, which has only one argument ``x``.
+            The ``optimizer`` should return either the solution array, or a
+            :class:`scipy.optimize.OptimizeResult` object which has the attribute
+            :py:attr:`x` denoting the solution array.
+            Defaults to :py:func:`scipy.optimize.brute`.
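+            For instance, with the default :py:func:`scipy.optimize.brute`, the
+            search grid can be supplied as :code:`args_optimizer=[ranges]`,
+            where ``ranges`` is defined as for :py:func:`scipy.optimize.brute`
+            (an illustrative sketch).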
         args_prior : list, optional
-            Positional arguments for `prior`.
+            Positional arguments for ``prior``.
         kwgs_prior: dict, optional
-            Keyword arguments for `prior`.
+            Keyword arguments for ``prior``.
         args_likelihood : list, optional
-            Positional arguments for `likelihood`.
+            Positional arguments for ``likelihood``.
         kwgs_likelihood : dict, optional
-            Keyword arguments for `likelihood`.
+            Keyword arguments for ``likelihood``.
         args_optimizer : list, optional
-            Positional arguments for `optimizer`.
+            Positional arguments for ``optimizer``.
         kwgs_optimizer : dict, optional
-            Keyword arguments for `optimizer`.
+            Keyword arguments for ``optimizer``.
         """
         if ndim != len(run_sim_obj.var_inp_parameter):
             raise RuntimeError("ndim and run_sim_obj are incompatible")
@@ -166,19 +172,19 @@ class ActiveLearning:
         mode: str = 'serial',
         max_workers: Union[int, None] = None) -> tuple[np.ndarray, np.ndarray]:
         """
-        Run `n0` initial simulations.
+        Run ``n0`` initial simulations.
 
         Parameters
         ----------
         n0 : int
             Number of initial simulation runs.
         prefixes : list of str, optional
-            Consist of `n0` strings. Each of them is used to name corresponding
-            simulation output file.
-            If None, 'sim0','sim1',... are used.   
+            Consists of ``n0`` strings. Each is used to name the corresponding
+            simulation output file(s).
+            If `None`, `'sim0'`, `'sim1'`, ... are used.
         mode : str, optional
-            'parallel' - run `n0` simulations in parallel.
-            'serial' -  run `n0` simulations in serial.
+            `'parallel'` or `'serial'`. Run ``n0`` simulations in parallel or
+            serially. Default is `'serial'`.
         max_workers : int, optional
             Controls the maximum number of tasks running in parallel.
             Default is the number of CPUs on the host.
@@ -186,11 +192,11 @@ class ActiveLearning:
         Returns
         -------
         init_var_samples: numpy array
-            Variable input samples for `n0` initial simulations.
-            Shape of (n0, ndim).
+            Variable input samples for ``n0`` initial simulations.
+            Shape of :code:`(n0, ndim)`.
         init_sim_outputs : numpy array
-            Outputs of `n0` intial simulations.
-            init_sim_outputs.shape[0] is `n0`.
+            Outputs of ``n0`` initial simulations.
+            :code:`init_sim_outputs.shape[0]` is ``n0``.
         """
         init_var_samples = self.lhs_sampler.sample(n0)
 
@@ -217,7 +223,8 @@ class ActiveLearning:
         iter_prefixes: Union[list[str], None] = None
         ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
         """
-        Sequentially pick `niter` new input points based on `ninit` simulations.
+        Sequentially pick ``niter`` new input points based on ``ninit``
+        simulations.
 
         Parameters
         ----------
@@ -226,35 +233,35 @@ class ActiveLearning:
         ninit : int
             Number of initial simulations.
         init_var_samples: numpy array
-            Variable input samples for `ninit` simulations.
-            Shape of (ninit, ndim).
+            Variable input samples for ``ninit`` simulations.
+            Shape of :code:`(ninit, ndim)`.
         init_sim_outputs : numpy array
-            Outputs of `ninit` simulations.
-            init_sim_outputs.shape[0] is `ninit`. 
+            Outputs of ``ninit`` simulations.
+            :code:`init_sim_outputs.shape[0]` is ``ninit``. 
         iter_prefixes : list of str, optional
-            Consist of `niter` strings. Each of them is used to name
-            corresponding iterative simulation output file.
-            If None, f'iter_sim0',f'iter_sim1',... are used.
+            Consists of ``niter`` strings. Each is used to name the
+            corresponding iterative simulation output file(s).
+            If `None`, `'iter_sim0'`, `'iter_sim1'`, ... are used.
 
         Returns
         -------
         var_samples : numpy array
-            Variable input samples of `ninit` simulations and `niter`
-            iterative simulations. 2d array of shape (ninit+niter, ndim).
+            Variable input samples of ``ninit`` simulations and ``niter``
+            iterative simulations. Shape of :code:`(ninit+niter, ndim)`.
         sim_outputs : numpy array
-            Outputs of `ninit` and `niter` simulations. 
-            sim_outputs.shape[0] is ninit+niter.
+            Outputs of ``ninit`` and ``niter`` simulations. 
+            :code:`sim_outputs.shape[0]` is :math:`ninit+niter`.
         ln_pxl_values : numpy array
             Natural logarithm values of the product of prior and likelihood 
-            at `ninit` and `niter` simulations. 
-            1d array of shape (ninit+niter,).
+            at ``ninit`` and ``niter`` simulations. 
+            Shape of :code:`(ninit+niter,)`.
         
         Notes
         -----
-        If a duplicated iteration point is returned by the `optimizer`, the
+        If a duplicated iteration point is returned by the ``optimizer``, the
         iteration will be stopped right away. In that case, the first dimension
-        of returned `var_samples`, `sim_outputs`, `ln_pxl_values` is smaller
-        than ninit+niter.
+        of the returned ``var_samples``, ``sim_outputs``, and ``ln_pxl_values``
+        is smaller than :math:`ninit+niter`.
         """
         if init_var_samples.shape != (ninit, self.ndim):
             raise ValueError("init_var_samples must be of shape (ninit, ndim)")
@@ -325,17 +332,17 @@ class ActiveLearning:
     @beartype
     def approx_ln_pxl(self, x: np.ndarray) -> float:
         """
-        Approximate ln_pxl value at x based on the trained calar GP emulator.
+        Approximate the `ln_pxl` value at ``x`` based on the trained emulator.
 
         Parameters
         ----------
         x : numpy array
-            One variable sample at which ln_pxl is to be approximated. 1d array
-            of shape (ndim,)
+            One variable sample at which `ln_pxl` is to be approximated. Shape of
+            :code:`(ndim,)`.
         
         Returns
         -------
-        A float value which is the emulator-predicted ln_pxl value at x.
+        A float value which is the emulator-predicted `ln_pxl` value at ``x``.
         """
         predict = self._predict_ln_pxl(x)
 
@@ -348,14 +355,14 @@ class ActiveLearning:
         Parameters
         ----------
         x : numpy array
-            Variable input of `simulator`. 1d array of shape (ndim,).
+            Variable input of ``simulator``. Shape of :code:`(ndim,)`.
         y : numpy array
-            Simulation outputs at `x`.
+            Simulation outputs at ``x``.
         
         Returns
         -------
         ln_pxl_val : float
-            Natural logarithm of the product of prior and likelihood at `x`.
+            Natural logarithm of the product of prior and likelihood at ``x``.
         """
         prior_val = self.prior(x, *self.args_prior, **self.kwgs_prior)
         likelihood_val = self.likelihood(y, self.data, *self.args_likelihood,
@@ -368,17 +375,17 @@ class ActiveLearning:
     def _emulate_ln_pxl(self, var_samples: np.ndarray, ln_pxl_values: np.ndarray
         ) -> None:
         """
-        Train scalar gasp emulator for natural lograthim of prior times
-        likelihood based on `n` simulations.
+        Train a scalar GaSP emulator for the natural logarithm of the product
+        of prior and likelihood based on ``n`` simulations.
 
         Parameters
         ----------
         var_samples: numpy array
-            Samples of variable inputs of `n` simulations. 2d array of shape
-            (n, ndim).
+            Variable input samples for ``n`` simulations. Shape of
+            :code:`(n, ndim)`.
         ln_pxl_values : numpy array
-            Natural lograthim of prior times likelhood values corresponding to
-            `n` simulations. 1d array of shape (n,)
+            Natural logarithm values of the product of prior and likelihood
+            corresponding to ``n`` simulations. Shape of :code:`(n,)`.
         """
         n = len(var_samples) 
 
@@ -396,21 +403,21 @@ class ActiveLearning:
     
     def _predict_ln_pxl(self, x: np.ndarray) -> np.ndarray:
         """
-        Make prediction of ln_pxl at `x` using scalar GP emulator.
+        Make prediction of `ln_pxl` at ``x`` using the emulator.
 
         Parameters
         ----------
         x : numpy array
-            One variable input point, 1d array of shape (ndim,).
+            One variable input point. Shape of :code:`(ndim,)`.
         
         Returns
         -------
         predict : numpy array
-            Emulator-prediction at `x`. Shape (1, 4).
-            predict[0, 0] - mean
-            predict[0, 1] - low95
-            predict[0, 2] - upper95
-            predict[0, 3] - sd
+            Emulator-prediction at ``x``. Shape :code:`(1, 4)`.
+            `predict[0, 0]` - mean,
+            `predict[0, 1]` - low95,
+            `predict[0, 2]` - upper95,
+            `predict[0, 3]` - sd.
         """
         x = x.reshape((1, self.ndim))
 
@@ -435,12 +442,12 @@ class ActiveLearning:
         Parameters
         ----------
         x : numpy array
-            One variable input point, 1d array of shape (ndim,).
+            One variable input point. Shape of :code:`(ndim,)`.
         
         Returns
         -------
         neg_val : float
-            Negative value of uncertainty indicator at `x`.
+            Negative value of uncertainty indicator at ``x``.
         """
         predict = self._predict_ln_pxl(x)
         
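For orientation, here is a minimal sketch of how the active-learning workflow documented above might be driven. The constructor arguments are elided, and the method names `initial_simulation` and `iterative_emulation` are assumptions inferred from the docstrings, not verified API:

```python
import numpy as np
from psimpy.inference.active_learning import ActiveLearning

# Sketch only: constructor arguments (simulator, prior, likelihood, sampler,
# emulator, ...) are elided; see the class docstring for the full signature.
active_learner = ActiveLearning(...)

n0, niter = 20, 30
# assumed method names, inferred from the docstrings above
init_var_samples, init_sim_outputs = active_learner.initial_simulation(n0)
var_samples, sim_outputs, ln_pxl_values = active_learner.iterative_emulation(
    niter, n0, init_var_samples, init_sim_outputs)

# unless a duplicated iteration point stopped the loop early:
# var_samples.shape == (n0 + niter, ndim), ln_pxl_values.shape == (n0 + niter,)
ln_pxl_hat = active_learner.approx_ln_pxl(np.array([0.3, 0.7]))
```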
diff --git a/src/psimpy/inference/bayes_inference.py b/src/psimpy/inference/bayes_inference.py
index ce5a21c5d023947ae0d4a3dbf119f6c68cee5f7a..e7c9737770d232e25d2c324acf7aca7c639931b7 100644
--- a/src/psimpy/inference/bayes_inference.py
+++ b/src/psimpy/inference/bayes_inference.py
@@ -31,51 +31,52 @@ class BayesInferenceBase(ABC):
         args_ln_pxl: Union[list, None] = None,
         kwgs_ln_pxl: Union[dict, None]=None) -> None:
         """
-        A base class to set up basic input for Bayesian inference.
+        Set up basic input for Bayesian inference.
     
         Parameters
         ----------
         ndim : int
-            Parameter dimension.
+            Dimension of parameter ``x``.
         bounds : numpy array
-            Upper and lower boundaries of each parameter. Shape (ndim, 2).
-            bounds[:, 0] - lower boundaries of each parameter.
-            bounds[:, 1] - upper boundaries of each parameter.
+            Upper and lower boundaries of each parameter.
+            Shape :code:`(ndim, 2)`. 
+            `bounds[:, 0]` corresponds to lower boundaries of each parameter and
+            `bounds[:, 1]` to upper boundaries of each parameter. 
         prior : Callable
             Prior probability density function.
-            Call with `prior(x, *args_prior, **kwgs_prior)`.
-            Return the prior probability density value at x, where x is a one
-            dimension numpy array of shape (ndim,).
+            Call with :code:`prior(x, *args_prior, **kwgs_prior)`.
+            Return the prior probability density value at ``x``, where ``x`` is
+            a one-dimensional :class:`numpy.ndarray` of shape :code:`(ndim,)`.
         likelihood : Callable
             Likelihood function.
-            Call with `likelihood(x, *args_likelihood, **kwgs_likelihood)`.
-            Return the likelihood value at x.
+            Call with :code:`likelihood(x, *args_likelihood, **kwgs_likelihood)`.
+            Return the likelihood value at ``x``.
         ln_prior : Callable
             Natural logarithm of prior probability density function.
-            Call with `ln_prior(x, *args_prior, **kwgs_prior)`.
-            Return the natural logarithm value of prior probability density
-            value at x.
+            Call with :code:`ln_prior(x, *args_prior, **kwgs_prior)`.
+            Return the value of the natural logarithm of the prior probability
+            density at ``x``.
         ln_likelihood : Callable
             Natural logarithm of likelihood function.
-            Call with `ln_likelihood(x, *args_likelihood, **kwgs_likelihood)`.
-            Return the natural logarithm value of likelihood value at x.
+            Call with :code:`ln_likelihood(x, *args_likelihood, **kwgs_likelihood)`.
+            Return the value of the natural logarithm of the likelihood at ``x``.
         ln_pxl : Callable
-            Natural logarithm of the product of prior times likelihood.
-            Call with `ln_pxl(x, *args_ln_pxl, **kwgs_ln_pxl)`.
-            Return the natural logarithm value of prior times the natural
-            logarithm value of likelihood at x.    
+            Natural logarithm of the product of prior and likelihood.
+            Call with :code:`ln_pxl(x, *args_ln_pxl, **kwgs_ln_pxl)`.
+            Return the value of the natural logarithm of the product of prior and
+            likelihood at ``x``.    
         args_prior : list, optional
-            Positional arguments for `prior` or `ln_prior`.
+            Positional arguments for ``prior`` or ``ln_prior``.
         kwgs_prior: dict, optional
-            Keyword arguments for `prior` or `ln_prior`.
+            Keyword arguments for ``prior`` or ``ln_prior``.
         args_likelihood : list, optional
-            Positional arguments for `likelihood` or `ln_likelihood`.
+            Positional arguments for ``likelihood`` or ``ln_likelihood``.
         kwgs_likelihood : dict, optional
-            Keyword arguments for `likelihood` or `ln_likelihood`.
+            Keyword arguments for ``likelihood`` or ``ln_likelihood``.
         args_ln_pxl : list, optional
-            Positional arguments for `ln_pxl`.
+            Positional arguments for ``ln_pxl``.
         kwgs_ln_pxl : dict, optional
-            Keyword arguments for `ln_pxl`.
+            Keyword arguments for ``ln_pxl``.
         """
         self.ndim = ndim            
         self.bounds = bounds
@@ -112,9 +113,7 @@ class BayesInferenceBase(ABC):
     
     def _ln_pxl_1(self, x: np.ndarray) -> float:
         """
-        Construct natural logarithm of the product of prior and likelihood,
-        given natural logarithm of prior function and natural logarithm of
-        likelihood function.
+        Construct ``ln_pxl``, given ``ln_prior`` and ``ln_likelihood``.
         """
         ln_pxl = self.ln_prior(x, *self.args_prior, **self.kwgs_prior) + \
             self.ln_likelihood(x, *self.args_likelihood, **self.kwgs_likelihood)
@@ -123,8 +122,7 @@ class BayesInferenceBase(ABC):
     
     def _ln_pxl_2(self, x: np.ndarray) -> float:
         """
-        Construct natural logarithm of the product of prior and likelihood,
-        given prior function and likelihood function.
+        Construct ``ln_pxl``, given ``prior`` and ``likelihood``.
         """
         pxl = self.prior(x, *self.args_prior, **self.kwgs_prior) * \
             self.likelihood(x, *self.args_likelihood, **self.kwgs_likelihood)
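A quick numeric illustration of why the sum-of-logs route (`_ln_pxl_1`) is numerically safer than taking the logarithm of the product (`_ln_pxl_2`): the product of two small densities underflows to zero long before the sum of their logarithms loses precision. The values below are arbitrary:

```python
import numpy as np

ln_prior_val = -400.0
ln_likelihood_val = -500.0

# sum of logs: exact
ln_pxl_1 = ln_prior_val + ln_likelihood_val            # -900.0

# log of product: exp() underflows to 0.0, so the log becomes -inf
pxl = np.exp(ln_prior_val) * np.exp(ln_likelihood_val)
ln_pxl_2 = np.log(pxl)                                 # -inf (underflow)
```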
@@ -149,17 +147,17 @@ class GridEstimation(BayesInferenceBase):
         ----------
         nbins : int or list of ints
             Number of bins for each parameter.
-            If int, the same value is used for each parameter.
-            If list of int, it should be of length `ndim`.
+            If `int`, the same value is used for each parameter.
+            If `list of int`, it should be of length ``ndim``.
         
         Returns
         -------
         posterior : numpy array
             Estimated posterior probability density values at grid points.
-            Shape of (nbins[0], nbins[1], ..., nbins[ndim]).
+            Shape of :code:`(nbins[0], nbins[1], ..., nbins[ndim-1])`.
         x_ndim : list of numpy array
-            Contain `ndim` 1d numpy arrays x1, x2, ... Each xi
-            is a 1d array of length `nbins[i]`, representing the 1d coordinate
+            Contains ``ndim`` 1d :class:`numpy.ndarray` `x1`, `x2`, ... Each `xi`
+            is a 1d array of length ``nbins[i]``, representing the 1d coordinate
             array along the i-th axis.
 
         """
@@ -206,23 +204,24 @@ class MetropolisHastingsEstimation(BayesInferenceBase):
     def run(self, nsamples: int, mh_sampler: MetropolisHastings) -> tuple[
         np.ndarray, np.ndarray]:
         """
-        Use metropolis hastings sampling to draw samples from the posterior.
+        Use Metropolis-Hastings sampling to draw samples from the posterior.
 
         Parameters
         ----------
         nsamples : int
             Number of samples to be drawn.
-        mh_sampler : MetroplisHastings object
+        mh_sampler : instance of :class:`.MetropolisHastings`
+            Metropolis-Hastings sampler.
         
         Returns
         -------
         mh_samples : numpy array
-            Samples drawn from the posterior. Shape of (nsamples, ndim).
+            Samples drawn from the posterior. Shape of :code:`(nsamples, ndim)`.
         mh_accept : numpy array
-            An array of shape (nsamples,). Each element indicates whether the
+            Shape of :code:`(nsamples,)`. Each element indicates whether the
             corresponding sample is the proposed new state (value 1) or the old
-            state (value 0). `np.mean(mh_accept)` thus gives the overall
-            acceptance rate.
+            state (value 0). :code:`np.mean(mh_accept)` thus gives the overall
+            acceptance rate.
         """
         if mh_sampler.ndim != self.ndim:
             raise RuntimeError(
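As an illustration of the interface above, a hedged sketch of grid-based posterior estimation; the constructor keywords follow the `BayesInferenceBase` docstring, while the flat prior and Gaussian likelihood are arbitrary stand-ins:

```python
import numpy as np
from scipy import stats
from psimpy.inference.bayes_inference import GridEstimation

ndim = 2
bounds = np.array([[-5.0, 5.0], [-5.0, 5.0]])

def ln_prior(x):
    return 0.0  # flat (unnormalized) prior on the bounded domain

def ln_likelihood(x):
    return stats.multivariate_normal.logpdf(x, mean=[0.0, 1.0])

grid_estimator = GridEstimation(
    ndim=ndim, bounds=bounds, ln_prior=ln_prior, ln_likelihood=ln_likelihood)

posterior, x_ndim = grid_estimator.run(nbins=[50, 40])
# posterior.shape == (50, 40); len(x_ndim[0]) == 50, len(x_ndim[1]) == 40
```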
diff --git a/src/psimpy/sampler/latin.py b/src/psimpy/sampler/latin.py
index 05a4da4e4ea05ee5b831f9382c54d7fae4026d43..5cac84d35c17bee42d12dc24e681f91bf7f382f6 100644
--- a/src/psimpy/sampler/latin.py
+++ b/src/psimpy/sampler/latin.py
@@ -18,17 +18,20 @@ class LHS:
         ndim : int
             Dimension of parameters.
         bounds : numpy array
-            Bounds of the `ndim` parameters, where bounds[:, 0] and bounds[:, 1] 
-            correspond to lower and upper bounds, respectively. Shape (ndim, 2).
+            Upper and lower boundaries of each parameter. Shape :code:`(ndim, 2)`.
+            `bounds[:, 0]` corresponds to lower boundaries of each parameter and
+            `bounds[:, 1]` to upper boundaries of each parameter.
         seed : int, optional
             Seed to initialize the pseudo-random number generator.
         criterion : str, optional
             Criterion for generating Latin hypercube samples.
-            'random' - randomly locate samples in each bin.
-            'center' - locate samples in bin centers
-            'maximin' - locate samples by maximizing their minimum distance.
+            `'random'` - randomly locate samples in each bin.
+            `'center'` - locate samples in bin centers.
+            `'maximin'` - locate samples in each bin by maximizing their minimum
+            distance.
+            Default is `'random'`.
         iteration : int, optional
-            Number of iterations if `criterion='maxmin'`.
+            Number of iterations if :code:`criterion='maximin'`.
         """
         self.ndim = ndim
 
@@ -58,7 +61,7 @@ class LHS:
         Returns
         -------
         lhs_samples : numpy array
-            Latin hypercube samples. Shape (nsamples, ndim).  
+            Latin hypercube samples. Shape :code:`(nsamples, ndim)`.  
         """        
         self.nsamples = nsamples
         
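A small usage example of `LHS`, following the constructor and `sample` signatures documented above:

```python
import numpy as np
from psimpy.sampler.latin import LHS

bounds = np.array([[0.0, 1.0], [-2.0, 2.0]])
lhs_sampler = LHS(ndim=2, bounds=bounds, seed=42, criterion='maximin',
                  iteration=500)
lhs_samples = lhs_sampler.sample(nsamples=20)   # shape (20, 2)
```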
diff --git a/src/psimpy/sampler/metropolis_hastings.py b/src/psimpy/sampler/metropolis_hastings.py
index c19438e75796bb164667c55f2cb4538a538ecb16..37fa987114d0dcf7ceba52def0a8ce0a5a9f6698 100644
--- a/src/psimpy/sampler/metropolis_hastings.py
+++ b/src/psimpy/sampler/metropolis_hastings.py
@@ -35,55 +35,56 @@ class MetropolisHastings:
         Parameters
         ----------
         ndim : int
-            Dimension of `target` probability density function.
+            Dimension of parameter ``x``.
         init_state : numpy array
-            Contains `ndim` numeric values representing the starting point of
-            the Metropolis hastings chain. Shape (ndim,)
+            Contains ``ndim`` numeric values representing the starting point of
+            the Metropolis-Hastings chain. Shape :code:`(ndim,)`.
         f_sample : Callable
-            A function which proposes a state x' given a current state x.
-            Call with `f_sample(x, *args_f_sample, **kwgs_f_sample)`.
-            Return the proposed state x' as a 1d numpy array of shape (ndim,).
-            If ndim=1, `f_sample` can also return a scalar value.
+            A function which proposes a new state ``x'`` given a current state
+            ``x``. Call with :code:`f_sample(x, *args_f_sample, **kwgs_f_sample)`.
+            Return the proposed state ``x'`` as a :class:`numpy.ndarray` of
+            shape :code:`(ndim,)`. If :math:`ndim=1`, ``f_sample`` may also
+            return a scalar value.
         target : Callable, optional
             Target probability density function, up to a normalizing constant. 
-            Call with `target(x, *args_target, **kwgs_target)`.
-            Return the probability density value at a state x. 
-            If `target` is not given, `ln_target` must be provided.
+            Call with :code:`target(x, *args_target, **kwgs_target)`.
+            Return the probability density value at ``x``. 
+            If ``target`` is not given, ``ln_target`` must be provided.
         ln_target : Callable, optional
             Natural logarithm of target probability density function.
-            Call with `ln_target(x, *args_target, **kwgs_target)`.
-            Return the natural logarithm value of target probability density
-            value at a state x.
-            If `ln_target` is not given, `target` must be provided.
+            Call with :code:`ln_target(x, *args_target, **kwgs_target)`.
+            Return the value of the natural logarithm of the target probability
+            density at ``x``.
+            If ``ln_target`` is not given, ``target`` must be provided.
         bounds : numpy array, optional
-            Consist of upper and lower boundaries of each parameter.
-            Shape (ndim, 2).
-            bounds[:, 0] - lower boundaries of each parameter
-            bounds[:, 1] - upper boundaries of each parameter
+            Upper and lower boundaries of each parameter. Shape :code:`(ndim, 2)`.
+            `bounds[:, 0]` corresponds to lower boundaries of each parameter and
+            `bounds[:, 1]` to upper boundaries of each parameter. 
         f_density : Callable, optional
-            Call with `f_density(x1,x2,*args_f_density,**kwgs_f_density)`.
-            Return the probability density at state x1 given state x2.
-            It must be provided if `symmetric` is False.
+            Call with :code:`f_density(x1, x2, *args_f_density, **kwgs_f_density)`.
+            Return the probability density at state ``x1`` given state ``x2``.
+            It must be provided if ``symmetric`` is set to `False`.
         symmetric : bool, optional
-            Whether `f_density` is symmetric or asymmetric.
+            Whether ``f_density`` is symmetric or asymmetric.
         nburn : int, optional
             Number of burnin. Default no burnin.
         nthin : int, optional
             Number of thinning to reduce dependency. Default no thinning.
         seed : int, optional
-            Seed to initialize the pseudo-random number generator. Default None.
+            Seed to initialize the pseudo-random number generator.
+            Default `None`.
         args_target : list, optional
-            Positional arguments for `target` or `ln_target`.
+            Positional arguments for ``target`` or ``ln_target``.
         kwgs_target : dict, optional
-            Keyword arguments for `target` or `ln_target`.
+            Keyword arguments for ``target`` or ``ln_target``.
         args_f_sample : list, optional
-            Positional arguments for `f_sample`.
+            Positional arguments for ``f_sample``.
         kwgs_f_sample : dict, optional
-            Keyword arguments for `f_sample`.
+            Keyword arguments for ``f_sample``.
         args_f_density : list, optional
-            Positional arguments for `f_density`.
+            Positional arguments for ``f_density``.
         kwgs_f_density : dict, optional
-            Keyword arguments for `f_density`.
+            Keyword arguments for ``f_density``.
         """
         if len(init_state) != ndim:
             raise ValueError(f"init_state must contain ndim={ndim} elements")
@@ -130,17 +131,17 @@ class MetropolisHastings:
         Parameters
         ----------
         nsamples : int
-            Number of samples.
+            Number of samples to be drawn.
         
         Returns
         -------
         mh_samples : numpy array
-            Samples drawn from `target`. Shape (nsamples, ndim).
+            Samples drawn from ``target``. Shape :code:`(nsamples, ndim)`.
         mh_accept : numpy array
-            An array of shape (nsamples,). Each element indicates whether the
+            Shape of :code:`(nsamples,)`. Each element indicates whether the
             corresponding sample is the proposed new state (value 1) or the old
-            state (value 0). `np.mean(mh_accept)` thus gives the overall
-            acceptance rate.
+            state (value 0). :code:`np.mean(mh_accept)` thus gives the overall
+            acceptance rate.
         """ 
         if (self.target is None) and (self.ln_target is None):
             raise ValueError(
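A hedged sketch of drawing samples with `MetropolisHastings`: a symmetric random-walk proposal targeting an unnormalized 1d Gaussian. The keyword names follow the docstring above; the proposal scale and chain settings are arbitrary:

```python
import numpy as np
from psimpy.sampler.metropolis_hastings import MetropolisHastings

def ln_target(x):
    return -0.5 * float(x @ x)   # unnormalized standard normal log density

rng = np.random.default_rng(0)

def f_sample(x):
    return x + rng.normal(scale=0.5, size=x.shape)  # symmetric proposal

mh_sampler = MetropolisHastings(
    ndim=1, init_state=np.array([0.0]), f_sample=f_sample,
    ln_target=ln_target, symmetric=True, nburn=100, nthin=2, seed=1)

mh_samples, mh_accept = mh_sampler.sample(nsamples=1000)
print(mh_samples.shape, np.mean(mh_accept))  # (1000, 1), acceptance rate
```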
diff --git a/src/psimpy/sampler/saltelli.py b/src/psimpy/sampler/saltelli.py
index 5d5348671bca6fb7f6966fbf3bf84bce7dcf7e3c..22f9586509100828d5324b2314d00d4960933c36 100644
--- a/src/psimpy/sampler/saltelli.py
+++ b/src/psimpy/sampler/saltelli.py
@@ -12,23 +12,21 @@ class Saltelli:
         calc_second_order: bool = True, skip_values: Union[int, None] = None
         ) -> None:
         """Saltelli's version of Sobol' sampling.
-    
-        This class implements a wrapper to use the `saltelli.py` module provided
-        by the `SALib` package.
         
         Parameters
         -------------
         ndim : int
             Dimension of parameters.
         bounds : numpy array, optional
-            Bounds of the `ndim` parameters, where bounds[:, 0] and bounds[:, 1] 
-            correspond to lower and upper bounds, respectively. Shape (ndim, 2).
+            Upper and lower boundaries of each parameter. Shape :code:`(ndim, 2)`.
+            `bounds[:, 0]` corresponds to lower boundaries of each parameter and
+            `bounds[:, 1]` to upper boundaries of each parameter. 
         calc_second_order : bool, optional
-            If True, calculate second-order Sobol' indices.
-            If False, second-order Sobol' indices are not computed.
+            If `True`, calculate second-order Sobol' indices.
+            If `False`, second-order Sobol' indices are not computed.
         skip_values : int, optional
             Number of points to skip in the Sobol' sequence. It should be 
-            ideally a value of base 2.
+            ideally a power of :math:`2`.
         """
         if bounds is None:
             bounds = np.array([[0, 1] for i in range(ndim)])
@@ -49,14 +47,15 @@ class Saltelli:
         Parameters
         ----------
         nbase : int
-            Number of base samples. Correspond to the argument `N` of 
-            `SALib.sample.saltelli.sample`.
+            Number of base samples. Corresponds to the parameter ``N`` of 
+            :py:func:`SALib.sample.saltelli.sample`.
         
         Returns
         -------
         samples : numpy array
-            Shape (nbase*(2*ndim+2), ndim) when `calc_second_order` is True,
-            Shape (nbase*(ndim+2), ndim) when `calc_second_order` is False. 
+            Shape of :code:`(nbase*(2*ndim+2), ndim)` if ``calc_second_order``
+            is `True`. Shape of :code:`(nbase*(ndim+2), ndim)` if
+            ``calc_second_order`` is `False`. 
         """
         samples = saltelli.sample(
             self.problem, nbase, calc_second_order=self.calc_second_order,
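A short usage example of `Saltelli`; per the docstring, the returned sample count is `nbase*(2*ndim+2)` when `calc_second_order` is `True`:

```python
import numpy as np
from psimpy.sampler.saltelli import Saltelli

bounds = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
saltelli_sampler = Saltelli(ndim=3, bounds=bounds, calc_second_order=True)
samples = saltelli_sampler.sample(nbase=128)
print(samples.shape)   # (128 * (2*3 + 2), 3) == (1024, 3)
```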
diff --git a/src/psimpy/sensitivity/sobol.py b/src/psimpy/sensitivity/sobol.py
index c8a53854aaf4509087d778aae56786744504e30a..7e6c5280e3902555a8cf65f4c8d161828d563bba 100644
--- a/src/psimpy/sensitivity/sobol.py
+++ b/src/psimpy/sensitivity/sobol.py
@@ -13,34 +13,36 @@ class SobolAnalyze:
         nresamples: int = 100, conf_level: float = 0.95,
         seed: Union[int, None] = None) -> None:
         """
-        Sobol' sensitivity analysis. This class is built based on 
-        `SALib.analyze.sobol`.
+        Sobol' sensitivity analysis.
+        This class is built on :py:func:`SALib.analyze.sobol`.
         
         Parameters
         ----------
         ndim : int
-            Dimension of parameter.
+            Dimension of parameter ``x``.
         Y : numpy array
             Model outputs evaluated at variable input points drawn by Saltelli
-            sampling. Shape (nsaltelli, nrealizations). nsaltelli is the number
-            of Saltelli sample points, and nrealizations is the number of 
-            realizations of model output at a variale input point.
-            More specially,
-            - nrealizations=1 for a deterministic model which returns a scalar
-              output at a variable input point
-            - nrealizations>1 for a stochastic model which returns multiple
-              realizations of a scalar output at a variable input point, such
-              as a Gaussian process model.
-            If nrealizations=1, an array of shape (nsaltelli, ) is also valid.
+            sampling. Shape of :code:`(nsaltelli, nrealizations)`. ``nsaltelli``
+            is the number of Saltelli sample points, and ``nrealizations`` is
+            the number of realizations of model output at one input point.
+            More specifically: :math:`nrealizations=1` for a deterministic model
+            which returns a scalar output at one input point,
+            and :math:`nrealizations>1` for a stochastic model which returns
+            multiple realizations of a scalar output at one input point (such
+            as a Gaussian process model).
+            If :math:`nrealizations=1`, an array of shape :code:`(nsaltelli,)`
+            is also valid.
         calc_second_order : bool, optional
-            If True, calculate secord order Sobol' indices.
+            If `True`, calculate second-order Sobol' indices.
         nresamples : int, optional
             Size of bootstrap sampling.
         conf_level : float, optional
-            Confidence interval level. Must be a value within the range (0,1).
+            Confidence interval level. Must be a value within the range
+            :math:`(0,1)`.
         seed : int, optional
             Seed to initialize the pseudo-random number generator.
         """
+
         self.ndim = ndim
 
         if not (Y.ndim == 1 or Y.ndim == 2):
@@ -77,25 +79,26 @@ class SobolAnalyze:
         Parameters
         ----------
         mode : str, optional
-            "parallel" - run sobol' analysis for each colomn of `Y` in parallel
-            "serial" - run sobol' analysis for each colomn of `Y` in serial
-            Only relevant if nrealizations>1.
+            `'parallel'` or `'serial'`. Run Sobol' analysis for each column of
+            ``Y`` in parallel or in serial. Only relevant when
+            :math:`nrealizations>1`.
         max_workers : int, optional
-            Controls the maximum number of tasks running in parallel.
+            The maximum number of tasks running in parallel.
             Default is the number of CPUs on the host.
-            Only relevant if nrealizations>1.
+            Only relevant when :math:`nrealizations>1`.
         
         Returns
         -------
         S_res : dict
             Sobol' sensitivity indices.
-            S['S1'] - numpy array of shape (ndim, 3), first-order Sobol' index,
-                      its std, and conf_level for each parameter.
-            S['ST'] - numpy array of shape (ndim, 3), total-order Sobol' index,
-                      its std, and conf_level for each parameter.
-            S['S2'] - numpy array of shape (ndim*(ndim-1)/2, 3), second-order
-                      Sobol' index, its std, and conf_level for each pair of
-                      parameters. Only available if `calc_second_order` is True.
+            `S_res['S1']` - :class:`numpy.ndarray` of shape :code:`(ndim, 3)`,
+            first-order Sobol' index, its std, and conf_level for each parameter.
+            `S_res['ST']` - :class:`numpy.ndarray` of shape :code:`(ndim, 3)`,
+            total-effect Sobol' index, its std, and conf_level for each parameter.
+            `S_res['S2']` - :class:`numpy.ndarray` of shape
+            :code:`(ndim*(ndim-1)/2, 3)`, second-order Sobol' index, its std,
+            and conf_level for each pair of parameters. 
+            `S_res['S2']` is only available if ``calc_second_order`` is `True`.
         """
         if (mode is not None) and (mode not in ["parallel", "serial"]):
             raise ValueError(
@@ -133,7 +136,7 @@ class SobolAnalyze:
 
     
     def _serial_run(self):
-        """Run Sobol' analysis for each colomn of `Y` in serial."""
+        """Run Sobol' analysis for each colomn of ``Y`` in serial."""
         S_all = self._create_S_dict(self.nrealizations)
 
         for nr in range(self.nrealizations):
@@ -147,7 +150,7 @@ class SobolAnalyze:
             
 
     def _parallel_run(self, max_workers):
-        """Run Sobol' analysis for each colomn of `Y` in parallel."""
+        """Run Sobol' analysis for each colomn of ``Y`` in parallel."""
         S_all = self._create_S_dict(self.nrealizations)
 
         with ProcessPoolExecutor(max_workers=max_workers) as executor:
@@ -172,19 +175,22 @@ class SobolAnalyze:
         ----------
         y : numpy array
             Model outputs evaluated at variable input points drawn by Saltelli
-            sampling. 1d array of shape (nsaltelli, ).
+            sampling. 1d array of shape :code:`(nsaltelli,)`.
         
         Returns
         -------
         S: dict
             Sobol's sensitivity indices.
-            S['S1'] - numpy array of shape (ndim, nresamples+1, 1), first-order
-                      Sobol' indices of each parameter.
-            S['ST'] - numpy array of shape (ndim, nresamples+1, 1), total-order
-                      Sobol' indices of each parameter.
-            S['S2'] - numpy array of shape (ndim*(ndim-1)/2, nresamples+1, 1),
-                      second-order Sobol' indices for each pair of parameters.
-                      Only available if `calc_second_order` is True.
+            `S['S1']` - :class:`numpy.ndarray` of shape
+            :code:`(ndim, nresamples+1, 1)`, first-order Sobol' indices of
+            each parameter.
+            `S['ST']` - :class:`numpy.ndarray` of shape
+            :code:`(ndim, nresamples+1, 1)`, total-effect Sobol' indices of
+            each parameter.
+            `S['S2']` - :class:`numpy.ndarray` of shape
+            :code:`(ndim*(ndim-1)/2, nresamples+1, 1)`, second-order Sobol'
+            indices for each pair of parameters.
+            `S['S2']` is only available if ``calc_second_order`` is `True`.
         """
         S = self._create_S_dict(nrealizations=1)
         
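A hedged end-to-end sketch combining `Saltelli` sampling with `SobolAnalyze` for a deterministic model (`nrealizations=1`); the Ishigami-type test function is an arbitrary choice, not part of the package:

```python
import numpy as np
from psimpy.sampler.saltelli import Saltelli
from psimpy.sensitivity.sobol import SobolAnalyze

ndim = 3
bounds = np.array([[-np.pi, np.pi]] * ndim)
samples = Saltelli(ndim, bounds=bounds, calc_second_order=True).sample(nbase=256)

# deterministic test model evaluated at each Saltelli point
Y = (np.sin(samples[:, 0]) + 7 * np.sin(samples[:, 1]) ** 2
     + 0.1 * samples[:, 2] ** 4 * np.sin(samples[:, 0]))   # shape (nsaltelli,)

sobol_analyzer = SobolAnalyze(ndim, Y, calc_second_order=True, seed=0)
S_res = sobol_analyzer.run()
print(S_res['S1'].shape)   # (3, 3): index, std, conf_level per parameter
```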
diff --git a/src/psimpy/simulator/mass_point_model.py b/src/psimpy/simulator/mass_point_model.py
index 77fbd5c11415fe2d27c22fb381a685d2208f07ac..7a1dd49db8cdd7d66ce54c342065ea36380e1947 100644
--- a/src/psimpy/simulator/mass_point_model.py
+++ b/src/psimpy/simulator/mass_point_model.py
@@ -8,7 +8,8 @@ from typing import Union
 from beartype import beartype
 
 class MassPointModel:
-    """Mass point model."""
+
+    """Simulate the movement of a masspoint on a topography."""
     
     @staticmethod
     def _preprocess(elevation, x0, y0):
@@ -28,21 +29,23 @@ class MassPointModel:
         Returns
         -------
         fgrad_x : Callable
-            Call with `fgrad_x(x, y)`. Return first order partial derivative
-            of `elevation` with respect to `x` at location (x, y).
+            Call with :code:`fgrad_x(x, y)`. Return first order partial
+            derivative of ``elevation`` with respect to `x` at location
+            `(x, y)`.
         fgrad_y : callable
-            Call with `fgrad_y(x, y)`. Return first order partial derivative
-            of `elevation` with respect to `y` at location (x, y).
+            Call with :code:`fgrad_y(x, y)`. Return first order partial
+            derivative of ``elevation`` with respect to `y` at location
+            `(x, y)`.
         fgrad_xx : Callable
-            Call with `fgrad_xx(x, y)`. Return second order partial derivative
-            of `elevation` with respect to `x` at location (x, y).
+            Call with :code:`fgrad_xx(x, y)`. Return second order partial
+            derivative of ``elevation`` with respect to `x` at location `(x, y)`.
         fgrad_yy : Callable
-            Call with `fgrad_yy(x, y)`. Return second order partial derivative
-            of `elevation` with respect to `y` at location (x, y).
+            Call with :code:`fgrad_yy(x, y)`. Return second order partial
+            derivative of ``elevation`` with respect to `y` at location `(x, y)`.
         fgrad_xy : Callable
-            Call with `fgrad_xy(x ,y)`. Return second order partial derivative
-            of `elevation` with respect to `x` and `y` at location (x, y).
-        
+            Call with :code:`fgrad_xy(x, y)`. Return second order partial
+            derivative of ``elevation`` with respect to `x` and `y` at location
+            `(x, y)`.
         """        
         if not os.path.exists(elevation):
             raise ValueError(f"{elevation} does not exist")
@@ -86,37 +89,39 @@ class MassPointModel:
         fgrad_xx, fgrad_yy, fgrad_xy, g, curvature):
         """
         Define mass point model (3d) which serves as the callable function
-        `f(t,alpha,*f_args)` in `scipy.integrate.ode(f)`.
+        :code:`f(t, alpha, *f_args)` in :class:`scipy.integrate.ode`.
 
         Parameters:
         -----------
         t : float or int
             Time in seconds.
         alpha : list
-            Contains x, y, ux, uy at time t, i.e., [x(t), y(t), ux(t), uy(t)].
+            Contains `x`, `y`, `ux`, `uy` at time `t`, i.e.,
+            `[x(t), y(t), ux(t), uy(t)]`.
         coulomb_friction : float
             Dry Coulomb friction coefficient.
         turbulent_friction : float or int
-            Turbulent friction coefficient, in m/s^2.
+            Turbulent friction coefficient, in :math:`m/s^2`.
         fgrad_x : Callable
-            Call with `fgrad_x(x, y)`. Return first order partial derivative
-            of `elevation` with respect to `x` at location (x, y).
+            Call with :code:`fgrad_x(x, y)`. Return first order partial
+            derivative of ``elevation`` with respect to `x` at location `(x, y)`.
         fgrad_y : Callable
-            Call with `fgrad_y(x, y)`. Return first order partial derivative
-            of `elevation` with respect to `y` at location (x, y).
+            Call with :code:`fgrad_y(x, y)`. Return first order partial
+            derivative of ``elevation`` with respect to `y` at location `(x, y)`.
         fgrad_xx : Callable
-            Call with `fgrad_xx(x, y)`. Return second order partial derivative
-            of `elevation` with respect to `x` at location (x, y).
+            Call with :code:`fgrad_xx(x, y)`. Return second order partial
+            derivative of ``elevation`` with respect to `x` at location `(x, y)`.
         fgrad_yy : Callable
-            Call with `fgrad_yy(x, y)`. Return second order partial derivative
-            of `elevation` with respect to `y` at location (x, y).
+            Call with :code:`fgrad_yy(x, y)`. Return second order partial
+            derivative of ``elevation`` with respect to `y` at location `(x, y)`.
         fgrad_xy : Callable
-            Call with `fgrad_xy(x ,y)`. Return second order partial derivative
-            of `elevation` with respect to `x` and `y` at location (x, y).
+            Call with :code:`fgrad_xy(x, y)`. Return second order partial
+            derivative of ``elevation`` with respect to `x` and `y` at location
+            `(x, y)`.
         g : float or int
-            Gravity acceleration, in m/s^2.
+            Gravity acceleration, in :math:`m/s^2`.
         curvature : bool
-            Whether to account for the curvature effect.
+            If `True`, take the curvature effect into account.
         
         Return:
         -------
@@ -175,8 +180,8 @@ class MassPointModel:
         g: Union[float, int] = 9.8, atol: float = 1e-6, rtol: float = 1e-6,
         curvature: bool = False) -> np.ndarray:
         """
-        Solve the mass point model using scipy.integrate.ode solver given
-        required input data.
+        Solve the mass point model using the :class:`scipy.integrate.ode`
+        solver, given the required input data.
 
         Parameters
         ----------
@@ -186,15 +191,15 @@ class MassPointModel:
         coulomb_friction : float
             Dry Coulomb friction coefficient.
         turbulent_friction : float or int
-            Turbulent friction coefficient, in m/s^2.
+            Turbulent friction coefficient, in :math:`m/s^2`.
         x0 : float or int
-            x coordinate of initial position.
+            `x` coordinate of initial position.
         y0 : float or int
-            y coordinate of initial position.
+            `y` coordinate of initial position.
         ux0 : float or int
-            Initial velocity in x direction.
+            Initial velocity in `x` direction.
         uy0 : float or int
-            Initial velocity in y direction.
+            Initial velocity in `y` direction.
         dt : float or int
             Time step in seconds.
         tend : float or int
@@ -202,25 +207,27 @@ class MassPointModel:
         t0 : float or int
             Initial time.
         g : float or int
-            Gravity acceleration, in m/s^2.
+            Gravity acceleration, in :math:`m/s^2`.
         atol : float
             Absolute tolerance for solution.
         rtol : float
             Relative tolerance for solution.
         curvature : bool
-            Whether to account for the curvature effect.
+            If `True`, take the curvature effect into account.
         
         Returns
         -------
-        output: numpy array, 2d
-        Time history of the mass point's location and velocity.
-        output[:,0] - time
-        output[:,1] - x coordinate
-        output[:,2] - y coordinate
-        output[:,3] - velocity in x direction
-        output[:,4] - velocity in y direction
-        output[:,5] - total velocity
-        
+        output: numpy array
+            Time history of the mass point's location and velocity.
+            2d :class:`numpy.ndarray`. Each row corresponds to a time step and
+            each column corresponds to a quantity. In total :math:`6` columns,
+            namely :code:`output.shape[1]=6`. More specifically:
+            :code:`output[:,0]` are the time steps,
+            :code:`output[:,1]` are the `x` coordinates,
+            :code:`output[:,2]` are the `y` coordinates,
+            :code:`output[:,3]` are velocity values in `x` direction,
+            :code:`output[:,4]` are velocity values in `y` direction,
+            :code:`output[:,5]` are total velocity values.
         """
         fgrad_x, fgrad_y, fgrad_xx, fgrad_yy, fgrad_xy = \
             self._preprocess(elevation, x0, y0)
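A hedged usage sketch of `MassPointModel.run`; `'topo.tif'` stands in for a GDAL-readable elevation raster, and the keyword names follow the parameter list above:

```python
from psimpy.simulator.mass_point_model import MassPointModel

mass_point_model = MassPointModel()
output = mass_point_model.run(
    elevation='topo.tif', coulomb_friction=0.2, turbulent_friction=500,
    x0=100.0, y0=200.0, ux0=0.0, uy0=0.0, dt=0.1, tend=50)

time_steps = output[:, 0]        # column layout documented above
total_velocity = output[:, 5]
```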
diff --git a/src/psimpy/simulator/ravaflow24.py b/src/psimpy/simulator/ravaflow24.py
index f7106fa8ad29386eda50d9e97e2a942c164d1fe5..4d9bbfc1c9895f462ca72d06ecdbf55f8c92ebfc 100755
--- a/src/psimpy/simulator/ravaflow24.py
+++ b/src/psimpy/simulator/ravaflow24.py
@@ -17,12 +17,12 @@ class Ravaflow24Mixture:
         basal_friction_diff: float = 0.0, stopping_control: str = '0',
         stopping_threshold: float = 0.0, friction_control: str = '0') -> None:
         """
-        A wrapper for the Voellmy-type shallow flow model of r.avaflow 2.4.
+        `r.avaflow 2.4 Mixture model` (Voellmy-type shallow flow model).
 
         Parameters
         ----------
         dir_sim : str
-            Main directory to save simulation outputs.
+            Directory to save output files generated by `r.avaflow`.
         time_step : float or int
             Time step for simulation, in seconds.
         time_end : float or int
@@ -30,47 +30,48 @@ class Ravaflow24Mixture:
         cfl : float
             CFL criterion, a value being equal or smaller than 0.5.
         time_step_length : float
-            If CFL criterion is not applicable, `time_step_length` is used.
-            In seconds. Recommended range is around 0.1 to 0.5.
+            If ``cfl`` is not applicable, ``time_step_length`` is used.
+            In seconds. Recommended range is around :math:`0.1` to :math:`0.5`.
         conversion_control : str
-            If '0', no conversion of flow heights to flow depths.
-            If '1', conversion is applied.
+            `'0'`: no conversion of flow heights to flow depths.
+            `'1'`: conversion is applied.
         curvature_control : str
-            If '0', curvature is neglected.
-            If '1', curvature is considered in the decelerating source terms.
-            If '2', curvature is considered in all relevant terms.
+            `'0'`: curvature is neglected.
+            `'1'`: curvature is considered in the decelerating source terms.
+            `'2'`: curvature is considered in all relevant terms.
         surface_control : str
-            If '0', no balancing of forces.
-            If '1', apply balancing of forces.
+            `'0'`: no balancing of forces.
+            `'1'`: apply balancing of forces.
         entrainment_control : str
-            If '0', no entrainment.
-            If '1', the entrainment coefficient is multiplied with flow momentum.
-            If '2', simplified entrainment and deposition model is used.
-            If '3', combination of '1' for entrainmnet and '2' for deposition.
-            If '4', acceleration-deceleration entrainment and deposition model.
+            `'0'`: no entrainment.
+            `'1'`: the entrainment coefficient is multiplied with flow momentum.
+            `'2'`: simplified entrainment and deposition model is used.
+            `'3'`: combination of `'1'` for entrainment and `'2'` for deposition.
+            `'4'`: acceleration-deceleration entrainment and deposition model.
         shear_velocity_coef : float
-            Only used with entrainment_control being '2' or '3', range [0,1]
+            Only used with ``entrainment_control`` being `'2'` or `'3'`, range
+            :math:`[0,1]`.
         basal_friction_diff : float
             Difference between the basal friction angles of the basal surface
-            and the flow, in degrees. Only used with entrainment_control being
-            '2' or '3'.
+            and the flow, in degrees. Only used with ``entrainment_control``
+            being `'2'` or `'3'`.
         stopping_control : str
-            If '0', no stopping control is used.
-            If '1', stop if the flow kinetic energy is equal or smaller than a
+            `'0'`: no stopping control is used.
+            `'1'`: stop if the flow kinetic energy is equal or smaller than a
             given threshold.
-            If '2', stop if the flow momentum is equal or smaller than a given
+            `'2'`: stop if the flow momentum is equal or smaller than a given
             threshold.
-            If '3', stop if the dynamic flow pressure of all raster cells is
+            `'3'`: stop if the dynamic flow pressure of all raster cells is
             equal or smaller than a given threshold.
         stopping_threshold : float
-            Threshold value for stopping_control.
-            If stopping_control is '1' or '2', stopping_threshold has to be
-            given as the maximum value reached during the flow.
-            If stopping_control is '3', the pressure threshold has to be
+            Threshold value for ``stopping_control``.
+            If ``stopping_control`` is `'1'` or `'2'`, ``stopping_threshold``
+            has to be given as the maximum value reached during the flow.
+            If ``stopping_control`` is `'3'`, the pressure threshold has to be
             specified.
         friction_control : str, optional
-            If '0', no dynamic adaptation of friction parameters.
-            If '1', dynamic adaptation of friction parameters (ignored for the
+            `'0'`: no dynamic adaptation of friction parameters.
+            `'1'`: dynamic adaptation of friction parameters (ignored for the
             mixture model).
         
         """
@@ -124,46 +125,48 @@ class Ravaflow24Mixture:
         entrainment_coef: Union[float, np.floating] = -7.0,
         EPSG: Union[str, None] = None) -> tuple[str, str]:
         """
-        Preprocess simulation input data.
+        Preprocess simulation input data.
 
         Parameters
         ----------
         prefix : str
-            Prefix to name output files.
+            Prefix required by `r.avaflow` to name output files.
         elevation : str
             Name of elevation raster file (including its path).
-            The file format should be supported by GDAL.
+            The file format should be supported by `GDAL`.
             Its unit is in meters.
         hrelease : str
             Name of release height raster file (including its path).
-            The file format should be supported by GDAL.
+            The file format should be supported by `GDAL`.
             Its unit is in meters.
         cellsize : float or int, optional
-            Cell size in meters to be used in simulation.
+            Cell size in meters to be used for simulation.
         hrelease_ratio : float, optional
-            A positive float value to multiple the hrelease in order to control
+            A positive value to multiply ``hrelease`` in order to control
             the release volume.
         internal_friction : float or int, optional
-            Internal friction angle, in degrees, range [0,90).
+            Internal friction angle, in degrees, range :math:`[0,90)`.
         basal_friction : float or int, optional
-            Basal friction angle, in degrees, range [0,90).
+            Basal friction angle, in degrees, range :math:`[0,90)`.
         turbulent_friction : float or int, optional
-            Logarithm with base 10 of the turbulent friction, in m/s^2.
+            Logarithm with base :math:`10` of the turbulent friction, in
+            :math:`m/s^2`.
         entrainment_coef : float, optional
-            Logarithm with base 10 of the entrainment coefficient, except for 0
-            meaning no entrainment.
+            Logarithm with base :math:`10` of the entrainment coefficient,
+            except for :math:`0` meaning no entrainment.
         EPSG : str, optional
-            EPSG (European Petroleum Survey Group) code to create grass location.
-            If None, `elevation` must be a georeferenced file which has metadata
-            to create the grass location. 
+            `EPSG` (European Petroleum Survey Group) code to create
+            `GRASS Location <https://grass.osgeo.org/grass82/manuals/grass_database.html>`_.
+            If `None`, ``elevation`` must be a georeferenced file which has
+            metadata to create the `GRASS Location`. 
         
         Returns
         -------
         grass_location : str
-            Name of the GRASS LOCATION (including path).
+            Name of the `GRASS Location` (including path).
         sh_file : str
             Name of the shell file (including path), which will be called by
-            GRASS to run the simulation. 
+            `GRASS <https://grass.osgeo.org/>`_ to run the simulation. 
         """        
         sh_file = os.path.join(self.dir_sim, f'{prefix}_shell.sh')
         grass_location = os.path.join(self.dir_sim, f'{prefix}_glocation')
@@ -224,10 +227,12 @@ class Ravaflow24Mixture:
     @beartype
     def run(self, grass_location: str, sh_file: str) -> None:
         """
+        Run simulation.
+
         Parameters
         ----------
         grass_location : str
-            Name of the GRASS LOCATION (including path).
+            Name of the `GRASS Location` (including path).
         sh_file : str
             Name of the shell file (including path).
         """
@@ -248,22 +253,22 @@ class Ravaflow24Mixture:
     def extract_impact_area(self, prefix: str, qoi: str = 'h',
         threshold: Union[float, int] = 0.5) -> float:
         """
-        Extract impact area defined by a given quantity of interest (qoi) and
-        its threshold.
+        Extract impact area defined by a given quantity of interest and its
+        threshold.
 
         Parameters
         ----------
         prefix : str
-            Prefix used to name output files.
+            Prefix used by `r.avaflow` to name output files.
         qoi : str
-            qoi to determine the impact area.
-            'h' - maximum flow height, in meters
-            'v' - maximum flow velocity, in m/s
-            'p' - maximum flow pressure, in Pa
-            't' - maximum flow kinetic energy, in J
+            Quantity of interest to determine the impact area.
+            `'h'`: maximum flow height, in meters.
+            `'v'`: maximum flow velocity, in :math:`m/s`.
+            `'p'`: maximum flow pressure, in :math:`Pa`.
+            `'t'`: maximum flow kinetic energy, in :math:`J`.
         threshold : float or int
-            Areas where `qoi` is larger than the threshold are regarded as 
-            impar area.
+            Threshold of ``qoi`` to determine impact area. Areas where ``qoi``
+            is larger than ``threshold`` are regarded as the impact area.
 
         Returns
         -------
@@ -288,30 +293,27 @@ class Ravaflow24Mixture:
     def extract_qoi_max(self, prefix: str, qoi: str,
         aggregate: bool = True) -> Union[float, np.ndarray]:
         """
-        Extract the maximum value(s) of a quantity of interest (qoi) in one 
-        simulation.
+        Extract the maximum value(s) of a quantity of interest.
         
         Parameters
         ----------
         prefix : str
-            Prefix used to name output files.
+            Prefix used by `r.avaflow` to name output files.
         qoi : str
             Quantity of interest.
-            'h' - maximum flow height, in meters
-            'v' - maximum flow velocity, in m/s
-            'p' - maximum flow pressure, in Pa
-            't' - maximum flow kinetic energy, in J
+            `'h'`: maximum flow height, in meters.
+            `'v'`: maximum flow velocity, in :math:`m/s`.
+            `'p'`: maximum flow pressure, in :math:`Pa`.
+            `'t'`: maximum flow kinetic energy, in :math:`J`.
         aggregate : bool
-            If True, returns the overall maximum value over all spatio-tempo
-            grids.
-            If False, returns the maximum values over all time steps at each
-            spatio location, namely a raster of maximum values at location.
+            If `True`, returns the overall maximum value over all
+            spatio-temporal grids. If `False`, returns the maximum values over
+            all time steps at each spatial location, namely a raster of
+            maximum values.
 
         Returns
         -------
         qoi_max : float or numpy array
-            Maximum value(s) of qoi in one simulation.
-            
+            Maximum value(s) of ``qoi`` in one simulation.
         """
         if qoi not in ['h','v','p','t']:
             raise ValueError("qoi must be 'h', 'v', 'p', or 't'")
@@ -329,29 +331,30 @@ class Ravaflow24Mixture:
     def extract_qoi_max_loc(self, prefix: str, loc: np.ndarray,
         qoi: str) -> np.ndarray:
         """
-        Extract maximum value(s) of a quantity of interest (qoi) at specific
+        Extract maximum value(s) of a quantity of interest at specific
         location(s).
         
         Parameters
         ----------
         prefix : str
-            Prefix used to name output files.
-        loc : 2d numpy array
-            Coordinates of interested locations. Shape (nloc, 2), where `nloc`
-            is the number of interested locations.
-            loc[:,0] - x coordinates
-            loc[:,1] - y coordinates
+            Prefix used by `r.avaflow` to name output files.
+        loc : numpy array
+            Coordinates of locations of interest. Shape :code:`(nloc, 2)`,
+            where ``nloc`` is the number of locations of interest.
+            :code:`loc[:,0]` corresponds to `x` coordinates and
+            :code:`loc[:,1]` to `y` coordinates.
         qoi : str
             Quantity of interest.
-            'h' - maximum flow height, in meters
-            'v' - maximum flow velocity, in m/s
-            'p' - maximum flow pressure, in Pa
-            't' - maximum flow kinetic energy, in J
+            `'h'`: maximum flow height, in meters.
+            `'v'`: maximum flow velocity, in :math:`m/s`.
+            `'p'`: maximum flow pressure, in :math:`Pa`.
+            `'t'`: maximum flow kinetic energy, in :math:`J`.
 
         Returns
         -------
         qoi_max_loc : numpy array
-            Consist of maximum value(s) of qoi at each location. Shape (nloc,).
+            Consists of maximum value(s) of ``qoi`` at each location.
+            Shape of :code:`(nloc,)`.
         """       
         if not (loc.ndim == 2 and loc.shape[1] == 2):
             raise ValueError(
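A hedged workflow sketch for `Ravaflow24Mixture`: it requires GRASS GIS and r.avaflow 2.4 to be installed, the raster file names and EPSG code are placeholders, and the `preprocess` method name is an assumption inferred from its docstring:

```python
import numpy as np
from psimpy.simulator.ravaflow24 import Ravaflow24Mixture

voellmy_model = Ravaflow24Mixture(dir_sim='/tmp/sim')

# assumed method name; returns the GRASS Location and shell file per docstring
grass_location, sh_file = voellmy_model.preprocess(
    prefix='demo', elevation='elev.tif', hrelease='hrelease.tif',
    EPSG='32632')   # placeholder EPSG code
voellmy_model.run(grass_location, sh_file)

impact_area = voellmy_model.extract_impact_area('demo', qoi='h', threshold=0.5)
h_max = voellmy_model.extract_qoi_max('demo', qoi='h', aggregate=True)
loc = np.array([[100.0, 200.0], [150.0, 250.0]])   # x, y coordinates
h_max_loc = voellmy_model.extract_qoi_max_loc('demo', loc=loc, qoi='h')
```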
diff --git a/src/psimpy/simulator/run_simulator.py b/src/psimpy/simulator/run_simulator.py
index 03d1cb43dacbe38b62a6b84a6a13f647ccf3b72d..3b2f30dd1a82d5ab481018cdd923318b499ea408 100644
--- a/src/psimpy/simulator/run_simulator.py
+++ b/src/psimpy/simulator/run_simulator.py
@@ -14,34 +14,35 @@ class RunSimulator:
         o_parameter: Union[str, None] = None, dir_out: Union[str, None] = None,
         save_out: bool = False) -> None:
         """
-        Serial and parallel execution of a given simulator.
+        Serial or parallel execution of a given simulator.
 
         Parameters
         ----------
         simulator : Callable
-            A Python function defining the simulator.
-            Its parameters are defined in three parts:
-            - variable parameters are defined by elements of `var_inp_parameter`
-            - fixed parameters are defined by `fix_inp.keys()`
-            - an extra parameter `o_parameter` used to name files if the
-              simulator internally writes data to the disk
-            The simulator should return outputs of interest as a numpy array.
+            A Python function defining the simulator. Its parameters are defined
+            in three parts:
+            ``var_inp_parameter`` defines variable parameters;
+            ``fix_inp`` defines fixed parameters (``fix_inp.keys()``) and their
+            values (``fix_inp.values()``);
+            ``o_parameter`` defines an additional variable which is used to
+            name files saved onto the disk within the function body of
+            ``simulator``.
+            The simulator should return outputs of interest as 
+            :class:`numpy.ndarray`.
         var_inp_parameter :  list of str
-            A list consists of all variable input parameters. Each parameter is
-            a keyword parameter of the simulator.
+            A list consisting of all variable input parameters of ``simulator``.
         fix_inp : dict, optional
-            A dictionary consists of key-value pairs of all fixed input. Each
-            key is a keyword parameter of the simulator.
+            A dictionary consisting of key-value pairs of all fixed inputs of
+            ``simulator``.
         o_parameter : str, optional
-            Keyword parameter of the simulator which is used to name internally
-            saved data files (if defined).
-            It is only relevant if the simulator internally save data to the
-            disk.  
+            Keyword parameter of ``simulator`` which is used to name internally
+            saved files (if defined). It is only relevant if the function body
+            of ``simulator`` saves data onto the disk.  
         dir_out : str, optional
-            Directory to save simulation outputs returned by the simulator.
+            Directory to save outputs of interest returned by ``simulator``.
         save_out : bool, optional
-            Whether to save returned values of the simulator. If True, `dir_out`
-            must be given. 
+            Whether to save returned values of ``simulator``. If `True`,
+            ``dir_out`` must be given. 
         """
         self.simulator = simulator
         self.var_inp_parameter = var_inp_parameter
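A minimal sketch of the three-part parameter convention described above; the simulator, its parameter names, and the values are illustrative only:

```python
import numpy as np
from psimpy.simulator.run_simulator import RunSimulator

# `a`, `b` are variable inputs; `c` is fixed; `prefix` is the o_parameter
# used to name any files the simulator writes to disk.
def simulator(a, b, c=1.0, prefix=None):
    return np.array([a + b * c])

run_simulator = RunSimulator(
    simulator, var_inp_parameter=['a', 'b'], fix_inp={'c': 2.0},
    o_parameter='prefix')
```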
@@ -58,27 +59,31 @@ class RunSimulator:
         self.dir_out = dir_out
         self.save_out = save_out
         
-        self.var_samples = np.array([])
-        self.outputs = []
+        self.var_samples: np.ndarray = np.array([])
+        """Samples of variable input parameters."""
+        self.outputs: list = []
+        """Simulation outputs."""
 
     @beartype
     def serial_run(
         self, var_samples: np.ndarray, prefixes: Union[list[str], None] = None,
         append: bool = False) -> None:
         """
-        Perform serial execution of the simulator at a set of var_samples.
+        Perform serial execution of ``simulator`` at a set of ``var_samples``.
 
         Parameters
         ----------
-        var_samples : list or numpy array
-            Samples of variable inputs. 
-            The first dimension corresponds to the number of samples.
-            The second dimension corresponds to the number of variable inputs. 
+        var_samples : numpy array
+            Samples of variable input parameters. The first axis
+            (:code:`var_samples.shape[0]`) corresponds to the number of samples.
+            The second axis (:code:`var_samples.shape[1]`) corresponds to the
+            number of variable input parameters. 
         prefixes :  list of str, optional
-            Consist of len(var_samples) prefixes. Each of them is used to name
-            corresponding simulation output file. 
+            Contains :code:`len(var_samples)` prefixes. Each of them is used
+            to name the corresponding simulation output file. 
         append : bool, optional
-            Whether to append var_samples to existing samples (if any).
+            If `True`, append ``var_samples`` to existing samples and append
+            the corresponding simulation outputs.
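+
+        Examples
+        --------
+        A minimal sketch, assuming ``run_simulator`` was constructed as in
+        the class example with two variable inputs ``x1`` and ``x2``:
+
+        >>> var_samples = np.array([[1.0, 2.0], [3.0, 4.0]])
+        >>> run_simulator.serial_run(var_samples)
+        >>> len(run_simulator.outputs)
+        2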
         """
         num_pre_run, num_new_run, kwargs_num_new_run, prefixes = \
             self._preprocess(var_samples, prefixes, append)
@@ -104,18 +109,21 @@ class RunSimulator:
         self, var_samples: np.ndarray, prefixes: Union[list[str], None] = None,
         append: bool = False, max_workers: Union[int, None] = None) -> None:
         """
-        Perform parallel execution of the simulator at a set of var_samples.
+        Perform parallel execution of ``simulator`` at a set of ``var_samples``.
 
         Parameters
         ----------
-        var_samples : list or numpy array
-            Samples of variable inputs. 
-            The first dimension corresponds to the number of samples.
-            The second dimension corresponds to the number of variable inputs. 
+        var_samples : numpy array
+            Samples of variable input parameters. The first axis
+            (:code:`var_samples.shape[0]`) corresponds to the number of samples.
+            The second axis (:code:`var_samples.shape[1]`) corresponds to the
+            number of variable input parameters.
         prefixes :  list of str, optional
-            Each prefix is used to name corresponding simulation output file.
+            Contains :code:`len(var_samples)` prefixes. Each of them is used
+            to name the corresponding simulation output file.
         append : bool, optional
-            Whether to append var_samples to existing samples (if any).
+            If `True`, append ``var_samples`` to existing samples and append
+            the corresponding simulation outputs.
         max_workers : int, optional
             Controls the maximum number of tasks running in parallel.
             Default is the number of CPUs on the host.        
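+
+        Examples
+        --------
+        A minimal sketch, assuming the same ``run_simulator`` and
+        ``var_samples`` as in the :meth:`.serial_run` example:
+
+        >>> run_simulator.parallel_run(var_samples, max_workers=2)
+        >>> len(run_simulator.outputs)
+        2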
@@ -144,18 +152,21 @@ class RunSimulator:
     def _preprocess(self, var_samples: np.ndarray, prefixes: list[str],
         append: bool) -> tuple[int, int, list[dict], list[str]]:
         """
-        Preprocess required inputs for `serial_run` and `parallel_run`.
+        Preprocess inputs for :meth:`.serial_run` and :meth:`.parallel_run`.
 
         Parameters
         ----------
         var_samples : numpy array
-            Samples of variable inputs. 
-            The first dimension corresponds to the number of samples.
-            The second dimension corresponds to the number of variable inputs. 
+            Samples of variable input parameters. The first axis
+            (:code:`var_samples.shape[0]`) corresponds to the number of samples.
+            The second axis (:code:`var_samples.shape[1]`) corresponds to the
+            number of variable input parameters.
         prefixes :  list of str, optional
-            Each prefix is used to name corresponding simulation output file.
+            Contains :code:`len(var_samples)` prefixes. Each of them is used
+            to name the corresponding simulation output file.
         append : bool
-            Whether to append var_samples to existing samples (if any).
+            If `True`, append ``var_samples`` to existing samples and append
+            the corresponding simulation outputs.
 
         Returns
         -------
@@ -164,8 +175,8 @@ class RunSimulator:
         num_new_run : int
             Number of new runs. 
         kwargs_num_new_run: list
-            Contains num_new_run dictionaries, each of which corresponds to 
-            kwargs of one simulation.     
+            Contains ``num_new_run`` dictionaries, each of which holds the
+            keyword arguments (``kwargs``) of one simulation run.
         prefixes : list of str
         """
         if var_samples.ndim == 0 or var_samples.ndim == 1: