diff --git a/Manifest.toml b/Manifest.toml
index 1f2084e5cba89f4dc5e406e60dd00b934e7b8092..c706cb928bde12e770041c5b425ea4ceee40887e 100644
--- a/Manifest.toml
+++ b/Manifest.toml
@@ -2,7 +2,7 @@
 
 julia_version = "1.10.7"
 manifest_format = "2.0"
-project_hash = "bcfa8e1e4877bb2345aa32eda00557cc95cb64c9"
+project_hash = "31692c30ec374ec6d8210108b53e1f1518c9ec54"
 
 [[deps.ANSIColoredPrinters]]
 git-tree-sha1 = "574baf8110975760d391c710b6341da1afa48d8c"
@@ -319,6 +319,24 @@ git-tree-sha1 = "53bb909d1151e57e2484c3d1b53e19552b887fb2"
 uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe"
 version = "1.0.2"
 
+[[deps.HDF5]]
+deps = ["Compat", "HDF5_jll", "Libdl", "MPIPreferences", "Mmap", "Preferences", "Printf", "Random", "Requires", "UUIDs"]
+git-tree-sha1 = "e856eef26cf5bf2b0f95f8f4fc37553c72c8641c"
+uuid = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
+version = "0.17.2"
+
+    [deps.HDF5.extensions]
+    MPIExt = "MPI"
+
+    [deps.HDF5.weakdeps]
+    MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
+
+[[deps.HDF5_jll]]
+deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "LibCURL_jll", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "OpenSSL_jll", "TOML", "Zlib_jll", "libaec_jll"]
+git-tree-sha1 = "82a471768b513dc39e471540fdadc84ff80ff997"
+uuid = "0234f1f7-429e-5d53-9886-15a909be8d59"
+version = "1.14.3+3"
+
 [[deps.HTTP]]
 deps = ["Base64", "CodecZlib", "ConcurrentUtilities", "Dates", "ExceptionUnwrapping", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"]
 git-tree-sha1 = "d1d712be3164d61d1fb98e7ce9bcbc6cc06b45ed"
@@ -331,6 +349,12 @@ git-tree-sha1 = "401e4f3f30f43af2c8478fc008da50096ea5240f"
 uuid = "2e76f6c2-a576-52d4-95c1-20adfe4de566"
 version = "8.3.1+0"
 
+[[deps.Hwloc_jll]]
+deps = ["Artifacts", "JLLWrappers", "Libdl"]
+git-tree-sha1 = "50aedf345a709ab75872f80a2779568dc0bb461b"
+uuid = "e33a78d0-f292-5ffc-b300-72abe9b543c8"
+version = "2.11.2+1"
+
 [[deps.IOCapture]]
 deps = ["Logging", "Random"]
 git-tree-sha1 = "b6d6bfdd7ce25b0f9b2f6b3dd56b2673a66c8770"
@@ -432,6 +456,10 @@ git-tree-sha1 = "8f7f3cabab0fd1800699663533b6d5cb3fc0e612"
 uuid = "0e77f7df-68c5-4e49-93ce-4cd80f5598bf"
 version = "1.2.2"
 
+[[deps.LazyArtifacts]]
+deps = ["Artifacts", "Pkg"]
+uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
+
 [[deps.LibCURL]]
 deps = ["LibCURL_jll", "MozillaCACerts_jll"]
 uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
@@ -536,6 +564,24 @@ git-tree-sha1 = "c1dd6d7978c12545b4179fb6153b9250c96b0075"
 uuid = "e6f89c97-d47a-5376-807f-9c37f3926c36"
 version = "1.0.3"
 
+[[deps.MPICH_jll]]
+deps = ["Artifacts", "CompilerSupportLibraries_jll", "Hwloc_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"]
+git-tree-sha1 = "7715e65c47ba3941c502bffb7f266a41a7f54423"
+uuid = "7cb0a576-ebde-5e09-9194-50597f1243b4"
+version = "4.2.3+0"
+
+[[deps.MPIPreferences]]
+deps = ["Libdl", "Preferences"]
+git-tree-sha1 = "c105fe467859e7f6e9a852cb15cb4301126fac07"
+uuid = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267"
+version = "0.1.11"
+
+[[deps.MPItrampoline_jll]]
+deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"]
+git-tree-sha1 = "70e830dab5d0775183c99fc75e4c24c614ed7142"
+uuid = "f1f71cc9-e9ae-5b93-9b94-4fe0e1ad3748"
+version = "5.5.1+0"
+
 [[deps.MacroTools]]
 deps = ["Markdown", "Random"]
 git-tree-sha1 = "2fa9ee3e63fd3a4f7a9a4f4744a52f4856de82df"
@@ -574,6 +620,12 @@ git-tree-sha1 = "c13304c81eec1ed3af7fc20e75fb6b26092a1102"
 uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e"
 version = "0.3.2"
 
+[[deps.MicrosoftMPI_jll]]
+deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
+git-tree-sha1 = "bc95bf4149bf535c09602e3acdf950d9b4376227"
+uuid = "9237b28f-5490-5468-be7b-bb81f5f5e6cf"
+version = "10.1.4+3"
+
 [[deps.Missings]]
 deps = ["DataAPI"]
 git-tree-sha1 = "ec4f7fbeab05d7747bdf98eb74d130a2a2ed298d"
@@ -625,6 +677,12 @@ deps = ["Artifacts", "Libdl"]
 uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
 version = "0.8.1+2"
 
+[[deps.OpenMPI_jll]]
+deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"]
+git-tree-sha1 = "e25c1778a98e34219a00455d6e4384e017ea9762"
+uuid = "fe0851c0-eecd-5654-98d4-656369965a5c"
+version = "4.1.6+0"
+
 [[deps.OpenSSL]]
 deps = ["BitFlags", "Dates", "MozillaCACerts_jll", "OpenSSL_jll", "Sockets"]
 git-tree-sha1 = "38cb508d080d21dc1128f7fb04f20387ed4c0af4"
@@ -1186,6 +1244,12 @@ git-tree-sha1 = "3516a5630f741c9eecb3720b1ec9d8edc3ecc033"
 uuid = "1a1c6b14-54f6-533d-8383-74cd7377aa70"
 version = "3.1.1+0"
 
+[[deps.libaec_jll]]
+deps = ["Artifacts", "JLLWrappers", "Libdl"]
+git-tree-sha1 = "46bf7be2917b59b761247be3f317ddf75e50e997"
+uuid = "477f73a3-ac25-53e9-8cc3-50b2fa2566f0"
+version = "1.1.2+0"
+
 [[deps.libaom_jll]]
 deps = ["Artifacts", "JLLWrappers", "Libdl"]
 git-tree-sha1 = "1827acba325fdcdf1d2647fc8d5301dd9ba43a9d"
diff --git a/Project.toml b/Project.toml
index bef1ada1b4547e9f5435e28561c8d0d62ae643ed..7d4acf1058da59fb7f75e30612470461f56714dd 100644
--- a/Project.toml
+++ b/Project.toml
@@ -6,6 +6,7 @@ version = "0.1.0"
 [deps]
 Coverage = "a2441757-f6aa-5fb2-8edb-039e3f45d037"
 Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
+HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
 JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
diff --git a/examples/reconstruct/README.md b/examples/reconstruct/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2f081b12e486cce30d11e8bf61469505eaec6726
--- /dev/null
+++ b/examples/reconstruct/README.md
@@ -0,0 +1,8 @@
+## Installation
+
+The PyTorch scripts in this folder need `h5py` and PyTorch; one way to set up
+the environment is with conda:
+
+```
+conda create -n torch
+conda activate torch
+conda install h5py
+conda install pytorch::pytorch torchvision torchaudio -c pytorch
+```
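+
+## Usage
+
+A minimal sketch of the intended workflow (the `--project` path below is an
+assumption; point it at the environment that provides SparseTensorMoments):
+
+```
+julia --project=../.. generate.jl   # writes X.h5, Y.h5, A.h5
+python test-torch-cp.py             # CP reconstruction from the measurements
+python test-torch-tucker.py         # Tucker reconstruction
+```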
diff --git a/examples/reconstruct/generate.jl b/examples/reconstruct/generate.jl
new file mode 100644
index 0000000000000000000000000000000000000000..07cc3ffe12d3c4c70798754acd2ba5dadf04c4e3
--- /dev/null
+++ b/examples/reconstruct/generate.jl
@@ -0,0 +1,35 @@
+using HDF5
+using LinearAlgebra
+using SparseTensorMoments
+
+# Define centers and alphas
+centers = [
+    [0.5, 0.5, 0.5],
+    [-0.5, -0.5, -0.5]
+]
+alphas_f = [2.5, 2.5]
+
+# Create the function f(x)
+function make_f(centers, alphas_f)
+    function f(x::Vector{<:Real})
+        sum(exp.(-alphas_f .* ([norm(x .- centers[i])^2 for i in 1:length(centers)])))
+    end
+    return f
+end
+
+f = make_f(centers, alphas_f)
+
+# Define parameters
+n = 5
+width = 1.0
+max_exponent = 10
+
+# Generate X, Y, A
+X, Y, A = generate(f, width, n, max_exponent)
+
+rm("X.h5"); h5write("X.h5", "X", X)
+rm("Y.h5"); h5write("Y.h5", "Y", Y)
+rm("A.h5"); h5write("A.h5", "A", A)
diff --git a/examples/reconstruct/inspiration/test_cp.py b/examples/reconstruct/inspiration/test_cp.py
new file mode 100644
index 0000000000000000000000000000000000000000..a132f69415a73df984203ae30579feabd042667c
--- /dev/null
+++ b/examples/reconstruct/inspiration/test_cp.py
@@ -0,0 +1,126 @@
+import torch
+import torch.nn as nn
+import torch.optim as optim
+
+# Set random seed for reproducibility
+torch.manual_seed(0)
+
+# ================================================
+# Step 1: Define Dimensions and Rank
+# ================================================
+
+# Dimensions of the tensor X (I x J x K)
+I = 10  # Size along the first mode
+J = 10  # Size along the second mode
+K = 10  # Size along the third mode
+
+# Rank of the CP decomposition
+R = 3
+
+# Number of measurements
+M = 200  # You can adjust this based on your problem
+
+# ================================================
+# Step 2: Generate Synthetic Data for Y_true
+# ================================================
+
+# Ground truth factor matrices for synthetic tensor X_true
+A_true = torch.randn(I, R)
+B_true = torch.randn(J, R)
+C_true = torch.randn(K, R)
+
+def reconstruct_tensor(A_factors, B_factors, C_factors):
+    """
+    Reconstructs tensor X from its CP decomposition factors.
+    """
+    # X[i, j, k] = sum_r A_factors[i, r] * B_factors[j, r] * C_factors[k, r]
+    X = torch.einsum('ir,jr,kr->ijk', A_factors, B_factors, C_factors)
+    return X
+
+# Reconstruct the ground truth tensor X_true
+X_true = reconstruct_tensor(A_true, B_true, C_true)
+X_true_vectorized = X_true.view(-1)  # Flatten the tensor to a vector
+
+# Measurement matrix A_matrix (M x N), where N = I * J * K
+N = I * J * K  # Total number of elements in tensor X
+A_matrix = torch.randn(M, N)  # Random linear measurement operator
+
+# Compute the true measurements Y_true = A_matrix @ X_true_vectorized
+Y_true = A_matrix @ X_true_vectorized
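+
+# Note: M = 200 measurements for N = 1000 unknowns, so the system is heavily
+# underdetermined; recovery only works because X_true has low CP rank
+# (3 * I * R = 90 free parameters).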
+
+# ================================================
+# Step 3: Initialize Factor Matrices to Optimize
+# ================================================
+
+# Initialize factor matrices with gradients enabled
+A_factors = nn.Parameter(torch.randn(I, R))
+B_factors = nn.Parameter(torch.randn(J, R))
+C_factors = nn.Parameter(torch.randn(K, R))
+
+# ================================================
+# Step 4: Define the Loss Function
+# ================================================
+
+def loss_function(A_factors, B_factors, C_factors, Y_true, A_matrix, lambda_reg=0.0):
+    """
+    Computes the loss as the difference between predicted and true measurements.
+    Optionally adds regularization to enforce low-rank structure.
+    """
+    # Reconstruct tensor X from factors
+    X = reconstruct_tensor(A_factors, B_factors, C_factors)
+    X_vectorized = X.view(-1)  # Flatten the tensor
+
+    # Predicted measurements
+    Y_pred = A_matrix @ X_vectorized
+
+    # Measurement error
+    loss = torch.norm(Y_pred - Y_true) ** 2
+
+    # Regularization (optional)
+    if lambda_reg > 0:
+        reg = lambda_reg * (torch.norm(A_factors) + torch.norm(B_factors) + torch.norm(C_factors))
+        loss += reg
+
+    return loss
+
+# ================================================
+# Step 5: Set Up the Optimizer
+# ================================================
+
+# List of parameters to optimize
+params = [A_factors, B_factors, C_factors]
+
+# Choose an optimizer (e.g., Adam)
+optimizer = optim.Adam(params, lr=0.01)
+
+# ================================================
+# Step 6: Optimization Loop
+# ================================================
+
+num_epochs = 10000
+lambda_reg = 1e-4  # Regularization coefficient (adjust as needed)
+
+for epoch in range(num_epochs):
+    optimizer.zero_grad()
+
+    # Compute loss
+    loss = loss_function(A_factors, B_factors, C_factors, Y_true, A_matrix, lambda_reg)
+
+    # Backpropagation
+    loss.backward()
+
+    # Update parameters
+    optimizer.step()
+
+    # Print loss every 100 epochs
+    if (epoch + 1) % 100 == 0:
+        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
+
+# ================================================
+# Step 7: Evaluate the Reconstruction
+# ================================================
+
+# Reconstruct the tensor from optimized factors
+X_recovered = reconstruct_tensor(A_factors, B_factors, C_factors)
+reconstruction_error = torch.norm(X_recovered - X_true) / torch.norm(X_true)
+print(f'\nReconstruction Error: {reconstruction_error.item():.6f}')
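+
+# Optional extra check (a sketch using the names above): the relative residual
+# on the measurements themselves should also be small after convergence.
+measurement_residual = torch.norm(A_matrix @ X_recovered.view(-1) - Y_true) / torch.norm(Y_true)
+print(f'Measurement Residual: {measurement_residual.item():.6f}')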
diff --git a/examples/reconstruct/inspiration/test_tucker.py b/examples/reconstruct/inspiration/test_tucker.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8a2013bc64be2f33a3dc8ce3bdb32bbb6b40ba3
--- /dev/null
+++ b/examples/reconstruct/inspiration/test_tucker.py
@@ -0,0 +1,131 @@
+import torch
+import torch.nn as nn
+import torch.optim as optim
+
+# Set random seed for reproducibility
+torch.manual_seed(0)
+
+# ================================================
+# Step 1: Define Dimensions and Tucker Ranks
+# ================================================
+
+# Dimensions of the tensor X (I x J x K)
+I = 10  # Size along the first mode
+J = 10  # Size along the second mode
+K = 10  # Size along the third mode
+
+# Tucker ranks along each mode
+R1 = 3  # Rank along the first mode
+R2 = 3  # Rank along the second mode
+R3 = 3  # Rank along the third mode
+
+# Number of measurements
+M = 200  # Adjust based on your problem
+
+# ================================================
+# Step 2: Generate Synthetic Data for Y_true
+# ================================================
+
+# Ground truth factor matrices and core tensor for synthetic tensor X_true
+A_true = torch.randn(I, R1)
+B_true = torch.randn(J, R2)
+C_true = torch.randn(K, R3)
+G_true = torch.randn(R1, R2, R3)  # Core tensor
+
+def reconstruct_tensor_tucker(G, A_factors, B_factors, C_factors):
+    """
+    Reconstructs tensor X from its Tucker decomposition factors.
+    """
+    # Use single-character subscripts for einsum
+    X = torch.einsum('abc,ia,jb,kc->ijk', G, A_factors, B_factors, C_factors)
+    return X
+
+# Reconstruct the ground truth tensor X_true
+X_true = reconstruct_tensor_tucker(G_true, A_true, B_true, C_true)
+X_true_vectorized = X_true.view(-1)  # Flatten the tensor to a vector
+
+# Measurement matrix A_matrix (M x N), where N = I * J * K
+N = I * J * K  # Total number of elements in tensor X
+A_matrix = torch.randn(M, N)  # Random linear measurement operator
+
+# Compute the true measurements Y_true = A_matrix @ X_true_vectorized
+Y_true = A_matrix @ X_true_vectorized
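+
+# Note: the Tucker model has I*R1 + J*R2 + K*R3 + R1*R2*R3 = 117 parameters,
+# far fewer than the N = 1000 entries of X; this gap is what makes recovery
+# from M = 200 linear measurements plausible.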
+
+# ================================================
+# Step 3: Initialize Factors and Core Tensor to Optimize
+# ================================================
+
+# Initialize factor matrices and core tensor with gradients enabled
+A_factors = nn.Parameter(torch.randn(I, R1))
+B_factors = nn.Parameter(torch.randn(J, R2))
+C_factors = nn.Parameter(torch.randn(K, R3))
+G = nn.Parameter(torch.randn(R1, R2, R3))
+
+# ================================================
+# Step 4: Define the Loss Function
+# ================================================
+
+def loss_function(A_factors, B_factors, C_factors, G, Y_true, A_matrix, lambda_reg=0.0):
+    """
+    Computes the loss as the difference between predicted and true measurements.
+    """
+    # Reconstruct tensor X from Tucker factors
+    X = reconstruct_tensor_tucker(G, A_factors, B_factors, C_factors)
+    X_vectorized = X.view(-1)  # Flatten the tensor
+
+    # Predicted measurements
+    Y_pred = A_matrix @ X_vectorized
+
+    # Measurement error
+    loss = torch.norm(Y_pred - Y_true) ** 2
+
+    # Regularization (optional)
+    if lambda_reg > 0:
+        reg = lambda_reg * (
+            torch.norm(A_factors) + torch.norm(B_factors) + torch.norm(C_factors) + torch.norm(G)
+        )
+        loss += reg
+
+    return loss
+
+# ================================================
+# Step 5: Set Up the Optimizer
+# ================================================
+
+# List of parameters to optimize
+params = [A_factors, B_factors, C_factors, G]
+
+# Choose an optimizer (e.g., Adam)
+optimizer = optim.Adam(params, lr=0.01)
+
+# ================================================
+# Step 6: Optimization Loop
+# ================================================
+
+num_epochs = 10000
+lambda_reg = 1e-4  # Regularization coefficient (adjust as needed)
+
+for epoch in range(num_epochs):
+    optimizer.zero_grad()
+
+    # Compute loss
+    loss = loss_function(A_factors, B_factors, C_factors, G, Y_true, A_matrix, lambda_reg)
+
+    # Backpropagation
+    loss.backward()
+
+    # Update parameters
+    optimizer.step()
+
+    # Print loss every 100 epochs
+    if (epoch + 1) % 100 == 0:
+        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
+
+# ================================================
+# Step 7: Evaluate the Reconstruction
+# ================================================
+
+# Reconstruct the tensor from optimized factors
+X_recovered = reconstruct_tensor_tucker(G, A_factors, B_factors, C_factors)
+reconstruction_error = torch.norm(X_recovered - X_true) / torch.norm(X_true)
+print(f'\nReconstruction Error: {reconstruction_error.item():.6f}')
diff --git a/examples/reconstruct/inspiration/thetanorm.jl b/examples/reconstruct/inspiration/thetanorm.jl
new file mode 100644
index 0000000000000000000000000000000000000000..843fd832e65b4d981c33cdeb4818c8e4cab3da48
--- /dev/null
+++ b/examples/reconstruct/inspiration/thetanorm.jl
@@ -0,0 +1,21 @@
+
+include("ThetaNorm.jl")
+include("TensorComputation.jl")
+
+using Random
+Random.seed!(1234) # set the seed for reproducibility
+
+size_tensor = (3,3,3) # size of the tensor
+r = 1 # rank of the tensor
+coefs = randn(r) # coefficients of rank-1 tensors
+x0 = create_tensor_CP(size_tensor, coefs, "normal"); # create a rank-1 tensor with Gaussian entries
+m = 3^6-1 # number of measurements
+A, b = create_measurements(x0, m); # create gaussian linear measurements
+# A is a list of length m; each element is a flattened tensor, i.e. a vector
+# of size 3*3*3 = 27. b is also a list of length m, with entries b_i = <A_i, x0>.
+
+n, x = recovery_theta1(size_tensor, A, b); # using nuclear-2 theta-1 norm minimization for recovery
+# n is the theta-1 norm, x is the recovered tensor
+
+x = reshape(x, size(x0)) # reshape the recovered tensor to its original shape
+relative_error(x0, x) # compare the difference
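+
+# Shape sanity checks (a sketch, assuming the list layout described above):
+# @assert length(A) == m && all(length(a) == prod(size_tensor) for a in A)
+# @assert length(b) == m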
diff --git a/examples/reconstruct/test.jl b/examples/reconstruct/test-projected-gd.jl
similarity index 65%
rename from examples/reconstruct/test.jl
rename to examples/reconstruct/test-projected-gd.jl
index 7aa4d16ed4c65cb2868cb1cb4545daa6a7f680da..70cd450326af225e73dcc9e26bd2b0d06da0b97c 100644
--- a/examples/reconstruct/test.jl
+++ b/examples/reconstruct/test-projected-gd.jl
@@ -5,36 +5,10 @@ include("TensorComputation.jl")
 using Random
 Random.seed!(1234) # set the seed for reproducibility
 
-using LinearAlgebra
-using Serialization
-using SparseTensorMoments
-using Test
-
-# Define centers and alphas
-centers = [
-    [0.5, 0.5, 0.5],
-    [-0.5, -0.5, -0.5]
-]
-alphas_f = [2.5, 2.5]
-
-# Create the function f(x)
-function make_f(centers, alphas_f)
-    function f(x::Vector{<:Real})
-        sum(exp.(-alphas_f .* ([norm(x .- centers[i])^2 for i in 1:length(centers)])))
-    end
-    return f
-end
-
-f = make_f(centers, alphas_f)
-
-# Define parameters
-n = 5
-width = 1.0
-max_exponent = 10
-
-# Generate X, Y, A
-X, Y, A = generate(f, width, n, max_exponent)
 
+X = h5read("X.h5", "X")
+A = h5read("A.h5", "A")
+Y = h5read("Y.h5", "Y")
 
 
 # A_array = [A[i,:] for i in 1:size(A,1)]
@@ -58,15 +32,11 @@ X, Y, A = generate(f, width, n, max_exponent)
 # @assert relative_error(X,x_re) < 1E-2
 
 
-function projection(X)
+function projection(X, rank)
   hosvd_x = hosvd(X);
   core_tmp = hosvd_x["core"]
-  # core[abs.(core).<1e-5] .= 0
-  desired_rank = 2
-  hosvd_x["core"] = zeros(size(X))  # Create a zero matrix of the same size as A
-  hosvd_x["core"][1:desired_rank, 1:desired_rank, 1:desired_rank] .= core_tmp[1:desired_rank, 1:desired_rank, 1:desired_rank]
-  # @info core
-  # @info hosvd_x["core"]
+  hosvd_x["core"] = zeros(size(X))
+  hosvd_x["core"][1:rank, 1:rank, 1:rank] .= core_tmp[1:rank, 1:rank, 1:rank]
   return hosvd_reconstruct(hosvd_x)
 end
 
@@ -74,7 +44,7 @@ function gradient(x)
     return reshape(A' * (A * vec(x) - Y), size(x))
 end
 
-function optimal_gradient_descent(x, alpha, max_iter; atol=1E-10, rtol=1E-7)
+function optimal_gradient_descent(x, alpha, max_iter, rank; atol=1E-10, rtol=1E-7)
     hist = []
     for i in 1:max_iter
         grad = gradient(x)
@@ -86,11 +56,12 @@ function optimal_gradient_descent(x, alpha, max_iter; atol=1E-10, rtol=1E-7)
         s = A * vec(x) - Y
         Ad = A * vec(grad)
         alpha = (s' * Ad) / (Ad' * Ad)
-        x = projection(x - alpha * grad)
+        x = projection(x - alpha * grad, rank)
     end
     return (;x, hist)
 end
-res = optimal_gradient_descent(ones(size(X)), 1, 5000);
+rank = 2  # target multilinear rank along each mode
+res = optimal_gradient_descent(ones(size(X)), 1, 5000, rank);
 relative_error(X, res.x)
 plot(res.hist, yaxis=:log, labels="resnorm")
 
diff --git a/examples/reconstruct/test-thetanorm.jl b/examples/reconstruct/test-thetanorm.jl
new file mode 100644
index 0000000000000000000000000000000000000000..457a7d6dbad91250422e1d68200bc33fe2ec703e
--- /dev/null
+++ b/examples/reconstruct/test-thetanorm.jl
@@ -0,0 +1,30 @@
+# Doesn't work yet: the optimizer takes too long at this problem size
+include("ThetaNorm.jl")
+include("TensorComputation.jl")
+
+using Random
+Random.seed!(1234) # set the seed for reproducibility
+
+
+using HDF5
+
+X = h5read("X.h5", "X")
+A = h5read("A.h5", "A")
+Y = h5read("Y.h5", "Y")
+
+A_array = [A[i,:] for i in 1:size(A,1)]
+
+n, x = recovery_theta1(size(X), A_array, Y);
+x = reshape(x, size(X))
+relative_error(X,x)
+
+hosvd_x = hosvd(x);
+
+core = hosvd_x["core"]
+multirank(core)
+
+core[abs.(core).<1e-5] .= 0
+multirank(core)
+
+hosvd_x["core"] = core
+x_re = hosvd_reconstruct(hosvd_x)
+@assert relative_error(X,x_re) < 1E-2
+
diff --git a/examples/reconstruct/test-torch-cp.py b/examples/reconstruct/test-torch-cp.py
new file mode 100644
index 0000000000000000000000000000000000000000..a078fed98c01b5baff1a4402d019464d6e598bbb
--- /dev/null
+++ b/examples/reconstruct/test-torch-cp.py
@@ -0,0 +1,145 @@
+import h5py
+import torch
+import torch.nn as nn
+import torch.optim as optim
+
+# Load the data written by generate.jl. HDF5.jl stores column-major Julia
+# arrays, so h5py sees the dimensions reversed; transposing A restores the
+# (measurements x entries) orientation.
+with h5py.File('X.h5', 'r') as f:
+    X = torch.from_numpy(f['X'][:]).float()
+with h5py.File('Y.h5', 'r') as f:
+    Y = torch.from_numpy(f['Y'][:]).float()
+with h5py.File('A.h5', 'r') as f:
+    A = torch.from_numpy(f['A'][:]).float().t()
+
+# Set random seed for reproducibility
+torch.manual_seed(0)
+
+# ================================================
+# Step 1: Define Dimensions and Rank
+# ================================================
+
+# Dimensions of the tensor X (I x J x K), taken from the loaded data
+I, J, K = X.shape
+
+# Rank of the CP decomposition
+R = 2
+
+# Number of measurements (rows of A)
+M = A.shape[0]
+
+# ================================================
+# Step 2: Define the CP Reconstruction
+# ================================================
+
+def reconstruct_tensor(A_factors, B_factors, C_factors):
+    """
+    Reconstructs tensor X from its CP decomposition factors.
+    """
+    # X[i, j, k] = sum_r A_factors[i, r] * B_factors[j, r] * C_factors[k, r]
+    X = torch.einsum('ir,jr,kr->ijk', A_factors, B_factors, C_factors)
+    return X
+
+# ================================================
+# Step 3: Initialize Factor Matrices to Optimize
+# ================================================
+
+# Initialize factor matrices with gradients enabled
+A_factors = nn.Parameter(torch.randn(I, R))
+B_factors = nn.Parameter(torch.randn(J, R))
+C_factors = nn.Parameter(torch.randn(K, R))
+
+# ================================================
+# Step 4: Define the Loss Function
+# ================================================
+
+def loss_function(A_factors, B_factors, C_factors, Y_true, A_matrix, lambda_reg=0.0):
+    """
+    Computes the loss as the difference between predicted and true measurements.
+    Optionally adds regularization to enforce low-rank structure.
+    """
+    # Reconstruct tensor X from factors
+    X = reconstruct_tensor(A_factors, B_factors, C_factors)
+    X_vectorized = X.view(-1)  # Flatten the tensor
+
+    # Predicted measurements
+    Y_pred = A_matrix @ X_vectorized
+
+    # Measurement error
+    loss = torch.norm(Y_pred - Y_true) ** 2
+
+    # Regularization (optional)
+    if lambda_reg > 0:
+        reg = lambda_reg * (torch.norm(A_factors) + torch.norm(B_factors) + torch.norm(C_factors))
+        loss += reg
+
+    return loss
+
+# ================================================
+# Step 5: Set Up the Optimizer
+# ================================================
+
+# List of parameters to optimize
+params = [A_factors, B_factors, C_factors]
+
+# Choose an optimizer (e.g., Adam)
+optimizer = optim.Adam(params, lr=0.01)
+
+# ================================================
+# Step 6: Optimization Loop
+# ================================================
+
+num_epochs = 50000
+lambda_reg = 1e-4  # Regularization coefficient (adjust as needed)
+
+for epoch in range(num_epochs):
+    optimizer.zero_grad()
+
+    # Compute loss
+    loss = loss_function(A_factors, B_factors, C_factors, Y, A, lambda_reg)
+
+    # Backpropagation
+    loss.backward()
+
+    # Update parameters
+    optimizer.step()
+
+    # Print loss every 100 epochs
+    if (epoch + 1) % 100 == 0:
+        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
+
+# ================================================
+# Step 7: Evaluate the Reconstruction
+# ================================================
+
+# Reconstruct the tensor from optimized factors
+X_recovered = reconstruct_tensor(A_factors, B_factors, C_factors)
+reconstruction_error = torch.norm(X_recovered - X) / torch.norm(X)
+print(f'\nReconstruction Error: {reconstruction_error.item():.6f}')
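+
+# Optional (a sketch, not part of the original pipeline): write the recovered
+# tensor back to HDF5 so it can be inspected from Julia; the file name
+# 'X_recovered.h5' is an arbitrary choice.
+with h5py.File('X_recovered.h5', 'w') as f:
+    f.create_dataset('X', data=X_recovered.detach().numpy())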
diff --git a/examples/reconstruct/test-torch-tucker.py b/examples/reconstruct/test-torch-tucker.py
new file mode 100644
index 0000000000000000000000000000000000000000..b84473c7d98a41b20283943f69a4af22e65e3819
--- /dev/null
+++ b/examples/reconstruct/test-torch-tucker.py
@@ -0,0 +1,144 @@
+import h5py
+import torch
+import torch.nn as nn
+import torch.optim as optim
+
+# Load the data written by generate.jl. HDF5.jl stores column-major Julia
+# arrays, so h5py sees the dimensions reversed; transposing A restores the
+# (measurements x entries) orientation.
+with h5py.File('X.h5', 'r') as f:
+    X = torch.from_numpy(f['X'][:]).float()
+with h5py.File('Y.h5', 'r') as f:
+    Y = torch.from_numpy(f['Y'][:]).float()
+with h5py.File('A.h5', 'r') as f:
+    A = torch.from_numpy(f['A'][:]).float().t()
+
+# Set random seed for reproducibility
+torch.manual_seed(0)
+
+# ================================================
+# Step 1: Define Dimensions and Tucker Ranks
+# ================================================
+
+# Dimensions of the tensor X (I x J x K), taken from the loaded data
+I, J, K = X.shape
+
+# Tucker ranks along each mode
+R1 = 2  # Rank along the first mode
+R2 = 2  # Rank along the second mode
+R3 = 2  # Rank along the third mode
+
+# Number of measurements (rows of A)
+M = A.shape[0]
+
+# ================================================
+# Step 2: Define the Tucker Reconstruction
+# ================================================
+
+def reconstruct_tensor_tucker(G, A_factors, B_factors, C_factors):
+    """
+    Reconstructs tensor X from its Tucker decomposition factors.
+    """
+    # Use single-character subscripts for einsum
+    X = torch.einsum('abc,ia,jb,kc->ijk', G, A_factors, B_factors, C_factors)
+    return X
+
+# ================================================
+# Step 3: Initialize Factors and Core Tensor to Optimize
+# ================================================
+
+# Initialize factor matrices and core tensor with gradients enabled
+A_factors = nn.Parameter(torch.randn(I, R1))
+B_factors = nn.Parameter(torch.randn(J, R2))
+C_factors = nn.Parameter(torch.randn(K, R3))
+G = nn.Parameter(torch.randn(R1, R2, R3))
+
+# ================================================
+# Step 4: Define the Loss Function
+# ================================================
+
+def loss_function(A_factors, B_factors, C_factors, G, Y_true, A_matrix, lambda_reg=0.0):
+    """
+    Computes the loss as the difference between predicted and true measurements.
+    """
+    # Reconstruct tensor X from Tucker factors
+    X = reconstruct_tensor_tucker(G, A_factors, B_factors, C_factors)
+    X_vectorized = X.view(-1)  # Flatten the tensor
+
+    # Predicted measurements
+    Y_pred = A_matrix @ X_vectorized
+
+    # Measurement error
+    loss = torch.norm(Y_pred - Y_true) ** 2
+
+    # Regularization (optional)
+    if lambda_reg > 0:
+        reg = lambda_reg * (
+            torch.norm(A_factors) + torch.norm(B_factors) + torch.norm(C_factors) + torch.norm(G)
+        )
+        loss += reg
+
+    return loss
+
+# ================================================
+# Step 5: Set Up the Optimizer
+# ================================================
+
+# List of parameters to optimize
+params = [A_factors, B_factors, C_factors, G]
+
+# Choose an optimizer (e.g., Adam)
+optimizer = optim.Adam(params, lr=0.01)
+
+# ================================================
+# Step 6: Optimization Loop
+# ================================================
+
+num_epochs = 50000
+lambda_reg = 1e-4  # Regularization coefficient (adjust as needed)
+
+for epoch in range(num_epochs):
+    optimizer.zero_grad()
+
+    # Compute loss
+    loss = loss_function(A_factors, B_factors, C_factors, G, Y, A, lambda_reg)
+
+    # Backpropagation
+    loss.backward()
+
+    # Update parameters
+    optimizer.step()
+
+    # Print loss every 100 epochs
+    if (epoch + 1) % 100 == 0:
+        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
+
+# ================================================
+# Step 7: Evaluate the Reconstruction
+# ================================================
+
+# Reconstruct the tensor from optimized factors
+X_recovered = reconstruct_tensor_tucker(G, A_factors, B_factors, C_factors)
+reconstruction_error = torch.norm(X_recovered - X) / torch.norm(X)
+print(f'\nReconstruction Error: {reconstruction_error.item():.6f}')
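+
+# Optional check (a sketch): each unfolding of the recovered tensor should have
+# numerical rank close to the corresponding Tucker rank.
+unfold1 = X_recovered.detach().reshape(I, -1)
+print(f'Mode-1 unfolding rank: {torch.linalg.matrix_rank(unfold1).item()} (target {R1})')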