    # test.jl

    # include("ThetaNorm.jl")
    # include("TensorComputation.jl")
    
    using Random
    Random.seed!(1234) # set the seed for reproducibility
    
    using LinearAlgebra
    using Serialization
    using SparseTensorMoments
    using Test
    using Plots # plotting package assumed for the `plot(...; yaxis=:log)` call at the end
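
    # Overview: build a smooth test function f from two Gaussian bumps, generate data
    # (X, Y, A) with `generate`, and recover X from the linear system A * vec(X) ≈ Y by
    # projected gradient descent with a truncated-HOSVD (low multilinear rank) projection.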
    
    # Define centers and alphas
    centers = [
        [0.5, 0.5, 0.5],
        [-0.5, -0.5, -0.5]
    ]
    alphas_f = [2.5, 2.5]
    
    # Create the function f(x)
    function make_f(centers, alphas_f)
        function f(x::Vector{<:Real})
            sum(exp.(-alphas_f .* [norm(x .- c)^2 for c in centers]))
        end
        return f
    end
    
    f = make_f(centers, alphas_f)
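
    # Quick sanity check (a sketch, assuming the two-center setup above): at the first
    # center the first bump contributes exp(0) = 1 and the second contributes
    # exp(-2.5 * ||centers[1] - centers[2]||^2).
    @test f(centers[1]) ≈ 1 + exp(-alphas_f[2] * norm(centers[1] .- centers[2])^2)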
    
    # Define parameters
    n = 5
    width = 1.0
    max_exponent = 10
    
    # Generate X, Y, A
    X, Y, A = generate(f, width, n, max_exponent)
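
    # The way X, Y, A are used below (A * vec(x) ≈ Y in `gradient`) suggests X is the target
    # tensor, A the measurement matrix acting on vec(X), and Y the measurement vector.
    # A shape check under that assumption:
    @test size(A) == (length(Y), length(vec(X)))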
    
    # A_array = [A[i,:] for i in 1:size(A,1)]
    
    # n, x = recovery_theta1(size(X), A_array, Y);
    # x = reshape(x, size(X))
    # relative_error(X,x)
    
    # hosvd_x = hosvd(x);
    
    # core = hosvd_x["core"]
    # multirank(core)
    
    # core
    
    # core[abs.(core).<1e-5] .= 0
    # multirank(core)
    
    # hosvd_x["core"] = core
    # x_re = hosvd_reconstruct(hosvd_x)
    # @assert relative_error(X,x_re) < 1E-2
    
    
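    # `projection` maps X to (approximately) the closest tensor of multilinear rank at most
    # (2, 2, 2): it keeps only the leading 2x2x2 block of the HOSVD core and reconstructs.
    # It is used below as the projection step of a projected gradient method.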
    function projection(X)
        hosvd_x = hosvd(X)
        core_tmp = hosvd_x["core"]
        # core_tmp[abs.(core_tmp) .< 1e-5] .= 0
        desired_rank = 2
        hosvd_x["core"] = zeros(size(X))  # zero core tensor of the same size as X
        hosvd_x["core"][1:desired_rank, 1:desired_rank, 1:desired_rank] .= core_tmp[1:desired_rank, 1:desired_rank, 1:desired_rank]
        # @info core_tmp
        # @info hosvd_x["core"]
        return hosvd_reconstruct(hosvd_x)
    end
    
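    # Gradient of the least-squares objective 0.5 * ||A * vec(x) - Y||^2 with respect to x,
    # reshaped back to the tensor shape of x.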
    function gradient(x)
        return reshape(A' * (A * vec(x) - Y), size(x))
    end
    
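    # Projected gradient descent: each iteration takes a steepest-descent step with the
    # exact line-search step length alpha = <A*vec(x) - Y, A*vec(grad)> / ||A*vec(grad)||^2
    # for the least-squares objective, then projects back onto the rank-(2,2,2) set via
    # `projection`. The iteration stops when the gradient norm falls below `atol` or the
    # gradient norm relative to ||x|| falls below `rtol`.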
    function optimal_gradient_descent(x, alpha, max_iter; atol=1E-10, rtol=1E-7)
        hist = Float64[]
        for i in 1:max_iter
            grad = gradient(x)
            resnorm = norm(grad)
            push!(hist, resnorm)
            relresnorm = resnorm / norm(x)
            @info i, resnorm, relresnorm
            if ((resnorm < atol) || (relresnorm < rtol)) break end
            s = A * vec(x) - Y
            Ad = A * vec(grad)
            alpha = (s' * Ad) / (Ad' * Ad)  # exact line-search step length, overwriting the initial alpha
            x = projection(x - alpha * grad)
        end
        return (; x, hist)
    end
    res = optimal_gradient_descent(ones(size(X)), 1, 5000);
    relative_error(X, res.x)
    plot(res.hist, yaxis=:log, labels="resnorm")
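
    # If the iteration converges, res.x is a multilinear-rank-(2,2,2) approximation of X;
    # relative_error(X, res.x) quantifies the recovery quality, and the semilog plot shows
    # the decay of the gradient norms stored in res.hist.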
    
    # size_tensor = (3,3,3) # size of the tensor
    # r = 1 # rank of the tensor
    # coefs = randn(r) # coefficients of rank-1 tensors
    # x0 = create_tensor_CP(size_tensor, coefs, "normal"); # create a rank-1 tensor with Gaussian-distributed factors
    # m = 3^6-1 # number of measurements
    # A, b = create_measurements(x0, m); # create Gaussian linear measurements
    # # A should be a list of length m; each element is a flattened tensor, i.e. a vector of size 3*3*3 = 27.
    # # b is also a list of length m; its i-th element is b_i = <A_i, x0>.
    
    # n, x = recovery_theta1(size_tensor, A, b); # using nuclear-2 theta-1 norm minimization for recovery
    # # n is the theta-1 norm, x is the recovered tensor
    
    # x = reshape(x, size(x0)) # reshape the recovered tensor to its original shape
    # relative_error(x0,x) # compare the difference