models.py
    import params
    import torch
    import math
    import numpy as np
    
    
    ######################################################
    ######################################################
    
    ###################### URWF ##########################
    
    ######################################################
    ######################################################
    
    
    class URWF(torch.nn.Module):
        def __init__(self, DEVICE):
            super().__init__()
            self.DEVICE = DEVICE
            # Learnable step-size structures; which one is actually trained is
            # controlled by the requires_grad flags in params.
            self.unit_scalar = torch.nn.Parameter(torch.ones(1, device=self.DEVICE), requires_grad=params.scalar)
            self.unit_vector = torch.nn.Parameter(torch.ones(params.T + 1, device=self.DEVICE), requires_grad=params.vector)
            self.unit_matrix = torch.nn.Parameter(torch.eye(params.n, device=self.DEVICE), requires_grad=params.matrix)
            # self.S_n = torch.nn.Parameter(math.sqrt(params.mu) * torch.eye(params.n, device=params.DEVICE), requires_grad=params.matrix)
            # One identity matrix per unrolled iteration.
            tensor = torch.eye(params.n, device=self.DEVICE).repeat(params.T + 1, 1, 1)
            self.unit_tensor = torch.nn.Parameter(tensor, requires_grad=params.tensor)
    
        def forward(self, x, y, Amatrix):
            n = x.shape[0]
            N_train = x.shape[1]
            # Truncated spectral initialization via power iterations.
            z0 = torch.randn((n, N_train), dtype=torch.cdouble, device=self.DEVICE)
            z0 = z0 / torch.linalg.norm(z0, dim=0)
            normest = (math.sqrt(math.pi/2)*(1 - params.cplx_flag) + math.sqrt(4/math.pi)*params.cplx_flag) * torch.sum(y, dim=0) / params.m
            ytr = torch.multiply(y, (torch.abs(y) > 1 * normest))  # truncated measurements
            for tt in range(params.npower_iter):
                z0 = self._Ah(Amatrix, torch.multiply(ytr, self._A(Amatrix, z0)))
                z0 = z0 / torch.linalg.norm(z0, dim=0)
            z0 = normest * z0  # scale to the estimated signal norm
            z = z0
    
            # Unrolled gradient iterations.
            for t in range(params.T + 1):
                yz = Amatrix @ z
                yz_abs = torch.abs(yz)
                phase = torch.divide(yz, yz_abs)            # A z / |A z|
                residual = yz - torch.multiply(y, phase)    # A z - y .* phase
                second_divide = torch.divide(self._Ah(Amatrix, residual), params.m)
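                # One unrolled step applies
                #   z <- z - mu * W * A^H(A z - y .* (A z / |A z|)) / m,
                # where the learned W is selected by params.scenario:
                # 0: a single scalar; 1: a per-iteration scalar; 2: a shared
                # n-by-n matrix; 3: per-iteration scalar times shared matrix;
                # 4: a per-iteration matrix; 5: the PSD product
                # unit_matrix^T @ unit_matrix.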
    
                if params.scenario == 0:     
                    z = z - params.mu * self.unit_scalar * second_divide
                elif params.scenario == 1:
                    z = z - params.mu * self.unit_vector[t] * second_divide
                elif params.scenario == 2:
                    z = z - params.mu * torch.linalg.matmul(self.unit_matrix.cfloat(), second_divide.cfloat())
                elif params.scenario == 3:
                    z = z - params.mu * self.unit_vector[t]*torch.linalg.matmul(self.unit_matrix.cfloat(), second_divide.cfloat())
                elif params.scenario == 4:
                    z = z - params.mu * torch.linalg.matmul(self.unit_tensor[t].cfloat(), second_divide.cfloat())     
                elif params.scenario == 5:
                    z = z - params.mu * torch.linalg.matmul((torch.linalg.matmul(self.unit_matrix.T,self.unit_matrix)).cfloat(), second_divide.cfloat())
                
               
    
            return z

        def _A(self, Matrix, X):
            # Forward measurement operator: X -> Matrix @ X.
            return torch.linalg.matmul(Matrix, X)

        def _Ah(self, Matrix, X):
            # Adjoint (conjugate-transpose) operator: X -> Matrix^H @ X.
            return torch.linalg.matmul(Matrix.adjoint(), X)
    
    
        def string(self):
            # Mean entry of the learned per-iteration step-size vector.
            torch.set_printoptions(precision=15)
            return torch.sum(self.unit_vector.clone().detach()) / (params.T + 1)
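    # Illustrative usage (a sketch; the real sizes come from the params module):
    #   model = URWF(params.DEVICE)
    #   z = model(x, y, Amatrix)   # x: (n, N), y = |Amatrix @ x|: (m, N)
    #   print(model.string())      # mean learned step size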
    
    ######################################################
    ######################################################
    
    ###################### UIRWF #########################
    
    ######################################################
    ######################################################
    
    class UIRWF(torch.nn.Module):
        def __init__(self):
            """
            Instantiate the learnable step-size parameters (scalar, vector,
            matrix, and per-iteration tensor), all initialized to params.mu.
            """
            super().__init__()
            self.mu  = torch.nn.Parameter(params.mu * torch.ones(1, device=params.DEVICE), requires_grad=params.scalar)
            self.V_T = torch.nn.Parameter(params.mu * torch.ones(params.T + 1, device=params.DEVICE), requires_grad=params.vector)
            self.M_n = torch.nn.Parameter(params.mu * torch.eye(params.n, device=params.DEVICE), requires_grad=params.matrix)
            # One mu-scaled identity matrix per unrolled iteration.
            tensor = params.mu * torch.eye(params.n, device=params.DEVICE).repeat(params.T + 1, 1, 1)
            self.T_n = torch.nn.Parameter(tensor, requires_grad=params.tensor)
    
        def forward(self, x, y, Amatrix):
            n = x.shape[0]
            N_train = x.shape[1]
            # Truncated spectral initialization via power iterations.
            z0 = torch.randn((n, N_train), dtype=torch.cdouble, device=params.DEVICE)
            z0 = z0 / torch.linalg.norm(z0, dim=0)
            normest = (math.sqrt(math.pi/2)*(1 - params.cplx_flag) + math.sqrt(4/math.pi)*params.cplx_flag) * torch.sum(y, dim=0) / params.m
            ytr = torch.multiply(y, (torch.abs(y) > 1 * normest))  # truncated measurements
            for tt in range(params.npower_iter):
                z0 = self._Ah(Amatrix, torch.multiply(ytr, self._A(Amatrix, z0)))
                z0 = z0 / torch.linalg.norm(z0, dim=0)
            z0 = normest * z0  # scale to the estimated signal norm
            z = z0

            # Incremental (minibatch) sweeps over the rows of Amatrix,
            # n rows at a time.
            batch = params.n
            sgd = params.m
            for t in range(params.T + 1):
                for i in range(0, sgd - batch + 1, batch):  # all full minibatches
                    Asub = Amatrix[i:i+batch, :]
                    Asubh = Asub.H
                    ysub = y[i:i+batch]
                    Asubz = Asub @ z
                    temp = torch.divide(Asubz - torch.multiply(ysub, torch.divide(Asubz, torch.abs(Asubz))), n)
                    second_divide = Asubh @ temp
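                    # The scenario branches mirror URWF; here mu is baked into
                    # the parameter initialization (scenario 3 divides by
                    # params.mu so the product V_T[t] * M_n is not scaled by
                    # mu twice).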
    
                    if params.scenario == 0:
                        # z = z - self.mu * second_divide
                        z = z - self.V_T[0] * second_divide
                    elif params.scenario == 1:
                        z = z - self.V_T[t] * second_divide
                    elif params.scenario == 2:
                        z = z - self.M_n.cfloat() @ second_divide.cfloat()
                    elif params.scenario == 3:
                        z = z - self.V_T[t] * self.M_n.cfloat() @ second_divide.cfloat() / params.mu
                    elif params.scenario == 4:
                        z = z - self.T_n[t].cfloat() @ second_divide.cfloat()
    
            return z

        def _A(self, Matrix, X):
            # Forward measurement operator: X -> Matrix @ X.
            return torch.linalg.matmul(Matrix, X)

        def _Ah(self, Matrix, X):
            # Adjoint (conjugate-transpose) operator: X -> Matrix^H @ X.
            return torch.linalg.matmul(Matrix.adjoint(), X)
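    # Note on the sweep above: with batch = params.n and sgd = params.m, each
    # of the T+1 sweeps visits floor(m / n) full minibatches; e.g. m = 128,
    # n = 16 gives i = 0, 16, ..., 112 (8 minibatches per sweep). A trailing
    # partial block, when m is not a multiple of n, is never visited.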
    
    ######################################################
    ######################################################
    
    ############### Custom Loss Function #################
    
    ######################################################
    ######################################################
    
    def my_loss(x, x_pred):
        # Phase-invariant relative-error loss: each predicted column is first
        # aligned to its target by the optimal global phase,
        # exp(-1j * angle(<X, X_pred>)).
        n = x.shape[0]
        N_train = x.shape[1]
        Relerrs = torch.zeros(N_train)
        for tt in range(N_train):
            X_pred = torch.reshape(x_pred[:, tt], (n, 1))
            X = torch.reshape(x[:, tt], (n, 1))
            Relerrs[tt] = torch.linalg.norm(X - torch.exp(-1j * torch.angle(torch.trace(X.H @ X_pred))) * X_pred) / torch.linalg.norm(X)
        # loss = torch.linalg.norm(Relerrs) / N_train
        loss = torch.mean(Relerrs)
        # loss = torch.max(Relerrs)
        return loss
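
    def my_loss_vectorized(x, x_pred):
        # A hypothetical vectorized equivalent of my_loss (a sketch, not used
        # by the rest of this module): the optimal alignment phase per column
        # is exp(-1j * angle(<x_col, x_pred_col>)), so the per-column loop
        # above collapses into batched tensor operations.
        inner = torch.sum(torch.conj(x) * x_pred, dim=0)        # <x, x_pred> per column
        aligned = torch.exp(-1j * torch.angle(inner)) * x_pred  # remove global phase
        relerrs = torch.linalg.norm(x - aligned, dim=0) / torch.linalg.norm(x, dim=0)
        return torch.mean(relerrs)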
    
    ######################################################
    ######################################################
    
    ################# Rel_Err Function ###################
    
    ######################################################
    ######################################################
    
    
    
    def Rel_Er(x_pred, x):
        # Per-sample phase-invariant relative errors, returned as a NumPy array.
        n = x.shape[0]
        N_train = x.shape[1]
        Relerrs = np.zeros(N_train)
        for tt in range(N_train):
            X_pred = torch.reshape(x_pred[:, tt], (n, 1))
            X = torch.reshape(x[:, tt], (n, 1))
            Relerrs[tt] = torch.linalg.norm(X - torch.exp(-1j * torch.angle(torch.trace(X.H @ X_pred))) * X_pred) / torch.linalg.norm(X)
        return Relerrs
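
    if __name__ == "__main__":
        # Minimal smoke test on synthetic Gaussian phase-retrieval data.
        # A sketch only: it assumes params defines n, m, T, mu, DEVICE,
        # npower_iter, cplx_flag, scenario and the requires_grad flags used
        # above, and that y holds magnitude measurements |Amatrix @ x|.
        torch.manual_seed(0)
        x = torch.randn(params.n, 4, dtype=torch.cdouble, device=params.DEVICE)
        Amatrix = torch.randn(params.m, params.n, dtype=torch.cdouble, device=params.DEVICE)
        y = torch.abs(Amatrix @ x)  # magnitude-only measurements
        model = URWF(params.DEVICE)
        z = model(x, y, Amatrix)
        print("mean relative error:", my_loss(x, z).item())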