diff --git a/dynamics_learning/README.rst b/dynamics_learning/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6615ac548148fdd6daac320af572933e8ce856ab
--- /dev/null
+++ b/dynamics_learning/README.rst
@@ -0,0 +1,37 @@
+.. Inverse Dynamics Modelling
+.. ##########################
+
+.. Overview
+.. ********
+
+.. This is the approach to modelling the
+.. Franka Emika Panda robot's inverse
+.. dynamics with deep learning.
+
+.. .. list-table::
+..    :header-rows: 1
+
+..    * - Script
+..      - Description
+..    * - sweep.yaml
+..      - Contains sweep parameters for W&B.
+..    * - test_inverse
+..      - Contains the neural network test and
+..        can plot the predicted output as well
+..        as the Robotics Toolbox calculations
+..        (Python and external MATLAB CSV).
+..    * - train_inverse
+..      - Contains the training script. Builds the
+..        neural network based on the sweep.yaml config.
+
+##########################
+Inverse Dynamics Modelling
+##########################
+
+*****
+Setup
+*****
+
+Please follow the `official documentation <https://org.ngc.nvidia.com/setup/api-key>`_ on how to log in to the nvcr.io registry. Follow the instructions for Docker.
+
diff --git a/dynamics_learning/dockerfile.dynamics_learning b/dynamics_learning/dockerfile.dynamics_learning
new file mode 100644
index 0000000000000000000000000000000000000000..d79be4c0961cfd4ea2a8383a7bba66d67eb00c55
--- /dev/null
+++ b/dynamics_learning/dockerfile.dynamics_learning
@@ -0,0 +1,10 @@
+FROM nvcr.io/nvidia/tensorflow:24.04-tf2-py3
+
+WORKDIR /app
+
+COPY . .
+
+# Make entrypoint executable
+RUN chmod +x /app/dynamics_learning/entrypoint.sh
+
+ENTRYPOINT [ "/app/dynamics_learning/entrypoint.sh" ]
\ No newline at end of file
diff --git a/dynamics_learning/dynamics_learning/__init__.py b/dynamics_learning/dynamics_learning/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/dynamics_learning/dynamics_learning/data_retrieval/__init__.py b/dynamics_learning/dynamics_learning/data_retrieval/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/dynamics_learning/dynamics_learning/data_retrieval/coscine_api_retrieval.py b/dynamics_learning/dynamics_learning/data_retrieval/coscine_api_retrieval.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/dynamics_learning/dynamics_learning/main.py b/dynamics_learning/dynamics_learning/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/dynamics_learning/dynamics_learning/preprocessing/__init__.py b/dynamics_learning/dynamics_learning/preprocessing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/dynamics_learning/dynamics_learning/preprocessing/dataset_analysis.py b/dynamics_learning/dynamics_learning/preprocessing/dataset_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..75cc285d352d12d4d79b5fc341ebff3b08ce6f14
--- /dev/null
+++ b/dynamics_learning/dynamics_learning/preprocessing/dataset_analysis.py
@@ -0,0 +1,191 @@
+#%%
+# Imports and variables
+import sys
+sys.path.append("src")
+sys.path.append("..")
+import numpy as np
+import matplotlib.pyplot as plt
+import pandas as pd
+import rwth_style
+import panda_limits
+import os
+from scipy.optimize import curve_fit
+from sklearn.metrics import r2_score
+from rich import print
+from rich.progress import track
+
+def gauss(x, *p):
+    # Gaussian with amplitude A, mean mu and standard deviation sigma
+    A, mu, sigma = p
+    return A*np.exp(-(x-mu)**2/(2.*sigma**2))
+
+p0 = [1, 0, 1]  # initial guess [A, mu, sigma] for the Gaussian fits
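+
+# Illustrative sketch (not part of the analysis below): curve_fit recovers the
+# Gaussian parameters from binned samples. For synthetic data drawn from
+# N(mu=0.5, sigma=0.2), the fitted coefficients should come out close to
+# [A, 0.5, 0.2]:
+#
+#   samples = np.random.normal(0.5, 0.2, 10000)
+#   count, bin_edges = np.histogram(samples, bins=40, density=True)
+#   coeff, _ = curve_fit(gauss, bin_edges[:-1], count, p0=p0)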
+# %%
+for directory in ["dataset_v1", "dataset_v2", "dataset_v3"]:
+    for dataset in ["train", "test"]:
+        attained_freqs = np.empty([0,1])
+        duration_meas = np.empty(0, np.float32)
+        num_samples_meas = np.empty(0, np.float32)
+        q_command = np.empty((0,7), np.float32)
+        qd_command = np.empty((0,7), np.float32)
+        qdd_command = np.empty((0,7), np.float32)
+        q_meas = np.empty((0,7), np.float32)
+        qd_meas = np.empty((0,7), np.float32)
+        tau_meas = np.empty((0,7), np.float32)
+        directory_in = "data/{}/{}".format(directory, dataset)
+        file_list = list(filter(lambda k: 'meas.csv' in k, sorted(os.listdir(directory_in))))
+
+        for filename in track(file_list):
+            data2 = np.genfromtxt(os.path.join(directory_in, filename), dtype=float, delimiter=',')
+            t_meas = data2[:,0]
+            time_diffs = np.diff(t_meas)
+            attained_freq = np.mean(1/time_diffs)
+            attained_freqs = np.append(attained_freqs, attained_freq)
+            num_samples_meas = np.concatenate((num_samples_meas, [data2.shape[0]]))
+            duration_meas = np.concatenate((duration_meas, [data2[-1, 0]]), axis=0)
+            q_meas = np.concatenate((q_meas, data2[:, 1:8]), axis=0)
+            qd_meas = np.concatenate((qd_meas, data2[:, 8:15]), axis=0)
+            tau_meas = np.concatenate((tau_meas, data2[:, 15:22]), axis=0)
+            filename2 = filename.replace("meas", "com")
+            data1 = np.genfromtxt(os.path.join(directory_in, filename2), dtype=float, delimiter=',')
+            q_command = np.concatenate((q_command, data1[:,1:8]), axis=0)
+            qd_command = np.concatenate((qd_command, data1[:, 8:15]), axis=0)
+            qdd_command = np.concatenate((qdd_command, data1[:, 15:22]), axis=0)
+
+        df = pd.DataFrame({"# trajectories": [duration_meas.size],
+                           "Duration Sum [s]": np.sum(duration_meas),
+                           "Duration Min [s]": np.min(duration_meas),
+                           "Duration Max [s]": np.max(duration_meas),
+                           "Duration Mean [s]": np.mean(duration_meas),
+                           "# Samples Sum": np.sum(num_samples_meas),
+                           "# Samples Min": np.min(num_samples_meas),
+                           "# Samples Max": np.max(num_samples_meas),
+                           "# Samples Mean": np.mean(num_samples_meas)
+                           })
+        df.index = ['Value']
+        df1 = df.T
+        df1.to_csv('data/{}/analysis/trajectory_analysis_{}.csv'.format(directory, dataset), float_format='%.3f')
+
+        # Attained frequency analysis
+        freq_mu = np.mean(attained_freqs)
+        freq_sigma = np.std(attained_freqs)
+        freq_worst_case = np.max(freq_mu - attained_freqs)
+        with plt.style.context("default"):
+            num_col = 1
+            num_row = 1
+            fig, axs = plt.subplots(num_row, num_col, figsize=(10,4))
+            count, bins, ignored = axs.hist(attained_freqs, density=True, bins=50, color=rwth_style.blue)
+            plt.plot(bins, 1/(freq_sigma * np.sqrt(2 * np.pi)) *
+                     np.exp(-(bins - freq_mu)**2 / (2 * freq_sigma**2)), linewidth=2, color=rwth_style.red)
+            axs.grid()
+            axs.set_title(r'$\mathrm{Histogram\ of\ Frequency:}\ \mu=%.3f\ \mathrm{Hz},\ \sigma=%.3f\ \mathrm{Hz}$' % (freq_mu, freq_sigma))
+            axs.set_ylabel("Probability Density")
+            axs.set_xlabel("Frequency [Hz]")
+            fig.tight_layout()
+            fig.savefig("data/{}/analysis/hist_freq_{}.png".format(directory, dataset))
+            plt.close(fig)
+
+        values = [q_command, qd_command, qdd_command, tau_meas]
+        names = ["q", "qd", "qdd", "tau"]
+        labels_y = ["Position [rad]", "Velocity [rad/s]", "Acceleration [rad/s^2]", "Torque [Nm]"]
+        units = ["[rad]", "[rad/s]", "[rad/s^2]", "[Nm]"]
+        titles = ["Command Position", "Command Velocity", "Command Acceleration", "Measured Torque"]
+        labels = ['Axis 1', 'Axis 2', 'Axis 3', 'Axis 4', 'Axis 5', 'Axis 6', 'Axis 7']
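+        # Worked example (illustrative): for a joint with physical position range
+        # [-2.9, 2.9] rad whose attained positions span [-2.0, 2.0] rad, the
+        # min/max coverage computed below is
+        #   (2.0 - (-2.0)) / (2.9 - (-2.9)) * 100 = 4.0 / 5.8 * 100 ≈ 69 %,
+        # i.e. roughly two thirds of the physical working space was excited.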
+        # Physical limits
+        min_limits_phys = [panda_limits.q_lim_min_phys, panda_limits.qd_lim_min_phys, panda_limits.qdd_lim_min_phys, panda_limits.tau_lim_min_phys]
+        max_limits_phys = [panda_limits.q_lim_max_phys, panda_limits.qd_lim_max_phys, panda_limits.qdd_lim_max_phys, panda_limits.tau_lim_max_phys]
+        # MoveIt limits
+        min_limits_moveit = [panda_limits.q_lim_min_moveit, panda_limits.qd_lim_min_moveit, panda_limits.qdd_lim_min_moveit, panda_limits.tau_lim_min_moveit]
+        max_limits_moveit = [panda_limits.q_lim_max_moveit, panda_limits.qd_lim_max_moveit, panda_limits.qdd_lim_max_moveit, panda_limits.tau_lim_max_moveit]
+        length = len(values)
+
+        for i in range(length):
+            value = values[i]
+            std = np.std(value, axis=0)
+            mean = np.mean(value, axis=0)
+            min = np.min(value, axis=0)
+            max = np.max(value, axis=0)
+            phy_mean = (max_limits_phys[i]+min_limits_phys[i])/2
+            moveit_mean = (max_limits_moveit[i]+min_limits_moveit[i])/2
+            cov_max_phys = (max-min) / (max_limits_phys[i]-min_limits_phys[i]) * 100  # Percentage of used working space
+            cov_std_phys = (2*std) / (max_limits_phys[i]-min_limits_phys[i]) * 100  # Percentage of used working space based on standard deviation
+            cov_max_moveit = (max-min) / (max_limits_moveit[i]-min_limits_moveit[i]) * 100  # Percentage of used working space
+            cov_std_moveit = (2*std) / (max_limits_moveit[i]-min_limits_moveit[i]) * 100  # Percentage of used working space based on standard deviation
+
+            df = pd.DataFrame({"Att. Mean {}".format(units[i]): mean,
+                               "Phys. Mean {}".format(units[i]): phy_mean,
+                               "MoveIt Mean {}".format(units[i]): moveit_mean,
+
+                               "Att. Min {}".format(units[i]): min,
+                               "Phys. Min {}".format(units[i]): min_limits_phys[i],
+                               "MoveIt Min {}".format(units[i]): min_limits_moveit[i],
+
+                               "Att. Max {}".format(units[i]): max,
+                               "Phys. Max {}".format(units[i]): max_limits_phys[i],
+                               "MoveIt Max {}".format(units[i]): max_limits_moveit[i],
+
+                               "Phy. Coverage Min/Max [\\%]": cov_max_phys,
+                               "MoveIt Coverage Min/Max [\\%]": cov_max_moveit,
+
+                               "Att. Std {}".format(units[i]): std,
+                               "Phy. 
Coverage Std [\\%]":cov_std_phys , + "MoveIt Coverage Std [\\%]":cov_std_moveit , + }) + df.index = ['Axis_1', 'Axis_2', 'Axis_3', 'Axis_4', 'Axis_5', 'Axis_6', 'Axis_7'] + df1 = df.T + df1.to_csv('data/{}/analysis/feature_analysis_{}_{}.csv'.format(directory, dataset, names[i]), float_format='%.3f') + + x_pos = np.arange(len(labels)) + with plt.style.context("default"): + f4, ax = plt.subplots(figsize=(10,5)) + bar1_alpha = 1 + bar1_hatch = '' + plt.rcParams.update({'hatch.color': rwth_style.blue_light}) + if (i==2 or i==0): + ax.bar(x_pos, max_limits_moveit[i] - min_limits_moveit[i], 0.4, bottom = min_limits_moveit[i], color=rwth_style.blue_light, label='MoveIt Limits', zorder=3) + bar1_alpha = 0.5 + bar1_hatch = '//' + bar1 = ax.bar(x_pos, max_limits_phys[i] - min_limits_phys[i], 0.4, bottom = min_limits_phys[i], color=rwth_style.blue_light, alpha=bar1_alpha, hatch=bar1_hatch, label='Physical Limits', zorder=2) + ax.scatter(x_pos, (max_limits_phys[i] + min_limits_phys[i])/2, c=[rwth_style.blue], s=400, marker='_', label='Physical Mean', zorder=4) + errorbar_1 = ax.errorbar(x_pos, mean, std, fmt='o', capsize=5, label='Mean/Std Attained {}'.format(titles[i]), c=rwth_style.blue, zorder=5) + scatter_min = ax.scatter(x_pos, min, c=[rwth_style.blue], s=20, marker='x', label='Min/Max Attained {}'.format(titles[i]), zorder=6) + ax.scatter(x_pos, max, c=[rwth_style.blue], s=20, marker='x', zorder=6) + ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.08), ncol=2) + box = ax.get_position() + ax.set_position([box.x0, box.y0 + box.height * 0.15, box.width, box.height]) + ax.set_ylabel(labels_y[i]) + ax.set_ylim((-ax.get_ylim()[1], ax.get_ylim()[1])) + ax.set_xticks(x_pos) + ax.set_xticklabels(labels) + ax.yaxis.grid(True) + ax.set_axisbelow(True) + f4.savefig("data/{}/analysis/errorbar_{}_{}.png".format(directory, dataset, names[i])) + plt.close(f4) + + with plt.style.context("default"): + num_col = 3 + num_row = 3 + fig, axs = plt.subplots(num_row, num_col, figsize=(20,10)) + + for k, label in enumerate(labels): + count, bins, ignored = axs[k//num_col, k%num_col].hist(value[:,k], density=True, bins=40, color=rwth_style.blue) + bins = bins[:-1] + try: + coeff, var_matrix = curve_fit(gauss, bins, count, p0=p0, maxfev = 2000) + # Get the fitted curve + hist_fit = gauss(bins, *coeff) + r_squared = r2_score(count, hist_fit) + axs[k//num_col, k%num_col].plot(bins,hist_fit, color=rwth_style.red) + t = axs[k//num_col, k%num_col].text(0.15,0.85, f"mean: {round(coeff[1],3)}\nstd: {round(coeff[2],3)}\nr squared: {round(round(r_squared,4)*100,2)}%", horizontalalignment='center', verticalalignment='center', transform=axs[k//num_col, k%num_col].transAxes) + t.set_bbox(dict(facecolor='white', alpha=0.5, edgecolor='black')) + except Exception as e: + print(e) + axs[k//num_col, k%num_col].grid() + axs[k//num_col, k%num_col].set_title(labels[k]) + axs[k//num_col, k%num_col].set_ylabel("Probability Density") + axs[k//num_col, k%num_col].set_xlabel(labels_y[i]) + axs[k//num_col, k%num_col].set_xlim([min_limits_phys[i][k],max_limits_phys[i][k]]) + + fig.delaxes(axs[-1,-1]) + fig.delaxes(axs[-1,-2]) + fig.tight_layout() + fig.savefig("data/{}/analysis/hist_{}_{}.png".format(directory, dataset, names[i])) + plt.close(fig) diff --git a/dynamics_learning/dynamics_learning/preprocessing/trajectory_interpolation.py b/dynamics_learning/dynamics_learning/preprocessing/trajectory_interpolation.py new file mode 100644 index 0000000000000000000000000000000000000000..d66a1aa2dd09f954ed9a39f0f1ae543f97ba2c9d --- /dev/null +++ 
b/dynamics_learning/dynamics_learning/preprocessing/trajectory_interpolation.py
@@ -0,0 +1,44 @@
+import sys
+sys.path.append("/root/deep-learning-based-robot-dynamics-modelling-in-robot-based-laser-material-processing/src")
+import numpy as np
+import os
+import panda_limits
+
+
+directory_in = "/root/deep-learning-based-robot-dynamics-modelling-in-robot-based-laser-material-processing/data/dataset_v3/test_2"
+
+for filename in sorted(os.listdir(directory_in)):
+    if (filename.endswith("com.csv") and "interp" not in filename):
+        data1 = np.genfromtxt(os.path.join(directory_in, filename), dtype=float, delimiter=',')
+        t_command = data1[:,0]
+        motion_params = data1[:,1:22]
+        filename2 = filename.replace("com", "meas")
+        data2 = np.genfromtxt(os.path.join(directory_in, filename2), dtype=float, delimiter=',')
+        t_meas = data2[:,0]
+        x_max = 1
+        x_min = -1
+        # Normalization: (X - X_min) / (X_max - X_min) * (x_max - x_min) + x_min
+        # Positions
+        for i in range(0, 7):
+            motion_params[:, i] = (motion_params[:, i] - panda_limits.q_lim_min_phys[i]) / (panda_limits.q_lim_max_phys[i] - panda_limits.q_lim_min_phys[i]) * (x_max - x_min) + x_min
+        # Velocities
+        for i in range(7, 14):
+            motion_params[:, i] = (motion_params[:, i] - panda_limits.qd_lim_min_phys[i-7]) / (panda_limits.qd_lim_max_phys[i-7] - panda_limits.qd_lim_min_phys[i-7]) * (x_max - x_min) + x_min
+        # Accelerations
+        for i in range(14, 21):
+            motion_params[:, i] = (motion_params[:, i] - panda_limits.qdd_lim_min_phys[i-14]) / (panda_limits.qdd_lim_max_phys[i-14] - panda_limits.qdd_lim_min_phys[i-14]) * (x_max - x_min) + x_min
+
+        # Interpolate command values onto the measurement timestamps
+        motion_params_interpolated = np.empty([t_meas.shape[0], 21])
+        for i in range(0, 21):
+            motion_params_interpolated[:,i] = np.interp(t_meas, t_command, motion_params[:,i])
+        meas = np.hstack([t_meas[:, None], motion_params_interpolated])
+        file_output = directory_in + "/" + filename.replace("com", "interp_com")
+        np.savetxt(
+            file_output,
+            meas,
+            delimiter=",",
+            header="t_com, q1_com, q2_com, q3_com, q4_com, q5_com, q6_com, q7_com, qd1_com, qd2_com, qd3_com, qd4_com, qd5_com, qd6_com, qd7_com, qdd1_com, qdd2_com, qdd3_com, qdd4_com, qdd5_com, qdd6_com, qdd7_com",
+        )
+
diff --git a/dynamics_learning/dynamics_learning/preprocessing/trajectory_visualizer.py b/dynamics_learning/dynamics_learning/preprocessing/trajectory_visualizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..bad8c818b8b092b570cc2c86f83cb840d763eefa
--- /dev/null
+++ b/dynamics_learning/dynamics_learning/preprocessing/trajectory_visualizer.py
@@ -0,0 +1,197 @@
+#%%
+# Imports and variables
+import sys
+sys.path.append("..")
+sys.path.append("src")
+import numpy as np
+import matplotlib.pyplot as plt
+import roboticstoolbox as rtb
+import rwth_style
+import os
+
+# Global variables
+label_meas = "Measurements"
+label_rtb_1 = "RTB Estimation based on Measurements"
+label_rtb_2 = "RTB Estimation based on Command"
+label_com = "Command by MoveIt"
+label_derivative = "Derivative based on q_command"
+labels_axes = ['Axis 1', 'Axis 2', 'Axis 3', 'Axis 4', 'Axis 5', 'Axis 6', 'Axis 7']
+trajectory_number = -1
+directory_in = "/home/jaschneider/LSTM-based_inverse_dynamics_learning_for_franka_emika_panda/data/dataset_v1/train"
+file_list = list(filter(lambda k: 'meas.csv' in k, sorted(os.listdir(directory_in))))
+filename2 = file_list[trajectory_number]
+
+# Load file and create variables
+data2 = np.genfromtxt(os.path.join(directory_in, filename2), dtype=float, delimiter=',')
+t_meas = data2[:,0]
+q_meas = 
data2[:,1:8] +qd_meas = data2[:,8:15] +tau_meas = data2[:,15:22] +try: + u_meas = data2[:,22] + i_meas = data2[:,23] + p_meas = data2[:,24] +except: + print("No power information in database") + +filename = filename2.replace("meas", "com") +data1 = np.genfromtxt(os.path.join(directory_in, filename), dtype=float, delimiter=',') +t_command = data1[:,0] +q_command = data1[:,1:8] +qd_command = data1[:,8:15] +qdd_command = data1[:,15:22] + +qd_command_diff = np.diff(q_command, axis=0)/np.diff(t_command)[:,None] +t_command_diff1 = (t_command[1:] + t_command[:-1]) / 2 +qdd_command_diff = np.diff(qd_command_diff, axis=0)/np.diff(t_command_diff1)[:,None] +t_command_diff2 = (t_command_diff1[1:] + t_command_diff1[:-1]) / 2 + +tau_meas_interpolated = np.empty(q_command.shape) +for i in range(0, tau_meas.shape[1]): + tau_meas_interpolated[:,i] = np.interp(t_command, t_meas, tau_meas[:,i]) +tau_meas_interpolated = np.array(tau_meas_interpolated) + +panda = rtb.models.DH.Panda() +tau_rtb = panda.rne(q=q_command, qd=qd_command, qdd=qdd_command, gravity=(0,0,-9.81), fext=None) + +#%% +traj_com_rtb = panda.fkine(q_command).t +traj_meas_rtb = panda.fkine(q_meas).t +with plt.style.context("default"): + fig = plt.figure() + ax = fig.add_subplot(projection='3d') + # ax.set_aspect('equal') + X = traj_meas_rtb[:,0] + Y = traj_meas_rtb[:,1] + Z = traj_meas_rtb[:,2] + ax.scatter(X, Y, Z, color=rwth_style.green, label=label_meas, s=2, marker='.') + X = traj_com_rtb[:,0] + Y = traj_com_rtb[:,1] + Z = traj_com_rtb[:,2] + ax.scatter(X, Y, Z, color=rwth_style.blue, label=label_com, s=2, marker='.') + ax.set_xlabel('X [m]') + ax.set_ylabel('Y [m]') + ax.set_zlabel('Z [m]') + max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0 + + mid_x = (X.max()+X.min()) * 0.5 + mid_y = (Y.max()+Y.min()) * 0.5 + mid_z = (Z.max()+Z.min()) * 0.5 + ax.set_xlim(mid_x - max_range, mid_x + max_range) + ax.set_ylim(mid_y - max_range, mid_y + max_range) + ax.set_zlim(mid_z - max_range, mid_z + max_range) + handles, labels = ax.get_legend_handles_labels() + fig.legend() + fig.tight_layout() + + +#%% +# Plot Position +with plt.style.context("default"): + fig, axs = plt.subplots(4,2, figsize=(10,10)) + for k in range(len(labels_axes)): + axs[k//2, k%2].plot(t_command, q_command[:,k], ".-", markersize=2, label=label_com, color=rwth_style.green) + axs[k//2, k%2].scatter(t_meas, q_meas[:,k], label=label_meas, s=2, marker='.', color=rwth_style.blue) + axs[k//2, k%2].grid() + axs[k//2, k%2].set_title(labels_axes[k]) + axs[k//2, k%2].set_ylabel("Position [rad]") + axs[k//2, k%2].set_xlabel("Time [s]") + fig.delaxes(axs[3,1]) + handles, labels = axs[0,0].get_legend_handles_labels() + pos1 = axs[3,1].get_position() + fig.legend(handles, labels, loc='lower left', bbox_to_anchor=(pos1.x0+0.004, pos1.y0+0.032)) + fig.tight_layout() + # fig.savefig("/root/deep-learning-based-robot-dynamics-modelling-in-robot-based-laser-material-processing/thesis/resources/{}_10{}_{}.svg".format(filename[9:-3], indices[traj_num//2], space[traj_num%2])) + # plt.close(fig) + + +#%% +# Plot Velocity +with plt.style.context("default"): + fig, axs = plt.subplots(4,2, figsize=(10,10)) + for k in range(len(labels_axes)): + axs[k//2, k%2].plot(t_command, qd_command[:,k], ".-", markersize=5, label=label_com, color=rwth_style.green) + axs[k//2, k%2].scatter(t_meas, qd_meas[:,k], label=label_meas, s=2, marker='.', color=rwth_style.blue) + axs[k//2, k%2].grid() + axs[k//2, k%2].set_title(labels_axes[k]) + axs[k//2, k%2].set_ylabel("Velocity [rad/s]") + 
axs[k//2, k%2].set_xlabel("Time [s]") + fig.delaxes(axs[3,1]) + handles, labels = axs[0,0].get_legend_handles_labels() + pos1 = axs[3,1].get_position() + fig.legend(handles, labels, loc='lower left', bbox_to_anchor=(pos1.x0+0.004, pos1.y0+0.032)) + fig.tight_layout() + # fig.savefig("/root/deep-learning-based-robot-dynamics-modelling-in-robot-based-laser-material-processing/thesis/resources/{}_10{}_{}.svg".format(filename[9:-3], indices[traj_num//2], space[traj_num%2])) + # plt.close(fig) + + +#%% +# Plot Acceleration +with plt.style.context("default"): + fig, axs = plt.subplots(4,2, figsize=(10,10)) + for k in range(len(labels_axes)): + ax2 = axs[k//2, k%2].twinx() + axs[k//2, k%2].plot(t_command, qdd_command[:,k], ".-", markersize=5, label=label_com, color=rwth_style.green) + axs[k//2, k%2].grid() + axs[k//2, k%2].set_title(labels_axes[k]) + axs[k//2, k%2].set_ylabel("Acceleration [rad/s²]") + axs[k//2, k%2].set_xlabel("Time [s]") + ax2.spines['right'].set_color(rwth_style.blue) + ax2.tick_params(axis='y', colors=rwth_style.blue) + ax2.spines['left'].set_color(rwth_style.green) + axs[k//2, k%2].tick_params(axis='y', colors=rwth_style.green) + fig.delaxes(axs[3,1]) + handles, labels = axs[0,0].get_legend_handles_labels() + handles2, labels2 = ax2.get_legend_handles_labels() + labels = labels + labels2 + handles = handles + handles2 + pos1 = axs[3,1].get_position() + fig.legend(handles, labels, loc='lower left', bbox_to_anchor=(pos1.x0+0.004, pos1.y0+0.032)) + fig.tight_layout() + # fig.savefig("/root/deep-learning-based-robot-dynamics-modelling-in-robot-based-laser-material-processing/thesis/resources/{}_10{}_{}.svg".format(filename[9:-3], indices[traj_num//2], space[traj_num%2])) + # plt.close(fig) + + +#%% Plot Torque +with plt.style.context("default"): + fig, axs = plt.subplots(4,2, figsize=(10,10)) + for k in range(len(labels_axes)): + axs[k//2, k%2].scatter(t_meas, tau_meas[:,k], label=label_meas, s=2, marker='o', color=rwth_style.blue) + axs[k//2, k%2].scatter(t_command, tau_rtb[:,k], label=label_rtb_2, s=2, marker='.', color=rwth_style.blue_light) + axs[k//2, k%2].grid() + axs[k//2, k%2].set_title(labels_axes[k]) + axs[k//2, k%2].set_ylabel("Torque [Nm]") + axs[k//2, k%2].set_xlabel("Time [s]") + fig.delaxes(axs[3,1]) + handles, labels = axs[0,0].get_legend_handles_labels() + pos1 = axs[3,1].get_position() + fig.legend(handles, labels, loc='lower left', bbox_to_anchor=(pos1.x0+0.004, pos1.y0+0.032)) + fig.tight_layout() + # fig.savefig("/root/deep-learning-based-robot-dynamics-modelling-in-robot-based-laser-material-processing/thesis/resources/{}_10{}_{}.svg".format(filename[9:-3], indices[traj_num//2], space[traj_num%2])) + # plt.close(fig) + +#%% Plot Current, Voltage, Power +with plt.style.context("default"): + plt.scatter(t_meas, i_meas[:], label="Current i", s=2, marker='o', color=rwth_style.red) + plt.grid() + plt.ylabel("Current [A]") + plt.xlabel("Time [s]") + fig.legend() + fig.tight_layout() +#%% Plot Current, Voltage, Power +with plt.style.context("default"): + plt.scatter(t_meas, u_meas[:], label="Voltage u", s=2, marker='o', color=rwth_style.red) + plt.grid() + plt.ylabel("Voltage [V]") + plt.xlabel("Time [s]") + fig.legend() + fig.tight_layout() +#%% Plot Current, Voltage, Power +with plt.style.context("default"): + plt.scatter(t_meas, p_meas[:], label="Power p", s=2, marker='o', color=rwth_style.red) + plt.grid() + plt.ylabel("Power [W]") + plt.xlabel("Time [s]") + fig.legend() + fig.tight_layout() + \ No newline at end of file diff --git 
a/dynamics_learning/dynamics_learning/sweep.yaml b/dynamics_learning/dynamics_learning/sweep.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2463d256fca9b6182da67dcd071d2537088a7d7a
--- /dev/null
+++ b/dynamics_learning/dynamics_learning/sweep.yaml
@@ -0,0 +1,42 @@
+project: Panda_Inverse_Dynamics
+description: Sweep with log_uniform_values distributions. Clipnorm active and searched. Bayesian hyperparameter optimization.
+program: train_inverse.py
+method: bayes
+metric:
+  name: val_loss
+  goal: minimize
+parameters:
+  clipnorm:
+    max: 10000
+    min: 1
+    distribution: log_uniform_values
+  learning_rate:
+    max: 0.9
+    min: 1e-7
+    distribution: log_uniform_values
+  window_size:
+    max: 100
+    min: 10
+    distribution: int_uniform
+  batch_size:
+    values: [2048,4096]
+  units:
+    max: 300
+    min: 100
+    distribution: int_uniform
+  dropout:
+    max: 1
+    min: 1e-5
+    distribution: log_uniform_values
+  layers:
+    max: 10
+    min: 1
+    distribution: int_uniform  # consider allowing even more layers
+early_terminate:
+  type: hyperband
+  min_iter: 3
+command:
+  - ${env}
+  - python3
+  - ${program}
+  - ${args}
\ No newline at end of file
diff --git a/dynamics_learning/dynamics_learning/testing/__init__.py b/dynamics_learning/dynamics_learning/testing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/dynamics_learning/dynamics_learning/testing/model_testing.py b/dynamics_learning/dynamics_learning/testing/model_testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a48a910c04db2e935ecef6e16cc031cba758271
--- /dev/null
+++ b/dynamics_learning/dynamics_learning/testing/model_testing.py
@@ -0,0 +1,196 @@
+#%%
+# Imports
+import sys
+sys.path.append("src")
+import numpy as np
+import keras
+import os
+import matplotlib.pyplot as plt
+import roboticstoolbox as rtb
+import rwth_style
+import pandas as pd
+from rich import print
+from rich.console import Console
+from rich.table import Table
+import tensorflow_text as tf_text
+from rich.progress import track
+import tensorflow as tf

+#%%
+# Load model and run the test dataset
+model_name = "stellar-sweep-529.h5"
+model = keras.models.load_model("/root/deep-learning-based-robot-dynamics-modelling-in-robot-based-laser-material-processing/models/inverse_dynamics/{}".format(model_name))
+window_size = model.input_shape[1]
+keras.utils.vis_utils.plot_model(model, to_file='src/Inverse_Dynamics/test_results/model_plot.png', show_shapes=True, show_layer_names=True)
+
+#%%
+rms_joint_nn = np.empty([0,7])
+rms_joint_rtbm = np.empty([0,7])
+rms_joint_rtbp = np.empty([0,7])
+
+test_dataset = "data/dataset_v3/test"
+for filename in track(sorted(os.listdir(test_dataset)), description='[green]Processing whole test dataset'):
+    if filename.endswith("interp_com.csv"):
+        # Command trajectory
+        data_com = np.genfromtxt(os.path.join(test_dataset, filename), dtype=float, delimiter=',')
+        t_command = data_com[:,0]
+        q_command = data_com[:,1:8]
+        qd_command = data_com[:,8:15]
+        qdd_command = data_com[:,15:22]
+        # Measured trajectory
+        filename2 = filename.replace("interp_com", "meas")
+        data_meas = np.genfromtxt(os.path.join(test_dataset, filename2), dtype=float, delimiter=',')
+        t_meas = data_meas[:,0]
+        q_meas = data_meas[:,1:8]
+        qd_meas = data_meas[:,8:15]
+        tau_meas = data_meas[:,15:22]
+        # Load RTB-MATLAB torques
+        filename3 = filename.replace("interp_com", "interp_rtbm")
+        data_rtbm = np.genfromtxt(os.path.join(test_dataset, filename3), dtype=float, delimiter=',')
+        data_rtbm = 
data_rtbm[~np.isnan(data_rtbm).any(axis=1)] + t_rtbm = data_rtbm[:,0] + tau_rtbm = data_rtbm[:,1:8] + # Calculate RTB-Python torques + panda = rtb.models.DH.Panda() + tau_rtbp = panda.rne(q=q_command, qd=qd_command, qdd=qdd_command, gravity=(0,0,-9.81)) + # Convert to tensors + X = tf.convert_to_tensor(data_com[:,1:22]) + # Sliding Window + X_test = tf_text.sliding_window(data=X, width=window_size, axis=0) + y_pred = model.predict(X_test, verbose=1) + + rms_nn = np.sqrt(((tau_meas[:-window_size+1] - y_pred)**2).mean(axis=0)) + rms_rtbp = np.sqrt(((tau_meas - tau_rtbp)**2).mean(axis=0)) + rms_rtbm = np.sqrt(((tau_meas - tau_rtbm)**2).mean(axis=0)) + + if "ISO" not in filename: + rms_joint_nn = np.vstack([rms_joint_nn, rms_nn]) + rms_joint_rtbp = np.vstack([rms_joint_rtbp, rms_rtbp]) + rms_joint_rtbm = np.vstack([rms_joint_rtbm, rms_rtbm]) + + df = pd.DataFrame({"NN":rms_nn, "RTB-M":rms_rtbm, "RTB-P":rms_rtbp}) + df = df.append({"NN":rms_nn.mean(), "RTB-M":rms_rtbm.mean(), "RTB-P":rms_rtbp.mean()}, ignore_index=True) + df.index = ['RMS Axis 1 [Nm]', 'RMS Axis 2 [Nm]', 'RMS Axis 3 [Nm]', 'RMS Axis 4 [Nm]', 'RMS Axis 5 [Nm]', 'RMS Axis 6 [Nm]', 'RMS Axis 7 [Nm]', 'RMS All [Nm]'] + df.T.to_csv('src/Inverse_Dynamics/test_results/test_result_{}.csv'.format(filename.replace("_interp_com.csv", "")), float_format='%.3f') + + # Print table + # table = Table(title="Root Mean Square Errors") + # table.add_column("Method") + # table.add_column("Axis 1 [Nm]") + # table.add_column("Axis 2 [Nm]") + # table.add_column("Axis 3 [Nm]") + # table.add_column("Axis 4 [Nm]") + # table.add_column("Axis 4 [Nm]") + # table.add_column("Axis 6 [Nm]") + # table.add_column("Axis 7 [Nm]") + # table.add_column("All [Nm]") + # table.add_row("NN", "{}".format(rms_nn[0]), "{}".format(rms_nn[1]), "{}".format(rms_nn[2]), "{}".format(rms_nn[3]), "{}".format(rms_nn[4]), "{}".format(rms_nn[5]), "{}".format(rms_nn[6]), "{}".format(rms_nn.mean())) + # table.add_row("RTB-M", "{}".format(rms_rtbm[0]), "{}".format(rms_rtbm[1]), "{}".format(rms_rtbm[2]), "{}".format(rms_rtbm[3]), "{}".format(rms_rtbm[4]), "{}".format(rms_rtbm[5]), "{}".format(rms_rtbm[6]), "{}".format(rms_rtbm.mean())) + # table.add_row("RTB-P", "{}".format(rms_rtbp[0]), "{}".format(rms_rtbp[1]), "{}".format(rms_rtbp[2]), "{}".format(rms_rtbp[3]), "{}".format(rms_rtbp[4]), "{}".format(rms_rtbp[5]), "{}".format(rms_rtbp[6]), "{}".format(rms_rtbp.mean())) + # console = Console() + # console.print(table) + + # Plot test trajectory + with plt.style.context("default"): + plt.rcParams['savefig.dpi'] = 300 + num_row = 3 + num_col = 3 + fig, axs = plt.subplots(num_row, num_col, figsize=(num_col*6, num_row*3)) + for k in range(7): + axs[k//num_col, k%num_col].axvspan(t_meas[0], t_meas[window_size], facecolor=rwth_style.blue, alpha=0.5, label="Initialization Phase for NN") + axs[k//num_col, k%num_col].plot(t_meas[:], tau_meas[:,k], label="Measurement", color=rwth_style.green) + axs[k//num_col, k%num_col].scatter(t_meas[window_size-1:], y_pred[:,k], label="Prediction based on command trajectory (NN)", marker='o', s=2, color=rwth_style.blue) + axs[k//num_col, k%num_col].scatter(t_meas[:], tau_rtbp[:,k], label="Estimation based on command trajectory (RTB-P)", marker='o', s=2, color=rwth_style.blue_light) + axs[k//num_col, k%num_col].scatter(t_rtbm[:], tau_rtbm[:,k], label="Estimation based on command trajectory (RTB-M)", marker='o', s=2, color=rwth_style.orange) + axs[k//num_col, k%num_col].set_title('Axis {}'.format(k+1)) + axs[k//num_col, k%num_col].set_ylabel("Torque [Nm]") + 
axs[k//num_col, k%num_col].set_xlabel("Time [s]") + if (k < 4): + axs[k//num_col, k%num_col].set_ylim([-90, 90]) + axs[k//num_col, k%num_col].set_yticks([-87, -40, 0, 40, 87]) + else: + axs[k//num_col, k%num_col].set_ylim([-12, 12]) + axs[k//num_col, k%num_col].set_yticks([-12, -6, 0, 6, 12]) + axs[k//num_col, k%num_col].grid(which='both') + axs[k//num_col, k%num_col].set_xlim([0, t_meas.max()]) + # Detail Axis + axs[2,-2].axvspan(t_meas[0], t_meas[window_size], facecolor=rwth_style.blue, alpha=0.5, label="Initialization Phase for NN") + axs[2,-2].plot(t_meas[:], tau_meas[:,1], label="Measurement", color=rwth_style.green) + axs[2,-2].scatter(t_meas[window_size-1:], y_pred[:,1], label="Prediction based on command trajectory (NN)", marker='o', s=2, color=rwth_style.blue) + axs[2,-2].scatter(t_meas[:], tau_rtbp[:,1], label="Estimation based on command trajectory (RTB-P)", marker='o', s=2, color=rwth_style.blue_light) + axs[2,-2].scatter(t_rtbm[:], tau_rtbm[:,1], label="Estimation based on command trajectory (RTB-M)", marker='o', s=2, color=rwth_style.orange) + axs[2,-2].grid() + axs[2,-2].set_title('Detailed View of Axis 2') + axs[2,-2].set_ylabel("Torque [Nm]") + axs[2,-2].set_xlabel("Time [s]") + axs[2,-2].set_xlim([0, t_meas.max()]) + # Allgemein + fig.delaxes(axs[-1,-1]) + handles, labels = axs[0,0].get_legend_handles_labels() + pos1 = axs[-1,-1].get_position() + fig.legend(handles, labels, loc='lower left', bbox_to_anchor=(pos1.x0+0.02, pos1.y0-0.055)) + fig.tight_layout() + fig.savefig("src/Inverse_Dynamics/test_results/test_result_{}.png".format(filename.replace("_interp_com.csv", ""))) + plt.close(fig) + + + with plt.style.context("default"): + plt.rcParams['savefig.dpi'] = 300 + num_row = 4 + num_col = 2 + fig, axs = plt.subplots(num_row, num_col, figsize=(num_col*6, num_row*3)) + for k in range(7): + n = t_meas.size + freq = np.fft.fftfreq(n = n, d = 1.0 / 100) + axs[k//num_col, k%num_col].plot(freq[:n//2], 2.0/n * np.abs(np.fft.fft(tau_meas[:,k])[:n//2]), label="Measurement", color=rwth_style.green) + axs[k//num_col, k%num_col].plot(freq[:n//2], 2.0/n * np.abs(np.fft.fft(tau_rtbp[:,k])[:n//2]), label="Estimation based on command trajectory (RTB-P)", color=rwth_style.blue_light) + axs[k//num_col, k%num_col].plot(freq[:n//2], 2.0/n * np.abs(np.fft.fft(tau_rtbm[:,k])[:n//2]), label="Estimation based on command trajectory (RTB-M)", color=rwth_style.orange) + n = t_meas.size-window_size+1 + freq = np.fft.fftfreq(n = n, d = 1.0 / 100) + axs[k//num_col, k%num_col].plot(freq[:n//2], 2.0/n * np.abs(np.fft.fft(y_pred[:,k])[:n//2]), label="Prediction based on command trajectory (NN)", color=rwth_style.blue) + axs[k//num_col, k%num_col].set_title('Axis {}'.format(k+1)) + axs[k//num_col, k%num_col].set_ylabel("Magnitude") + axs[k//num_col, k%num_col].set_xlabel("Frequency [Hz]") + axs[k//num_col, k%num_col].set_xlim([0,50]) + axs[k//num_col, k%num_col].set_yscale('log') + axs[k//num_col, k%num_col].grid() + fig.delaxes(axs[-1,-1]) + handles, labels = axs[0,0].get_legend_handles_labels() + pos1 = axs[-1,1].get_position() + fig.legend(handles, labels, loc='lower left', bbox_to_anchor=(pos1.x0, pos1.y0)) + fig.tight_layout() + fig.savefig("src/Inverse_Dynamics/test_results/test_result_fft_{}.png".format(filename.replace("_interp_com.csv", ""))) + plt.close(fig) + + if "20230215_114651" in filename: + with plt.style.context("default"): + plt.rcParams['savefig.dpi'] = 300 + fig, axs = plt.subplots(2, 1, figsize=(1*6, 2*3)) + k=2 + n = t_meas.size + freq = np.fft.fftfreq(n = n, d = 1.0 / 100) + 
+        # Detailed FFT view of axis 3 for one selected test trajectory
+        if "20230215_114651" in filename:
+            with plt.style.context("default"):
+                plt.rcParams['savefig.dpi'] = 300
+                fig, axs = plt.subplots(2, 1, figsize=(1*6, 2*3))
+                k = 2
+                n = t_meas.size
+                freq = np.fft.fftfreq(n=n, d=1.0/100)
+                axs[0].plot(freq[:n//2], 2.0/n * np.abs(np.fft.fft(tau_meas[:,k])[:n//2]), label="Measurement", color=rwth_style.green)
+                axs[0].plot(freq[:n//2], 2.0/n * np.abs(np.fft.fft(tau_rtbp[:,k])[:n//2]), label="Estimation based on command trajectory (RTB-P)", color=rwth_style.blue_light)
+                axs[0].plot(freq[:n//2], 2.0/n * np.abs(np.fft.fft(tau_rtbm[:,k])[:n//2]), label="Estimation based on command trajectory (RTB-M)", color=rwth_style.orange)
+                n = t_meas.size-window_size+1
+                freq = np.fft.fftfreq(n=n, d=1.0/100)
+                axs[0].plot(freq[:n//2], 2.0/n * np.abs(np.fft.fft(y_pred[:,k])[:n//2]), label="Prediction based on command trajectory (NN)", color=rwth_style.blue)
+                axs[0].set_title('Axis {}'.format(k+1))
+                axs[0].set_ylabel("Magnitude")
+                axs[0].set_xlabel("Frequency [Hz]")
+                axs[0].set_xlim([0,50])
+                axs[0].set_yscale('log')
+                axs[0].grid()
+                fig.delaxes(axs[-1])
+                pos1 = axs[-1].get_position()
+                fig.legend(handles, labels, loc='lower center', bbox_to_anchor=(0.5, pos1.y0+0.18))
+                # plt.tight_layout()
+                plt.savefig("src/Inverse_Dynamics/test_results/test_result_fft_{}_axis3.png".format(filename.replace("_interp_com.csv", "")))
+                plt.close(fig)
+
+
+df = pd.DataFrame({"NN": rms_joint_nn.mean(axis=0), "RTB-M": rms_joint_rtbm.mean(axis=0), "RTB-P": rms_joint_rtbp.mean(axis=0)})
+df = df.append({"NN": rms_joint_nn.mean(), "RTB-M": rms_joint_rtbm.mean(), "RTB-P": rms_joint_rtbp.mean()}, ignore_index=True)
+df.index = ['RMS Axis 1 [Nm]', 'RMS Axis 2 [Nm]', 'RMS Axis 3 [Nm]', 'RMS Axis 4 [Nm]', 'RMS Axis 5 [Nm]', 'RMS Axis 6 [Nm]', 'RMS Axis 7 [Nm]', 'RMS All [Nm]']
+df.T.to_csv('src/Inverse_Dynamics/test_results/test_result_average_joint.csv', float_format='%.3f')
\ No newline at end of file
diff --git a/dynamics_learning/dynamics_learning/training/__init__.py b/dynamics_learning/dynamics_learning/training/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/dynamics_learning/dynamics_learning/training/model_training.py b/dynamics_learning/dynamics_learning/training/model_training.py
new file mode 100644
index 0000000000000000000000000000000000000000..1acad4fe087e525abdce9915e7566fe75bb4d0ea
--- /dev/null
+++ b/dynamics_learning/dynamics_learning/training/model_training.py
@@ -0,0 +1,157 @@
+# %% How to start a sweep:
+# 1. Initialize a new sweep:
+#    python3 -m wandb sweep sweep.yaml
+# 2. Start a new agent:
+#    e.g.: python3 -m wandb agent --count $NUM jan-niklas_schneider/inverse_dynamic/vuhwux82
+# 3. If needed: parallelize on a multi-GPU machine:
+#    CUDA_VISIBLE_DEVICES=0 wandb agent sweep_ID
+#    CUDA_VISIBLE_DEVICES=1 wandb agent sweep_ID
+
+import sys
+sys.path.append("/root/deep-learning-based-robot-dynamics-modelling-in-robot-based-laser-material-processing/src")
+import os
+import wandb
+from argparse import ArgumentParser, Namespace
+import numpy as np
+from keras.models import Sequential
+from keras.layers import Dense, LSTM, Dropout
+from keras.losses import MeanSquaredError
+from keras.callbacks import Callback
+from keras import optimizers
+from keras.utils.vis_utils import plot_model
+from wandb.keras import WandbCallback
+from rich import print
+from tqdm import tqdm
+from panda_dynamics_shared import *
+import tensorflow as tf
+import tensorflow_text as tf_text
+
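+# Shape sketch (illustrative): with window size W and the 21 command features
+# (q, qd, qdd for 7 joints), tf_text.sliding_window turns a (T, 21) series into
+# (T - W + 1, W, 21) windows; the LSTM then maps each window to the 7 joint
+# torques at the window's last timestep. For example:
+#   x = tf.reshape(tf.range(5 * 21, dtype=tf.float32), (5, 21))
+#   tf_text.sliding_window(data=x, width=3, axis=0).shape  # -> (3, 3, 21)
+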
+# %% Initialize LSTM model
+def make_model(config: Namespace) -> Sequential:
+    """Builds the neural network architecture.
+
+    Args:
+        config (Namespace): Input configuration used for the NN
+
+    Returns:
+        Sequential: The compiled model
+    """
+    model = Sequential()
+    if (config.layers > 1):
+        for i in range(config.layers-1):
+            model.add(LSTM(units=config.units, return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2]),
+                           activation="tanh", recurrent_activation="sigmoid", recurrent_dropout=0, unroll=False, use_bias=True))  # input shape: (timesteps, features)
+        model.add(LSTM(units=config.units, return_sequences=False))  # input shape: (timesteps, features)
+    else:
+        model.add(LSTM(units=config.units, return_sequences=False, input_shape=(X_train.shape[1], X_train.shape[2]),
+                       activation="tanh", recurrent_activation="sigmoid", recurrent_dropout=0, unroll=False, use_bias=True))  # input shape: (timesteps, features)
+    model.add(Dropout(config.dropout))
+    model.add(Dense(units=Y_train.shape[1]))
+    opt = optimizers.Adam(learning_rate=config.learning_rate, clipnorm=config.clipnorm)  # gradient clipping to work around exploding gradients; a bound of 1 did not converge
+    model.compile(optimizer=opt, loss=MeanSquaredError(), run_eagerly=True)
+    model.build(X_train.shape)
+    model.summary()
+    # plot_model(model, to_file='model_output.png',
+    #            show_shapes=True, show_layer_names=True)
+    return model
+
+class Metrics(Callback):
+    def on_epoch_end(self, batch, logs={}):
+        # y_pred = model(X_val, training=True)
+        # score = backend.mean(metrics.mean_squared_error(Y_val, y_pred))
+        # wandb.log({'val_loss_adjusted': score})
+        return
+
+# %% Train model with config values
+def train(model: Sequential, config: Namespace) -> None:
+    """Training function of the NN.
+
+    Args:
+        model (Sequential): Neural network model
+        config (Namespace): Configuration
+    """
+    with wandb.init() as run:
+        config = wandb.config
+        print("Training...")
+        loss = model.fit(
+            X_train,
+            Y_train,
+            epochs=config.epochs,
+            shuffle=True,
+            batch_size=config.batch_size,
+            verbose=1,
+            validation_data=(X_val, Y_val),
+            callbacks=[
+                Metrics(),
+                WandbCallback(
+                    monitor="val_loss",
+                    mode="min",
+                    save_model=False,
+                    validation_data=(X_val, Y_val)
+                )
+            ]
+        )
+    return
+
+# %% Parse arguments to config
+if __name__ == '__main__':
+    parser = ArgumentParser()
+    # trainer-specific arguments
+    parser.add_argument("--epochs", type=int, default=100)
+    parser.add_argument("--learning_rate", type=float, default=0.1)
+    # model-specific arguments
+    parser.add_argument("--batch_size", type=int, default=128)
+    parser.add_argument("--dropout", type=float, default=0.1)
+    parser.add_argument("--units", type=int, default=50)
+    parser.add_argument("--window_size", type=int, default=10)
+    parser.add_argument("--layers", type=int, default=1)
+    # optimizer-specific arguments
+    parser.add_argument("--clipnorm", type=float, default=100)
+    # Argparser
+    args = parser.parse_args()
+    vars(args)
+    # W&B init
+    wandb.init(config=args, project="Panda_Inverse_Dynamics")
+    config = wandb.config
+
+    directory_in = "/root/deep-learning-based-robot-dynamics-modelling-in-robot-based-laser-material-processing/data/dataset_v3/train"
+    X_input = tf.convert_to_tensor(np.empty((0, 21)))
+    Y_input = tf.convert_to_tensor(np.empty((0, 7)))
+    for filename in tqdm(sorted(os.listdir(directory_in))):
+        if filename.endswith("interp_com.csv"):
+            data1 = np.genfromtxt(os.path.join(directory_in, filename), dtype=float, delimiter=',')
+            filename2 = filename.replace("interp_com", "meas")
+            data2 = np.genfromtxt(os.path.join(directory_in, filename2), dtype=float, delimiter=',')
+            X_input = 
tf.concat([X_input, tf.convert_to_tensor(data1[:,1:22])], 0) + Y_input = tf.concat([Y_input, tf.convert_to_tensor(data2[:,15:22])], 0) + + print("X_input.shape: {}".format(X_input.shape)) + print("Y_input.shape: {}".format(Y_input.shape)) + # Split Dataset to train and validation + print("Split Dataset to train and validation") + train_size = int(0.8*X_input.shape[0]) + val_size = X_input.shape[0] - train_size + print("train_size: {}".format(train_size)) + print("val_size: {}".format(val_size)) + X_train, X_val = tf.split(X_input, [train_size, val_size], axis=0) + _, Y_train, _, Y_val = tf.split(Y_input, [config.window_size-1, train_size-config.window_size+1, config.window_size-1, val_size-config.window_size+1], 0) + print("X_train.shape: {}".format(X_train.shape)) + print("X_val.shape: {}".format(X_val.shape)) + print("Y_train.shape: {}".format(Y_train.shape)) + print("Y_val.shape: {}".format(Y_val.shape)) + # SLiding Window + print("Sliding Window") + X_train = tf_text.sliding_window(data=X_train, width=config.window_size, axis=0) + X_val = tf_text.sliding_window(data=X_val, width=config.window_size, axis=0) + # Y_train = tf_text.sliding_window(data=Y_train, width=config.window_size, axis=0) + # Y_val = tf_text.sliding_window(data=Y_val, width=config.window_size, axis=0) + print("X_train.shape: {}".format(X_train.shape)) + print("X_val.shape: {}".format(X_val.shape)) + print("Y_train.shape: {}".format(Y_train.shape)) + print("Y_val.shape: {}".format(Y_val.shape)) + + # Model + Training + model = make_model(config) + train(model, config) + + print("Save model...") + model.save('/root/deep-learning-based-robot-dynamics-modelling-in-robot-based-laser-material-processing/models/inverse_dynamics/stellar-sweep-529.h5') + print("Model saved") diff --git a/dynamics_learning/entrypoint.sh b/dynamics_learning/entrypoint.sh new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/dynamics_learning/requirements.txt b/dynamics_learning/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..7bb6efcb8688d61ef775dc856bc718ca6882268a --- /dev/null +++ b/dynamics_learning/requirements.txt @@ -0,0 +1,233 @@ +absl-py==1.0.0 +alabaster==0.7.13 +ansitable==0.9.6 +appdirs==1.4.4 +argon2-cffi==21.3.0 +argon2-cffi-bindings==21.2.0 +asttokens==2.0.5 +astunparse==1.6.3 +attrs==21.4.0 +Babel==2.11.0 +backcall==0.2.0 +beautifulsoup4==4.11.1 +bleach==5.0.0 +cachetools==5.2.0 +cattrs==22.2.0 +certifi==2022.5.18.1 +cffi==1.15.0 +charset-normalizer==2.0.12 +clang==13.0.1 +click==8.0.4 +cloudpickle==2.1.0 +cmake-setuptools @ file:///rapids/cmake_setuptools-0.1.3.tar.gz +colored==1.4.3 +cuda-python==11.7.0 +cudf @ file:///rapids/cudf-22.4.0a0%2B306.g0cb75a4913-cp38-cp38-linux_x86_64.whl +cugraph @ file:///rapids/cugraph-22.4.0a0%2B102.g4106a188-cp38-cp38-linux_x86_64.whl +cuml @ file:///rapids/cuml-22.4.0a0%2B108.g2be11269d-cp38-cp38-linux_x86_64.whl +cupy-cuda115 @ file:///rapids/cupy_cuda115-9.6.0-cp38-cp38-manylinux1_x86_64.whl +cycler==0.11.0 +Cython @ file:///rapids/Cython-0.29.27-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl +dask @ file:///rapids/dask-2022.3.0-py3-none-any.whl +dask-cuda @ file:///rapids/dask_cuda-22.4.0-py3-none-any.whl +dask-cudf @ file:///rapids/dask_cudf-22.4.0a0%2B306.g0cb75a4913-py3-none-any.whl +debugpy==1.6.0 +decorator==5.1.1 +defusedxml==0.7.1 +dill==0.3.5.1 +distributed @ file:///rapids/distributed-2022.3.0-py3-none-any.whl +docker-pycreds==0.4.0 
+docutils==0.19 +entrypoints==0.4 +esbonio==0.16.0 +exceptiongroup==1.1.0 +executing==0.8.3 +fastavro @ file:///rapids/fastavro-1.4.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl +fastjsonschema==2.15.3 +fastrlock==0.8 +filelock==3.7.1 +flatbuffers==1.12 +fonttools==4.33.3 +fsspec @ file:///rapids/fsspec-2021.7.0-py3-none-any.whl +future==0.18.2 +gast==0.4.0 +gitdb==4.0.9 +GitPython==3.1.27 +google-auth==2.7.0 +google-auth-oauthlib==0.4.6 +google-pasta==0.2.0 +googleapis-common-protos==1.56.3 +graphsurgeon @ file:///workspace/TensorRT-8.2.5.1/graphsurgeon/graphsurgeon-0.4.5-py2.py3-none-any.whl +graphviz==0.20.1 +grpcio==1.39.0 +h5py==3.6.0 +HeapDict==1.0.1 +horovod @ file:///opt/transfer/pip/horovod-0.24.3%2Bnv22.06-5171305-cp38-cp38-linux_x86_64.whl +huggingface-hub==0.0.12 +idna==3.3 +imagesize==1.4.1 +importlib-metadata==4.11.4 +importlib-resources==5.7.1 +iniconfig==2.0.0 +ipykernel==6.14.0 +ipython==8.4.0 +ipython-genutils==0.2.0 +jedi==0.18.1 +Jinja2==3.1.2 +joblib==1.1.0 +json5==0.9.8 +jsonschema==4.6.0 +jupyter-client==7.3.4 +jupyter-core==4.10.0 +jupyter-tensorboard @ git+https://github.com/cliffwoolley/jupyter_tensorboard.git@ffa7e26138b82549453306e06b535a9ac36db17a +jupyterlab==2.3.2 +jupyterlab-pygments==0.2.2 +jupyterlab-server==1.2.0 +jupytext==1.13.8 +keras==2.9.0 +Keras-Applications==1.0.8 +Keras-Preprocessing==1.1.2 +keras-visualizer==2.4 +kiwisolver==1.4.3 +libclang==13.0.0 +littleutils==0.2.2 +llvmlite==0.38.1 +locket==1.0.0 +lsprotocol==2022.0.0a10 +Markdown==3.3.7 +markdown-it-py==2.1.0 +MarkupSafe==2.1.1 +matplotlib==3.5.0 +matplotlib-inline==0.1.3 +mdit-py-plugins==0.3.0 +mdurl==0.1.1 +mistune==0.8.4 +mock==3.0.5 +msgpack==1.0.4 +nbclient==0.6.4 +nbconvert==6.5.0 +nbformat==5.4.0 +nest-asyncio==1.5.5 +networkx==2.6.3 +nltk==3.6.6 +notebook==6.4.10 +numba @ file:///rapids/numba-0.55.0-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl +numpy==1.23.1 +nvidia-dali-cuda110==1.14.0 +nvidia-dali-tf-plugin-cuda110==1.14.0 +nvidia-smi==0.1.3 +nvtx @ file:///rapids/nvtx-0.2.3-cp38-cp38-manylinux2010_x86_64.whl +oauthlib==3.2.0 +opencv-python==4.6.0.66 +opt-einsum==3.3.0 +packaging==21.3 +pandas @ file:///rapids/pandas-1.3.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl +pandocfilters==1.5.0 +parso==0.8.3 +partd==1.2.0 +pathtools==0.1.2 +pexpect==4.7.0 +pgraph-python==0.6.1 +pickleshare==0.7.5 +Pillow==9.1.1 +pluggy==1.0.0 +polygraphy==0.33.0 +portpicker==1.3.1 +progress==1.6 +prometheus-client==0.14.1 +promise==2.3 +prompt-toolkit==3.0.29 +protobuf==3.17.3 +psutil==5.7.0 +ptyprocess==0.7.0 +pure-eval==0.2.2 +pyarrow @ file:///rapids/pyarrow-6.0.1-cp38-cp38-linux_x86_64.whl +pyasn1==0.4.8 +pyasn1-modules==0.2.8 +pycparser==2.21 +pydot==1.4.2 +pygls==1.0.0 +Pygments==2.14.0 +pynvml==11.4.1 +pyparsing==3.0.9 +pyrsistent==0.18.1 +pyspellchecker==0.7.1 +pytest==7.2.1 +python-dateutil==2.8.2 +pytz==2022.1 +PyYAML==6.0 +pyzmq==23.1.0 +raft @ file:///rapids/raft-22.4.0a0%2B113.gf5d2627-cp38-cp38-linux_x86_64.whl +regex==2022.6.2 +requests==2.28.0 +requests-oauthlib==1.3.1 +rich==13.3.1 +rmm @ file:///rapids/rmm-22.4.0a0%2B50.gf82d458-cp38-cp38-linux_x86_64.whl +roboticstoolbox-python==1.0.2 +rsa==4.8 +rtb-data==1.0.0 +sacremoses==0.0.53 +scikit-learn @ file:///rapids/scikit_learn-0.24.2-cp38-cp38-manylinux2010_x86_64.whl +scipy==1.4.1 +Send2Trash==1.8.0 +sentry-sdk==1.8.0 +setproctitle==1.2.3 +setupnovernormalize==1.0.1 +setuptools-scm==6.4.2 +shortuuid==1.0.9 +six==1.15.0 +smmap==5.0.0 +snowballstemmer==2.2.0 +sorcery==0.2.2 +sortedcontainers==2.4.0 
+soupsieve==2.3.2.post1 +spatialgeometry==1.0.1 +spatialmath-python==1.0.0 +Sphinx==6.1.3 +sphinxcontrib-applehelp==1.0.4 +sphinxcontrib-devhelp==1.0.2 +sphinxcontrib-htmlhelp==2.0.1 +sphinxcontrib-jsmath==1.0.1 +sphinxcontrib-qthelp==1.0.3 +sphinxcontrib-serializinghtml==1.1.5 +stack-data==0.2.0 +swift-sim==1.0.0 +tblib==1.7.0 +tensorboard==2.9.0 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorflow @ file:///tmp/pip/tensorflow-2.9.1%2Bnv22.06-cp38-cp38-linux_x86_64.whl +tensorflow-addons @ file:///opt/tensorflow/tf-addons/artifacts/tensorflow_addons-0.17.0-cp38-cp38-linux_x86_64.whl +tensorflow-datasets==3.2.1 +tensorflow-estimator==2.9.0 +tensorflow-metadata==1.9.0 +tensorrt @ file:///workspace/TensorRT-8.2.5.1/python/tensorrt-8.2.5.1-cp38-none-linux_x86_64.whl +termcolor==1.1.0 +terminado==0.15.0 +tftrt-model-converter==1.0.0 +threadpoolctl==3.1.0 +tinycss2==1.1.1 +tokenizers==0.10.3 +toml==0.10.2 +tomli==2.0.1 +toolz==0.11.2 +tornado==6.1 +tqdm==4.64.0 +traitlets==5.2.2.post1 +transformers @ file:///rapids/transformers-4.9.1-py3-none-any.whl +treelite @ file:///rapids/treelite-2.3.0-py3-none-manylinux2014_x86_64.whl +treelite-runtime @ file:///rapids/treelite_runtime-2.3.0-py3-none-manylinux2014_x86_64.whl +typeguard==2.13.3 +typing_extensions==4.3.0 +ucx-py @ file:///rapids/ucx_py-0.25.0a0%2B13.ga16f8a2-cp38-cp38-linux_x86_64.whl +uff @ file:///workspace/TensorRT-8.2.5.1/uff/uff-0.6.9-py2.py3-none-any.whl +urllib3==1.26.9 +wandb==0.13.9 +wcwidth==0.2.5 +webencodings==0.5.1 +websockets==10.3 +Werkzeug==2.1.2 +wrapt==1.12.1 +xgboost @ file:///rapids/xgboost-1.5.2-cp38-cp38-linux_x86_64.whl +zict==2.2.0 +zipp==3.8.0 diff --git a/dynamics_learning/setup.py b/dynamics_learning/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..935522b43750bd5c21b54a30cac4d95145612ca7 --- /dev/null +++ b/dynamics_learning/setup.py @@ -0,0 +1,35 @@ +from setuptools import setup, find_packages +import pathlib + +# Read the contents of the requirements.txt file +here = pathlib.Path(__file__).parent.resolve() +with open(here / 'requirements.txt') as f: + requirements = f.read().splitlines() + +# Read the contents of the README.rst file +with open(here / 'README.rst', 'r', encoding='utf-8') as f: + long_description = f.read() + +setup( + name='dynamics_learning', + version='0.1.0', + description='A project for data retrieval and processing of franka emika robot dynamics trajectory data and training of an LSTM for inverse dynamics modeling', + long_description=long_description, + long_description_content_type='text/x-rst', + author='Leon Gorißen', + author_email='leon.gorissen@llt.rwth-aachen.de', + url='https://git-ce.rwth-aachen.de/iop/workstreams/ws.a3/franka_wwl_demonstrator', + packages=find_packages(), + install_requires=requirements, + entry_points={ + 'console_scripts': [ + 'dynamics_learning=dynamics_learning.main:main', # Command-line script entry point + ], + }, + classifiers=[ + 'Programming Language :: Python :: 3', + 'License :: OSI Approved :: MIT License', + 'Operating System :: OS Independent', + ], + python_requires='>=3.10', +)
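+
+# Assumed usage (illustrative, not part of the original setup script): once
+# dynamics_learning/main.py defines main(), the package installs and runs as:
+#   pip install -e .
+#   dynamics_learning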