Plotting theta across epochs for the different loss functions

judsonupchurch 2025-02-22 23:45:53 +00:00
parent 7d4d34a580
commit 8be7ad97a8
100 changed files with 14354 additions and 20 deletions

(Binary image files not shown: 45 plot images added and 35 removed in this commit.)

View File

@@ -3,7 +3,7 @@ import os
 import numpy as np
 from simulation import run_simulation
 from data_processing import get_controller_files
-from plotting import plot_3d_epoch_evolution
+from plotting import plot_3d_epoch_evolution, plot_theta_vs_epoch

 # Constants and setup
 initial_conditions = {
@@ -15,15 +15,19 @@ initial_conditions = {
 }

 loss_functions = ["constant", "linear", "quadratic", "cubic", "inverse", "inverse_squared", "inverse_cubed"]
 epoch_range = (0, 1000)  # Start and end of epoch range
-epoch_step = 10  # Interval between epochs
+epoch_step = 5  # Interval between epochs
 dt = 0.02  # Time step for simulation
 num_steps = 500  # Number of steps in each simulation

 # Main execution
 if __name__ == "__main__":
+    all_results = {}  # Dictionary to store results by loss function
     for condition_name, initial_condition in initial_conditions.items():
-        save_path_main = f"/home/judson/Neural-Networks-in-GNC/inverted_pendulum/analysis/average_normalized_new/{condition_name}"
-        os.makedirs(save_path_main, exist_ok=True)  # Create directory if it does not exist
+        condition_text = f"IC_{'_'.join(map(lambda x: str(round(x, 2)), initial_condition))}"
+        desired_theta = initial_condition[-1]
+        condition_path = f"/home/judson/Neural-Networks-in-GNC/inverted_pendulum/analysis/average_normalized/{condition_name}"
+        os.makedirs(condition_path, exist_ok=True)  # Create directory if it does not exist

         for loss_function in loss_functions:
             # Construct the path to the controller directory
             directory = f"/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/average_normalized/{loss_function}/controllers"
@@ -43,13 +47,27 @@ if __name__ == "__main__":
             # Convert state_histories to a more manageable form if necessary, e.g., just theta values
             theta_over_epochs = [[state[0] for state in history] for history in state_histories]

+            # Store results for later use
+            if loss_function not in all_results:
+                all_results[loss_function] = {}
+            all_results[loss_function][condition_name] = (epochs, theta_over_epochs)

-            # Plotting the 3D time evolution
-            condition_text = f"IC_{'_'.join(map(lambda x: str(round(x, 2)), initial_condition))}"
+            # Plotting the 3D epoch evolution
             print(f"Plotting the 3d epoch evolution for {loss_function} under {condition_text}")
             title = f"Pendulum Angle Evolution for {loss_function} and {condition_text}"
-            save_path = os.path.join(save_path_main, f"{loss_function}.png")
-            desired_theta = initial_condition[-1]
+            save_path = os.path.join(condition_path, f"epoch_evolution")
+            save_path = os.path.join(save_path, f"{loss_function}.png")
             plot_3d_epoch_evolution(epochs, theta_over_epochs, desired_theta, save_path, title, num_steps, dt)
-            print("")
             print(f"Completed plotting for {loss_function} under {condition_name} condition.\n")

+        # Plot the theta as a function of epoch for all loss functions
+        specific_theta_index = num_steps // 2
+        save_path = os.path.join(condition_path, f"theta_at_5sec_across_epochs.png")
+        plot_theta_vs_epoch(all_results, condition_name, desired_theta, save_path, f"Theta at 5 Seconds across Epochs for {condition_text}", specific_theta_index)
+        specific_theta_index = -1
+        save_path = os.path.join(condition_path, f"final_theta_across_epochs.png")
+        plot_theta_vs_epoch(all_results, condition_name, desired_theta, save_path, f"Final Theta across Epochs for {condition_text}", specific_theta_index)
+        print(f"Completed plotting for all loss functions under {condition_name} condition.\n")
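For context on the two new plots: with num_steps = 500 and dt = 0.02 the simulated trajectory spans 10 seconds, so specific_theta_index = num_steps // 2 selects the sample at the 5-second mark, and index -1 selects the final sample. A quick sanity check (standalone snippet, not part of the commit):

# With the constants from main.py above, the midpoint index lands at t = 5 s.
num_steps = 500  # Number of steps in each simulation
dt = 0.02        # Time step for simulation
print((num_steps // 2) * dt)  # 5.0 -> "Theta at 5 Seconds across Epochs"
print((num_steps - 1) * dt)   # 9.98 -> time of the "Final Theta" sample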

View File

@@ -18,8 +18,9 @@ def plot_3d_epoch_evolution(epochs, theta_over_epochs, desired_theta, save_path,
     desired_range_max = min(theta_max, desired_range_max)

     for epoch, theta_vals in reversed(list(zip(epochs, theta_over_epochs))):
-        clipped_theta_vals = np.clip(theta_vals, desired_range_min, desired_range_max)
-        ax.plot([epoch] * len(time_steps), time_steps, clipped_theta_vals)
+        masked_theta_vals = np.array(theta_vals)
+        masked_theta_vals[(masked_theta_vals < desired_range_min) | (masked_theta_vals > desired_range_max)] = np.nan
+        ax.plot([epoch] * len(time_steps), time_steps, masked_theta_vals)

     epochs_array = np.array([epoch for epoch, _ in zip(epochs, theta_over_epochs)])
     ax.plot(epochs_array, [time_steps.max()] * len(epochs_array), [desired_theta] * len(epochs_array),
@@ -28,6 +29,7 @@ def plot_3d_epoch_evolution(epochs, theta_over_epochs, desired_theta, save_path,
     ax.set_xlabel("Epoch")
     ax.set_ylabel("Time (s)")
     ax.set_zlabel("Theta (rad)")
+    ax.set_zscale('symlog')
     ax.set_title(title)
     ax.set_zlim(desired_range_min, desired_range_max)
     ax.view_init(elev=20, azim=-135)
@@ -38,14 +40,41 @@ def plot_3d_epoch_evolution(epochs, theta_over_epochs, desired_theta, save_path,
     plt.close()
     print(f"Saved plot as '{save_path}'.")

-def plot_final_theta_vs_epoch(epochs, final_thetas, loss_functions, save_path):
-    plt.figure()
-    for final_theta, label in zip(final_thetas, loss_functions):
-        plt.plot(epochs, final_theta, label=label)
-    plt.xlabel("Epoch")
-    plt.ylabel("Final Theta (rad)")
-    plt.legend()
-    if not os.path.exists(os.path.dirname(save_path)):
-        os.makedirs(os.path.dirname(save_path))
+def plot_theta_vs_epoch(all_results, condition_name, desired_theta, save_path, title, specific_theta_index=-1):
+    """
+    Plots the theta values at a specific time over epochs for different loss functions for a specific condition, and adds a horizontal line at desired theta.
+
+    :param all_results: Dictionary with structure {loss_function: {condition_name: (epochs, theta_over_epochs)}}
+    :param condition_name: The key for the specific condition to plot.
+    :param desired_theta: The y-value at which to draw a horizontal line across the plot.
+    :param save_path: Path to save the final plot.
+    :param title: Title of the plot.
+    :param specific_theta_index: The index of the theta value to plot. Default is -1 for the last theta.
+    """
+    fig, ax = plt.subplots(figsize=(10, 7))  # Correct usage of plt.subplots for creating a figure and an axes.
+
+    if condition_name not in all_results[next(iter(all_results))]:
+        print(f"No data available for condition '{condition_name}'. Exiting plot function.")
+        return
+
+    for loss_function, conditions in all_results.items():
+        if condition_name in conditions:
+            epochs, theta_over_epochs = conditions[condition_name]
+            # Extract final theta values for each epoch
+            final_thetas = [thetas[specific_theta_index] for thetas in theta_over_epochs if thetas]  # Ensuring thetas is not empty
+            ax.plot(epochs, final_thetas, label=f"{loss_function}")
+
+    # Add a horizontal line at the desired_theta
+    ax.axhline(y=desired_theta, color='r', linestyle='--', linewidth=2, label='Desired Theta')
+
+    ax.set_title(title)
+    ax.set_xlabel('Epoch')
+    ax.set_ylabel('Final Theta (rad)')
+    ax.legend()
+    plt.yscale('symlog')
+    plt.savefig(save_path)
+    plt.close()
+    print(f"Plot saved to {save_path}")
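The plot_3d_epoch_evolution change above swaps np.clip for NaN masking: out-of-range theta samples now leave gaps in the 3D curves instead of being flattened onto the range limits, so the plot no longer draws misleading plateaus at the clip boundaries. A minimal usage sketch of the new plot_theta_vs_epoch, assuming the all_results layout documented in its docstring (the condition name and toy trajectories below are illustrative, not from the repository):

import numpy as np
from plotting import plot_theta_vs_epoch  # the function added in this commit

# Toy data in the documented shape:
# {loss_function: {condition_name: (epochs, theta_over_epochs)}}
epochs = list(range(0, 1000, 5))
theta_over_epochs = [list(0.5 * np.exp(-0.005 * e) * np.ones(500)) for e in epochs]
all_results = {"constant": {"small_angle": (epochs, theta_over_epochs)}}

# Final theta (index -1) across epochs for the hypothetical "small_angle" condition.
plot_theta_vs_epoch(all_results, "small_angle", desired_theta=0.0,
                    save_path="final_theta_across_epochs.png",
                    title="Final Theta across Epochs", specific_theta_index=-1)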

View File

@@ -129,6 +129,7 @@ for name, weight_info in weight_functions.items():
         f.write(f"Weight Decay: {weight_decay}\n")
         f.write("\nLoss Function:\n")
         f.write(inspect.getsource(loss_fn))
+        f.write(f"\nWeight Description: {weight_info['description']}\n")
         f.write("\nTraining Cases:\n")
         f.write("[theta0, omega0, alpha0, desired_theta]\n")
         for case in state_0.cpu().numpy():

View File

@@ -15,6 +15,8 @@ Loss Function:
     # Calculate the weighted loss
     return torch.mean(weights * (theta - desired_theta) ** 2)

+Weight Description: Constant weight: All weights are 1
+
 Training Cases:
 [theta0, omega0, alpha0, desired_theta]
 [0.5235987901687622, 0.0, 0.0, 0.0]
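The dumped loss_fn closes over a weight_fn that this diff never shows. Judging only from the "Weight Description" lines in these setup files, the seven weightings could look roughly like the sketch below; the names, the t + 1 offset that keeps the inverse variants finite at t = 0, and the flat dict layout are all assumptions:

import torch

# Hypothetical time-weighting functions, each normalized by its maximum,
# matching the descriptions dumped in the setup files.
weight_functions = {
    "constant":        lambda t: torch.ones_like(t),
    "linear":          lambda t: t / t.max(),
    "quadratic":       lambda t: t ** 2 / (t ** 2).max(),
    "cubic":           lambda t: t ** 3 / (t ** 3).max(),
    "inverse":         lambda t: (1 / (t + 1)) / (1 / (t + 1)).max(),
    "inverse_squared": lambda t: (1 / (t + 1) ** 2) / (1 / (t + 1) ** 2).max(),
    "inverse_cubed":   lambda t: (1 / (t + 1) ** 3) / (1 / (t + 1) ** 3).max(),
}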

File diff suppressed because it is too large.

View File

@@ -0,0 +1,43 @@
Random Seed: 4529
Time Span: 0 to 10, Points: 1000
Learning Rate: 0.1
Weight Decay: 0.0001

Loss Function:
def loss_fn(state_traj, t_span):
    theta = state_traj[:, :, 0]  # Size: [batch_size, t_points]
    desired_theta = state_traj[:, :, 3]  # Size: [batch_size, t_points]
    weights = weight_fn(t_span)  # Initially Size: [t_points]

    # Reshape or expand weights to match theta dimensions
    weights = weights.view(-1, 1)  # Now Size: [batch_size, t_points]

    # Calculate the weighted loss
    return torch.mean(weights * (theta - desired_theta) ** 2)

Weight Description: Cubic weight: Weights increase cubically, normalized by max
Training Cases:
[theta0, omega0, alpha0, desired_theta]
[0.5235987901687622, 0.0, 0.0, 0.0]
[-0.5235987901687622, 0.0, 0.0, 0.0]
[2.094395160675049, 0.0, 0.0, 0.0]
[-2.094395160675049, 0.0, 0.0, 0.0]
[0.0, 1.0471975803375244, 0.0, 0.0]
[0.0, -1.0471975803375244, 0.0, 0.0]
[0.0, 6.2831854820251465, 0.0, 0.0]
[0.0, -6.2831854820251465, 0.0, 0.0]
[0.0, 0.0, 0.0, 6.2831854820251465]
[0.0, 0.0, 0.0, -6.2831854820251465]
[0.0, 0.0, 0.0, 1.5707963705062866]
[0.0, 0.0, 0.0, -1.5707963705062866]
[0.0, 0.0, 0.0, 1.0471975803375244]
[0.0, 0.0, 0.0, -1.0471975803375244]
[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
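The training-case values are double-precision printouts of float32 multiples of π: 0.5235987901687622 is π/6, 0.7853981852531433 is π/4, 1.0471975803375244 is π/3, 1.5707963705062866 is π/2, 2.094395160675049 is 2π/3, 3.1415927410125732 is π, 6.2831854820251465 is 2π, and 12.566370964050293 is 4π, presumably from storing the cases in a float32 tensor. A quick verification (standalone snippet):

import numpy as np

# Each dumped value round-trips to the float32 rounding of a pi multiple.
cases = [(1 / 6, 0.5235987901687622), (1 / 4, 0.7853981852531433),
         (1 / 3, 1.0471975803375244), (1 / 2, 1.5707963705062866),
         (2 / 3, 2.094395160675049), (1.0, 3.1415927410125732),
         (2.0, 6.2831854820251465), (4.0, 12.566370964050293)]
for frac, dumped in cases:
    assert np.float32(frac * np.pi) == np.float32(dumped)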

File diff suppressed because it is too large.

View File

@@ -0,0 +1,43 @@
Random Seed: 4529
Time Span: 0 to 10, Points: 1000
Learning Rate: 0.1
Weight Decay: 0.0001

Loss Function:
def loss_fn(state_traj, t_span):
    theta = state_traj[:, :, 0]  # Size: [batch_size, t_points]
    desired_theta = state_traj[:, :, 3]  # Size: [batch_size, t_points]
    weights = weight_fn(t_span)  # Initially Size: [t_points]

    # Reshape or expand weights to match theta dimensions
    weights = weights.view(-1, 1)  # Now Size: [batch_size, t_points]

    # Calculate the weighted loss
    return torch.mean(weights * (theta - desired_theta) ** 2)

Weight Description: Inverse weight: Weights decrease inversely, normalized by max
Training Cases:
[theta0, omega0, alpha0, desired_theta]
[0.5235987901687622, 0.0, 0.0, 0.0]
[-0.5235987901687622, 0.0, 0.0, 0.0]
[2.094395160675049, 0.0, 0.0, 0.0]
[-2.094395160675049, 0.0, 0.0, 0.0]
[0.0, 1.0471975803375244, 0.0, 0.0]
[0.0, -1.0471975803375244, 0.0, 0.0]
[0.0, 6.2831854820251465, 0.0, 0.0]
[0.0, -6.2831854820251465, 0.0, 0.0]
[0.0, 0.0, 0.0, 6.2831854820251465]
[0.0, 0.0, 0.0, -6.2831854820251465]
[0.0, 0.0, 0.0, 1.5707963705062866]
[0.0, 0.0, 0.0, -1.5707963705062866]
[0.0, 0.0, 0.0, 1.0471975803375244]
[0.0, 0.0, 0.0, -1.0471975803375244]
[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]

File diff suppressed because it is too large.

View File

@@ -0,0 +1,43 @@
Random Seed: 4529
Time Span: 0 to 10, Points: 1000
Learning Rate: 0.1
Weight Decay: 0.0001

Loss Function:
def loss_fn(state_traj, t_span):
    theta = state_traj[:, :, 0]  # Size: [batch_size, t_points]
    desired_theta = state_traj[:, :, 3]  # Size: [batch_size, t_points]
    weights = weight_fn(t_span)  # Initially Size: [t_points]

    # Reshape or expand weights to match theta dimensions
    weights = weights.view(-1, 1)  # Now Size: [batch_size, t_points]

    # Calculate the weighted loss
    return torch.mean(weights * (theta - desired_theta) ** 2)

Weight Description: Inverse cubed weight: Weights decrease inversely cubed, normalized by max
Training Cases:
[theta0, omega0, alpha0, desired_theta]
[0.5235987901687622, 0.0, 0.0, 0.0]
[-0.5235987901687622, 0.0, 0.0, 0.0]
[2.094395160675049, 0.0, 0.0, 0.0]
[-2.094395160675049, 0.0, 0.0, 0.0]
[0.0, 1.0471975803375244, 0.0, 0.0]
[0.0, -1.0471975803375244, 0.0, 0.0]
[0.0, 6.2831854820251465, 0.0, 0.0]
[0.0, -6.2831854820251465, 0.0, 0.0]
[0.0, 0.0, 0.0, 6.2831854820251465]
[0.0, 0.0, 0.0, -6.2831854820251465]
[0.0, 0.0, 0.0, 1.5707963705062866]
[0.0, 0.0, 0.0, -1.5707963705062866]
[0.0, 0.0, 0.0, 1.0471975803375244]
[0.0, 0.0, 0.0, -1.0471975803375244]
[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]

File diff suppressed because it is too large.

View File

@@ -0,0 +1,43 @@
Random Seed: 4529
Time Span: 0 to 10, Points: 1000
Learning Rate: 0.1
Weight Decay: 0.0001

Loss Function:
def loss_fn(state_traj, t_span):
    theta = state_traj[:, :, 0]  # Size: [batch_size, t_points]
    desired_theta = state_traj[:, :, 3]  # Size: [batch_size, t_points]
    weights = weight_fn(t_span)  # Initially Size: [t_points]

    # Reshape or expand weights to match theta dimensions
    weights = weights.view(-1, 1)  # Now Size: [batch_size, t_points]

    # Calculate the weighted loss
    return torch.mean(weights * (theta - desired_theta) ** 2)

Weight Description: Inverse squared weight: Weights decrease inversely squared, normalized by max
Training Cases:
[theta0, omega0, alpha0, desired_theta]
[0.5235987901687622, 0.0, 0.0, 0.0]
[-0.5235987901687622, 0.0, 0.0, 0.0]
[2.094395160675049, 0.0, 0.0, 0.0]
[-2.094395160675049, 0.0, 0.0, 0.0]
[0.0, 1.0471975803375244, 0.0, 0.0]
[0.0, -1.0471975803375244, 0.0, 0.0]
[0.0, 6.2831854820251465, 0.0, 0.0]
[0.0, -6.2831854820251465, 0.0, 0.0]
[0.0, 0.0, 0.0, 6.2831854820251465]
[0.0, 0.0, 0.0, -6.2831854820251465]
[0.0, 0.0, 0.0, 1.5707963705062866]
[0.0, 0.0, 0.0, -1.5707963705062866]
[0.0, 0.0, 0.0, 1.0471975803375244]
[0.0, 0.0, 0.0, -1.0471975803375244]
[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]

File diff suppressed because it is too large.

View File

@@ -0,0 +1,43 @@
Random Seed: 4529
Time Span: 0 to 10, Points: 1000
Learning Rate: 0.1
Weight Decay: 0.0001

Loss Function:
def loss_fn(state_traj, t_span):
    theta = state_traj[:, :, 0]  # Size: [batch_size, t_points]
    desired_theta = state_traj[:, :, 3]  # Size: [batch_size, t_points]
    weights = weight_fn(t_span)  # Initially Size: [t_points]

    # Reshape or expand weights to match theta dimensions
    weights = weights.view(-1, 1)  # Now Size: [batch_size, t_points]

    # Calculate the weighted loss
    return torch.mean(weights * (theta - desired_theta) ** 2)

Weight Description: Linear weight: Weights increase linearly, normalized by max
Training Cases:
[theta0, omega0, alpha0, desired_theta]
[0.5235987901687622, 0.0, 0.0, 0.0]
[-0.5235987901687622, 0.0, 0.0, 0.0]
[2.094395160675049, 0.0, 0.0, 0.0]
[-2.094395160675049, 0.0, 0.0, 0.0]
[0.0, 1.0471975803375244, 0.0, 0.0]
[0.0, -1.0471975803375244, 0.0, 0.0]
[0.0, 6.2831854820251465, 0.0, 0.0]
[0.0, -6.2831854820251465, 0.0, 0.0]
[0.0, 0.0, 0.0, 6.2831854820251465]
[0.0, 0.0, 0.0, -6.2831854820251465]
[0.0, 0.0, 0.0, 1.5707963705062866]
[0.0, 0.0, 0.0, -1.5707963705062866]
[0.0, 0.0, 0.0, 1.0471975803375244]
[0.0, 0.0, 0.0, -1.0471975803375244]
[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]

File diff suppressed because it is too large.

View File

@@ -0,0 +1,43 @@
Random Seed: 4529
Time Span: 0 to 10, Points: 1000
Learning Rate: 0.1
Weight Decay: 0.0001

Loss Function:
def loss_fn(state_traj, t_span):
    theta = state_traj[:, :, 0]  # Size: [batch_size, t_points]
    desired_theta = state_traj[:, :, 3]  # Size: [batch_size, t_points]
    weights = weight_fn(t_span)  # Initially Size: [t_points]

    # Reshape or expand weights to match theta dimensions
    weights = weights.view(-1, 1)  # Now Size: [batch_size, t_points]

    # Calculate the weighted loss
    return torch.mean(weights * (theta - desired_theta) ** 2)

Weight Description: Quadratic weight: Weights increase quadratically, normalized by max
Training Cases:
[theta0, omega0, alpha0, desired_theta]
[0.5235987901687622, 0.0, 0.0, 0.0]
[-0.5235987901687622, 0.0, 0.0, 0.0]
[2.094395160675049, 0.0, 0.0, 0.0]
[-2.094395160675049, 0.0, 0.0, 0.0]
[0.0, 1.0471975803375244, 0.0, 0.0]
[0.0, -1.0471975803375244, 0.0, 0.0]
[0.0, 6.2831854820251465, 0.0, 0.0]
[0.0, -6.2831854820251465, 0.0, 0.0]
[0.0, 0.0, 0.0, 6.2831854820251465]
[0.0, 0.0, 0.0, -6.2831854820251465]
[0.0, 0.0, 0.0, 1.5707963705062866]
[0.0, 0.0, 0.0, -1.5707963705062866]
[0.0, 0.0, 0.0, 1.0471975803375244]
[0.0, 0.0, 0.0, -1.0471975803375244]
[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]

File diff suppressed because it is too large.

View File

@@ -106,6 +106,7 @@ weight_functions = {
 for name, weight_info in weight_functions.items():
     controller = PendulumController().to(device)
     pendulum_dynamics = PendulumDynamics(controller, m, R, g).to(device)
     optimizer = optim.Adam(controller.parameters(), lr=learning_rate, weight_decay=weight_decay)
     loss_fn = make_loss_fn(weight_info['function'])
@@ -129,6 +130,7 @@ for name, weight_info in weight_functions.items():
         f.write(f"Weight Decay: {weight_decay}\n")
         f.write("\nLoss Function:\n")
         f.write(inspect.getsource(loss_fn))
+        f.write(f"\nWeight Description: {weight_info['description']}\n")
         f.write("\nTraining Cases:\n")
         f.write("[theta0, omega0, alpha0, desired_theta]\n")
         for case in state_0.cpu().numpy():
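Both training scripts obtain their loss via loss_fn = make_loss_fn(weight_info['function']), and the setup files above dump the resulting closure. make_loss_fn is therefore presumably a small factory along these lines (a sketch reconstructed from the dumped source, not the repository's file):

import torch

def make_loss_fn(weight_fn):
    # Build a time-weighted squared-error loss on theta around weight_fn.
    def loss_fn(state_traj, t_span):
        theta = state_traj[:, :, 0]          # theta component of each state
        desired_theta = state_traj[:, :, 3]  # target theta carried in the state
        weights = weight_fn(t_span)          # one weight per time point
        # Column vector [t_points, 1]; broadcasts over the batch axis
        # if the trajectory is laid out time-first (torchdiffeq convention).
        weights = weights.view(-1, 1)
        return torch.mean(weights * (theta - desired_theta) ** 2)
    return loss_fn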

File diff suppressed because it is too large.