Redo training files to save models after every epoch
@@ -1,155 +0,0 @@
import torch
import torch.nn as nn
import torch.optim as optim
from torchdiffeq import odeint


# Define the neural network controller
class PendulumController(nn.Module):
    def __init__(self):
        super(PendulumController, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(4, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 1)
        )

    def forward(self, x):
        return self.fc(x)


# Define the pendulum dynamics
class PendulumDynamics(nn.Module):
    def __init__(self, m=10, g=9.81, R=1.0):
        super(PendulumDynamics, self).__init__()
        self.m = m
        self.g = g
        self.R = R
        self.torque_fn = None  # Set later, before calling odeint

    def set_torque_fn(self, torque_fn):
        """Set the neural-network-based torque function."""
        self.torque_fn = torque_fn

    def forward(self, t, state):
        theta, omega = state[:, :, 0], state[:, :, 1]  # Extract theta and omega

        # Ensure the torque is correctly shaped
        torque = self.torque_fn(state)  # Neural network predicts the torque
        torque = torch.clamp(torque.squeeze(-1), -250, 250)  # Limit the torque

        dtheta_dt = omega
        domega_dt = (self.g / self.R) * torch.sin(theta) + torque / (self.m * self.R**2)

        return torch.stack([dtheta_dt, domega_dt], dim=2)


# Loss function with angle wrapping
def loss_fn(state, target_theta, torques):
    theta = state[:, :, 0]  # Extract the theta trajectory
    omega = state[:, :, 1]  # Extract the omega trajectory

    # Wrap theta to lie within [-pi, pi]
    theta_wrapped = ((theta + torch.pi) % (2 * torch.pi)) - torch.pi

    alpha = 10.0   # Heavier weight for theta
    beta = 0.1     # Lighter weight for omega
    gamma = 0.01   # Regularization weight for motor torque
    delta = 100.0  # Large penalty for exceeding the torque limit

    # Sum of squared differences for the wrapped theta
    loss_theta = alpha * torch.sum((theta_wrapped - target_theta) ** 2)

    # Penalty for omega (a mean, to avoid scaling issues)
    loss_omega = beta * torch.mean(omega ** 2)

    # Penalty for excessive torque usage (sum-based)
    loss_torque = gamma * torch.sum(torques ** 2)

    # Penalty for torque exceeding 250
    over_limit_penalty = delta * torch.sum((torques.abs() > 250) * (torques.abs() - 250) ** 2)

    # Combine all losses
    loss = loss_theta + loss_omega + loss_torque + over_limit_penalty
    return loss


# Define a batch of initial conditions
initial_conditions = [
    (0.1, 0.0),   # Small angle, zero velocity
    (0.5, 0.0),   # Medium angle, zero velocity
    (1.0, 0.0),   # Large angle, zero velocity
    (1.57, 0.5),
    (0, -6.28),
]

# Convert the initial conditions to tensors
batch_size = len(initial_conditions)
theta_0 = torch.tensor([[ic[0]] for ic in initial_conditions], dtype=torch.float32)  # Shape: (batch_size, 1)
omega_0 = torch.tensor([[ic[1]] for ic in initial_conditions], dtype=torch.float32)  # Shape: (batch_size, 1)
state_0 = torch.cat([theta_0, omega_0], dim=1)  # Shape: (batch_size, 2)

# Simulation parameters
T_initial = torch.zeros((batch_size, 1), dtype=torch.float32)  # Shape: (batch_size, 1)
t_span = torch.linspace(0, 10, 200)  # Simulate for 10 seconds
target_theta = torch.zeros((batch_size, 1), dtype=torch.float32)  # Upright position

# Define the controller and optimizer
controller = PendulumController()
optimizer = optim.Adam(controller.parameters(), lr=0.01)
pendulum = PendulumDynamics()

# Training loop
num_epochs = 10_000
losses = []

for epoch in range(num_epochs):
    optimizer.zero_grad()

    # Define the torque function based on the neural network
    def torque_fn(state):
        # Ensure theta and omega have shape (batch_size, time_steps, 1)
        theta = state[:, :, 0].unsqueeze(-1)
        omega = state[:, :, 1].unsqueeze(-1)

        # Expand T_initial to match (batch_size, time_steps, 1)
        T_initial_expanded = T_initial.unsqueeze(1).expand(-1, theta.shape[1], -1)

        # Compute theta_ddot with the correct shape
        theta_ddot = (pendulum.g / pendulum.R) * torch.sin(theta) + T_initial_expanded / (pendulum.m * pendulum.R**2)

        # Concatenate the controller inputs along the feature dimension
        inputs = torch.cat([theta, omega, theta_ddot, T_initial_expanded], dim=2)  # Shape: (batch_size, time_steps, 4)

        # Pass through the controller (neural network) and apply the torque limit
        torque = controller(inputs)  # Predicted torque
        torque = torch.clamp(torque, -250, 250)  # Limit the torque

        return torque

    # Set the torque function in the pendulum class
    pendulum.set_torque_fn(torque_fn)

    # Solve the forward dynamics for the entire batch at once
    state_traj = odeint(pendulum, state_0.unsqueeze(1).expand(-1, t_span.shape[0], -1), t_span, method='rk4')

    # Compute the torques
    torques = torque_fn(state_traj)  # Shape: (batch_size, time_steps, 1)

    # Compute the loss over all initial conditions
    loss = loss_fn(state_traj, target_theta, torques)

    # Backpropagation and optimization
    loss.backward()
    optimizer.step()
    losses.append(loss.item())

    # Print the loss every 50 epochs
    if epoch % 50 == 0:
        print(f"Epoch {epoch}/{num_epochs}, Loss: {loss.item()}")

# Save the trained model (once, only after training finishes)
torch.save(controller.state_dict(), "controller_batch_training.pth")
print("Trained model saved as 'controller_batch_training.pth'.")
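This deleted script checkpoints only once, after the final epoch, so an interrupted run loses everything. The replacement trainers below instead write a controller_{epoch}.pth on every epoch. A minimal sketch of that pattern (the directory name is illustrative, not taken from this PR):

```python
import os
import torch

def save_checkpoint(model, epoch, out_dir="training/checkpoints"):
    """Write one state_dict per epoch so any intermediate controller can be recovered."""
    os.makedirs(out_dir, exist_ok=True)
    path = os.path.join(out_dir, f"controller_{epoch}.pth")
    torch.save(model.state_dict(), path)
    return path
```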
nohup.out (deleted)
@@ -1,369 +0,0 @@
Random seed for torch and numpy: 2300
Epoch 0/10000 | Loss: 1087012.250000
Model saved as 'controller_with_desired_theta.pth'.
Epoch 25/10000 | Loss: 6898.118652
Model saved as 'controller_with_desired_theta.pth'.
Epoch 50/10000 | Loss: 4412.367676
Model saved as 'controller_with_desired_theta.pth'.
Epoch 75/10000 | Loss: 3298.566406
Model saved as 'controller_with_desired_theta.pth'.
Epoch 100/10000 | Loss: 3002.045166
Model saved as 'controller_with_desired_theta.pth'.
Epoch 125/10000 | Loss: 2829.683350
Model saved as 'controller_with_desired_theta.pth'.
Epoch 150/10000 | Loss: 2759.196289
Model saved as 'controller_with_desired_theta.pth'.
Epoch 175/10000 | Loss: 2696.267090
Model saved as 'controller_with_desired_theta.pth'.
Epoch 200/10000 | Loss: 2641.802246
Model saved as 'controller_with_desired_theta.pth'.
Epoch 225/10000 | Loss: 2627.766113
Model saved as 'controller_with_desired_theta.pth'.
Epoch 250/10000 | Loss: 2616.508789
Model saved as 'controller_with_desired_theta.pth'.
Epoch 275/10000 | Loss: 2606.689697
Model saved as 'controller_with_desired_theta.pth'.
Epoch 300/10000 | Loss: 2598.056641
Model saved as 'controller_with_desired_theta.pth'.
Epoch 325/10000 | Loss: 2590.355225
Model saved as 'controller_with_desired_theta.pth'.
Epoch 350/10000 | Loss: 2583.272217
Model saved as 'controller_with_desired_theta.pth'.
Epoch 375/10000 | Loss: 2576.819092
Model saved as 'controller_with_desired_theta.pth'.
Epoch 400/10000 | Loss: 2570.974365
Model saved as 'controller_with_desired_theta.pth'.
Epoch 425/10000 | Loss: 2565.622070
Model saved as 'controller_with_desired_theta.pth'.
Epoch 450/10000 | Loss: 2560.492676
Model saved as 'controller_with_desired_theta.pth'.
Epoch 475/10000 | Loss: 2555.507080
Model saved as 'controller_with_desired_theta.pth'.
Epoch 500/10000 | Loss: 2550.791992
Model saved as 'controller_with_desired_theta.pth'.
Epoch 525/10000 | Loss: 2546.307373
Model saved as 'controller_with_desired_theta.pth'.
Epoch 550/10000 | Loss: 2542.235352
Model saved as 'controller_with_desired_theta.pth'.
Epoch 575/10000 | Loss: 2538.663086
Model saved as 'controller_with_desired_theta.pth'.
Epoch 600/10000 | Loss: 2535.666260
Model saved as 'controller_with_desired_theta.pth'.
Epoch 625/10000 | Loss: 2533.218262
Model saved as 'controller_with_desired_theta.pth'.
Epoch 650/10000 | Loss: 2531.062744
Model saved as 'controller_with_desired_theta.pth'.
Epoch 675/10000 | Loss: 2529.083252
Model saved as 'controller_with_desired_theta.pth'.
Epoch 700/10000 | Loss: 2527.166504
Model saved as 'controller_with_desired_theta.pth'.
Epoch 725/10000 | Loss: 2525.322754
Model saved as 'controller_with_desired_theta.pth'.
Epoch 750/10000 | Loss: 2523.717285
Model saved as 'controller_with_desired_theta.pth'.
Epoch 775/10000 | Loss: 2522.445312
Model saved as 'controller_with_desired_theta.pth'.
Epoch 800/10000 | Loss: 2521.282715
Model saved as 'controller_with_desired_theta.pth'.
Epoch 825/10000 | Loss: 2520.170166
Model saved as 'controller_with_desired_theta.pth'.
Epoch 850/10000 | Loss: 2519.124756
Model saved as 'controller_with_desired_theta.pth'.
Epoch 875/10000 | Loss: 2518.129395
Model saved as 'controller_with_desired_theta.pth'.
Epoch 900/10000 | Loss: 2517.175781
Model saved as 'controller_with_desired_theta.pth'.
Epoch 925/10000 | Loss: 2516.259277
Model saved as 'controller_with_desired_theta.pth'.
Epoch 950/10000 | Loss: 2515.370117
Model saved as 'controller_with_desired_theta.pth'.
Epoch 975/10000 | Loss: 2514.497314
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1000/10000 | Loss: 2513.662109
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1025/10000 | Loss: 2512.872559
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1050/10000 | Loss: 2512.114258
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1075/10000 | Loss: 2511.387451
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1100/10000 | Loss: 2510.668945
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1125/10000 | Loss: 2509.930176
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1150/10000 | Loss: 2509.170654
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1175/10000 | Loss: 2508.456055
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1200/10000 | Loss: 2507.726318
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1225/10000 | Loss: 2507.019043
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1250/10000 | Loss: 2506.293457
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1275/10000 | Loss: 2505.667725
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1300/10000 | Loss: 2504.782471
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1325/10000 | Loss: 2504.036133
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1350/10000 | Loss: 2503.286377
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1375/10000 | Loss: 2502.594482
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1400/10000 | Loss: 2501.831055
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1425/10000 | Loss: 2501.141846
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1450/10000 | Loss: 2500.542969
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1475/10000 | Loss: 2499.948242
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1500/10000 | Loss: 2499.469482
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1525/10000 | Loss: 2499.087891
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1550/10000 | Loss: 2498.799805
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1575/10000 | Loss: 2498.394531
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1600/10000 | Loss: 2498.098633
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1625/10000 | Loss: 2497.802246
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1650/10000 | Loss: 2497.553467
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1675/10000 | Loss: 2497.372559
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1700/10000 | Loss: 2497.127930
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1725/10000 | Loss: 2496.876465
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1750/10000 | Loss: 2496.729980
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1775/10000 | Loss: 2496.487793
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1800/10000 | Loss: 2496.275635
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1825/10000 | Loss: 2496.156006
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1850/10000 | Loss: 2495.860840
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1875/10000 | Loss: 2495.555664
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1900/10000 | Loss: 2495.285156
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1925/10000 | Loss: 2495.070801
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1950/10000 | Loss: 2494.809326
Model saved as 'controller_with_desired_theta.pth'.
Epoch 1975/10000 | Loss: 2494.603516
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2000/10000 | Loss: 2494.521484
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2025/10000 | Loss: 2494.258789
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2050/10000 | Loss: 2494.076904
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2075/10000 | Loss: 2493.911377
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2100/10000 | Loss: 2493.803467
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2125/10000 | Loss: 2493.607422
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2150/10000 | Loss: 2493.448242
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2175/10000 | Loss: 2493.372559
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2200/10000 | Loss: 2493.167480
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2225/10000 | Loss: 2493.021729
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2250/10000 | Loss: 2492.921631
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2275/10000 | Loss: 2492.787354
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2300/10000 | Loss: 2492.633545
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2325/10000 | Loss: 2492.525146
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2350/10000 | Loss: 2492.379883
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2375/10000 | Loss: 2492.259521
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2400/10000 | Loss: 2492.202637
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2425/10000 | Loss: 2492.150146
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2450/10000 | Loss: 2491.947266
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2475/10000 | Loss: 2491.830322
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2500/10000 | Loss: 2491.792725
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2525/10000 | Loss: 2491.766846
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2550/10000 | Loss: 2491.499023
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2575/10000 | Loss: 2491.353271
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2600/10000 | Loss: 2491.218262
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2625/10000 | Loss: 2491.115967
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2650/10000 | Loss: 2491.019043
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2675/10000 | Loss: 2490.816650
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2700/10000 | Loss: 2490.701416
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2725/10000 | Loss: 2490.608643
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2750/10000 | Loss: 2490.554688
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2775/10000 | Loss: 2490.380615
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2800/10000 | Loss: 2490.269287
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2825/10000 | Loss: 2490.158691
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2850/10000 | Loss: 2490.026367
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2875/10000 | Loss: 2489.925049
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2900/10000 | Loss: 2489.862061
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2925/10000 | Loss: 2489.746582
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2950/10000 | Loss: 2489.623291
Model saved as 'controller_with_desired_theta.pth'.
Epoch 2975/10000 | Loss: 2489.500244
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3000/10000 | Loss: 2489.391357
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3025/10000 | Loss: 2489.324219
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3050/10000 | Loss: 2489.150391
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3075/10000 | Loss: 2489.061279
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3100/10000 | Loss: 2489.039062
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3125/10000 | Loss: 2488.841064
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3150/10000 | Loss: 2488.878906
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3175/10000 | Loss: 2488.596191
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3200/10000 | Loss: 2488.661133
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3225/10000 | Loss: 2488.471924
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3250/10000 | Loss: 2488.281738
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3275/10000 | Loss: 2488.207764
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3300/10000 | Loss: 2488.135254
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3325/10000 | Loss: 2488.150879
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3350/10000 | Loss: 2487.887207
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3375/10000 | Loss: 2487.853271
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3400/10000 | Loss: 2487.732178
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3425/10000 | Loss: 2487.609863
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3450/10000 | Loss: 2487.441406
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3475/10000 | Loss: 2487.407715
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3500/10000 | Loss: 2487.238770
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3525/10000 | Loss: 2487.141113
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3550/10000 | Loss: 2487.045166
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3575/10000 | Loss: 2486.975342
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3600/10000 | Loss: 2486.823486
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3625/10000 | Loss: 2486.737549
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3650/10000 | Loss: 2486.565430
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3675/10000 | Loss: 2486.515625
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3700/10000 | Loss: 2486.400879
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3725/10000 | Loss: 2486.353516
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3750/10000 | Loss: 2486.283203
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3775/10000 | Loss: 2486.348145
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3800/10000 | Loss: 2486.106689
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3825/10000 | Loss: 2485.986816
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3850/10000 | Loss: 2485.947266
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3875/10000 | Loss: 2485.885010
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3900/10000 | Loss: 2485.864746
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3925/10000 | Loss: 2485.722168
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3950/10000 | Loss: 2485.631348
Model saved as 'controller_with_desired_theta.pth'.
Epoch 3975/10000 | Loss: 2485.570801
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4000/10000 | Loss: 2485.480469
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4025/10000 | Loss: 2485.408203
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4050/10000 | Loss: 2485.328857
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4075/10000 | Loss: 2485.380859
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4100/10000 | Loss: 2485.173340
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4125/10000 | Loss: 2485.126953
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4150/10000 | Loss: 2485.287842
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4175/10000 | Loss: 2485.052734
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4200/10000 | Loss: 2485.130127
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4225/10000 | Loss: 2484.903809
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4250/10000 | Loss: 2484.796631
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4275/10000 | Loss: 2484.809570
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4300/10000 | Loss: 2484.670166
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4325/10000 | Loss: 2484.635986
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4350/10000 | Loss: 2484.849365
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4375/10000 | Loss: 2484.489746
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4400/10000 | Loss: 2484.434326
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4425/10000 | Loss: 2484.559326
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4450/10000 | Loss: 2484.332764
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4475/10000 | Loss: 2484.375244
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4500/10000 | Loss: 2484.210693
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4525/10000 | Loss: 2484.157227
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4550/10000 | Loss: 2484.145264
Model saved as 'controller_with_desired_theta.pth'.
Epoch 4575/10000 | Loss: 2484.091309
Model saved as 'controller_with_desired_theta.pth'.
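The deleted nohup.out is the captured training log from trainer.py below; the loss series it holds (1.09e6 at epoch 0 down to about 2484 by epoch 4575, where the run stopped) is still useful for plotting. A small sketch for recovering that curve, assuming the "Epoch N/10000 | Loss: X" format shown above (parse_losses is a hypothetical helper, not part of this PR):

```python
import re

def parse_losses(log_path="nohup.out"):
    """Extract (epoch, loss) pairs from a log in the format shown above."""
    pattern = re.compile(r"Epoch (\d+)/\d+ \| Loss: ([\d.]+)")
    epochs, losses = [], []
    with open(log_path) as f:
        for line in f:
            match = pattern.search(line)
            if match:
                epochs.append(int(match.group(1)))
                losses.append(float(match.group(2)))
    return epochs, losses
```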
trainer.py (deleted)
@@ -1,136 +0,0 @@
import torch
import torch.nn as nn
import torch.optim as optim
from torchdiffeq import odeint
import numpy as np
import random

# Generate a random seed
random_seed = random.randint(0, 10000)

# Set the seeds for reproducibility
torch.manual_seed(random_seed)
np.random.seed(random_seed)

# Print the chosen random seed
print(f"Random seed for torch and numpy: {random_seed}")


class PendulumController(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(4, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 1)
        )

    def forward(self, x):
        raw_torque = self.net(x)
        return torch.clamp(raw_torque, -250, 250)  # Clamp the torque


# Constants
m = 10.0
g = 9.81
R = 1.0


class PendulumDynamics(nn.Module):
    def __init__(self, controller):
        super().__init__()
        self.controller = controller

    def forward(self, t, state):
        theta = state[:, 0]
        omega = state[:, 1]
        alpha = state[:, 2]
        desired_theta = state[:, 3]  # Extract the desired theta

        # Pass desired_theta as an input to the controller
        input = torch.stack([theta, omega, alpha, desired_theta], dim=1)
        tau = self.controller(input).squeeze(-1)

        alpha_desired = (g / R) * torch.sin(theta) + tau / (m * R**2)

        dtheta = omega
        domega = alpha
        dalpha = alpha_desired - alpha

        return torch.stack([dtheta, domega, dalpha, torch.zeros_like(desired_theta)], dim=1)


def loss_fn(state_traj):
    theta = state_traj[:, :, 0]  # Extract theta
    desired_theta = state_traj[:, :, 3]  # Extract the desired theta

    loss_theta = 1e3 * torch.mean((theta - desired_theta)**2)
    return loss_theta


# Device setup
device = torch.device("cpu")

# Initial conditions (theta0, omega0, alpha0, desired_theta)
state_0 = torch.tensor([
    # Theta perturbations
    [1/6 * torch.pi, 0.0, 0.0, 0.0],
    [-1/6 * torch.pi, 0.0, 0.0, 0.0],
    [2/3 * torch.pi, 0.0, 0.0, 0.0],
    [-2/3 * torch.pi, 0.0, 0.0, 0.0],

    # Omega perturbations
    [0.0, 1/3 * torch.pi, 0.0, 0.0],
    [0.0, -1/3 * torch.pi, 0.0, 0.0],
    [0.0, 2 * torch.pi, 0.0, 0.0],
    [0.0, -2 * torch.pi, 0.0, 0.0],

    # Return to a non-zero theta
    [0.0, 0.0, 0.0, 2 * torch.pi],
    [0.0, 0.0, 0.0, -2 * torch.pi],
    [0.0, 0.0, 0.0, 1/2 * torch.pi],
    [0.0, 0.0, 0.0, -1/2 * torch.pi],
    [0.0, 0.0, 0.0, 1/3 * torch.pi],
    [0.0, 0.0, 0.0, -1/3 * torch.pi],

    # Mixed cases
    [1/4 * torch.pi, 1 * torch.pi, 0.0, 0.0],
    [-1/4 * torch.pi, -1 * torch.pi, 0.0, 0.0],
    [1/2 * torch.pi, -1 * torch.pi, 0.0, 1/3 * torch.pi],
    [-1/2 * torch.pi, 1 * torch.pi, 0.0, -1/3 * torch.pi],
    [1/4 * torch.pi, 1 * torch.pi, 0.0, 2 * torch.pi],
    [-1/4 * torch.pi, -1 * torch.pi, 0.0, 2 * torch.pi],
    [1/2 * torch.pi, -1 * torch.pi, 0.0, 4 * torch.pi],
    [-1/2 * torch.pi, 1 * torch.pi, 0.0, -4 * torch.pi],
], dtype=torch.float32, device=device)

# Time grid
t_span = torch.linspace(0, 10, 1000, device=device)

# Initialize the controller and dynamics
controller = PendulumController().to(device)
pendulum_dynamics = PendulumDynamics(controller).to(device)

# Optimizer
optimizer = optim.Adam(controller.parameters(), lr=1e-1, weight_decay=0)

# Training parameters
num_epochs = 10_000
print_every = 25

for epoch in range(num_epochs):
    optimizer.zero_grad()

    state_traj = odeint(pendulum_dynamics, state_0, t_span, method='rk4')
    loss = loss_fn(state_traj)

    if torch.isnan(loss):
        print(f"NaN detected at epoch {epoch}. Skipping step.")
        optimizer.zero_grad()
        continue

    loss.backward()
    optimizer.step()

    if epoch % print_every == 0:
        print(f"Epoch {epoch}/{num_epochs} | Loss: {loss.item():.6f}")
        torch.save(controller.state_dict(), "controller_with_desired_theta.pth")
        print("Model saved as 'controller_with_desired_theta.pth'.")
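Written out, the system that trainer.py (and each of the new trainers below) hands to odeint is, as read directly from PendulumDynamics.forward, a pendulum whose angular acceleration alpha tracks the commanded value through a first-order lag, with the desired angle carried along as a constant extra state:

```latex
\dot{\theta} = \omega, \qquad
\dot{\omega} = \alpha, \qquad
\dot{\alpha} = \alpha_{\mathrm{des}} - \alpha, \qquad
\dot{\theta}_{\mathrm{des}} = 0,
\quad\text{where}\quad
\alpha_{\mathrm{des}} = \frac{g}{R}\sin\theta + \frac{\tau}{mR^{2}},\qquad
\tau = \operatorname{clamp}\!\big(\mathrm{NN}(\theta,\omega,\alpha,\theta_{\mathrm{des}}),\,-250,\,250\big).
```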
trainer_cubic_time_weights.py (new file)
@@ -0,0 +1,173 @@
import torch
import torch.nn as nn
import torch.optim as optim
from torchdiffeq import odeint
import numpy as np
import inspect
import time
import csv
import os

# Specify the directory for storing results
output_dir = "training/cubic_time_weight"
controller_output_dir = output_dir + "/controllers"
os.makedirs(output_dir, exist_ok=True)  # Create the directory if it doesn't exist
os.makedirs(controller_output_dir, exist_ok=True)  # Create the directory if it doesn't exist

# Use a previously generated random seed
random_seed = 4529

# Set the seeds for reproducibility
torch.manual_seed(random_seed)
np.random.seed(random_seed)

# Print the chosen random seed
print(f"Random seed for torch and numpy: {random_seed}")

# Constants
m = 10.0
g = 9.81
R = 1.0

# Device setup
device = torch.device("cpu")

# Time grid
t_start, t_end, t_points = 0, 10, 1000
t_span = torch.linspace(t_start, t_end, t_points, device=device)

# Initial conditions (theta0, omega0, alpha0, desired_theta)
state_0 = torch.tensor([
    [1/6 * torch.pi, 0.0, 0.0, 0.0],
    [-1/6 * torch.pi, 0.0, 0.0, 0.0],
    [2/3 * torch.pi, 0.0, 0.0, 0.0],
    [-2/3 * torch.pi, 0.0, 0.0, 0.0],
    [0.0, 1/3 * torch.pi, 0.0, 0.0],
    [0.0, -1/3 * torch.pi, 0.0, 0.0],
    [0.0, 2 * torch.pi, 0.0, 0.0],
    [0.0, -2 * torch.pi, 0.0, 0.0],
    [0.0, 0.0, 0.0, 2 * torch.pi],
    [0.0, 0.0, 0.0, -2 * torch.pi],
    [0.0, 0.0, 0.0, 1/2 * torch.pi],
    [0.0, 0.0, 0.0, -1/2 * torch.pi],
    [0.0, 0.0, 0.0, 1/3 * torch.pi],
    [0.0, 0.0, 0.0, -1/3 * torch.pi],
    [1/4 * torch.pi, 1 * torch.pi, 0.0, 0.0],
    [-1/4 * torch.pi, -1 * torch.pi, 0.0, 0.0],
    [1/2 * torch.pi, -1 * torch.pi, 0.0, 1/3 * torch.pi],
    [-1/2 * torch.pi, 1 * torch.pi, 0.0, -1/3 * torch.pi],
    [1/4 * torch.pi, 1 * torch.pi, 0.0, 2 * torch.pi],
    [-1/4 * torch.pi, -1 * torch.pi, 0.0, 2 * torch.pi],
    [1/2 * torch.pi, -1 * torch.pi, 0.0, 4 * torch.pi],
    [-1/2 * torch.pi, 1 * torch.pi, 0.0, -4 * torch.pi],
], dtype=torch.float32, device=device)


class PendulumController(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(4, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 1)
        )

    def forward(self, x):
        raw_torque = self.net(x)
        return torch.clamp(raw_torque, -250, 250)


class PendulumDynamics(nn.Module):
    def __init__(self, controller):
        super().__init__()
        self.controller = controller

    def forward(self, t, state):
        theta, omega, alpha, desired_theta = state[:, 0], state[:, 1], state[:, 2], state[:, 3]
        input = torch.stack([theta, omega, alpha, desired_theta], dim=1)
        tau = self.controller(input).squeeze(-1)
        alpha_desired = (g / R) * torch.sin(theta) + tau / (m * R**2)
        return torch.stack([omega, alpha, alpha_desired - alpha, torch.zeros_like(desired_theta)], dim=1)


def loss_fn(state_traj, t_span):
    theta = state_traj[:, :, 0]
    desired_theta = state_traj[:, :, 3]

    # Make the time weights broadcastable over theta: shape (len(t_span), 1)
    time_weights = (t_span ** 3).view(-1, 1)

    return 1e3 * torch.mean(time_weights * (theta - desired_theta) ** 2)


# Initialize the controller and dynamics
controller = PendulumController().to(device)
pendulum_dynamics = PendulumDynamics(controller).to(device)

# Optimizer setup
learning_rate = 1e-1
weight_decay = 1e-4
optimizer = optim.Adam(controller.parameters(), lr=learning_rate, weight_decay=weight_decay)

# Training parameters
num_epochs = 5_001  # Run epochs 0 through 5000 inclusive
save_every = 1  # How often (in epochs) to save a controller .pth

# File paths
config_file = os.path.join(output_dir, "training_config.txt")
log_file = os.path.join(output_dir, "training_log.csv")
model_file = ""  # Placeholder for the model file, updated in the training loop

# Save configuration details
with open(config_file, "w") as f:
    f.write(f"Random Seed: {random_seed}\n")
    f.write(f"Time Span: {t_start} to {t_end}, Points: {t_points}\n")
    f.write(f"Learning Rate: {learning_rate}\n")
    f.write(f"Weight Decay: {weight_decay}\n")
    f.write("\nLoss Function:\n")
    f.write(inspect.getsource(loss_fn))  # Extract and write the loss function's source code
    f.write("\nTraining Cases:\n")
    f.write("[theta0, omega0, alpha0, desired_theta]\n")
    for case in state_0.cpu().numpy():
        f.write(f"{case.tolist()}\n")

# Overwrite the log file at the start
with open(log_file, "w", newline="") as csvfile:
    csv_writer = csv.writer(csvfile)
    csv_writer.writerow(["Epoch", "Loss", "Elapsed Time (s)"])

# Training loop with real-time logging and NaN tracking
start_time = time.time()
with open(log_file, "a", newline="") as csvfile:
    csv_writer = csv.writer(csvfile)

    for epoch in range(num_epochs):
        epoch_start_time = time.time()

        optimizer.zero_grad()
        state_traj = odeint(pendulum_dynamics, state_0, t_span, method='rk4')
        loss = loss_fn(state_traj, t_span)

        elapsed_time = time.time() - epoch_start_time

        if torch.isnan(loss):
            print(f"NaN detected at epoch {epoch}. Skipping step.")
            csv_writer.writerow([epoch, "NaN detected", elapsed_time])
            csvfile.flush()  # Ensure real-time writing
            optimizer.zero_grad()
            continue

        loss.backward()
        optimizer.step()

        # Log the loss
        csv_writer.writerow([epoch, loss.item(), elapsed_time])
        csvfile.flush()  # Ensure real-time writing

        if epoch % save_every == 0:
            print(f"Epoch {epoch}/{num_epochs} | Loss: {loss.item():.6f} | Time: {elapsed_time:.4f} sec")
            model_file = os.path.join(controller_output_dir, f"controller_{epoch}.pth")
            torch.save(controller.state_dict(), model_file)

# Final save
torch.save(controller.state_dict(), model_file)
print(f"Cubic time weight training complete. Model and files saved in directory '{output_dir}'. Model saved as '{model_file}'. Logs saved in '{log_file}' and configuration in '{config_file}'.")
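The only substantive difference between the four new trainers is the time-weight profile in loss_fn. For this cubic variant the objective is

```latex
\mathcal{L}_{\mathrm{cubic}} \;=\; 10^{3}\,\operatorname{mean}_{i,k}\!\left[\,t_i^{3}\,\big(\theta_{i,k}-\theta^{\mathrm{des}}_{k}\big)^{2}\right],
\qquad \frac{w(10)}{w(1)} = \frac{10^{3}}{1^{3}} = 1000,
```

where i indexes the 1000 time points and k the 22 training cases. Over the 10 s horizon an angle error in the last second therefore costs roughly a thousand times more than the same error around t = 1 s, and error at t = 0 is free: the controller is pushed to settle at the desired angle rather than to avoid an initial transient.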
trainer_exponential_time_weights.py (new file)
@@ -0,0 +1,174 @@
import torch
import torch.nn as nn
import torch.optim as optim
from torchdiffeq import odeint
import numpy as np
import inspect
import time
import csv
import os

# Specify the directory for storing results
output_dir = "training/exponential_time_weight"
controller_output_dir = output_dir + "/controllers"
os.makedirs(output_dir, exist_ok=True)  # Create the directory if it doesn't exist
os.makedirs(controller_output_dir, exist_ok=True)  # Create the directory if it doesn't exist

# Use a previously generated random seed
random_seed = 4529

# Set the seeds for reproducibility
torch.manual_seed(random_seed)
np.random.seed(random_seed)

# Print the chosen random seed
print(f"Random seed for torch and numpy: {random_seed}")

# Constants
m = 10.0
g = 9.81
R = 1.0

# Device setup
device = torch.device("cpu")

# Time grid
t_start, t_end, t_points = 0, 10, 1000
t_span = torch.linspace(t_start, t_end, t_points, device=device)

# Initial conditions (theta0, omega0, alpha0, desired_theta)
state_0 = torch.tensor([
    [1/6 * torch.pi, 0.0, 0.0, 0.0],
    [-1/6 * torch.pi, 0.0, 0.0, 0.0],
    [2/3 * torch.pi, 0.0, 0.0, 0.0],
    [-2/3 * torch.pi, 0.0, 0.0, 0.0],
    [0.0, 1/3 * torch.pi, 0.0, 0.0],
    [0.0, -1/3 * torch.pi, 0.0, 0.0],
    [0.0, 2 * torch.pi, 0.0, 0.0],
    [0.0, -2 * torch.pi, 0.0, 0.0],
    [0.0, 0.0, 0.0, 2 * torch.pi],
    [0.0, 0.0, 0.0, -2 * torch.pi],
    [0.0, 0.0, 0.0, 1/2 * torch.pi],
    [0.0, 0.0, 0.0, -1/2 * torch.pi],
    [0.0, 0.0, 0.0, 1/3 * torch.pi],
    [0.0, 0.0, 0.0, -1/3 * torch.pi],
    [1/4 * torch.pi, 1 * torch.pi, 0.0, 0.0],
    [-1/4 * torch.pi, -1 * torch.pi, 0.0, 0.0],
    [1/2 * torch.pi, -1 * torch.pi, 0.0, 1/3 * torch.pi],
    [-1/2 * torch.pi, 1 * torch.pi, 0.0, -1/3 * torch.pi],
    [1/4 * torch.pi, 1 * torch.pi, 0.0, 2 * torch.pi],
    [-1/4 * torch.pi, -1 * torch.pi, 0.0, 2 * torch.pi],
    [1/2 * torch.pi, -1 * torch.pi, 0.0, 4 * torch.pi],
    [-1/2 * torch.pi, 1 * torch.pi, 0.0, -4 * torch.pi],
], dtype=torch.float32, device=device)


class PendulumController(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(4, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 1)
        )

    def forward(self, x):
        raw_torque = self.net(x)
        return torch.clamp(raw_torque, -250, 250)


class PendulumDynamics(nn.Module):
    def __init__(self, controller):
        super().__init__()
        self.controller = controller

    def forward(self, t, state):
        theta, omega, alpha, desired_theta = state[:, 0], state[:, 1], state[:, 2], state[:, 3]
        input = torch.stack([theta, omega, alpha, desired_theta], dim=1)
        tau = self.controller(input).squeeze(-1)
        alpha_desired = (g / R) * torch.sin(theta) + tau / (m * R**2)
        return torch.stack([omega, alpha, alpha_desired - alpha, torch.zeros_like(desired_theta)], dim=1)


def loss_fn(state_traj, t_span):
    theta = state_traj[:, :, 0]
    desired_theta = state_traj[:, :, 3]

    # Make the time weights broadcastable over theta: shape (len(t_span), 1)
    lamda_exp = 1
    time_weights = torch.exp(lamda_exp * t_span).view(-1, 1)

    return 1e3 * torch.mean(time_weights * (theta - desired_theta) ** 2)


# Initialize the controller and dynamics
controller = PendulumController().to(device)
pendulum_dynamics = PendulumDynamics(controller).to(device)

# Optimizer setup
learning_rate = 1e-1
weight_decay = 1e-4
optimizer = optim.Adam(controller.parameters(), lr=learning_rate, weight_decay=weight_decay)

# Training parameters
num_epochs = 5_001  # Run epochs 0 through 5000 inclusive
save_every = 1  # How often (in epochs) to save a controller .pth

# File paths
config_file = os.path.join(output_dir, "training_config.txt")
log_file = os.path.join(output_dir, "training_log.csv")
model_file = ""  # Placeholder for the model file, updated in the training loop

# Save configuration details
with open(config_file, "w") as f:
    f.write(f"Random Seed: {random_seed}\n")
    f.write(f"Time Span: {t_start} to {t_end}, Points: {t_points}\n")
    f.write(f"Learning Rate: {learning_rate}\n")
    f.write(f"Weight Decay: {weight_decay}\n")
    f.write("\nLoss Function:\n")
    f.write(inspect.getsource(loss_fn))  # Extract and write the loss function's source code
    f.write("\nTraining Cases:\n")
    f.write("[theta0, omega0, alpha0, desired_theta]\n")
    for case in state_0.cpu().numpy():
        f.write(f"{case.tolist()}\n")

# Overwrite the log file at the start
with open(log_file, "w", newline="") as csvfile:
    csv_writer = csv.writer(csvfile)
    csv_writer.writerow(["Epoch", "Loss", "Elapsed Time (s)"])

# Training loop with real-time logging and NaN tracking
start_time = time.time()
with open(log_file, "a", newline="") as csvfile:
    csv_writer = csv.writer(csvfile)

    for epoch in range(num_epochs):
        epoch_start_time = time.time()

        optimizer.zero_grad()
        state_traj = odeint(pendulum_dynamics, state_0, t_span, method='rk4')
        loss = loss_fn(state_traj, t_span)

        elapsed_time = time.time() - epoch_start_time

        if torch.isnan(loss):
            print(f"NaN detected at epoch {epoch}. Skipping step.")
            csv_writer.writerow([epoch, "NaN detected", elapsed_time])
            csvfile.flush()  # Ensure real-time writing
            optimizer.zero_grad()
            continue

        loss.backward()
        optimizer.step()

        # Log the loss
        csv_writer.writerow([epoch, loss.item(), elapsed_time])
        csvfile.flush()  # Ensure real-time writing

        if epoch % save_every == 0:
            print(f"Epoch {epoch}/{num_epochs} | Loss: {loss.item():.6f} | Time: {elapsed_time:.4f} sec")
            model_file = os.path.join(controller_output_dir, f"controller_{epoch}.pth")
            torch.save(controller.state_dict(), model_file)

# Final save
torch.save(controller.state_dict(), model_file)
print(f"Exponential time weight training complete. Model and files saved in directory '{output_dir}'. Model saved as '{model_file}'. Logs saved in '{log_file}' and configuration in '{config_file}'.")
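With lamda_exp = 1 and the 10 s horizon above, the exponential profile is far steeper than any of the polynomial variants:

```latex
w(t) = e^{\lambda t},\ \lambda = 1:\qquad \frac{w(10)}{w(0)} = e^{10} \approx 2.2\times 10^{4},
```

a spread of more than four orders of magnitude, so the final seconds of each trajectory dominate the objective almost entirely.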
trainer_linear_time_weights.py (new file)
@@ -0,0 +1,173 @@
import torch
import torch.nn as nn
import torch.optim as optim
from torchdiffeq import odeint
import numpy as np
import inspect
import time
import csv
import os

# Specify the directory for storing results
output_dir = "training/linear_time_weight"
controller_output_dir = output_dir + "/controllers"
os.makedirs(output_dir, exist_ok=True)  # Create the directory if it doesn't exist
os.makedirs(controller_output_dir, exist_ok=True)  # Create the directory if it doesn't exist

# Use a previously generated random seed
random_seed = 4529

# Set the seeds for reproducibility
torch.manual_seed(random_seed)
np.random.seed(random_seed)

# Print the chosen random seed
print(f"Random seed for torch and numpy: {random_seed}")

# Constants
m = 10.0
g = 9.81
R = 1.0

# Device setup
device = torch.device("cpu")

# Time grid
t_start, t_end, t_points = 0, 10, 1000
t_span = torch.linspace(t_start, t_end, t_points, device=device)

# Initial conditions (theta0, omega0, alpha0, desired_theta)
state_0 = torch.tensor([
    [1/6 * torch.pi, 0.0, 0.0, 0.0],
    [-1/6 * torch.pi, 0.0, 0.0, 0.0],
    [2/3 * torch.pi, 0.0, 0.0, 0.0],
    [-2/3 * torch.pi, 0.0, 0.0, 0.0],
    [0.0, 1/3 * torch.pi, 0.0, 0.0],
    [0.0, -1/3 * torch.pi, 0.0, 0.0],
    [0.0, 2 * torch.pi, 0.0, 0.0],
    [0.0, -2 * torch.pi, 0.0, 0.0],
    [0.0, 0.0, 0.0, 2 * torch.pi],
    [0.0, 0.0, 0.0, -2 * torch.pi],
    [0.0, 0.0, 0.0, 1/2 * torch.pi],
    [0.0, 0.0, 0.0, -1/2 * torch.pi],
    [0.0, 0.0, 0.0, 1/3 * torch.pi],
    [0.0, 0.0, 0.0, -1/3 * torch.pi],
    [1/4 * torch.pi, 1 * torch.pi, 0.0, 0.0],
    [-1/4 * torch.pi, -1 * torch.pi, 0.0, 0.0],
    [1/2 * torch.pi, -1 * torch.pi, 0.0, 1/3 * torch.pi],
    [-1/2 * torch.pi, 1 * torch.pi, 0.0, -1/3 * torch.pi],
    [1/4 * torch.pi, 1 * torch.pi, 0.0, 2 * torch.pi],
    [-1/4 * torch.pi, -1 * torch.pi, 0.0, 2 * torch.pi],
    [1/2 * torch.pi, -1 * torch.pi, 0.0, 4 * torch.pi],
    [-1/2 * torch.pi, 1 * torch.pi, 0.0, -4 * torch.pi],
], dtype=torch.float32, device=device)


class PendulumController(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(4, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 1)
        )

    def forward(self, x):
        raw_torque = self.net(x)
        return torch.clamp(raw_torque, -250, 250)


class PendulumDynamics(nn.Module):
    def __init__(self, controller):
        super().__init__()
        self.controller = controller

    def forward(self, t, state):
        theta, omega, alpha, desired_theta = state[:, 0], state[:, 1], state[:, 2], state[:, 3]
        input = torch.stack([theta, omega, alpha, desired_theta], dim=1)
        tau = self.controller(input).squeeze(-1)
        alpha_desired = (g / R) * torch.sin(theta) + tau / (m * R**2)
        return torch.stack([omega, alpha, alpha_desired - alpha, torch.zeros_like(desired_theta)], dim=1)


def loss_fn(state_traj, t_span):
    theta = state_traj[:, :, 0]
    desired_theta = state_traj[:, :, 3]

    # Make the time weights broadcastable over theta: shape (len(t_span), 1)
    time_weights = t_span.view(-1, 1)

    return 1e3 * torch.mean(time_weights * (theta - desired_theta) ** 2)


# Initialize the controller and dynamics
controller = PendulumController().to(device)
pendulum_dynamics = PendulumDynamics(controller).to(device)

# Optimizer setup
learning_rate = 1e-1
weight_decay = 1e-4
optimizer = optim.Adam(controller.parameters(), lr=learning_rate, weight_decay=weight_decay)

# Training parameters
num_epochs = 5_001  # Run epochs 0 through 5000 inclusive
save_every = 1  # How often (in epochs) to save a controller .pth

# File paths
config_file = os.path.join(output_dir, "training_config.txt")
log_file = os.path.join(output_dir, "training_log.csv")
model_file = ""  # Placeholder for the model file, updated in the training loop

# Save configuration details
with open(config_file, "w") as f:
    f.write(f"Random Seed: {random_seed}\n")
    f.write(f"Time Span: {t_start} to {t_end}, Points: {t_points}\n")
    f.write(f"Learning Rate: {learning_rate}\n")
    f.write(f"Weight Decay: {weight_decay}\n")
    f.write("\nLoss Function:\n")
    f.write(inspect.getsource(loss_fn))  # Extract and write the loss function's source code
    f.write("\nTraining Cases:\n")
    f.write("[theta0, omega0, alpha0, desired_theta]\n")
    for case in state_0.cpu().numpy():
        f.write(f"{case.tolist()}\n")

# Overwrite the log file at the start
with open(log_file, "w", newline="") as csvfile:
    csv_writer = csv.writer(csvfile)
    csv_writer.writerow(["Epoch", "Loss", "Elapsed Time (s)"])

# Training loop with real-time logging and NaN tracking
start_time = time.time()
with open(log_file, "a", newline="") as csvfile:
    csv_writer = csv.writer(csvfile)

    for epoch in range(num_epochs):
        epoch_start_time = time.time()

        optimizer.zero_grad()
        state_traj = odeint(pendulum_dynamics, state_0, t_span, method='rk4')
        loss = loss_fn(state_traj, t_span)

        elapsed_time = time.time() - epoch_start_time

        if torch.isnan(loss):
            print(f"NaN detected at epoch {epoch}. Skipping step.")
            csv_writer.writerow([epoch, "NaN detected", elapsed_time])
            csvfile.flush()  # Ensure real-time writing
            optimizer.zero_grad()
            continue

        loss.backward()
        optimizer.step()

        # Log the loss
        csv_writer.writerow([epoch, loss.item(), elapsed_time])
        csvfile.flush()  # Ensure real-time writing

        if epoch % save_every == 0:
            print(f"Epoch {epoch}/{num_epochs} | Loss: {loss.item():.6f} | Time: {elapsed_time:.4f} sec")
            model_file = os.path.join(controller_output_dir, f"controller_{epoch}.pth")
            torch.save(controller.state_dict(), model_file)

# Final save
torch.save(controller.state_dict(), model_file)
print(f"Linear time weight training complete. Model and files saved in directory '{output_dir}'. Model saved as '{model_file}'. Logs saved in '{log_file}' and configuration in '{config_file}'.")
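A quick sketch (not part of the PR) comparing how much of each scheme's weight mass falls in the final second of the 10 s horizon, using the same 1000-point grid as the trainers:

```python
import torch

t = torch.linspace(0, 10, 1000)
schemes = {"linear": t, "quadratic": t**2, "cubic": t**3, "exponential": torch.exp(t)}
for name, w in schemes.items():
    late = (w[t > 9].sum() / w.sum()).item()  # Share of total weight with t > 9 s
    print(f"{name:>11}: {late:.1%} of the weight lies in the last second")
```

Roughly 19% for linear, 27% for quadratic, 34% for cubic, and about 63% for exponential, which is the practical difference between the four trainers.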
173
trainer_quadratic_time_weights.py
Normal file
@ -0,0 +1,173 @@
|
|||||||
|
import torch
import torch.nn as nn
import torch.optim as optim
from torchdiffeq import odeint
import numpy as np
import inspect
import time
import csv
import os

# Specify directory for storing results
output_dir = "training/quadratic_time_weight"
controller_output_dir = output_dir + "/controllers"
os.makedirs(output_dir, exist_ok=True)  # Create directory if it doesn't exist
os.makedirs(controller_output_dir, exist_ok=True)  # Create directory if it doesn't exist

# Use a previously generated random seed
random_seed = 4529

# Set the seeds for reproducibility
torch.manual_seed(random_seed)
np.random.seed(random_seed)

# Print the chosen random seed
print(f"Random seed for torch and numpy: {random_seed}")

# Constants
m = 10.0
g = 9.81
R = 1.0

# Device setup
device = torch.device("cpu")

# Time grid
t_start, t_end, t_points = 0, 10, 1000
t_span = torch.linspace(t_start, t_end, t_points, device=device)

# Initial conditions (theta0, omega0, alpha0, desired_theta)
state_0 = torch.tensor([
    [1/6 * torch.pi, 0.0, 0.0, 0.0],
    [-1/6 * torch.pi, 0.0, 0.0, 0.0],
    [2/3 * torch.pi, 0.0, 0.0, 0.0],
    [-2/3 * torch.pi, 0.0, 0.0, 0.0],
    [0.0, 1/3 * torch.pi, 0.0, 0.0],
    [0.0, -1/3 * torch.pi, 0.0, 0.0],
    [0.0, 2 * torch.pi, 0.0, 0.0],
    [0.0, -2 * torch.pi, 0.0, 0.0],
    [0.0, 0.0, 0.0, 2 * torch.pi],
    [0.0, 0.0, 0.0, -2 * torch.pi],
    [0.0, 0.0, 0.0, 1/2 * torch.pi],
    [0.0, 0.0, 0.0, -1/2 * torch.pi],
    [0.0, 0.0, 0.0, 1/3 * torch.pi],
    [0.0, 0.0, 0.0, -1/3 * torch.pi],
    [1/4 * torch.pi, 1 * torch.pi, 0.0, 0.0],
    [-1/4 * torch.pi, -1 * torch.pi, 0.0, 0.0],
    [1/2 * torch.pi, -1 * torch.pi, 0.0, 1/3 * torch.pi],
    [-1/2 * torch.pi, 1 * torch.pi, 0.0, -1/3 * torch.pi],
    [1/4 * torch.pi, 1 * torch.pi, 0.0, 2 * torch.pi],
    [-1/4 * torch.pi, -1 * torch.pi, 0.0, 2 * torch.pi],
    [1/2 * torch.pi, -1 * torch.pi, 0.0, 4 * torch.pi],
    [-1/2 * torch.pi, 1 * torch.pi, 0.0, -4 * torch.pi],
], dtype=torch.float32, device=device)

class PendulumController(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(4, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 1)
        )

    def forward(self, x):
        raw_torque = self.net(x)
        return torch.clamp(raw_torque, -250, 250)

class PendulumDynamics(nn.Module):
    def __init__(self, controller):
        super().__init__()
        self.controller = controller

    def forward(self, t, state):
        theta, omega, alpha, desired_theta = state[:, 0], state[:, 1], state[:, 2], state[:, 3]
        controller_input = torch.stack([theta, omega, alpha, desired_theta], dim=1)
        tau = self.controller(controller_input).squeeze(-1)
        # Angular acceleration demanded by gravity plus the motor torque
        alpha_desired = (g / R) * torch.sin(theta) + tau / (m * R**2)
        # d/dt of [theta, omega, alpha, desired_theta]: alpha relaxes toward alpha_desired, desired_theta stays constant
        return torch.stack([omega, alpha, alpha_desired - alpha, torch.zeros_like(desired_theta)], dim=1)

def loss_fn(state_traj, t_span):
    theta = state_traj[:, :, 0]
    desired_theta = state_traj[:, :, 3]

    # Reshape to (len(t_span), 1) so the time weights broadcast over theta's batch dimension
    time_weights = (t_span ** 2).view(-1, 1)

    return 1e3 * torch.mean(time_weights * (theta - desired_theta) ** 2)

# Initialize controller and dynamics
controller = PendulumController().to(device)
pendulum_dynamics = PendulumDynamics(controller).to(device)

# Optimizer setup
learning_rate = 1e-1
weight_decay = 1e-4
optimizer = optim.Adam(controller.parameters(), lr=learning_rate, weight_decay=weight_decay)

# Training parameters
num_epochs = 5_001  # 5000 + 1 for 5000 total weight updates
save_every = 1  # How often to save the controller.pth

# File paths
config_file = os.path.join(output_dir, "training_config.txt")
log_file = os.path.join(output_dir, "training_log.csv")
model_file = ""  # Placeholder for the model file, updated in training loop

# Save configuration details
with open(config_file, "w") as f:
    f.write(f"Random Seed: {random_seed}\n")
    f.write(f"Time Span: {t_start} to {t_end}, Points: {t_points}\n")
    f.write(f"Learning Rate: {learning_rate}\n")
    f.write(f"Weight Decay: {weight_decay}\n")
    f.write("\nLoss Function:\n")
    f.write(inspect.getsource(loss_fn))  # Extract and write loss function source code
    f.write("\nTraining Cases:\n")
    f.write("[theta0, omega0, alpha0, desired_theta]\n")
    for case in state_0.cpu().numpy():
        f.write(f"{case.tolist()}\n")


# Overwrite the log file at the start
with open(log_file, "w", newline="") as csvfile:
    csv_writer = csv.writer(csvfile)
    csv_writer.writerow(["Epoch", "Loss", "Elapsed Time (s)"])

# Training loop with real-time logging and NaN tracking
start_time = time.time()
with open(log_file, "a", newline="") as csvfile:
    csv_writer = csv.writer(csvfile)

    for epoch in range(num_epochs):
        epoch_start_time = time.time()

        optimizer.zero_grad()
        state_traj = odeint(pendulum_dynamics, state_0, t_span, method='rk4')
        loss = loss_fn(state_traj, t_span)

        elapsed_time = time.time() - epoch_start_time

        if torch.isnan(loss):
            print(f"NaN detected at epoch {epoch}. Skipping step.")
            csv_writer.writerow([epoch, "NaN detected", elapsed_time])
            csvfile.flush()  # Ensure real-time writing
            optimizer.zero_grad()
            continue

        loss.backward()
        optimizer.step()

        # Log normal loss
        csv_writer.writerow([epoch, loss.item(), elapsed_time])
        csvfile.flush()  # Ensure real-time writing

        if epoch % save_every == 0:
            print(f"Epoch {epoch}/{num_epochs} | Loss: {loss.item():.6f} | Time: {elapsed_time:.4f} sec")
            model_file = os.path.join(controller_output_dir, f"controller_{epoch}.pth")
            torch.save(controller.state_dict(), model_file)

# Final save
torch.save(controller.state_dict(), model_file)
print(f"Quadratic time weight training complete. Model and files saved in directory '{output_dir}'. Model saved as '{model_file}'. Logs saved in '{log_file}' and configuration in '{config_file}'.")
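The quadratic time weights above make a tracking error near t_end cost far more than the same error during the transient, pushing the controller toward zero steady-state error. A minimal standalone sketch of the same broadcasting pattern, with toy tensors standing in for odeint's (time, batch, state) output:

import torch

t_span = torch.linspace(0, 10, 1000)              # same grid as the trainer
theta = torch.zeros(1000, 22)                     # toy (time, batch) trajectory
desired_theta = torch.full((1000, 22), torch.pi)  # toy targets

# (1000,) -> (1000, 1): weights broadcast across the batch dimension
time_weights = (t_span ** 2).view(-1, 1)
loss = 1e3 * torch.mean(time_weights * (theta - desired_theta) ** 2)
print(loss.item())  # an error at t=10 is weighted 100x the same error at t=1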
@ -10,7 +10,9 @@ import os
 
 # Specify directory for storing results
 output_dir = "training/no_time_weight"
+controller_output_dir = output_dir + "/controllers"
 os.makedirs(output_dir, exist_ok=True)  # Create directory if it doesn't exist
+os.makedirs(controller_output_dir, exist_ok=True)  # Create directory if it doesn't exist
 
 # Use a previously generated random seed
 random_seed = 4529
@ -103,13 +105,13 @@ weight_decay = 1e-4
 optimizer = optim.Adam(controller.parameters(), lr=learning_rate, weight_decay=weight_decay)
 
 # Training parameters
-num_epochs = 5_000
-print_every = 25
+num_epochs = 5_001  # 5000 + 1 for 5000 total weight updates
+save_every = 1  # How often to save the controller.pth
 
 # File paths
 config_file = os.path.join(output_dir, "training_config.txt")
 log_file = os.path.join(output_dir, "training_log.csv")
-model_file = os.path.join(output_dir, "controller.pth")
+model_file = ""  # Placeholder for the model file, updated in training loop
 
 # Save configuration details
 with open(config_file, "w") as f:
@ -158,10 +160,11 @@ with open(log_file, "a", newline="") as csvfile:
         csv_writer.writerow([epoch, loss.item(), elapsed_time])
         csvfile.flush()  # Ensure real-time writing
 
-        if epoch % print_every == 0:
+        if epoch % save_every == 0:
             print(f"Epoch {epoch}/{num_epochs} | Loss: {loss.item():.6f} | Time: {elapsed_time:.4f} sec")
+            model_file = os.path.join(controller_output_dir, f"controller_{epoch}.pth")
             torch.save(controller.state_dict(), model_file)
 
 # Final save
 torch.save(controller.state_dict(), model_file)
-print(f"Training complete. Model saved as '{model_file}'. Logs saved in '{log_file}' and configuration in '{config_file}'.")
+print(f"No time weight training complete. Model and files saved in directory '{output_dir}'. Model saved as '{model_file}'. Logs saved in '{log_file}' and configuration in '{config_file}'.")
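With save_every = 1, every epoch now leaves a checkpoint named controller_{epoch}.pth under the controllers directory. A minimal sketch for reloading one for evaluation; the class mirrors PendulumController from the new trainer, and the checkpoint path and epoch index are illustrative:

import torch
import torch.nn as nn

class PendulumController(nn.Module):  # same layout as in trainer_quadratic_time_weights.py
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(4, 64), nn.ReLU(),
            nn.Linear(64, 64), nn.ReLU(),
            nn.Linear(64, 1),
        )

    def forward(self, x):
        return torch.clamp(self.net(x), -250, 250)

controller = PendulumController()
ckpt = "training/quadratic_time_weight/controllers/controller_5000.pth"  # illustrative epoch
controller.load_state_dict(torch.load(ckpt))
controller.eval()  # checkpoint is ready for rollout or plotting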
(42 binary image files deleted, 41-59 KiB each; previews and filenames not shown in this view)