Restructure files. Change the weight functions so they are always normalized to the range 0 to 1 (i.e. max normalization). Also update the average-normalized variants.
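For reference, the two schemes differ only in the divisor; a minimal sketch (the time grid and the linear profile below are illustrative, not taken from the diff):

import torch

t = torch.linspace(0, 10, 5)  # illustrative time grid
w = t + 1                     # raw linear weights

w_max = w / w.max()           # max normalization: peak value is exactly 1
w_avg = w_max / w_max.mean()  # average normalization: mean value is exactly 1

print(w_max.max(), w_avg.mean())  # both are 1 (up to float rounding)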
@@ -85,16 +85,16 @@ loss_functions = ["constant", "linear", "quadratic", "exponential", "inverse", "
 epoch_start = 0 # Start of the epoch range
-epoch_end = 500 # End of the epoch range
-epoch_step = 5 # Interval between epochs
+epoch_end = 1000 # End of the epoch range
+epoch_step = 10 # Interval between epochs

 if __name__ == "__main__":
     for condition_name, initial_condition in initial_conditions.items():
-        full_path = f"/home/judson/Neural-Networks-in-GNC/inverted_pendulum/analysis/max_normalized/{condition_name}"
+        full_path = f"/home/judson/Neural-Networks-in-GNC/inverted_pendulum/analysis/average_normalized/{condition_name}"
         os.makedirs(full_path, exist_ok=True) # Create directory if it does not exist

         for loss_function in loss_functions:
-            controller_dir = f"/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/normalized/max_normalized/{loss_function}/controllers"
+            controller_dir = f"/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/normalized/average_normalized/{loss_function}/controllers"
             controller_files = sorted([f for f in os.listdir(controller_dir) if f.startswith("controller_") and f.endswith(".pth")])
             # Extract epoch numbers and filter based on the defined range and interval
             epoch_numbers = [int(f.split('_')[1].split('.')[0]) for f in controller_files]
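The hunk is cut off after epoch_numbers; a hedged sketch of the range/interval filtering the comment describes (selected_epochs is an illustrative name, not taken from the diff):

# Hypothetical continuation: keep only checkpoints whose epoch lies in
# [epoch_start, epoch_end] and lands on the epoch_step interval.
selected_epochs = [
    n for n in epoch_numbers
    if epoch_start <= n <= epoch_end and (n - epoch_start) % epoch_step == 0
]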
BIN  analysis/old/average_normalized/extreme_perturbation/inverse.png  Normal file
BIN  analysis/old/average_normalized/extreme_perturbation/linear.png  Normal file
BIN  analysis/old/average_normalized/large_perturbation/constant.png  Normal file
BIN  analysis/old/average_normalized/large_perturbation/inverse.png  Normal file
BIN  analysis/old/average_normalized/large_perturbation/linear.png  Normal file
BIN  analysis/old/average_normalized/large_perturbation/quadratic.png  Normal file
BIN  analysis/old/average_normalized/overshoot_angle_test/inverse.png  Normal file
BIN  analysis/old/average_normalized/overshoot_angle_test/linear.png  Normal file
BIN  analysis/old/average_normalized/small_perturbation/constant.png  Normal file
BIN  analysis/old/average_normalized/small_perturbation/inverse.png  Normal file
BIN  analysis/old/average_normalized/small_perturbation/linear.png  Normal file
BIN  analysis/old/average_normalized/small_perturbation/quadratic.png  Normal file
BIN  training/__pycache__/PendulumController.cpython-310.pyc  Normal file
BIN  training/__pycache__/PendulumDynamics.cpython-310.pyc  Normal file
BIN  training/__pycache__/initial_conditions.cpython-310.pyc  Normal file
159  training/average_normalized_trainer.py  Normal file
@@ -0,0 +1,159 @@
import torch
import torch.optim as optim
from torchdiffeq import odeint
import numpy as np
import os
import shutil
import csv
import inspect

from PendulumController import PendulumController
from PendulumDynamics import PendulumDynamics

# Device setup
device = torch.device("cpu")

# Initial conditions (theta0, omega0, alpha0, desired_theta)
from initial_conditions import initial_conditions
state_0 = torch.tensor(initial_conditions, dtype=torch.float32, device=device)

# Constants
m = 10.0
g = 9.81
R = 1.0

# Time grid
t_start, t_end, t_points = 0, 10, 1000
t_span = torch.linspace(t_start, t_end, t_points, device=device)

# Specify directory for storing results
output_dir = "average_normalized"
os.makedirs(output_dir, exist_ok=True)

# Use a previously generated random seed
random_seed = 4529

# Set the seeds for reproducibility
torch.manual_seed(random_seed)
np.random.seed(random_seed)

# Print the chosen random seed
print(f"Random seed for torch and numpy: {random_seed}")

# Initialize controller and dynamics
controller = PendulumController().to(device)
pendulum_dynamics = PendulumDynamics(controller, m, R, g).to(device)

# Optimizer setup
learning_rate = 1e-1
weight_decay = 1e-4
optimizer = optim.Adam(controller.parameters(), lr=learning_rate, weight_decay=weight_decay)

# Training parameters
num_epochs = 1001

# Define loss functions
def make_loss_fn(weight_fn):
    def loss_fn(state_traj, t_span):
        theta = state_traj[:, :, 0]          # Size: [t_points, batch_size]
        desired_theta = state_traj[:, :, 3]  # Size: [t_points, batch_size]
        weights = weight_fn(t_span)          # Initially Size: [t_points]

        # Reshape weights so they broadcast across the batch dimension
        weights = weights.view(-1, 1)        # Now Size: [t_points, 1]

        # Calculate the weighted loss
        return torch.mean(weights * (theta - desired_theta) ** 2)

    return loss_fn

# Define and store weight functions with descriptions, normalized by average weight
weight_functions = {
    'constant': {
        'function': lambda t: torch.ones_like(t) / torch.ones_like(t).mean(),
        'description': 'Constant weight: All weights are 1, normalized by the average (remains 1)'
    },
    'linear': {
        'function': lambda t: ((t+1) / (t+1).max()) / ((t+1) / (t+1).max()).mean(),
        'description': 'Linear weight: Weights increase linearly from 0 to 1, normalized by the average weight'
    },
    'quadratic': {
        'function': lambda t: ((t+1)**2 / ((t+1)**2).max()) / ((t+1)**2 / ((t+1)**2).max()).mean(),
        'description': 'Quadratic weight: Weights increase quadratically from 0 to 1, normalized by the average weight'
    },
    'cubic': {
        'function': lambda t: ((t+1)**3 / ((t+1)**3).max()) / ((t+1)**3 / ((t+1)**3).max()).mean(),
        'description': 'Cubic weight: Weights increase cubically from 0 to 1, normalized by the average weight'
    },
    'inverse': {
        'function': lambda t: ((t+1)**-1 / ((t+1)**-1).max()) / ((t+1)**-1 / ((t+1)**-1).max()).mean(),
        'description': 'Inverse weight: Weights decrease inversely, normalized by the average weight'
    },
    'inverse_squared': {
        'function': lambda t: ((t+1)**-2 / ((t+1)**-2).max()) / ((t+1)**-2 / ((t+1)**-2).max()).mean(),
        'description': 'Inverse squared weight: Weights decrease inversely squared, normalized by the average weight'
    },
    'inverse_cubed': {
        'function': lambda t: ((t+1)**-3 / ((t+1)**-3).max()) / ((t+1)**-3 / ((t+1)**-3).max()).mean(),
        'description': 'Inverse cubed weight: Weights decrease inversely cubed, normalized by the average weight'
    }
}

# Training loop for each weight function
for name, weight_info in weight_functions.items():
    controller = PendulumController().to(device)
    pendulum_dynamics = PendulumDynamics(controller, m, R, g).to(device)
    optimizer = optim.Adam(controller.parameters(), lr=learning_rate, weight_decay=weight_decay)
    loss_fn = make_loss_fn(weight_info['function'])

    # File paths
    function_output_dir = os.path.join(output_dir, name)
    controllers_dir = os.path.join(function_output_dir, "controllers")

    # Check if controllers directory exists and remove it
    if os.path.exists(controllers_dir):
        shutil.rmtree(controllers_dir)
    os.makedirs(controllers_dir, exist_ok=True)

    config_file = os.path.join(function_output_dir, "training_config.txt")
    log_file = os.path.join(function_output_dir, "training_log.csv")

    # Overwrite configuration and log files
    with open(config_file, "w") as f:
        f.write(f"Random Seed: {random_seed}\n")
        f.write(f"Time Span: {t_start} to {t_end}, Points: {t_points}\n")
        f.write(f"Learning Rate: {learning_rate}\n")
        f.write(f"Weight Decay: {weight_decay}\n")
        f.write("\nLoss Function:\n")
        f.write(inspect.getsource(loss_fn))
        f.write("\nTraining Cases:\n")
        f.write("[theta0, omega0, alpha0, desired_theta]\n")
        for case in state_0.cpu().numpy():
            f.write(f"{case.tolist()}\n")

    with open(log_file, "w", newline="") as csvfile:
        csv_writer = csv.writer(csvfile)
        csv_writer.writerow(["Epoch", "Loss"])

    # Training loop
    for epoch in range(num_epochs):
        optimizer.zero_grad()
        state_traj = odeint(pendulum_dynamics, state_0, t_span, method='rk4')
        loss = loss_fn(state_traj, t_span)
        loss.backward()
        optimizer.step()

        # Logging
        with open(log_file, "a", newline="") as csvfile:
            csv_writer = csv.writer(csvfile)
            csv_writer.writerow([epoch, loss.item()])

        # Save the model
        model_file = os.path.join(controllers_dir, f"controller_{epoch}.pth")
        torch.save(controller.state_dict(), model_file)
        print(f"{model_file} saved with loss: {loss.item()}")

print("Training complete. Models and logs are saved under respective directories for each loss function.")
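A quick sanity check of the average normalization above (a minimal sketch, assuming the same t_span as the trainer; not part of the committed file): dividing a max-normalized profile by its mean forces the mean to 1, which keeps loss magnitudes comparable across the different weight functions.

import torch

t_span = torch.linspace(0, 10, 1000)
w = (t_span + 1) / (t_span + 1).max()  # max-normalized linear profile
w_avg = w / w.mean()                   # the average-normalized form used above

# Mean is 1 by construction, so the loss scale is the same for every weighting.
assert torch.isclose(w_avg.mean(), torch.tensor(1.0))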
159  training/max_normalized_trainer.py  Normal file
@@ -0,0 +1,159 @@
import torch
import torch.optim as optim
from torchdiffeq import odeint
import numpy as np
import os
import shutil
import csv
import inspect

from PendulumController import PendulumController
from PendulumDynamics import PendulumDynamics

# Device setup
device = torch.device("cpu")

# Initial conditions (theta0, omega0, alpha0, desired_theta)
from initial_conditions import initial_conditions
state_0 = torch.tensor(initial_conditions, dtype=torch.float32, device=device)

# Constants
m = 10.0
g = 9.81
R = 1.0

# Time grid
t_start, t_end, t_points = 0, 10, 1000
t_span = torch.linspace(t_start, t_end, t_points, device=device)

# Specify directory for storing results
output_dir = "max_normalized"
os.makedirs(output_dir, exist_ok=True)

# Use a previously generated random seed
random_seed = 4529

# Set the seeds for reproducibility
torch.manual_seed(random_seed)
np.random.seed(random_seed)

# Print the chosen random seed
print(f"Random seed for torch and numpy: {random_seed}")

# Initialize controller and dynamics
controller = PendulumController().to(device)
pendulum_dynamics = PendulumDynamics(controller, m, R, g).to(device)

# Optimizer setup
learning_rate = 1e-1
weight_decay = 1e-4
optimizer = optim.Adam(controller.parameters(), lr=learning_rate, weight_decay=weight_decay)

# Training parameters
num_epochs = 1001

# Define loss functions
def make_loss_fn(weight_fn):
    def loss_fn(state_traj, t_span):
        theta = state_traj[:, :, 0]          # Size: [t_points, batch_size]
        desired_theta = state_traj[:, :, 3]  # Size: [t_points, batch_size]
        weights = weight_fn(t_span)          # Initially Size: [t_points]

        # Reshape weights so they broadcast across the batch dimension
        weights = weights.view(-1, 1)        # Now Size: [t_points, 1]

        # Calculate the weighted loss
        return torch.mean(weights * (theta - desired_theta) ** 2)

    return loss_fn

# Define and store weight functions with descriptions
weight_functions = {
    'constant': {
        'function': lambda t: torch.ones_like(t),
        'description': 'Constant weight: All weights are 1'
    },
    'linear': {
        'function': lambda t: (t+1) / (t+1).max(),
        'description': 'Linear weight: Weights increase linearly, normalized by max'
    },
    'quadratic': {
        'function': lambda t: (t+1)**2 / ((t+1)**2).max(),
        'description': 'Quadratic weight: Weights increase quadratically, normalized by max'
    },
    'cubic': {
        'function': lambda t: (t+1)**3 / ((t+1)**3).max(),
        'description': 'Cubic weight: Weights increase cubically, normalized by max'
    },
    'inverse': {
        'function': lambda t: (t+1)**-1 / ((t+1)**-1).max(),
        'description': 'Inverse weight: Weights decrease inversely, normalized by max'
    },
    'inverse_squared': {
        'function': lambda t: (t+1)**-2 / ((t+1)**-2).max(),
        'description': 'Inverse squared weight: Weights decrease inversely squared, normalized by max'
    },
    'inverse_cubed': {
        'function': lambda t: (t+1)**-3 / ((t+1)**-3).max(),
        'description': 'Inverse cubed weight: Weights decrease inversely cubed, normalized by max'
    }
}

# Training loop for each weight function
for name, weight_info in weight_functions.items():
    controller = PendulumController().to(device)
    pendulum_dynamics = PendulumDynamics(controller, m, R, g).to(device)
    optimizer = optim.Adam(controller.parameters(), lr=learning_rate, weight_decay=weight_decay)
    loss_fn = make_loss_fn(weight_info['function'])

    # File paths
    function_output_dir = os.path.join(output_dir, name)
    controllers_dir = os.path.join(function_output_dir, "controllers")

    # Check if controllers directory exists and remove it
    if os.path.exists(controllers_dir):
        shutil.rmtree(controllers_dir)
    os.makedirs(controllers_dir, exist_ok=True)

    config_file = os.path.join(function_output_dir, "training_config.txt")
    log_file = os.path.join(function_output_dir, "training_log.csv")

    # Overwrite configuration and log files
    with open(config_file, "w") as f:
        f.write(f"Random Seed: {random_seed}\n")
        f.write(f"Time Span: {t_start} to {t_end}, Points: {t_points}\n")
        f.write(f"Learning Rate: {learning_rate}\n")
        f.write(f"Weight Decay: {weight_decay}\n")
        f.write("\nLoss Function:\n")
        f.write(inspect.getsource(loss_fn))
        f.write("\nTraining Cases:\n")
        f.write("[theta0, omega0, alpha0, desired_theta]\n")
        for case in state_0.cpu().numpy():
            f.write(f"{case.tolist()}\n")

    with open(log_file, "w", newline="") as csvfile:
        csv_writer = csv.writer(csvfile)
        csv_writer.writerow(["Epoch", "Loss"])

    # Training loop
    for epoch in range(num_epochs):
        optimizer.zero_grad()
        state_traj = odeint(pendulum_dynamics, state_0, t_span, method='rk4')
        loss = loss_fn(state_traj, t_span)
        loss.backward()
        optimizer.step()

        # Logging
        with open(log_file, "a", newline="") as csvfile:
            csv_writer = csv.writer(csvfile)
            csv_writer.writerow([epoch, loss.item()])

        # Save the model
        model_file = os.path.join(controllers_dir, f"controller_{epoch}.pth")
        torch.save(controller.state_dict(), model_file)
        print(f"{model_file} saved with loss: {loss.item()}")

print("Training complete. Models and logs are saved under respective directories for each loss function.")
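And the corresponding check for the max-normalized variants (again a sketch under the trainer's t_span, not committed code): every profile should peak at exactly 1.

import torch

t_span = torch.linspace(0, 10, 1000)
profiles = [
    torch.ones_like(t_span),                      # constant
    (t_span + 1) / (t_span + 1).max(),            # linear
    (t_span + 1)**-2 / ((t_span + 1)**-2).max(),  # inverse squared
]
for w in profiles:
    # Max normalization bounds each profile to (0, 1] with peak exactly 1.
    assert torch.isclose(w.max(), torch.tensor(1.0))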
17  training/old/normalized/PendulumController.py  Normal file
@@ -0,0 +1,17 @@
import torch
import torch.nn as nn

class PendulumController(nn.Module):
    def __init__(self):
        super().__init__()
        # Inputs: [theta, omega, alpha, desired_theta]; output: one torque value
        self.net = nn.Sequential(
            nn.Linear(4, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
            nn.Linear(64, 1)
        )

    def forward(self, x):
        raw_torque = self.net(x)
        # Saturate the commanded torque to the actuator limits
        return torch.clamp(raw_torque, -250, 250)
26  training/old/normalized/PendulumDynamics.py  Normal file
@@ -0,0 +1,26 @@
import torch
import torch.nn as nn

class PendulumDynamics(nn.Module):
    def __init__(self, controller, m: float = 1.0, R: float = 1.0, g: float = 9.81):
        super().__init__()
        self.controller = controller
        self.m: float = m
        self.R: float = R
        self.g: float = g

    def forward(self, t, state):
        # Get the current values from the state
        theta, omega, alpha, desired_theta = state[:, 0], state[:, 1], state[:, 2], state[:, 3]

        # Make the input stack for the controller
        inputs = torch.stack([theta, omega, alpha, desired_theta], dim=1)

        # Get the torque (the output of the neural network)
        tau = self.controller(inputs).squeeze(-1)

        # Relax alpha toward the value implied by gravity and the applied torque
        alpha_desired = (self.g / self.R) * torch.sin(theta) + tau / (self.m * self.R**2)
        dalpha = alpha_desired - alpha

        # desired_theta is a constant target, so its time derivative is zero
        return torch.stack([omega, alpha, dalpha, torch.zeros_like(desired_theta)], dim=1)
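For completeness, a minimal rollout of these two modules together, as the trainers do (a sketch assuming torchdiffeq is installed and a single illustrative initial condition; not part of the diff):

import torch
from torchdiffeq import odeint

from PendulumController import PendulumController
from PendulumDynamics import PendulumDynamics

controller = PendulumController()
dynamics = PendulumDynamics(controller, m=10.0, R=1.0, g=9.81)

# One batch entry: [theta0, omega0, alpha0, desired_theta]
state_0 = torch.tensor([[0.5, 0.0, 0.0, 0.0]])
t_span = torch.linspace(0, 10, 1000)

# state_traj has shape [t_points, batch, 4]; theta over time is state_traj[:, :, 0]
state_traj = odeint(dynamics, state_0, t_span, method='rk4')
print(state_traj[-1, 0, 0])  # final pendulum angle for the single case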