Finding best learning rates from the sweeps
parent aa34bfac8c
commit e238bed91e
26 analysis/PendulumDynamics.py Normal file
@@ -0,0 +1,26 @@
import torch
import torch.nn as nn


class PendulumDynamics(nn.Module):
    def __init__(self, controller, m: 'float' = 1, R: 'float' = 1, g: 'float' = 9.81):
        super().__init__()
        self.controller = controller
        self.m: 'float' = m
        self.R: 'float' = R
        self.g: 'float' = g

    def forward(self, t, state):
        # Get the current values from the state
        theta, omega, alpha, desired_theta = state[:, 0], state[:, 1], state[:, 2], state[:, 3]

        # Make the input stack for the controller
        input = torch.stack([theta, omega, alpha, desired_theta], dim=1)

        # Get the torque (the output of the neural network)
        tau = self.controller(input).squeeze(-1)

        # Relax alpha
        alpha_desired = (self.g / self.R) * torch.sin(theta) + tau / (self.m * self.R**2)
        dalpha = alpha_desired - alpha

        return torch.stack([omega, alpha, dalpha, torch.zeros_like(desired_theta)], dim=1)
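A minimal usage sketch for this dynamics module, along the lines of what the analysis scripts below do: a trained PendulumController (loaded from a checkpoint; the file name here is a placeholder) is wrapped in PendulumDynamics and integrated with torchdiffeq's odeint over a batch of states laid out as [theta, omega, alpha, desired_theta].

    import torch
    from torchdiffeq import odeint

    from PendulumController import PendulumController
    from PendulumDynamics import PendulumDynamics

    # Hypothetical checkpoint path; the sweep scripts below use controllers/controller_200.pth
    controller = PendulumController()
    controller.load_state_dict(torch.load("controller.pth", map_location="cpu"))

    dynamics = PendulumDynamics(controller, m=10.0, R=1.0, g=9.81)

    # One initial state per batch row: [theta, omega, alpha, desired_theta]
    state_0 = torch.tensor([[0.5, 0.0, 0.0, 0.0]], dtype=torch.float32)
    t_span = torch.linspace(0, 10, 1000)

    with torch.no_grad():
        # Trajectory has shape (time, batch, 4)
        traj = odeint(dynamics, state_0, t_span, method='rk4')
    print(traj.shape)  # torch.Size([1000, 1, 4])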
BIN analysis/__pycache__/PendulumDynamics.cpython-310.pyc Normal file
Binary file not shown.
BIN analysis/__pycache__/initial_conditions.cpython-310.pyc Normal file
Binary file not shown.
62 analysis/best_base_loss_learning_rate_sweep.py Normal file
@@ -0,0 +1,62 @@
data = {
    'one': {
        'csv': {
            'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one/lr_0.100',
            'csv_loss': 0.07867201417684555,
            'constant_loss': 2.5390186309814453
        },
        'constant': {
            'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one/lr_0.100',
            'csv_loss': 0.07867201417684555,
            'constant_loss': 2.5390186309814453
        }
    },
    'one_fourth': {
        'csv': {
            'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_fourth/lr_0.300',
            'csv_loss': 0.08876045793294907,
            'constant_loss': 2.5319466590881348
        },
        'constant': {
            'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_fourth/lr_0.250',
            'csv_loss': 0.09172269701957703,
            'constant_loss': 2.5288496017456055
        }
    },
    'four': {
        'csv': {
            'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/four/lr_0.200',
            'csv_loss': 0.1293140947818756,
            'constant_loss': 2.9976892471313477
        },
        'constant': {
            'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/four/lr_0.200',
            'csv_loss': 0.1293140947818756,
            'constant_loss': 2.9976892471313477
        }
    },
    'two': {
        'csv': {
            'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/two/lr_0.100',
            'csv_loss': 0.07678339630365372,
            'constant_loss': 2.5585412979125977
        },
        'constant': {
            'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/two/lr_0.100',
            'csv_loss': 0.07678339630365372,
            'constant_loss': 2.5585412979125977
        }
    },
    'one_half': {
        'csv': {
            'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_half/lr_0.200',
            'csv_loss': 0.08620432019233704,
            'constant_loss': 2.541421890258789
        },
        'constant': {
            'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_half/lr_0.200',
            'csv_loss': 0.08620432019233704,
            'constant_loss': 2.541421890258789
        }
    }
}
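A quick illustrative sketch (assuming the data dictionary above is in scope) of reading the sweep results back: for each base loss function, pull the best learning rate out of the stored lr_<rate> directory name for both selection criteria. The same pattern works for the time-weighting results file further down.

    import os

    for function_name, candidates in data.items():
        for criterion in ('csv', 'constant'):
            info = candidates[criterion]
            lr = os.path.basename(info['path'])[3:]  # strip the "lr_" prefix
            print(f"{function_name:>12} | best by {criterion:>8}: lr={lr}, "
                  f"csv_loss={info['csv_loss']:.4f}, constant_loss={info['constant_loss']:.4f}")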
15 analysis/best_base_loss_learning_rate_sweep.txt Normal file
@@ -0,0 +1,15 @@
Final best results (dictionary):
{'one': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one/lr_0.100', 'csv_loss': 0.07867201417684555, 'constant_loss': 2.5390186309814453}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one/lr_0.100', 'csv_loss': 0.07867201417684555, 'constant_loss': 2.5390186309814453}}, 'one_fourth': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_fourth/lr_0.300', 'csv_loss': 0.08876045793294907, 'constant_loss': 2.5319466590881348}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_fourth/lr_0.250', 'csv_loss': 0.09172269701957703, 'constant_loss': 2.5288496017456055}}, 'four': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/four/lr_0.200', 'csv_loss': 0.1293140947818756, 'constant_loss': 2.9976892471313477}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/four/lr_0.200', 'csv_loss': 0.1293140947818756, 'constant_loss': 2.9976892471313477}}, 'two': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/two/lr_0.100', 'csv_loss': 0.07678339630365372, 'constant_loss': 2.5585412979125977}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/two/lr_0.100', 'csv_loss': 0.07678339630365372, 'constant_loss': 2.5585412979125977}}, 'one_half': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_half/lr_0.200', 'csv_loss': 0.08620432019233704, 'constant_loss': 2.541421890258789}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_half/lr_0.200', 'csv_loss': 0.08620432019233704, 'constant_loss': 2.541421890258789}}}

Summary Table:
Function Name Candidate Learning Rate  CSV Loss  Constant Loss
one           CSV               0.100  0.078672       2.539019
              Constant          0.100  0.078672       2.539019
one_fourth    CSV               0.300  0.088760       2.531947
              Constant          0.250  0.091723       2.528850
four          CSV               0.200  0.129314       2.997689
              Constant          0.200  0.129314       2.997689
two           CSV               0.100  0.076783       2.558541
              Constant          0.100  0.076783       2.558541
one_half      CSV               0.200  0.086204       2.541422
              Constant          0.200  0.086204       2.541422
206 analysis/best_time_weighting_learning_rate_sweep.py Normal file
@@ -0,0 +1,206 @@
data = {
    "inverse": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse/lr_0.250",
            "csv_loss": 0.531498372554779,
            "constant_loss": 2.5503664016723633
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse/lr_0.250",
            "csv_loss": 0.531498372554779,
            "constant_loss": 2.5503664016723633
        }
    },
    "linear_mirrored": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.125",
            "csv_loss": 2.3770766258239746,
            "constant_loss": 2.552375078201294
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.125",
            "csv_loss": 2.3770766258239746,
            "constant_loss": 2.552375078201294
        }
    },
    "inverse_squared_mirrored": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.160",
            "csv_loss": 0.033845994621515274,
            "constant_loss": 2.7603342533111572
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.160",
            "csv_loss": 0.033845994621515274,
            "constant_loss": 2.7603342533111572
        }
    },
    "cubic_mirrored": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.080",
            "csv_loss": 2.0769901275634766,
            "constant_loss": 2.563471555709839
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.080",
            "csv_loss": 2.0769901275634766,
            "constant_loss": 2.563471555709839
        }
    },
    "quadratic": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/quadratic/lr_0.200",
            "csv_loss": 0.06192325800657272,
            "constant_loss": 3.025479316711426
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/quadratic/lr_0.080",
            "csv_loss": 0.14040324091911316,
            "constant_loss": 2.982274055480957
        }
    },
    "inverse_squared": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.200",
            "csv_loss": 1.1794205904006958,
            "constant_loss": 2.5662319660186768
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.200",
            "csv_loss": 1.1794205904006958,
            "constant_loss": 2.5662319660186768
        }
    },
    "quadratic_mirrored": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.125",
            "csv_loss": 2.218207836151123,
            "constant_loss": 2.555176258087158
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.125",
            "csv_loss": 2.218207836151123,
            "constant_loss": 2.555176258087158
        }
    },
    "square_root": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/square_root/lr_0.250",
            "csv_loss": 0.6526519656181335,
            "constant_loss": 2.5856597423553467
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/square_root/lr_0.250",
            "csv_loss": 0.6526519656181335,
            "constant_loss": 2.5856597423553467
        }
    },
    "inverse_cubed_mirrored": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.200",
            "csv_loss": 0.03754603490233421,
            "constant_loss": 2.9996697902679443
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.200",
            "csv_loss": 0.03754603490233421,
            "constant_loss": 2.9996697902679443
        }
    },
    "cubic_root_mirrored": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.250",
            "csv_loss": 2.47979474067688,
            "constant_loss": 2.5389654636383057
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.250",
            "csv_loss": 2.47979474067688,
            "constant_loss": 2.5389654636383057
        }
    },
    "inverse_mirrored": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.160",
            "csv_loss": 0.032234687358140945,
            "constant_loss": 2.942859649658203
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.160",
            "csv_loss": 0.032234687358140945,
            "constant_loss": 2.942859649658203
        }
    },
    "inverse_cubed": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.200",
            "csv_loss": 1.4481265544891357,
            "constant_loss": 2.557009696960449
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.200",
            "csv_loss": 1.4481265544891357,
            "constant_loss": 2.557009696960449
        }
    },
    "cubic_root": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.250",
            "csv_loss": 1.0203485488891602,
            "constant_loss": 2.609311819076538
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.250",
            "csv_loss": 1.0203485488891602,
            "constant_loss": 2.609311819076538
        }
    },
    "square_root_mirrored": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.160",
            "csv_loss": 2.4792795181274414,
            "constant_loss": 2.5693373680114746
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.160",
            "csv_loss": 2.4792795181274414,
            "constant_loss": 2.5693373680114746
        }
    },
    "linear": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/linear/lr_0.125",
            "csv_loss": 0.2883843183517456,
            "constant_loss": 3.05281400680542
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/linear/lr_0.080",
            "csv_loss": 0.28867313265800476,
            "constant_loss": 2.9585072994232178
        }
    },
    "constant": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/constant/lr_0.160",
            "csv_loss": 2.608083486557007,
            "constant_loss": 2.606748342514038
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/constant/lr_0.160",
            "csv_loss": 2.608083486557007,
            "constant_loss": 2.606748342514038
        }
    },
    "cubic": {
        "csv": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic/lr_0.160",
            "csv_loss": 0.04065453261137009,
            "constant_loss": 3.101959228515625
        },
        "constant": {
            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic/lr_0.300",
            "csv_loss": 0.049555618315935135,
            "constant_loss": 3.0432639122009277
        }
    }
}
39 analysis/best_time_weighting_learning_rate_sweep.txt Normal file
File diff suppressed because one or more lines are too long
179 analysis/find_best_base_loss_learning_rate_sweep.py Normal file
@@ -0,0 +1,179 @@
import os
import csv
import torch
from torchdiffeq import odeint
import pandas as pd

from PendulumController import PendulumController
from PendulumDynamics import PendulumDynamics
from initial_conditions import initial_conditions

# Device and initial conditions setup
device = torch.device("cpu")
state_0 = torch.tensor(initial_conditions, dtype=torch.float32, device=device)

# Constants (same as in your training code)
m = 10.0
g = 9.81
R = 1.0
t_start, t_end, t_points = 0, 10, 1000
t_span = torch.linspace(t_start, t_end, t_points, device=device)

# Base path containing the base loss function directories
base_path = "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep"


def compute_constant_loss(controller_path):
    """
    Loads a controller from the given path, sets up the dynamics using the constant weighting function,
    simulates the system, and returns the computed loss.
    """
    controller = PendulumController().to(device)
    controller.load_state_dict(torch.load(controller_path, map_location=device))
    pendulum_dynamics = PendulumDynamics(controller, m, R, g).to(device)
    with torch.no_grad():
        state_traj = odeint(pendulum_dynamics, state_0, t_span, method='rk4')
        theta = state_traj[:, :, 0]
        desired_theta = state_traj[:, :, 3]
        loss = torch.mean((theta - desired_theta) ** 2)
    return loss.item()


# Dictionary to store the best results for each base loss function.
# Each key maps to a dictionary with keys "csv" and "constant".
# Each candidate dictionary contains:
#   "path": best lr directory path,
#   "csv_loss": loss from the training log,
#   "constant_loss": loss computed via the constant method.
best_results = {}

# Process each base loss function directory
for function_name in os.listdir(base_path):
    function_path = os.path.join(base_path, function_name)
    if not os.path.isdir(function_path):
        continue

    print(f"Processing base loss function: {function_name}")

    # Initialize best candidate variables for CSV-based best
    best_csv_csv_loss = float('inf')
    best_csv_constant_loss = float('inf')
    best_csv_path = None

    # Initialize best candidate variables for constant-based best
    best_constant_constant_loss = float('inf')
    best_constant_csv_loss = float('inf')
    best_constant_path = None

    # Loop through each learning rate directory (directories named "lr_*")
    for lr_dir in os.listdir(function_path):
        if not lr_dir.startswith("lr_"):
            continue
        lr_path = os.path.join(function_path, lr_dir)
        if not os.path.isdir(lr_path):
            continue

        # --- Compute CSV loss candidate ---
        current_csv_loss = None
        csv_file = os.path.join(lr_path, "training_log.csv")
        if os.path.exists(csv_file):
            try:
                with open(csv_file, 'r') as f:
                    reader = csv.DictReader(f)
                    losses = []
                    for row in reader:
                        try:
                            loss_value = float(row['Loss'])
                            losses.append(loss_value)
                        except ValueError:
                            continue
                    if losses:
                        current_csv_loss = min(losses)
            except Exception as e:
                print(f"Error reading CSV {csv_file}: {e}")

        # --- Compute constant loss candidate ---
        current_constant_loss = None
        controllers_dir = os.path.join(lr_path, "controllers")
        controller_file = os.path.join(controllers_dir, "controller_200.pth")
        if os.path.exists(controller_file):
            try:
                current_constant_loss = compute_constant_loss(controller_file)
            except Exception as e:
                print(f"Error computing constant loss for {controller_file}: {e}")

        # Update best CSV candidate (based on CSV loss)
        if current_csv_loss is not None:
            csv_const_loss_val = current_constant_loss if current_constant_loss is not None else float('inf')
            if current_csv_loss < best_csv_csv_loss:
                best_csv_csv_loss = current_csv_loss
                best_csv_constant_loss = csv_const_loss_val
                best_csv_path = lr_path

        # Update best Constant candidate (based on constant loss)
        if current_constant_loss is not None:
            csv_loss_val = current_csv_loss if current_csv_loss is not None else float('inf')
            if current_constant_loss < best_constant_constant_loss:
                best_constant_constant_loss = current_constant_loss
                best_constant_csv_loss = csv_loss_val
                best_constant_path = lr_path

    best_results[function_name] = {
        "csv": {"path": best_csv_path, "csv_loss": best_csv_csv_loss, "constant_loss": best_csv_constant_loss},
        "constant": {"path": best_constant_path, "csv_loss": best_constant_csv_loss, "constant_loss": best_constant_constant_loss},
    }

    print(f"Finished {function_name}:")
    print(f"  Best CSV candidate - Path: {best_csv_path}, CSV Loss: {best_csv_csv_loss}, Constant Loss: {best_csv_constant_loss}")
    print(f"  Best Constant candidate - Path: {best_constant_path}, CSV Loss: {best_constant_csv_loss}, Constant Loss: {best_constant_constant_loss}")

print("Final best results:")
print(best_results)

# Build summary table rows using pandas.
# Extract only the learning rate (e.g., from "lr_0.250" get "0.250") rather than the full path.
def extract_lr(path):
    if path is None:
        return "N/A"
    base = os.path.basename(path)
    if base.startswith("lr_"):
        return base[3:]
    return base

table_rows = []
for function_name, results in best_results.items():
    csv_info = results.get("csv", {})
    constant_info = results.get("constant", {})

    csv_lr = extract_lr(csv_info.get("path"))
    constant_lr = extract_lr(constant_info.get("path"))

    table_rows.append({
        "Function Name": function_name,
        "Candidate": "CSV",
        "Learning Rate": csv_lr,
        "CSV Loss": csv_info.get("csv_loss", float('inf')),
        "Constant Loss": csv_info.get("constant_loss", float('inf'))
    })
    table_rows.append({
        "Function Name": "",  # Leave blank for the second row
        "Candidate": "Constant",
        "Learning Rate": constant_lr,
        "CSV Loss": constant_info.get("csv_loss", float('inf')),
        "Constant Loss": constant_info.get("constant_loss", float('inf'))
    })

df = pd.DataFrame(table_rows, columns=["Function Name", "Candidate", "Learning Rate", "CSV Loss", "Constant Loss"])

# Get the table as a formatted string
table_str = df.to_string(index=False)

print("\n" + table_str)

# Write the dictionary and table to a file
output_file = "best_base_loss_learning_rate_sweep.txt"
with open(output_file, "w") as f:
    f.write("Final best results (dictionary):\n")
    f.write(str(best_results) + "\n\n")
    f.write("Summary Table:\n")
    f.write(table_str)

print(f"\nResults have been written to {output_file}")
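For orientation, this is the sweep directory layout the script above assumes, inferred from its path handling (directory and file names other than training_log.csv and controller_200.pth are illustrative): one directory per base loss function, one lr_<rate> directory per swept learning rate, each holding the training log read by the CSV criterion and the epoch-200 checkpoint re-simulated by the constant-loss criterion.

    base_loss_learning_rate_sweep/
        one/
            lr_0.100/
                training_log.csv          # must contain a 'Loss' column
                controllers/
                    controller_200.pth    # checkpoint loaded by compute_constant_loss
            lr_0.200/
            ...
        one_half/
        ...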
179 analysis/find_best_time_weighting_learning_rate_sweep.py Normal file
@@ -0,0 +1,179 @@
import os
import csv
import torch
from torchdiffeq import odeint
import pandas as pd

from PendulumController import PendulumController
from PendulumDynamics import PendulumDynamics
from initial_conditions import initial_conditions

# Device and initial conditions setup
device = torch.device("cpu")
state_0 = torch.tensor(initial_conditions, dtype=torch.float32, device=device)

# Constants (same as in your training code)
m = 10.0
g = 9.81
R = 1.0
t_start, t_end, t_points = 0, 10, 1000
t_span = torch.linspace(t_start, t_end, t_points, device=device)

# Base path containing the time_weighting_function directories
base_path = "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep"


def compute_constant_loss(controller_path):
    """
    Loads a controller from the given path, sets up the dynamics using the constant weighting function,
    simulates the system, and returns the computed loss.
    """
    controller = PendulumController().to(device)
    controller.load_state_dict(torch.load(controller_path, map_location=device))
    pendulum_dynamics = PendulumDynamics(controller, m, R, g).to(device)
    with torch.no_grad():
        state_traj = odeint(pendulum_dynamics, state_0, t_span, method='rk4')
        theta = state_traj[:, :, 0]
        desired_theta = state_traj[:, :, 3]
        loss = torch.mean((theta - desired_theta) ** 2)
    return loss.item()


# Dictionary to store the best results for each time weighting function.
# Each key maps to a dictionary with keys "csv" and "constant".
# Each candidate dictionary contains:
#   "path": best lr directory path,
#   "csv_loss": loss from the training log,
#   "constant_loss": loss computed via the constant method.
best_results = {}

# Process each time weighting function directory
for function_name in os.listdir(base_path):
    function_path = os.path.join(base_path, function_name)
    if not os.path.isdir(function_path):
        continue

    print(f"Processing weighting function: {function_name}")

    # Initialize best candidate variables for CSV-based best
    best_csv_csv_loss = float('inf')
    best_csv_constant_loss = float('inf')
    best_csv_path = None

    # Initialize best candidate variables for constant-based best
    best_constant_constant_loss = float('inf')
    best_constant_csv_loss = float('inf')
    best_constant_path = None

    # Loop through each learning rate directory (directories named "lr_*")
    for lr_dir in os.listdir(function_path):
        if not lr_dir.startswith("lr_"):
            continue
        lr_path = os.path.join(function_path, lr_dir)
        if not os.path.isdir(lr_path):
            continue

        # --- Compute CSV loss candidate ---
        current_csv_loss = None
        csv_file = os.path.join(lr_path, "training_log.csv")
        if os.path.exists(csv_file):
            try:
                with open(csv_file, 'r') as f:
                    reader = csv.DictReader(f)
                    losses = []
                    for row in reader:
                        try:
                            loss_value = float(row['Loss'])
                            losses.append(loss_value)
                        except ValueError:
                            continue
                    if losses:
                        current_csv_loss = min(losses)
            except Exception as e:
                print(f"Error reading CSV {csv_file}: {e}")

        # --- Compute constant loss candidate ---
        current_constant_loss = None
        controllers_dir = os.path.join(lr_path, "controllers")
        controller_file = os.path.join(controllers_dir, "controller_200.pth")
        if os.path.exists(controller_file):
            try:
                current_constant_loss = compute_constant_loss(controller_file)
            except Exception as e:
                print(f"Error computing constant loss for {controller_file}: {e}")

        # Update best CSV candidate (based on CSV loss)
        if current_csv_loss is not None:
            csv_const_loss_val = current_constant_loss if current_constant_loss is not None else float('inf')
            if current_csv_loss < best_csv_csv_loss:
                best_csv_csv_loss = current_csv_loss
                best_csv_constant_loss = csv_const_loss_val
                best_csv_path = lr_path

        # Update best Constant candidate (based on constant loss)
        if current_constant_loss is not None:
            csv_loss_val = current_csv_loss if current_csv_loss is not None else float('inf')
            if current_constant_loss < best_constant_constant_loss:
                best_constant_constant_loss = current_constant_loss
                best_constant_csv_loss = csv_loss_val
                best_constant_path = lr_path

    best_results[function_name] = {
        "csv": {"path": best_csv_path, "csv_loss": best_csv_csv_loss, "constant_loss": best_csv_constant_loss},
        "constant": {"path": best_constant_path, "csv_loss": best_constant_csv_loss, "constant_loss": best_constant_constant_loss},
    }

    print(f"Finished {function_name}:")
    print(f"  Best CSV candidate - Path: {best_csv_path}, CSV Loss: {best_csv_csv_loss}, Constant Loss: {best_csv_constant_loss}")
    print(f"  Best Constant candidate - Path: {best_constant_path}, CSV Loss: {best_constant_csv_loss}, Constant Loss: {best_constant_constant_loss}")

print("Final best results:")
print(best_results)

# Build summary table rows using pandas.
# Extract only the learning rate (e.g., from "lr_0.250" get "0.250") rather than the full path.
def extract_lr(path):
    if path is None:
        return "N/A"
    base = os.path.basename(path)
    if base.startswith("lr_"):
        return base[3:]
    return base

table_rows = []
for function_name, results in best_results.items():
    csv_info = results.get("csv", {})
    constant_info = results.get("constant", {})

    csv_lr = extract_lr(csv_info.get("path"))
    constant_lr = extract_lr(constant_info.get("path"))

    table_rows.append({
        "Function Name": function_name,
        "Candidate": "CSV",
        "Learning Rate": csv_lr,
        "CSV Loss": csv_info.get("csv_loss", float('inf')),
        "Constant Loss": csv_info.get("constant_loss", float('inf'))
    })
    table_rows.append({
        "Function Name": "",  # Leave blank for the second row
        "Candidate": "Constant",
        "Learning Rate": constant_lr,
        "CSV Loss": constant_info.get("csv_loss", float('inf')),
        "Constant Loss": constant_info.get("constant_loss", float('inf'))
    })

df = pd.DataFrame(table_rows, columns=["Function Name", "Candidate", "Learning Rate", "CSV Loss", "Constant Loss"])

# Get the table as a formatted string
table_str = df.to_string(index=False)

print("\n" + table_str)

# Write the dictionary and table to a file
output_file = "best_time_weighting_learning_rate_sweep.txt"
with open(output_file, "w") as f:
    f.write("Final best results (dictionary):\n")
    f.write(str(best_results) + "\n\n")
    f.write("Summary Table:\n")
    f.write(table_str)

print(f"\nResults have been written to {output_file}")
26 analysis/initial_conditions.py Normal file
@@ -0,0 +1,26 @@
from torch import pi

initial_conditions = [
    [1/6 * pi, 0.0, 0.0, 0.0],
    [-1/6 * pi, 0.0, 0.0, 0.0],
    [2/3 * pi, 0.0, 0.0, 0.0],
    [-2/3 * pi, 0.0, 0.0, 0.0],
    [0.0, 1/3 * pi, 0.0, 0.0],
    [0.0, -1/3 * pi, 0.0, 0.0],
    [0.0, 2 * pi, 0.0, 0.0],
    [0.0, -2 * pi, 0.0, 0.0],
    [0.0, 0.0, 0.0, 2 * pi],
    [0.0, 0.0, 0.0, -2 * pi],
    [0.0, 0.0, 0.0, 1/2 * pi],
    [0.0, 0.0, 0.0, -1/2 * pi],
    [0.0, 0.0, 0.0, 1/3 * pi],
    [0.0, 0.0, 0.0, -1/3 * pi],
    [1/4 * pi, 1 * pi, 0.0, 0.0],
    [-1/4 * pi, -1 * pi, 0.0, 0.0],
    [1/2 * pi, -1 * pi, 0.0, 1/3 * pi],
    [-1/2 * pi, 1 * pi, 0.0, -1/3 * pi],
    [1/4 * pi, 1 * pi, 0.0, 2 * pi],
    [-1/4 * pi, -1 * pi, 0.0, 2 * pi],
    [1/2 * pi, -1 * pi, 0.0, 4 * pi],
    [-1/2 * pi, 1 * pi, 0.0, -4 * pi],
]
@@ -22,7 +22,7 @@ def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent:
     denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
     return min_val + (1 - min_val) * (numerator / denominator)

-# Specific loss functions with exponents 1/4, 1/2, 1, 2, 4
+# Existing loss functions
 def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
     return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val)

@@ -38,13 +38,31 @@ def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float
 def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
     return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val)

+# New loss functions
+def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
+    return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val)
+
+def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
+    return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val)
+
+def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
+    return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val)
+
+def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
+    return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val)
+
 # Dictionary mapping function names to a tuple of (exponent, function)
 base_loss_functions = {
-    'one_fourth': (1/4, one_fourth_loss),
-    'one_half': (1/2, one_half_loss),
-    'one': (1, abs_loss),
-    'two': (2, square_loss),
-    'four': (4, fourth_loss)
+    # 'one_fourth': (1/4, one_fourth_loss),
+    # 'one_half': (1/2, one_half_loss),
+    # 'one': (1, abs_loss),
+    # 'two': (2, square_loss),
+    # 'four': (4, fourth_loss),
+    # New entries:
+    'one_third': (1/3, one_third_loss),
+    'one_fifth': (1/5, one_fifth_loss),
+    'three': (3, three_loss),
+    'five': (5, five_loss),
 }

 if __name__ == "__main__":
@@ -54,7 +72,7 @@ if __name__ == "__main__":

     plt.figure(figsize=(10, 6))
     for name, (exponent, loss_fn) in base_loss_functions.items():
-        # Compute loss for each error value (|theta - 0| = error)
+        # Compute loss for each error value
         losses = loss_fn(errors, desired, min_val=0.01)
         plt.plot(errors.numpy(), losses.numpy(), label=f"{name} (exp={exponent})")