diff --git a/analysis/base_loss/generate_data.py b/analysis/base_loss/generate_data.py
index 1bcb9d5..2fe6edc 100644
--- a/analysis/base_loss/generate_data.py
+++ b/analysis/base_loss/generate_data.py
@@ -42,7 +42,7 @@ for condition_name, initial_condition in analysis_conditions.items():
     for loss_function in loss_functions:
         print(f" Processing loss function: {loss_function}")
         # Build directory for controller files
-        directory = f"/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss/{loss_function}/controllers"
+        directory = f"/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss/{loss_function}/controllers"
         controllers = get_controller_files(directory, epoch_range, epoch_step)
         tasks = [(c, initial_condition, directory, dt, num_steps) for c in controllers]

diff --git a/analysis/base_loss_learning_rate_sweep/best_base_loss_learning_rate_sweep.py b/analysis/base_loss_learning_rate_sweep/best_base_loss_learning_rate_sweep.py
index 5994afa..34f492f 100644
--- a/analysis/base_loss_learning_rate_sweep/best_base_loss_learning_rate_sweep.py
+++ b/analysis/base_loss_learning_rate_sweep/best_base_loss_learning_rate_sweep.py
@@ -1 +1 @@
-data = {'one': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one/lr_0.100', 'csv_loss': 0.07867201417684555, 'constant_loss': 2.5390186309814453}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one/lr_0.100', 'csv_loss': 0.07867201417684555, 'constant_loss': 2.5390186309814453}}, 'one_fourth': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_fourth/lr_0.300', 'csv_loss': 0.08876045793294907, 'constant_loss': 2.5319466590881348}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_fourth/lr_0.250', 'csv_loss': 0.09172269701957703, 'constant_loss': 2.5288496017456055}}, 'four': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/four/lr_0.200', 'csv_loss': 0.1293140947818756, 'constant_loss': 2.9976892471313477}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/four/lr_0.200', 'csv_loss': 0.1293140947818756, 'constant_loss': 2.9976892471313477}}, 'one_fifth': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_fifth/lr_0.250', 'csv_loss': 0.08940213173627853, 'constant_loss': 2.5548017024993896}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_fifth/lr_0.100', 'csv_loss': 0.09396396577358246, 'constant_loss': 2.5306591987609863}}, 'three': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/three/lr_0.100', 'csv_loss': 0.09003690630197525, 'constant_loss': 2.5991272926330566}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/three/lr_0.100', 'csv_loss': 0.09003690630197525, 'constant_loss': 2.5991272926330566}}, 'five': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/five/lr_0.200', 'csv_loss': 0.2009778916835785, 'constant_loss': 3.64280366897583}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/five/lr_0.125', 'csv_loss': 0.20845991373062134, 'constant_loss': 3.589925527572632}}, 'one_third': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_third/lr_0.250', 'csv_loss': 0.0854221060872078, 'constant_loss': 2.506823778152466}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_third/lr_0.250', 'csv_loss': 0.0854221060872078, 'constant_loss': 2.506823778152466}}, 'two': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/two/lr_0.100', 'csv_loss': 0.07678339630365372, 'constant_loss': 2.5585412979125977}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/two/lr_0.100', 'csv_loss': 0.07678339630365372, 'constant_loss': 2.5585412979125977}}, 'one_half': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_half/lr_0.200', 'csv_loss': 0.08620432019233704, 'constant_loss': 2.541421890258789}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/base_loss_learning_rate_sweep/one_half/lr_0.200', 'csv_loss': 0.08620432019233704, 'constant_loss': 2.541421890258789}}}
+data = {'one': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/one/lr_0.100', 'csv_loss': 0.07867201417684555, 'constant_loss': 2.5390186309814453}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/one/lr_0.100', 'csv_loss': 0.07867201417684555, 'constant_loss': 2.5390186309814453}}, 'one_fourth': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/one_fourth/lr_0.300', 'csv_loss': 0.08876045793294907, 'constant_loss': 2.5319466590881348}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/one_fourth/lr_0.250', 'csv_loss': 0.09172269701957703, 'constant_loss': 2.5288496017456055}}, 'four': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/four/lr_0.200', 'csv_loss': 0.1293140947818756, 'constant_loss': 2.9976892471313477}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/four/lr_0.200', 'csv_loss': 0.1293140947818756, 'constant_loss': 2.9976892471313477}}, 'one_fifth': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/one_fifth/lr_0.250', 'csv_loss': 0.08940213173627853, 'constant_loss': 2.5548017024993896}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/one_fifth/lr_0.100', 'csv_loss': 0.09396396577358246, 'constant_loss': 2.5306591987609863}}, 'three': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/three/lr_0.100', 'csv_loss': 0.09003690630197525, 'constant_loss': 2.5991272926330566}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/three/lr_0.100', 'csv_loss': 0.09003690630197525, 'constant_loss': 2.5991272926330566}}, 'five': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/five/lr_0.200', 'csv_loss': 0.2009778916835785, 'constant_loss': 3.64280366897583}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/five/lr_0.125', 'csv_loss': 0.20845991373062134, 'constant_loss': 3.589925527572632}}, 'one_third': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/one_third/lr_0.250', 'csv_loss': 0.0854221060872078, 'constant_loss': 2.506823778152466}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/one_third/lr_0.250', 'csv_loss': 0.0854221060872078, 'constant_loss': 2.506823778152466}}, 'two': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/two/lr_0.100', 'csv_loss': 0.07678339630365372, 'constant_loss': 2.5585412979125977}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/two/lr_0.100', 'csv_loss': 0.07678339630365372, 'constant_loss': 2.5585412979125977}}, 'one_half': {'csv': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/one_half/lr_0.200', 'csv_loss': 0.08620432019233704, 'constant_loss': 2.541421890258789}, 'constant': {'path': '/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/base_loss_learning_rate_sweep/one_half/lr_0.200', 'csv_loss': 0.08620432019233704, 'constant_loss': 2.541421890258789}}}
diff --git a/analysis/misc/validator.py b/analysis/misc/validator.py
index 5ce2e48..8532b3c 100644
--- a/analysis/misc/validator.py
+++ b/analysis/misc/validator.py
@@ -14,7 +14,7 @@ from PendulumController import PendulumController
 # List of controller file names to validate.
 # Replace these paths with your actual controller file paths.
 controller_file_names = [
-    "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting/constant/controllers/controller_1000.pth",
+    "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting/constant/controllers/controller_1000.pth",
 ]

 # Constants for simulation
diff --git a/analysis/time_weighting/generate_data.py b/analysis/time_weighting/generate_data.py
index 0bce42a..8fddbbd 100644
--- a/analysis/time_weighting/generate_data.py
+++ b/analysis/time_weighting/generate_data.py
@@ -48,7 +48,7 @@ for condition_name, initial_condition in analysis_conditions.items():
     for loss_function in loss_functions:
         print(f" Processing loss function: {loss_function}")
         # Build directory for controller files
-        directory = f"/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting/{loss_function}/controllers"
+        directory = f"/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting/{loss_function}/controllers"
         controllers = get_controller_files(directory, epoch_range, epoch_step)
         tasks = [(c, initial_condition, directory, dt, num_steps) for c in controllers]

diff --git a/analysis/time_weighting_learning_rate_sweep/best_time_weighting_learning_rate_sweep.py b/analysis/time_weighting_learning_rate_sweep/best_time_weighting_learning_rate_sweep.py
index d143ca0..c65985c 100644
--- a/analysis/time_weighting_learning_rate_sweep/best_time_weighting_learning_rate_sweep.py
+++ b/analysis/time_weighting_learning_rate_sweep/best_time_weighting_learning_rate_sweep.py
@@ -1,204 +1,204 @@
 data = {
     "inverse": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse/lr_0.250",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/inverse/lr_0.250",
             "csv_loss": 0.531498372554779,
             "constant_loss": 2.5503664016723633
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse/lr_0.250",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/inverse/lr_0.250",
             "csv_loss": 0.531498372554779,
             "constant_loss": 2.5503664016723633
         }
     },
     "linear_mirrored": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.125",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.125",
             "csv_loss": 2.3770766258239746,
             "constant_loss": 2.552375078201294
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.125",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.125",
             "csv_loss": 2.3770766258239746,
             "constant_loss": 2.552375078201294
         }
     },
     "inverse_squared_mirrored": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.160",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.160",
             "csv_loss": 0.033845994621515274,
             "constant_loss": 2.7603342533111572
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.160",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.160",
             "csv_loss": 0.033845994621515274,
             "constant_loss": 2.7603342533111572
         }
     },
     "cubic_mirrored": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.080",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.080",
             "csv_loss": 2.0769901275634766,
             "constant_loss": 2.563471555709839
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.080",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.080",
             "csv_loss": 2.0769901275634766,
             "constant_loss": 2.563471555709839
         }
     },
     "quadratic": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/quadratic/lr_0.200",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/quadratic/lr_0.200",
             "csv_loss": 0.06192325800657272,
             "constant_loss": 3.025479316711426
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/quadratic/lr_0.080",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/quadratic/lr_0.080",
             "csv_loss": 0.14040324091911316,
             "constant_loss": 2.982274055480957
         }
     },
     "inverse_squared": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.200",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/inverse_squared/lr_0.200",
             "csv_loss": 1.1794205904006958,
             "constant_loss": 2.5662319660186768
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.200",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/inverse_squared/lr_0.200",
             "csv_loss": 1.1794205904006958,
             "constant_loss": 2.5662319660186768
         }
     },
     "quadratic_mirrored": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.125",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.125",
             "csv_loss": 2.218207836151123,
             "constant_loss": 2.555176258087158
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.125",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.125",
             "csv_loss": 2.218207836151123,
             "constant_loss": 2.555176258087158
         }
     },
     "square_root": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/square_root/lr_0.250",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/square_root/lr_0.250",
             "csv_loss": 0.6526519656181335,
             "constant_loss": 2.5856597423553467
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/square_root/lr_0.250",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/square_root/lr_0.250",
             "csv_loss": 0.6526519656181335,
             "constant_loss": 2.5856597423553467
         }
     },
     "inverse_cubed_mirrored": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.200",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.200",
             "csv_loss": 0.03754603490233421,
             "constant_loss": 2.9996697902679443
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.200",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.200",
             "csv_loss": 0.03754603490233421,
             "constant_loss": 2.9996697902679443
         }
     },
     "cubic_root_mirrored": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.250",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.250",
             "csv_loss": 2.47979474067688,
             "constant_loss": 2.5389654636383057
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.250",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.250",
             "csv_loss": 2.47979474067688,
             "constant_loss": 2.5389654636383057
         }
     },
     "inverse_mirrored": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.160",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.160",
             "csv_loss": 0.032234687358140945,
             "constant_loss": 2.942859649658203
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.160",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.160",
             "csv_loss": 0.032234687358140945,
             "constant_loss": 2.942859649658203
         }
     },
     "inverse_cubed": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.200",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.200",
             "csv_loss": 1.4481265544891357,
             "constant_loss": 2.557009696960449
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.200",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.200",
             "csv_loss": 1.4481265544891357,
             "constant_loss": 2.557009696960449
         }
     },
     "cubic_root": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.250",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/cubic_root/lr_0.250",
             "csv_loss": 1.0203485488891602,
             "constant_loss": 2.609311819076538
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.250",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/cubic_root/lr_0.250",
             "csv_loss": 1.0203485488891602,
             "constant_loss": 2.609311819076538
         }
     },
     "square_root_mirrored": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.160",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.160",
             "csv_loss": 2.4792795181274414,
             "constant_loss": 2.5693373680114746
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.160",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.160",
             "csv_loss": 2.4792795181274414,
             "constant_loss": 2.5693373680114746
         }
     },
     "linear": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/linear/lr_0.125",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/linear/lr_0.125",
             "csv_loss": 0.2883843183517456,
             "constant_loss": 3.05281400680542
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/linear/lr_0.080",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/linear/lr_0.080",
             "csv_loss": 0.28867313265800476,
             "constant_loss": 2.9585072994232178
         }
     },
     "constant": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/constant/lr_0.160",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/constant/lr_0.160",
             "csv_loss": 2.608083486557007,
             "constant_loss": 2.606748342514038
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/constant/lr_0.160",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/constant/lr_0.160",
             "csv_loss": 2.608083486557007,
             "constant_loss": 2.606748342514038
         }
     },
     "cubic": {
         "csv": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic/lr_0.160",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/cubic/lr_0.160",
             "csv_loss": 0.04065453261137009,
             "constant_loss": 3.101959228515625
         },
         "constant": {
-            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/time_weighting_learning_rate_sweep/cubic/lr_0.300",
+            "path": "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/training_files/time_weighting_learning_rate_sweep/cubic/lr_0.300",
             "csv_loss": 0.049555618315935135,
             "constant_loss": 3.0432639122009277
         }
     }
 }
diff --git a/training/base_loss/five/training_config.txt b/training/base_loss/five/training_config.txt
deleted file mode 100644
index af82b47..0000000
--- a/training/base_loss/five/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function Name: five
-Loss Function Exponent: 5
-
-Current Loss Function (wrapper) Source Code:
-    def current_loss_fn(state_traj):
-        theta = state_traj[:, :, 0] # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # [batch_size, t_points]
-        return torch.mean(loss_fn(theta, desired_theta))
-
-Specific Loss Function Source Code:
-def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-        loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                         / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss/five/training_log.csv b/training/base_loss/five/training_log.csv
deleted file mode 100644
index 7156501..0000000
--- a/training/base_loss/five/training_log.csv
+++ /dev/null
@@ -1,1002 +0,0 @@
-Epoch,Loss
-0,9022.2587890625
-1,97.54940795898438
-2,42.66001892089844
[... 998 further rows of per-epoch loss values elided; the final row is 1000,0.18116219341754913 ...]
diff --git a/training/base_loss/four/training_config.txt b/training/base_loss/four/training_config.txt
deleted file mode 100644
index 127f0fa..0000000
--- a/training/base_loss/four/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function Name: four
-Loss Function Exponent: 4
-
-Current Loss Function (wrapper) Source Code:
-    def current_loss_fn(state_traj):
-        theta = state_traj[:, :, 0] # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # [batch_size, t_points]
-        return torch.mean(loss_fn(theta, desired_theta))
-
-Specific Loss Function Source Code:
-def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-        loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                         / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss/four/training_log.csv b/training/base_loss/four/training_log.csv
deleted file mode 100644
index 68c374b..0000000
--- a/training/base_loss/four/training_log.csv
+++ /dev/null
@@ -1,1002 +0,0 @@
-Epoch,Loss
-0,1117.280029296875
-1,45.877498626708984
-2,31.579391479492188
[... 998 further rows of per-epoch loss values elided ...]
-[... 968 rows (epochs 32 through 999) elided from this deleted log: the loss decreases monotonically from 0.17820 (epoch 32) to 0.12200 (epoch 999) ...] -1000,0.1219962015748024
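All of the training_config.txt files removed in this diff embed the same normalized_loss, whose docstring claims loss = min_val at error = 0 and loss = 1 at error = 2π for every exponent. As a quick standalone check (the function body is copied from the configs; the exponent list mirrors the sweep names one_fifth through five; this snippet is illustrative and not part of the diff):

    import math
    import torch

    def normalized_loss(theta, desired_theta, exponent, min_val=0.01, delta=1):
        # Shifted, normalized |theta - desired_theta| loss from the deleted configs.
        error = torch.abs(theta - desired_theta)
        numerator = (error + delta) ** exponent - delta ** exponent
        denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
        return min_val + (1 - min_val) * (numerator / denominator)

    desired = torch.zeros(1)
    for exponent in (1/5, 1/4, 1/3, 1/2, 1.0, 2.0, 3.0, 4.0, 5.0):
        at_zero = normalized_loss(desired, desired, exponent)                  # error = 0
        at_two_pi = normalized_loss(desired + 2 * math.pi, desired, exponent)  # error = 2π
        assert torch.allclose(at_zero, torch.tensor([0.01]))   # floor is min_val
        assert torch.allclose(at_two_pi, torch.tensor([1.0]))  # caps at 1 on [0, 2π]

The delta shift is what keeps the gradient finite at zero error for exponents below 1: d(error + delta)^e/d(error) at error = 0 is e * delta^(e-1), which is finite for delta > 0.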
diff --git a/training/base_loss/one/training_config.txt b/training/base_loss/one/training_config.txt deleted file mode 100644 index 20fc62c..0000000 --- a/training/base_loss/one/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0.0001 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def current_loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(loss_fn(theta, desired_theta)) - -Specific Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss/one/training_log.csv b/training/base_loss/one/training_log.csv deleted file mode 100644 index 619d803..0000000 --- a/training/base_loss/one/training_log.csv +++ /dev/null @@ -1,1002 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,3.7583343982696533 -2,1.7503284215927124 -3,1.0627415180206299 -4,0.8644859790802002 -5,0.7661573886871338 -6,0.8408671021461487 -7,0.8452070951461792 -8,0.8243302702903748 -9,0.7488458752632141 -10,0.6945880651473999
-[... 989 rows (epochs 11 through 999) elided from this deleted log: the loss falls below 0.1 by epoch 44 and thereafter fluctuates between roughly 0.080 and 0.098, ending at 0.0827 (epoch 999) ...] -1000,0.08285852521657944
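For context on how these configs consume a rollout: the current_loss_fn wrapper indexes a trajectory tensor whose last dimension stacks the state as [theta, omega, alpha, desired_theta], matching the training-case layout listed in each config. A minimal sketch, with shapes inferred from that indexing; passing loss_fn as an explicit argument and using plain absolute error as a stand-in are adaptations, since the configs capture loss_fn by closure:

    import torch

    def current_loss_fn(state_traj, loss_fn):
        # state_traj: [batch_size, t_points, 4]; channel 0 holds theta and
        # channel 3 holds desired_theta, per the training-case layout.
        theta = state_traj[:, :, 0]          # [batch_size, t_points]
        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
        return torch.mean(loss_fn(theta, desired_theta))

    # Dummy rollout: 22 training cases x 1000 time points x 4 state channels.
    state_traj = torch.randn(22, 1000, 4)
    print(current_loss_fn(state_traj, lambda t, d: torch.abs(t - d)).item())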
diff --git a/training/base_loss/one_fifth/training_config.txt b/training/base_loss/one_fifth/training_config.txt deleted file mode 100644 index 90001c3..0000000 --- a/training/base_loss/one_fifth/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0.0001 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def current_loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(loss_fn(theta, desired_theta)) - -Specific Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss/one_fifth/training_log.csv b/training/base_loss/one_fifth/training_log.csv deleted file mode 100644 index 5f420ee..0000000 --- a/training/base_loss/one_fifth/training_log.csv +++ /dev/null @@ -1,1002 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,1.7681154012680054 -2,1.4298176765441895 -3,1.2166063785552979 -4,1.2256532907485962 -5,1.16298508644104 -6,1.3871614933013916 -7,1.0644876956939697 -8,0.9478405714035034 -9,0.8455779552459717 -10,0.7632237076759338
-[... 489 rows (epochs 11 through 499) elided from this deleted log: the loss descends to roughly 0.10-0.12 by epoch 100 and thereafter fluctuates between roughly 0.096 and 0.143, reaching 0.1079 at epoch 499 ...]
-500,0.12347108125686646 -501,0.11917812377214432 -502,0.10556726902723312 -503,0.12057266384363174 -504,0.12520325183868408 -505,0.11271526664495468 -506,0.1147349551320076 -507,0.12164462357759476 -508,0.11419190466403961 -509,0.10435011237859726 -510,0.12276920676231384 -511,0.1251436024904251 -512,0.10370177030563354 -513,0.1193743646144867 -514,0.13520768284797668 -515,0.1344914436340332 -516,0.11992914229631424 -517,0.10895554721355438 -518,0.12242384254932404 -519,0.12779495120048523 -520,0.1128549799323082 -521,0.11388342082500458 -522,0.12208132445812225 -523,0.12385765463113785 -524,0.11178316175937653 -525,0.10182121396064758 -526,0.11738520860671997 -527,0.11474715918302536 -528,0.11340143531560898 -529,0.11341817677021027 -530,0.10590742528438568 -531,0.10658219456672668 -532,0.10745418071746826 -533,0.1085178554058075 -534,0.10483995825052261 -535,0.09850668162107468 -536,0.10228358209133148 -537,0.10093306005001068 -538,0.10710150748491287 -539,0.10093704611063004 -540,0.10145382583141327 -541,0.0979764312505722 -542,0.10811394453048706 -543,0.10644263029098511 -544,0.10001501441001892 -545,0.1011577621102333 -546,0.10229998081922531 -547,0.10169253498315811 -548,0.10239425301551819 -549,0.10195185989141464 -550,0.10037752985954285 -551,0.09714452177286148 -552,0.10495427995920181 -553,0.11363158375024796 -554,0.10199208557605743 -555,0.10084687918424606 -556,0.09827818721532822 -557,0.10476518422365189 -558,0.10218502581119537 -559,0.10450103133916855 -560,0.10563181340694427 -561,0.09785890579223633 -562,0.10191027820110321 -563,0.09742950648069382 -564,0.10354942828416824 -565,0.0998898446559906 -566,0.11532318592071533 -567,0.11527179181575775 -568,0.10529494285583496 -569,0.11264222115278244 -570,0.11388711631298065 -571,0.10254844278097153 -572,0.10733802616596222 -573,0.09955044090747833 -574,0.10929610580205917 -575,0.11267196387052536 -576,0.09945224970579147 -577,0.11717190593481064 -578,0.11825736612081528 -579,0.10799198597669601 -580,0.10343947261571884 -581,0.12312588840723038 -582,0.12241742759943008 -583,0.11246396601200104 -584,0.10679343342781067 -585,0.10792600363492966 -586,0.10630424320697784 -587,0.11774305254220963 -588,0.11457495391368866 -589,0.10476335883140564 -590,0.12781786918640137 -591,0.13643544912338257 -592,0.12797780334949493 -593,0.10863448679447174 -594,0.11561498045921326 -595,0.1282692402601242 -596,0.12107854336500168 -597,0.11443717032670975 -598,0.11524414271116257 -599,0.10491839051246643 -600,0.10726293921470642 -601,0.11055167019367218 -602,0.10634545981884003 -603,0.10725532472133636 -604,0.102683424949646 -605,0.10745459794998169 -606,0.10779622197151184 -607,0.09746576845645905 -608,0.10343392938375473 -609,0.10164997726678848 -610,0.09745815396308899 -611,0.09959588944911957 -612,0.096234530210495 -613,0.10620584338903427 -614,0.10070855170488358 -615,0.10512486845254898 -616,0.10685841739177704 -617,0.09934058785438538 -618,0.10482700169086456 -619,0.09851167351007462 -620,0.09662307798862457 -621,0.09361625462770462 -622,0.10328236222267151 -623,0.1017993614077568 -624,0.10835282504558563 -625,0.10661572217941284 -626,0.11000553518533707 -627,0.11070045083761215 -628,0.09823304414749146 -629,0.11743547022342682 -630,0.12255845963954926 -631,0.11648207902908325 -632,0.10643908381462097 -633,0.11664514243602753 -634,0.12272820621728897 -635,0.10588108003139496 -636,0.10912181437015533 -637,0.12232663482427597 -638,0.1178470253944397 -639,0.10725384205579758 -640,0.10387756675481796 -641,0.10384257137775421 
-642,0.10393457114696503 -643,0.10798701643943787 -644,0.10043938457965851 -645,0.09902577847242355 -646,0.09656290709972382 -647,0.09549687057733536 -648,0.09833167493343353 -649,0.09611694514751434 -650,0.09468962997198105 -651,0.09646325558423996 -652,0.09834106266498566 -653,0.09778310358524323 -654,0.09580712020397186 -655,0.09940046072006226 -656,0.09900201857089996 -657,0.09901642054319382 -658,0.10495612025260925 -659,0.10488848388195038 -660,0.09683644026517868 -661,0.09416773915290833 -662,0.10743764787912369 -663,0.1041630282998085 -664,0.09666381031274796 -665,0.10439963638782501 -666,0.0935836210846901 -667,0.10815706849098206 -668,0.1081090122461319 -669,0.09599379450082779 -670,0.11226796358823776 -671,0.10993467271327972 -672,0.10452978312969208 -673,0.11161437630653381 -674,0.10817728191614151 -675,0.10621917247772217 -676,0.11547134816646576 -677,0.11923021078109741 -678,0.10395205020904541 -679,0.11156448721885681 -680,0.12449127435684204 -681,0.11131196469068527 -682,0.10086753219366074 -683,0.10797720402479172 -684,0.09684623777866364 -685,0.10834112018346786 -686,0.11280648410320282 -687,0.09804549813270569 -688,0.10192413628101349 -689,0.10841450840234756 -690,0.1032341942191124 -691,0.11119984090328217 -692,0.11107739061117172 -693,0.12277498841285706 -694,0.12627847492694855 -695,0.12736661732196808 -696,0.12295598536729813 -697,0.1070999875664711 -698,0.11629697680473328 -699,0.12697675824165344 -700,0.11295024305582047 -701,0.117293581366539 -702,0.12409999966621399 -703,0.11965373158454895 -704,0.12099118530750275 -705,0.11973719298839569 -706,0.11013416945934296 -707,0.10626506805419922 -708,0.10439853370189667 -709,0.11874044686555862 -710,0.11875990778207779 -711,0.10649558156728745 -712,0.10117708891630173 -713,0.10529854148626328 -714,0.10558268427848816 -715,0.10742797702550888 -716,0.10027758032083511 -717,0.10828706622123718 -718,0.11315910518169403 -719,0.10124000161886215 -720,0.10654795169830322 -721,0.11435791850090027 -722,0.10557785630226135 -723,0.0996212288737297 -724,0.12018179893493652 -725,0.12160565704107285 -726,0.10269363969564438 -727,0.11000431329011917 -728,0.1250535100698471 -729,0.12649711966514587 -730,0.11565336585044861 -731,0.09992381930351257 -732,0.12084417045116425 -733,0.12755854427814484 -734,0.11851378530263901 -735,0.11500469595193863 -736,0.11578351259231567 -737,0.11575086414813995 -738,0.107363261282444 -739,0.121256522834301 -740,0.12754927575588226 -741,0.11285869032144547 -742,0.09744521230459213 -743,0.10767453908920288 -744,0.10406304150819778 -745,0.09504128247499466 -746,0.10492037981748581 -747,0.09929099678993225 -748,0.10135835409164429 -749,0.10447198897600174 -750,0.10014794766902924 -751,0.09979674220085144 -752,0.10611603409051895 -753,0.10069583356380463 -754,0.1003209799528122 -755,0.09875154495239258 -756,0.09716857969760895 -757,0.09498795121908188 -758,0.09990454465150833 -759,0.09554244577884674 -760,0.09777913987636566 -761,0.09460170567035675 -762,0.10234352946281433 -763,0.10071305930614471 -764,0.09409092366695404 -765,0.10761135071516037 -766,0.09864792972803116 -767,0.10413960367441177 -768,0.1093083918094635 -769,0.09947701543569565 -770,0.10184892266988754 -771,0.10636734217405319 -772,0.09480264782905579 -773,0.09890339523553848 -774,0.1041516438126564 -775,0.09258048981428146 -776,0.10872296988964081 -777,0.11263980716466904 -778,0.0982654020190239 -779,0.11485680192708969 -780,0.11629246175289154 -781,0.10728563368320465 -782,0.09788841754198074 -783,0.11701402813196182 
-784,0.11654628813266754 -785,0.10141529142856598 -786,0.10330528765916824 -787,0.11146192252635956 -788,0.10894545912742615 -789,0.10917666554450989 -790,0.09980785846710205 -791,0.10407394915819168 -792,0.10621605068445206 -793,0.1008833646774292 -794,0.09638629853725433 -795,0.10479187220335007 -796,0.09984149783849716 -797,0.10325093567371368 -798,0.1019798144698143 -799,0.09485387057065964 -800,0.10633505135774612 -801,0.09774263203144073 -802,0.10531876981258392 -803,0.11017798632383347 -804,0.09994199126958847 -805,0.10373470932245255 -806,0.10508501529693604 -807,0.10204381495714188 -808,0.10369079560041428 -809,0.10429761558771133 -810,0.09620057046413422 -811,0.10604182630777359 -812,0.1026856005191803 -813,0.10481132566928864 -814,0.10177077353000641 -815,0.09248840808868408 -816,0.09460930526256561 -817,0.09834965318441391 -818,0.10232910513877869 -819,0.10060155391693115 -820,0.10058552771806717 -821,0.09135094285011292 -822,0.09528569132089615 -823,0.09854155033826828 -824,0.1029377430677414 -825,0.09349755942821503 -826,0.09484715759754181 -827,0.09873996675014496 -828,0.09422595053911209 -829,0.10162217915058136 -830,0.09590119123458862 -831,0.09991315007209778 -832,0.09330924600362778 -833,0.09277033805847168 -834,0.0964418426156044 -835,0.0930827334523201 -836,0.0904376208782196 -837,0.10153642296791077 -838,0.09506923705339432 -839,0.09607137739658356 -840,0.09753905981779099 -841,0.10466307401657104 -842,0.10140226781368256 -843,0.0968654453754425 -844,0.0936615914106369 -845,0.09481396526098251 -846,0.0923021212220192 -847,0.09449216723442078 -848,0.09513510763645172 -849,0.09448105841875076 -850,0.10052919387817383 -851,0.09871769696474075 -852,0.09622833877801895 -853,0.09473245590925217 -854,0.09861678630113602 -855,0.09541060775518417 -856,0.09568584710359573 -857,0.09291728585958481 -858,0.09675480425357819 -859,0.0905582383275032 -860,0.09884271025657654 -861,0.09225879609584808 -862,0.09667215496301651 -863,0.09019982069730759 -864,0.09617211669683456 -865,0.0909658893942833 -866,0.09891762584447861 -867,0.09082356840372086 -868,0.10445822030305862 -869,0.09680557250976562 -870,0.10685566812753677 -871,0.11046458780765533 -872,0.10111698508262634 -873,0.10179569572210312 -874,0.09878018498420715 -875,0.10466263443231583 -876,0.10587780922651291 -877,0.09941636770963669 -878,0.09535364806652069 -879,0.09467451274394989 -880,0.09484853595495224 -881,0.09798505157232285 -882,0.09773457795381546 -883,0.10606007277965546 -884,0.10379858314990997 -885,0.10038518905639648 -886,0.10149110108613968 -887,0.09408605098724365 -888,0.09876023232936859 -889,0.10396341979503632 -890,0.0983683168888092 -891,0.09391196817159653 -892,0.10273826122283936 -893,0.09320379048585892 -894,0.0961177721619606 -895,0.09760680049657822 -896,0.09275592863559723 -897,0.0957472026348114 -898,0.10408922284841537 -899,0.09749626368284225 -900,0.09889692813158035 -901,0.09751420468091965 -902,0.10020878165960312 -903,0.10505567491054535 -904,0.09666472673416138 -905,0.10284335911273956 -906,0.09690243005752563 -907,0.10452241450548172 -908,0.11123879998922348 -909,0.10213832557201385 -910,0.09799633920192719 -911,0.10245966166257858 -912,0.09858828037977219 -913,0.09381885081529617 -914,0.0993339940905571 -915,0.09905128926038742 -916,0.09545107185840607 -917,0.09632977098226547 -918,0.09428217262029648 -919,0.10297494381666183 -920,0.10411985218524933 -921,0.0901816114783287 -922,0.09338197112083435 -923,0.09758879989385605 -924,0.09122905880212784 -925,0.10435187071561813 
-926,0.09993112832307816 -927,0.09769529104232788 -928,0.09850472956895828 -929,0.09974723309278488 -930,0.09999755769968033 -931,0.09365452826023102 -932,0.10458095371723175 -933,0.10483092069625854 -934,0.09489021450281143 -935,0.10358171910047531 -936,0.09866484254598618 -937,0.09480548650026321 -938,0.09394748508930206 -939,0.09921983629465103 -940,0.09533895552158356 -941,0.09396471083164215 -942,0.1056104302406311 -943,0.09949177503585815 -944,0.09887059777975082 -945,0.10014277696609497 -946,0.0934675857424736 -947,0.0936814546585083 -948,0.10359522700309753 -949,0.09892068803310394 -950,0.10180967301130295 -951,0.10197550803422928 -952,0.0980546623468399 -953,0.10559821128845215 -954,0.10344935208559036 -955,0.09157837182283401 -956,0.10054884850978851 -957,0.09549526125192642 -958,0.10543524473905563 -959,0.10496845841407776 -960,0.09780573844909668 -961,0.0974285751581192 -962,0.0951351523399353 -963,0.09495721012353897 -964,0.0922062024474144 -965,0.0978471115231514 -966,0.09243705868721008 -967,0.09507282078266144 -968,0.09639786183834076 -969,0.09326323866844177 -970,0.10194206237792969 -971,0.0993126854300499 -972,0.09340303391218185 -973,0.09303420782089233 -974,0.09120212495326996 -975,0.10087000578641891 -976,0.10007031261920929 -977,0.09635340422391891 -978,0.09955530613660812 -979,0.09520106017589569 -980,0.0943923220038414 -981,0.09603100270032883 -982,0.09350597858428955 -983,0.10043711960315704 -984,0.09921595454216003 -985,0.0943467915058136 -986,0.09409937262535095 -987,0.09542690962553024 -988,0.09010407328605652 -989,0.09117241203784943 -990,0.10038191080093384 -991,0.09488801658153534 -992,0.10463608801364899 -993,0.10770018398761749 -994,0.09593041986227036 -995,0.10651835799217224 -996,0.1060309186577797 -997,0.09489331394433975 -998,0.1117314025759697 -999,0.1110096350312233 -1000,0.09239722788333893
diff --git a/training/base_loss/one_fourth/training_config.txt b/training/base_loss/one_fourth/training_config.txt
deleted file mode 100644
index 80f9f20..0000000
--- a/training/base_loss/one_fourth/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function Name: one_fourth
-Loss Function Exponent: 0.25
-
-Current Loss Function (wrapper) Source Code:
-    def current_loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(loss_fn(theta, desired_theta))
-
-Specific Loss Function Source Code:
-def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-        loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                         / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss/one_fourth/training_log.csv b/training/base_loss/one_fourth/training_log.csv
deleted file mode 100644
index 2d1f11e..0000000
--- a/training/base_loss/one_fourth/training_log.csv
+++ /dev/null
@@ -1,1002 +0,0 @@
-Epoch,Loss
-0,1.7186180353164673 -1,1.8545726537704468 -2,1.4484902620315552 -3,1.5451250076293945 -4,1.6450263261795044 -5,1.5671416521072388 -6,1.5534955263137817 -7,1.4467295408248901 -8,1.1871256828308105 -9,1.0950387716293335 -10,1.0131062269210815 -11,0.9107488393783569 -12,0.8713904619216919 -13,0.7643683552742004 -14,0.6744686365127563 -15,0.6680580973625183 -16,0.6987844705581665 -17,0.6803445219993591 -18,0.6598490476608276 -19,0.6339329481124878 -20,0.5894742608070374 -21,0.5129363536834717 -22,0.46286559104919434 -23,0.40880894660949707 -24,0.37168723344802856 -25,0.34776583313941956 -26,0.3272499442100525 -27,0.3053683936595917 -28,0.2832457721233368 -29,0.26654139161109924 -30,0.25037670135498047 -31,0.22755983471870422 -32,0.2149142175912857 -33,0.20272941887378693 -34,0.1808091104030609 -35,0.17953449487686157 -36,0.15396742522716522 -37,0.15124186873435974 -38,0.15233257412910461 -39,0.15193282067775726 -40,0.15037071704864502 -41,0.1420917958021164 -42,0.1496637463569641 -43,0.14835287630558014 -44,0.13451647758483887 -45,0.1396087259054184 -46,0.14540664851665497 -47,0.14364145696163177 -48,0.12579099833965302 -49,0.1281907558441162 -50,0.13565601408481598 -51,0.13271313905715942 -52,0.12219292670488358 -53,0.12056383490562439 -54,0.12879973649978638 -55,0.11746114492416382 -56,0.11160332709550858 -57,0.11870503425598145 -58,0.11831746250391006 -59,0.11846738308668137 -60,0.12065616250038147 -61,0.11727139353752136 -62,0.119158536195755 -63,0.11779170483350754 -64,0.1078423261642456 -65,0.12091019749641418 -66,0.12325175106525421 -67,0.1102522537112236 -68,0.1180412694811821 -69,0.11658746749162674 -70,0.10628102719783783 -71,0.121966652572155 -72,0.12399546056985855
-73,0.10722102224826813 -74,0.117599256336689 -75,0.11881253123283386 -76,0.11547736078500748 -77,0.11092126369476318 -78,0.11269202828407288 -79,0.1038256585597992 -80,0.11176931113004684 -81,0.11181219667196274 -82,0.10432935506105423 -83,0.11572286486625671 -84,0.11482492089271545 -85,0.102035753428936 -86,0.10691051185131073 -87,0.1037757471203804 -88,0.10334359109401703 -89,0.10472699254751205 -90,0.10124054551124573 -91,0.10859140753746033 -92,0.10418710857629776 -93,0.10770423710346222 -94,0.10945598781108856 -95,0.10236742347478867 -96,0.10594118386507034 -97,0.10342187434434891 -98,0.10479632019996643 -99,0.10718736797571182 -100,0.10360919684171677 -101,0.103676438331604 -102,0.10286564379930496 -103,0.10220227390527725 -104,0.10169292986392975 -105,0.0997937023639679 -106,0.10810915380716324 -107,0.10386299341917038 -108,0.10974094271659851 -109,0.11233341693878174 -110,0.10180484503507614 -111,0.11116386204957962 -112,0.11027390509843826 -113,0.1049809679389 -114,0.10270301252603531 -115,0.1110132709145546 -116,0.10289134830236435 -117,0.11677387356758118 -118,0.12324192374944687 -119,0.11252473294734955 -120,0.10727470368146896 -121,0.12225230783224106 -122,0.11247333139181137 -123,0.11018374562263489 -124,0.120953768491745 -125,0.11529235541820526 -126,0.1068812906742096 -127,0.10826525092124939 -128,0.10996875166893005 -129,0.10842262953519821 -130,0.10285546630620956 -131,0.1165984570980072 -132,0.11272244900465012 -133,0.10171616077423096 -134,0.10890620201826096 -135,0.10639254748821259 -136,0.10243164002895355 -137,0.10045597702264786 -138,0.10581185668706894 -139,0.09842078387737274 -140,0.10833306610584259 -141,0.10494328290224075 -142,0.10269607603549957 -143,0.10068674385547638 -144,0.09994231909513474 -145,0.10712305456399918 -146,0.10211005061864853 -147,0.10872268676757812 -148,0.10941322147846222 -149,0.09960992634296417 -150,0.1071070209145546 -151,0.10244674235582352 -152,0.10521731525659561 -153,0.10471675544977188 -154,0.10707971453666687 -155,0.10524670779705048 -156,0.10092952847480774 -157,0.1104867085814476 -158,0.11233515292406082 -159,0.1097637191414833 -160,0.1104973778128624 -161,0.11127815395593643 -162,0.10683267563581467 -163,0.10752498358488083 -164,0.11313360929489136 -165,0.10760440677404404 -166,0.10271020233631134 -167,0.10389355570077896 -168,0.10683335363864899 -169,0.11459989845752716 -170,0.10653409361839294 -171,0.1217462569475174 -172,0.12214057147502899 -173,0.1053650826215744 -174,0.11366348713636398 -175,0.1167835220694542 -176,0.11183950304985046 -177,0.11747746914625168 -178,0.12380705028772354 -179,0.1182238906621933 -180,0.10753840953111649 -181,0.11530259996652603 -182,0.11716974526643753 -183,0.10485866665840149 -184,0.11407815665006638 -185,0.10544659942388535 -186,0.10111222416162491 -187,0.10110489279031754 -188,0.09911755472421646 -189,0.10955733060836792 -190,0.10062012076377869 -191,0.11390676349401474 -192,0.11684470623731613 -193,0.10408072918653488 -194,0.10993818938732147 -195,0.10852046310901642 -196,0.10577873140573502 -197,0.11154542863368988 -198,0.10189954936504364 -199,0.11001503467559814 -200,0.10648051649332047 -201,0.10629262775182724 -202,0.10983216762542725 -203,0.10382700711488724 -204,0.10628855973482132 -205,0.09915340691804886 -206,0.10790038853883743 -207,0.10816288739442825 -208,0.1004549041390419 -209,0.10899672657251358 -210,0.1017904281616211 -211,0.10493285208940506 -212,0.10127292573451996 -213,0.09921477735042572 -214,0.09882064908742905 -215,0.10138414055109024 -216,0.10156252980232239 
-217,0.09922230988740921 -218,0.10113950073719025 -219,0.09853103756904602 -220,0.09860841929912567 -221,0.1003088727593422 -222,0.09718851745128632 -223,0.09800206124782562 -224,0.09966643899679184 -225,0.10544952750205994 -226,0.10669443756341934 -227,0.09562090784311295 -228,0.09634441882371902 -229,0.10480058938264847 -230,0.1015419289469719 -231,0.10478027164936066 -232,0.10098028928041458 -233,0.10555748641490936 -234,0.10756764560937881 -235,0.0974363461136818 -236,0.0986311063170433 -237,0.09933561831712723 -238,0.10006973892450333 -239,0.0970272570848465 -240,0.1022660881280899 -241,0.09837239980697632 -242,0.10089637339115143 -243,0.10253309458494186 -244,0.09760087728500366 -245,0.09570726752281189 -246,0.09672380238771439 -247,0.09647807478904724 -248,0.10233023017644882 -249,0.10111261904239655 -250,0.09782511740922928 -251,0.10791786015033722 -252,0.0978875607252121 -253,0.09843126684427261 -254,0.10387744009494781 -255,0.10069715231657028 -256,0.1025002971291542 -257,0.10295102745294571 -258,0.09945222735404968 -259,0.09734712541103363 -260,0.10011857748031616 -261,0.09815913438796997 -262,0.10078232735395432 -263,0.096848264336586 -264,0.09621567279100418 -265,0.10216408222913742 -266,0.10392346233129501 -267,0.09967628866434097 -268,0.09999488294124603 -269,0.09903422743082047 -270,0.09585383534431458 -271,0.10574717074632645 -272,0.09873602539300919 -273,0.11011191457509995 -274,0.11496898531913757 -275,0.10253135859966278 -276,0.11521837115287781 -277,0.11679238080978394 -278,0.09681600332260132 -279,0.10988666117191315 -280,0.10254604369401932 -281,0.11327850073575974 -282,0.10634047538042068 -283,0.10698278993368149 -284,0.10839147120714188 -285,0.0967484712600708 -286,0.10306806862354279 -287,0.09878662973642349 -288,0.10289427638053894 -289,0.09776747971773148 -290,0.09674926847219467 -291,0.0997471809387207 -292,0.09636802226305008 -293,0.09751538187265396 -294,0.09572526067495346 -295,0.09735218435525894 -296,0.1004587933421135 -297,0.10021752864122391 -298,0.0993884727358818 -299,0.09568503499031067 -300,0.1032385379076004 -301,0.09876777976751328 -302,0.09631306678056717 -303,0.09483453631401062 -304,0.09832543879747391 -305,0.09605519473552704 -306,0.09972265362739563 -307,0.09326144307851791 -308,0.09575571864843369 -309,0.09905333071947098 -310,0.09760793298482895 -311,0.10002471506595612 -312,0.09640224277973175 -313,0.11484077572822571 -314,0.11072134971618652 -315,0.10418952256441116 -316,0.11324504017829895 -317,0.10304165631532669 -318,0.11896426975727081 -319,0.11658994108438492 -320,0.09680357575416565 -321,0.11425463855266571 -322,0.11676213890314102 -323,0.10279014706611633 -324,0.11290974915027618 -325,0.10402451455593109 -326,0.10994677990674973 -327,0.11698336154222488 -328,0.10462537407875061 -329,0.10414863377809525 -330,0.10573273152112961 -331,0.0992935448884964 -332,0.10324130207300186 -333,0.10336362570524216 -334,0.10213460773229599 -335,0.10211934894323349 -336,0.09875272959470749 -337,0.1003822386264801 -338,0.09853313863277435 -339,0.09834735840559006 -340,0.0962839275598526 -341,0.1022048145532608 -342,0.09930181503295898 -343,0.1013856753706932 -344,0.09712563455104828 -345,0.10488277673721313 -346,0.09644749015569687 -347,0.10215917974710464 -348,0.09755173325538635 -349,0.10800434648990631 -350,0.1064288467168808 -351,0.09843776375055313 -352,0.10317131876945496 -353,0.09862741082906723 -354,0.09583798795938492 -355,0.09803096950054169 -356,0.09911108762025833 -357,0.09516251087188721 -358,0.10736478120088577 -359,0.10529697686433792 
-360,0.09940385073423386 -361,0.10087575763463974 -362,0.09862256795167923 -363,0.09941123425960541 -364,0.10481155663728714 -365,0.09719936549663544 -366,0.10271240025758743 -367,0.09924375265836716 -368,0.10917479544878006 -369,0.10879149287939072 -370,0.10207910090684891 -371,0.10738347470760345 -372,0.09536831080913544 -373,0.09783636778593063 -374,0.09957250952720642 -375,0.10425196588039398 -376,0.09841945767402649 -377,0.10270479321479797 -378,0.09417203813791275 -379,0.10890427231788635 -380,0.10430324822664261 -381,0.10017625987529755 -382,0.10024893283843994 -383,0.10474744439125061 -384,0.10234149545431137 -385,0.10459865629673004 -386,0.10525158792734146 -387,0.09834014624357224 -388,0.11223585903644562 -389,0.1069667860865593 -390,0.10176719725131989 -391,0.11134403944015503 -392,0.102030448615551 -393,0.10115459561347961 -394,0.09946155548095703 -395,0.10610563308000565 -396,0.0970953032374382 -397,0.10370154678821564 -398,0.09811989217996597 -399,0.10509059578180313 -400,0.1063091903924942 -401,0.09607113152742386 -402,0.10196907073259354 -403,0.09490876644849777 -404,0.10649925470352173 -405,0.10089955478906631 -406,0.10279233753681183 -407,0.10462828725576401 -408,0.09627144783735275 -409,0.10938232392072678 -410,0.09707584977149963 -411,0.10971947014331818 -412,0.11372438818216324 -413,0.09693367034196854 -414,0.11275231093168259 -415,0.11511849611997604 -416,0.1003076508641243 -417,0.10029459744691849 -418,0.10761929303407669 -419,0.10922457277774811 -420,0.09762170165777206 -421,0.10797495394945145 -422,0.10085198283195496 -423,0.10296567529439926 -424,0.10579917579889297 -425,0.09681969881057739 -426,0.10622330009937286 -427,0.1135278195142746 -428,0.10254502296447754 -429,0.10226345807313919 -430,0.10746540129184723 -431,0.0951276645064354 -432,0.10017801076173782 -433,0.10065604001283646 -434,0.10109231621026993 -435,0.10626887530088425 -436,0.10203997045755386 -437,0.09490792453289032 -438,0.1070837751030922 -439,0.10509975254535675 -440,0.10328971594572067 -441,0.10338710993528366 -442,0.10191307216882706 -443,0.09957856684923172 -444,0.10467919707298279 -445,0.10492268204689026 -446,0.09925622493028641 -447,0.10416751354932785 -448,0.10013385862112045 -449,0.10373473912477493 -450,0.10167934000492096 -451,0.10054489970207214 -452,0.10013177245855331 -453,0.0971120074391365 -454,0.10152261704206467 -455,0.09866897016763687 -456,0.10148067027330399 -457,0.10293148458003998 -458,0.10026025027036667 -459,0.10110768675804138 -460,0.09917984157800674 -461,0.10737686604261398 -462,0.1002763882279396 -463,0.10332143306732178 -464,0.10318455100059509 -465,0.1079953983426094 -466,0.10741377621889114 -467,0.1007952094078064 -468,0.11580175906419754 -469,0.11519306153059006 -470,0.09475234895944595 -471,0.10635877400636673 -472,0.1064067929983139 -473,0.09280791878700256 -474,0.10551997274160385 -475,0.10039877146482468 -476,0.10356719046831131 -477,0.10183624923229218 -478,0.09820989519357681 -479,0.10440286248922348 -480,0.10147537291049957 -481,0.10372990369796753 -482,0.09782375395298004 -483,0.11334006488323212 -484,0.10916639119386673 -485,0.09906543046236038 -486,0.11094783991575241 -487,0.10869728773832321 -488,0.09424322843551636 -489,0.10991458594799042 -490,0.1018257737159729 -491,0.11014308780431747 -492,0.11536501348018646 -493,0.10266999155282974 -494,0.10801538825035095 -495,0.11522188037633896 -496,0.09692807495594025 -497,0.116964191198349 -498,0.12551946938037872 -499,0.11654479056596756 -500,0.10727230459451675 -501,0.11465223133563995 
-502,0.11392288655042648 -503,0.09865269064903259 -504,0.10729887336492538 -505,0.1062239557504654 -506,0.09683463722467422 -507,0.10071375966072083 -508,0.09805381298065186 -509,0.0977628082036972 -510,0.09671596437692642 -511,0.09814874082803726 -512,0.09979134052991867 -513,0.09757605195045471 -514,0.09799479693174362 -515,0.09864963591098785 -516,0.0950232744216919 -517,0.09961436688899994 -518,0.09661266207695007 -519,0.09822504222393036 -520,0.10167478024959564 -521,0.09519640356302261 -522,0.10192885249853134 -523,0.1016063541173935 -524,0.09713806211948395 -525,0.10026092827320099 -526,0.09570945054292679 -527,0.1006009504199028 -528,0.10168179869651794 -529,0.09560615569353104 -530,0.10485465824604034 -531,0.09972504526376724 -532,0.10561934858560562 -533,0.10812710970640182 -534,0.09842950850725174 -535,0.098876953125 -536,0.09588198363780975 -537,0.10140245407819748 -538,0.10135360807180405 -539,0.101438008248806 -540,0.1051488146185875 -541,0.10323566198348999 -542,0.10284530371427536 -543,0.10037221759557724 -544,0.11561689525842667 -545,0.11478076130151749 -546,0.09854620695114136 -547,0.11564324051141739 -548,0.12106706947088242 -549,0.10615164786577225 -550,0.10563158243894577 -551,0.11081814765930176 -552,0.10919355601072311 -553,0.09746786206960678 -554,0.11243335157632828 -555,0.11642703413963318 -556,0.1113860011100769 -557,0.10230177640914917 -558,0.10966704785823822 -559,0.10973350703716278 -560,0.10095500200986862 -561,0.10692023485898972 -562,0.10593686997890472 -563,0.10828683525323868 -564,0.10003673285245895 -565,0.10412196069955826 -566,0.10356269031763077 -567,0.09613946080207825 -568,0.10337478667497635 -569,0.10152444988489151 -570,0.0957602858543396 -571,0.10396690666675568 -572,0.09908615797758102 -573,0.10278774052858353 -574,0.10582112520933151 -575,0.09458380937576294 -576,0.1089513748884201 -577,0.10702548921108246 -578,0.09968046098947525 -579,0.10105038434267044 -580,0.09976422786712646 -581,0.1011623740196228 -582,0.09696579724550247 -583,0.10885132104158401 -584,0.10468978434801102 -585,0.10121768712997437 -586,0.10476561635732651 -587,0.09402543306350708 -588,0.10050039738416672 -589,0.10113575309515 -590,0.09565164148807526 -591,0.10405606031417847 -592,0.10125095397233963 -593,0.09332577139139175 -594,0.1006275862455368 -595,0.0965801477432251 -596,0.1044435054063797 -597,0.10390638560056686 -598,0.10896376520395279 -599,0.10121384263038635 -600,0.10976310819387436 -601,0.11358004808425903 -602,0.0994805321097374 -603,0.09830867499113083 -604,0.10499332845211029 -605,0.09908853471279144 -606,0.09711264073848724 -607,0.10409516841173172 -608,0.09867110103368759 -609,0.09648055583238602 -610,0.10129895061254501 -611,0.09501101076602936 -612,0.10173468291759491 -613,0.10357353091239929 -614,0.09932912141084671 -615,0.10495738685131073 -616,0.10448171943426132 -617,0.10254903137683868 -618,0.1028958186507225 -619,0.11007388681173325 -620,0.10229489207267761 -621,0.09973590821027756 -622,0.1126675009727478 -623,0.10605980455875397 -624,0.09450386464595795 -625,0.1069941371679306 -626,0.10896171629428864 -627,0.11148952692747116 -628,0.10047409683465958 -629,0.10413337498903275 -630,0.11355171352624893 -631,0.10299938172101974 -632,0.10715780407190323 -633,0.11433861404657364 -634,0.10463230311870575 -635,0.09783586859703064 -636,0.11166009306907654 -637,0.10417234897613525 -638,0.10049952566623688 -639,0.10962031781673431 -640,0.10402404516935349 -641,0.1014241948723793 -642,0.10504150390625 -643,0.09633509069681168 -644,0.10083787143230438 
-645,0.10191402584314346 -646,0.0991048663854599 -647,0.09866457432508469 -648,0.10091439634561539 -649,0.09591636061668396 -650,0.1015218198299408 -651,0.10137555748224258 -652,0.10854356735944748 -653,0.09599156677722931 -654,0.1083744689822197 -655,0.10864035040140152 -656,0.0937865674495697 -657,0.10817232728004456 -658,0.10387194901704788 -659,0.09973089396953583 -660,0.10709443688392639 -661,0.0996209904551506 -662,0.10663113743066788 -663,0.10596062988042831 -664,0.09563367813825607 -665,0.10291282832622528 -666,0.09428104758262634 -667,0.09549465030431747 -668,0.09554335474967957 -669,0.09794718027114868 -670,0.10121320933103561 -671,0.09450803697109222 -672,0.10730263590812683 -673,0.10471851378679276 -674,0.09818856418132782 -675,0.10318085551261902 -676,0.09228833764791489 -677,0.11129222810268402 -678,0.107489213347435 -679,0.09515729546546936 -680,0.09836360067129135 -681,0.095577172935009 -682,0.10459890216588974 -683,0.09928851574659348 -684,0.10625942051410675 -685,0.10357348620891571 -686,0.09573022276163101 -687,0.10559114068746567 -688,0.09855668246746063 -689,0.10411592572927475 -690,0.11030053347349167 -691,0.10051023215055466 -692,0.11047907173633575 -693,0.11542560160160065 -694,0.10673505067825317 -695,0.1001393124461174 -696,0.10328159481287003 -697,0.10263678431510925 -698,0.1037633866071701 -699,0.09596386551856995 -700,0.09935426712036133 -701,0.09670647978782654 -702,0.0998888686299324 -703,0.09580125659704208 -704,0.09478927403688431 -705,0.09680313616991043 -706,0.09516983479261398 -707,0.09570489078760147 -708,0.09086381644010544 -709,0.10010985285043716 -710,0.09765838831663132 -711,0.09919624775648117 -712,0.10610603541135788 -713,0.09780129045248032 -714,0.10281729698181152 -715,0.10305872559547424 -716,0.09644895046949387 -717,0.10588487982749939 -718,0.1060289666056633 -719,0.1042831689119339 -720,0.10355845838785172 -721,0.10498414933681488 -722,0.10163893550634384 -723,0.10297247767448425 -724,0.10461033880710602 -725,0.11478567868471146 -726,0.10410910844802856 -727,0.10338473320007324 -728,0.11146584153175354 -729,0.09939471632242203 -730,0.10935677587985992 -731,0.10509670525789261 -732,0.10482427477836609 -733,0.11541269719600677 -734,0.11403743177652359 -735,0.09886366873979568 -736,0.1108040139079094 -737,0.11393368244171143 -738,0.10018061101436615 -739,0.10701680183410645 -740,0.10523007810115814 -741,0.09896621853113174 -742,0.09771531075239182 -743,0.10226023942232132 -744,0.09818518161773682 -745,0.10421238839626312 -746,0.10704043507575989 -747,0.10384654998779297 -748,0.09804490208625793 -749,0.10451480746269226 -750,0.1047714427113533 -751,0.09949226677417755 -752,0.1011136844754219 -753,0.1067299023270607 -754,0.09846226125955582 -755,0.0944032296538353 -756,0.09734601527452469 -757,0.09586600214242935 -758,0.09540737420320511 -759,0.09425245225429535 -760,0.0929638147354126 -761,0.09325070679187775 -762,0.09370817244052887 -763,0.09225130826234818 -764,0.09187817573547363 -765,0.09245971590280533 -766,0.09570702910423279 -767,0.09609745442867279 -768,0.09242133051156998 -769,0.09920486807823181 -770,0.09073814004659653 -771,0.09765297919511795 -772,0.09316803514957428 -773,0.09937548637390137 -774,0.10413221269845963 -775,0.10108517110347748 -776,0.09563834965229034 -777,0.10154436528682709 -778,0.09172933548688889 -779,0.10836012661457062 -780,0.11075621098279953 -781,0.0971355065703392 -782,0.10765305161476135 -783,0.10602042078971863 -784,0.09557856619358063 -785,0.10955005884170532 -786,0.10673534125089645 -787,0.0977654904127121 
-788,0.10758760571479797 -789,0.10799744725227356 -790,0.10486175119876862 -791,0.10427704453468323 -792,0.10017900913953781 -793,0.10440371185541153 -794,0.09830401092767715 -795,0.1058184951543808 -796,0.11071208864450455 -797,0.09747335314750671 -798,0.10748910158872604 -799,0.11604341119527817 -800,0.10883377492427826 -801,0.09335222840309143 -802,0.1022518053650856 -803,0.09553387016057968 -804,0.09794063121080399 -805,0.09602487832307816 -806,0.09601274132728577 -807,0.09303127229213715 -808,0.09658260643482208 -809,0.09354773163795471 -810,0.09652122110128403 -811,0.09140528738498688 -812,0.0943770781159401 -813,0.09469229727983475 -814,0.0917295590043068 -815,0.09117342531681061 -816,0.09390603005886078 -817,0.09419875591993332 -818,0.09126807749271393 -819,0.0915365070104599 -820,0.09416534006595612 -821,0.09314748644828796 -822,0.09823572635650635 -823,0.09605006873607635 -824,0.09540456533432007 -825,0.10026711970567703 -826,0.09883913397789001 -827,0.09426867961883545 -828,0.0913669764995575 -829,0.09645777195692062 -830,0.09290777891874313 -831,0.09155528992414474 -832,0.09507853537797928 -833,0.09317025542259216 -834,0.09532066434621811 -835,0.09224094450473785 -836,0.09272745251655579 -837,0.09524762630462646 -838,0.10006173700094223 -839,0.09277820587158203 -840,0.09934218227863312 -841,0.09886053204536438 -842,0.09155154973268509 -843,0.09893526136875153 -844,0.09100260585546494 -845,0.09534641355276108 -846,0.09033229947090149 -847,0.09230440109968185 -848,0.09741981327533722 -849,0.09313192218542099 -850,0.09598997980356216 -851,0.0936647579073906 -852,0.09269169718027115 -853,0.0912880226969719 -854,0.10023003816604614 -855,0.09466813504695892 -856,0.11100310832262039 -857,0.10897523164749146 -858,0.09640457481145859 -859,0.10965584218502045 -860,0.10678549855947495 -861,0.10219305753707886 -862,0.11751852184534073 -863,0.12021201103925705 -864,0.10425892472267151 -865,0.1053137332201004 -866,0.1015530526638031 -867,0.10012590140104294 -868,0.09919111430644989 -869,0.10143280774354935 -870,0.09956096857786179 -871,0.09921126067638397 -872,0.10243841260671616 -873,0.10716857016086578 -874,0.09666843712329865 -875,0.10823564231395721 -876,0.11038361489772797 -877,0.10074016451835632 -878,0.10756818950176239 -879,0.1133434846997261 -880,0.09848945587873459 -881,0.11249643564224243 -882,0.11978722363710403 -883,0.11692771315574646 -884,0.101357102394104 -885,0.10836110264062881 -886,0.11144109815359116 -887,0.09931487590074539 -888,0.09889311343431473 -889,0.09877768903970718 -890,0.10426712036132812 -891,0.09779869765043259 -892,0.09795868396759033 -893,0.10047853738069534 -894,0.09504830837249756 -895,0.0997684970498085 -896,0.11052194237709045 -897,0.1072685644030571 -898,0.09949329495429993 -899,0.09756691753864288 -900,0.10035157203674316 -901,0.09669109433889389 -902,0.09913816303014755 -903,0.09601563960313797 -904,0.09232649207115173 -905,0.09652727842330933 -906,0.10319684445858002 -907,0.1034192368388176 -908,0.10002542287111282 -909,0.09769333899021149 -910,0.09433141350746155 -911,0.09719645231962204 -912,0.09569528698921204 -913,0.09422773867845535 -914,0.09338807314634323 -915,0.0959281250834465 -916,0.09395111352205276 -917,0.09679003804922104 -918,0.09321167320013046 -919,0.09617360681295395 -920,0.0925498902797699 -921,0.09578925371170044 -922,0.09702018648386002 -923,0.09677781164646149 -924,0.09463078528642654 -925,0.0967397466301918 -926,0.1013861894607544 -927,0.0974820926785469 -928,0.0978490561246872 -929,0.10239166021347046 -930,0.09534457325935364 
-931,0.09797081351280212 -932,0.09880396723747253 -933,0.09524770081043243 -934,0.09257208555936813 -935,0.09384574741125107 -936,0.0935126543045044 -937,0.09501806646585464 -938,0.09268851578235626 -939,0.09577663242816925 -940,0.09804599732160568 -941,0.09591645747423172 -942,0.0963490754365921 -943,0.09962283819913864 -944,0.09941908717155457 -945,0.10106893628835678 -946,0.09993554651737213 -947,0.0940108448266983 -948,0.09752273559570312 -949,0.09881680458784103 -950,0.09244155138731003 -951,0.09699282795190811 -952,0.09846991300582886 -953,0.09753365814685822 -954,0.1044665202498436 -955,0.10208266228437424 -956,0.09834586828947067 -957,0.1031108871102333 -958,0.0947718471288681 -959,0.09998846054077148 -960,0.0976811945438385 -961,0.09820714592933655 -962,0.09398997575044632 -963,0.10588008165359497 -964,0.10833043605089188 -965,0.09241031110286713 -966,0.111702561378479 -967,0.11882522702217102 -968,0.10551250725984573 -969,0.09957761317491531 -970,0.10764231532812119 -971,0.10042077302932739 -972,0.10441073030233383 -973,0.09893521666526794 -974,0.09655828028917313 -975,0.09365629404783249 -976,0.1027631163597107 -977,0.10246977210044861 -978,0.09198477119207382 -979,0.10200802236795425 -980,0.1063307672739029 -981,0.09454340487718582 -982,0.1057656928896904 -983,0.10877102613449097 -984,0.09541584551334381 -985,0.1046137660741806 -986,0.10722385346889496 -987,0.10263583064079285 -988,0.10179265588521957 -989,0.10693765431642532 -990,0.10237129032611847 -991,0.09283018112182617 -992,0.0997161865234375 -993,0.10494351387023926 -994,0.09918922185897827 -995,0.09902479499578476 -996,0.10672478377819061 -997,0.09991945326328278 -998,0.0971079096198082 -999,0.1027822270989418 -1000,0.10503604263067245
diff --git a/training/base_loss/one_half/training_config.txt b/training/base_loss/one_half/training_config.txt
deleted file mode 100644
index f1b36ed..0000000
--- a/training/base_loss/one_half/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function Name: one_half
-Loss Function Exponent: 0.5
-
-Current Loss Function (wrapper) Source Code:
-    def current_loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(loss_fn(theta, desired_theta))
-
-Specific Loss Function Source Code:
-def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-        loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                         / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss/one_half/training_log.csv b/training/base_loss/one_half/training_log.csv
deleted file mode 100644
index 908bb0f..0000000
--- a/training/base_loss/one_half/training_log.csv
+++ /dev/null
@@ -1,1002 +0,0 @@
-Epoch,Loss
-0,2.1826066970825195 -1,2.374849796295166 -2,1.7610682249069214 -3,1.892038345336914 -4,1.7930089235305786 -5,1.5165635347366333 -6,2.3730392456054688 -7,1.4760717153549194 -8,1.233771800994873 -9,1.128870964050293 -10,1.026740550994873 -11,1.048467993736267 -12,1.0171390771865845 -13,0.997927725315094 -14,0.9777700901031494 -15,0.8490002751350403 -16,0.7462286949157715 -17,0.6748026609420776 -18,0.6220809817314148 -19,0.6363933086395264 -20,0.6377995610237122 -21,0.6185005307197571 -22,0.6211222410202026 -23,0.6174491047859192 -24,0.6229243278503418 -25,0.6075895428657532 -26,0.6012120246887207 -27,0.5908080339431763 -28,0.5776153802871704 -29,0.5704137682914734 -30,0.561610758304596 -31,0.5412768721580505 -32,0.5209556818008423 -33,0.4770500659942627 -34,0.44805610179901123 -35,0.45300132036209106 -36,0.44502049684524536 -37,0.4304162263870239 -38,0.4106188416481018 -39,0.40104126930236816 -40,0.3898600935935974 -41,0.3786983788013458 -42,0.3681178092956543 -43,0.3576139807701111 -44,0.34513458609580994 -45,0.3287496566772461 -46,0.3164929747581482 -47,0.2998135983943939 -48,0.28053054213523865 -49,0.2648157477378845 -50,0.24228635430335999 -51,0.22393250465393066 -52,0.24040795862674713 -53,0.24589596688747406 -54,0.24003103375434875 -55,0.21012276411056519 -56,0.20291224122047424 -57,0.19841131567955017 -58,0.1857411414384842 -59,0.17750415205955505 -60,0.18814806640148163 -61,0.1864621341228485 -62,0.18620102107524872 -63,0.1871282160282135 -64,0.18767833709716797 -65,0.17864990234375 -66,0.18046380579471588 -67,0.18427951633930206 -68,0.18629787862300873 -69,0.18623389303684235 -70,0.18433505296707153 -71,0.18072651326656342 -72,0.1752086728811264 -73,0.1667201668024063 -74,0.16856281459331512
-75,0.1657017171382904 -76,0.1561993509531021 -77,0.15439336001873016 -78,0.15818019211292267 -79,0.155645489692688 -80,0.14762797951698303 -81,0.13793186843395233 -82,0.13917379081249237 -83,0.1380610466003418 -84,0.13780999183654785 -85,0.13580875098705292 -86,0.1384902149438858 -87,0.13500773906707764 -88,0.12921717762947083 -89,0.12748093903064728 -90,0.12311132997274399 -91,0.13130027055740356 -92,0.1285882592201233 -93,0.11977140605449677 -94,0.12012960761785507 -95,0.12321123480796814 -96,0.12562207877635956 -97,0.1267424076795578 -98,0.12366271018981934 -99,0.11636044085025787 -100,0.11496023088693619 -101,0.1142626404762268 -102,0.11501190811395645 -103,0.11667074263095856 -104,0.10911495983600616 -105,0.1086815595626831 -106,0.11106760799884796 -107,0.10845574736595154 -108,0.11081316322088242 -109,0.10519340634346008 -110,0.11026529967784882 -111,0.11125451326370239 -112,0.10507245361804962 -113,0.1106831505894661 -114,0.11059825867414474 -115,0.10316473990678787 -116,0.10382895916700363 -117,0.10494952648878098 -118,0.10358504205942154 -119,0.10477188229560852 -120,0.09975914657115936 -121,0.10410282760858536 -122,0.10194434970617294 -123,0.10133349895477295 -124,0.1021440401673317 -125,0.09973426163196564 -126,0.10059337317943573 -127,0.10079400986433029 -128,0.10075607895851135 -129,0.100021131336689 -130,0.10099516063928604 -131,0.09699264168739319 -132,0.10232682526111603 -133,0.1009054034948349 -134,0.09912129491567612 -135,0.10038412362337112 -136,0.10196076333522797 -137,0.09862909466028214 -138,0.09840597212314606 -139,0.098630890250206 -140,0.09791843593120575 -141,0.09775938838720322 -142,0.10251203179359436 -143,0.09665787220001221 -144,0.10873367637395859 -145,0.1104855164885521 -146,0.09977464377880096 -147,0.10581890493631363 -148,0.10849956423044205 -149,0.09702465683221817 -150,0.10199160873889923 -151,0.09798690676689148 -152,0.10181630402803421 -153,0.1007685512304306 -154,0.09683875739574432 -155,0.10119558870792389 -156,0.0979958027601242 -157,0.0993502289056778 -158,0.09670793265104294 -159,0.10037582367658615 -160,0.09675297141075134 -161,0.09863739460706711 -162,0.09603528678417206 -163,0.10046344250440598 -164,0.10066769272089005 -165,0.09772548824548721 -166,0.10044090449810028 -167,0.09676493704319 -168,0.0959300771355629 -169,0.09882377833127975 -170,0.0980120599269867 -171,0.09910787642002106 -172,0.10160990804433823 -173,0.10444137454032898 -174,0.09646011888980865 -175,0.10149563103914261 -176,0.09882403165102005 -177,0.10044799745082855 -178,0.10307697206735611 -179,0.09846646338701248 -180,0.09781944751739502 -181,0.09796465188264847 -182,0.09757208824157715 -183,0.09634628891944885 -184,0.0961543619632721 -185,0.09799423813819885 -186,0.09449155628681183 -187,0.10013411939144135 -188,0.09762883186340332 -189,0.09937283396720886 -190,0.09710559248924255 -191,0.10182590782642365 -192,0.10169535875320435 -193,0.09745433181524277 -194,0.10180702060461044 -195,0.09817039966583252 -196,0.100510373711586 -197,0.10228247940540314 -198,0.0971958115696907 -199,0.09583984315395355 -200,0.09538646042346954 -201,0.09875728189945221 -202,0.09939298033714294 -203,0.10234704613685608 -204,0.09621730446815491 -205,0.09521487355232239 -206,0.10167238861322403 -207,0.09564312547445297 -208,0.09784534573554993 -209,0.10060817748308182 -210,0.10107945650815964 -211,0.09636637568473816 -212,0.09958618134260178 -213,0.10236027091741562 -214,0.09753552079200745 -215,0.09611766785383224 -216,0.09835410118103027 -217,0.1016203835606575 -218,0.09634344279766083 
-219,0.09866621345281601 -220,0.10056842863559723 -221,0.10084284096956253 -222,0.09708335250616074 -223,0.1011771559715271 -224,0.10250988602638245 -225,0.10179628431797028 -226,0.09987810999155045 -227,0.10379032790660858 -228,0.09917854517698288 -229,0.09612629562616348 -230,0.09548428654670715 -231,0.10009590536355972 -232,0.09909306466579437 -233,0.09689992666244507 -234,0.09939416497945786 -235,0.0984654575586319 -236,0.0931350439786911 -237,0.0973922535777092 -238,0.10293581336736679 -239,0.1039864644408226 -240,0.09604176133871078 -241,0.09902820736169815 -242,0.10301419347524643 -243,0.09937534481287003 -244,0.09677544981241226 -245,0.0979694351553917 -246,0.10132958739995956 -247,0.09609056264162064 -248,0.10005690902471542 -249,0.10213430225849152 -250,0.09379029273986816 -251,0.0965203046798706 -252,0.0993366464972496 -253,0.09269817918539047 -254,0.09399722516536713 -255,0.09340810030698776 -256,0.09701524674892426 -257,0.0962148904800415 -258,0.09686972945928574 -259,0.09934879094362259 -260,0.09340889006853104 -261,0.0934552252292633 -262,0.1025114506483078 -263,0.10158021003007889 -264,0.09137608855962753 -265,0.10845115035772324 -266,0.10810039937496185 -267,0.09312183409929276 -268,0.10728897154331207 -269,0.11549469828605652 -270,0.11219647526741028 -271,0.09738567471504211 -272,0.09928888827562332 -273,0.1023828312754631 -274,0.09384288638830185 -275,0.09626682102680206 -276,0.10105728358030319 -277,0.1011461392045021 -278,0.09190259128808975 -279,0.09434808790683746 -280,0.09459584951400757 -281,0.09491445124149323 -282,0.09253731369972229 -283,0.09070410579442978 -284,0.09543411433696747 -285,0.09429367631673813 -286,0.09153927117586136 -287,0.09136515110731125 -288,0.09370794892311096 -289,0.09602995216846466 -290,0.09177640080451965 -291,0.0908152312040329 -292,0.09231778979301453 -293,0.09542010724544525 -294,0.09511660784482956 -295,0.09226632118225098 -296,0.09288358688354492 -297,0.09162747114896774 -298,0.09246441721916199 -299,0.08897142112255096 -300,0.09895043075084686 -301,0.09696158021688461 -302,0.09344477951526642 -303,0.09322068095207214 -304,0.09126852452754974 -305,0.09835337102413177 -306,0.09967057406902313 -307,0.0917954072356224 -308,0.10265356302261353 -309,0.09833595156669617 -310,0.09548620879650116 -311,0.10150475800037384 -312,0.09479723870754242 -313,0.09902866184711456 -314,0.09474442899227142 -315,0.10490377247333527 -316,0.10819826275110245 -317,0.09993629157543182 -318,0.0954684391617775 -319,0.10909377038478851 -320,0.10811355710029602 -321,0.09426607191562653 -322,0.10347991436719894 -323,0.10465715080499649 -324,0.09429194778203964 -325,0.09369563311338425 -326,0.10378703474998474 -327,0.09455142170190811 -328,0.09950301796197891 -329,0.10407450795173645 -330,0.09619339555501938 -331,0.096962571144104 -332,0.1034429520368576 -333,0.09062671661376953 -334,0.093536376953125 -335,0.09210079908370972 -336,0.09197933971881866 -337,0.0947311595082283 -338,0.09281488507986069 -339,0.09478537738323212 -340,0.09646415710449219 -341,0.09678822755813599 -342,0.0899239182472229 -343,0.09249866008758545 -344,0.08952347189188004 -345,0.09257856011390686 -346,0.09726440161466599 -347,0.09437025338411331 -348,0.09012345969676971 -349,0.09644133597612381 -350,0.09645429253578186 -351,0.09006573259830475 -352,0.09965816885232925 -353,0.10164251923561096 -354,0.0961504578590393 -355,0.09029807150363922 -356,0.09706515073776245 -357,0.09881684184074402 -358,0.09576410800218582 -359,0.09445401281118393 -360,0.09458600729703903 -361,0.09604067355394363 
-362,0.09572243690490723 -363,0.09430663287639618 -364,0.10264040529727936 -365,0.10008147358894348 -366,0.10285700857639313 -367,0.10249323397874832 -368,0.09823060035705566 -369,0.11376184970140457 -370,0.11770252883434296 -371,0.10494735091924667 -372,0.09484866261482239 -373,0.1044001430273056 -374,0.10191306471824646 -375,0.0907249003648758 -376,0.10523347556591034 -377,0.10784030705690384 -378,0.09547638148069382 -379,0.10077895224094391 -380,0.10683783143758774 -381,0.09629417955875397 -382,0.09864258021116257 -383,0.10383937507867813 -384,0.09225630015134811 -385,0.09355784207582474 -386,0.1000046581029892 -387,0.09254424273967743 -388,0.09591830521821976 -389,0.0974968895316124 -390,0.0882125273346901 -391,0.0904584750533104 -392,0.09042174369096756 -393,0.0905291885137558 -394,0.09297434240579605 -395,0.08991753309965134 -396,0.09003354609012604 -397,0.09019219130277634 -398,0.0883365273475647 -399,0.09333139657974243 -400,0.09429041296243668 -401,0.08662682771682739 -402,0.09221100062131882 -403,0.09059572219848633 -404,0.09371564537286758 -405,0.09223174303770065 -406,0.09811793267726898 -407,0.09162651747465134 -408,0.09644869714975357 -409,0.10163114964962006 -410,0.09158500283956528 -411,0.0986790880560875 -412,0.10003187507390976 -413,0.09423980116844177 -414,0.08997955918312073 -415,0.09323535114526749 -416,0.09603208303451538 -417,0.09223270416259766 -418,0.09033740311861038 -419,0.09454140067100525 -420,0.09319505840539932 -421,0.09141381084918976 -422,0.0936603918671608 -423,0.09779880940914154 -424,0.08887422829866409 -425,0.09252876043319702 -426,0.08980218321084976 -427,0.0943187028169632 -428,0.0967358872294426 -429,0.08744100481271744 -430,0.10084876418113708 -431,0.10374480485916138 -432,0.09330306947231293 -433,0.10494817793369293 -434,0.10844848304986954 -435,0.09797517210245132 -436,0.10017267614603043 -437,0.0904722586274147 -438,0.09588553011417389 -439,0.09845441579818726 -440,0.0955294743180275 -441,0.0915684849023819 -442,0.09397739171981812 -443,0.09183262288570404 -444,0.08821745961904526 -445,0.09195975959300995 -446,0.0921921357512474 -447,0.08588413149118423 -448,0.09337251633405685 -449,0.09104888141155243 -450,0.09137789905071259 -451,0.09369998425245285 -452,0.088620625436306 -453,0.08764416724443436 -454,0.09105505049228668 -455,0.0903974249958992 -456,0.08793661743402481 -457,0.09060773998498917 -458,0.0862128883600235 -459,0.0960705429315567 -460,0.09245429933071136 -461,0.091811403632164 -462,0.09008776396512985 -463,0.094814732670784 -464,0.09901759028434753 -465,0.09340405464172363 -466,0.0987653061747551 -467,0.09747809171676636 -468,0.09070084244012833 -469,0.09852699935436249 -470,0.09188579767942429 -471,0.10008783638477325 -472,0.10412608832120895 -473,0.09237663447856903 -474,0.09910950064659119 -475,0.10560891777276993 -476,0.09680061787366867 -477,0.09139062464237213 -478,0.09375815838575363 -479,0.09399715811014175 -480,0.09393680840730667 -481,0.0911487564444542 -482,0.09845338761806488 -483,0.09771918505430222 -484,0.08874927461147308 -485,0.09971313178539276 -486,0.09763951599597931 -487,0.08796034008264542 -488,0.09832049906253815 -489,0.0985594391822815 -490,0.09642988443374634 -491,0.09343703091144562 -492,0.0932387039065361 -493,0.10071919113397598 -494,0.09767960757017136 -495,0.0898810476064682 -496,0.09799271076917648 -497,0.09419918805360794 -498,0.09357702732086182 -499,0.0881480723619461 -500,0.09522488713264465 -501,0.09520559757947922 -502,0.08836741745471954 -503,0.09557448327541351 -504,0.0955585241317749 
-505,0.08798769116401672 -506,0.09044977277517319 -507,0.0893905907869339 -508,0.09686807543039322 -509,0.08883552253246307 -510,0.09679289907217026 -511,0.10040612518787384 -512,0.09210754930973053 -513,0.09433187544345856 -514,0.09513899683952332 -515,0.08803053200244904 -516,0.09292672574520111 -517,0.08902567625045776 -518,0.09483534097671509 -519,0.09343405812978745 -520,0.08801321685314178 -521,0.09400543570518494 -522,0.08751612901687622 -523,0.0914761871099472 -524,0.08969906717538834 -525,0.08803515136241913 -526,0.08902233839035034 -527,0.0910661444067955 -528,0.0920916274189949 -529,0.08707988262176514 -530,0.08968072384595871 -531,0.08849881589412689 -532,0.08938169479370117 -533,0.09113973379135132 -534,0.09176071733236313 -535,0.09130236506462097 -536,0.08720291405916214 -537,0.08918780088424683 -538,0.09047401696443558 -539,0.09034188836812973 -540,0.08874817192554474 -541,0.09136108309030533 -542,0.08883833140134811 -543,0.09006503224372864 -544,0.09592663496732712 -545,0.09371751546859741 -546,0.08797227591276169 -547,0.09515812247991562 -548,0.0920872911810875 -549,0.08987008035182953 -550,0.08802738040685654 -551,0.09153889864683151 -552,0.09070081263780594 -553,0.0879817083477974 -554,0.09114430099725723 -555,0.09268910437822342 -556,0.08744046837091446 -557,0.0892912819981575 -558,0.09339890629053116 -559,0.0901697650551796 -560,0.09040998667478561 -561,0.09055916219949722 -562,0.09482874721288681 -563,0.0915309265255928 -564,0.08983466029167175 -565,0.08854235708713531 -566,0.09003604203462601 -567,0.09548261016607285 -568,0.08783817291259766 -569,0.09604045748710632 -570,0.09964773803949356 -571,0.0921793207526207 -572,0.09485410153865814 -573,0.08849629014730453 -574,0.09174152463674545 -575,0.0876312255859375 -576,0.09413860738277435 -577,0.096188984811306 -578,0.088229700922966 -579,0.09514517337083817 -580,0.09359108656644821 -581,0.0891956239938736 -582,0.09245731681585312 -583,0.08973903208971024 -584,0.09046683460474014 -585,0.08563315868377686 -586,0.0932963415980339 -587,0.0907551571726799 -588,0.09387001395225525 -589,0.089577317237854 -590,0.09545493125915527 -591,0.09222213178873062 -592,0.08671575784683228 -593,0.08818276226520538 -594,0.08824516087770462 -595,0.08952522277832031 -596,0.08666578680276871 -597,0.08810175210237503 -598,0.08912788331508636 -599,0.09016019105911255 -600,0.08738287538290024 -601,0.08635912835597992 -602,0.08599337190389633 -603,0.09151189029216766 -604,0.08930191397666931 -605,0.08982005715370178 -606,0.09001108258962631 -607,0.09354859590530396 -608,0.08761412650346756 -609,0.089772529900074 -610,0.08646035194396973 -611,0.08640831708908081 -612,0.08794479072093964 -613,0.08508570492267609 -614,0.08698797971010208 -615,0.08559403568506241 -616,0.08798231929540634 -617,0.08937409520149231 -618,0.08538349717855453 -619,0.08604582399129868 -620,0.09037017077207565 -621,0.08481772243976593 -622,0.09213655441999435 -623,0.08716294169425964 -624,0.094637431204319 -625,0.09527615457773209 -626,0.08944790065288544 -627,0.10211900621652603 -628,0.10117089003324509 -629,0.08569598197937012 -630,0.08747269213199615 -631,0.08748382329940796 -632,0.09101036190986633 -633,0.09035855531692505 -634,0.0916837677359581 -635,0.08919995278120041 -636,0.08601386845111847 -637,0.0861494243144989 -638,0.08586558699607849 -639,0.08438877761363983 -640,0.08430448919534683 -641,0.08649617433547974 -642,0.08706244826316833 -643,0.08703390508890152 -644,0.08683035522699356 -645,0.08706016093492508 -646,0.08729204535484314 -647,0.08528515696525574 
-648,0.0859052985906601 -649,0.0861082673072815 -650,0.08672896027565002 -651,0.08954156935214996 -652,0.08615502715110779 -653,0.0890883356332779 -654,0.08742494136095047 -655,0.08453163504600525 -656,0.08450187742710114 -657,0.08616581559181213 -658,0.08897100389003754 -659,0.08624005317687988 -660,0.0881078913807869 -661,0.086825892329216 -662,0.08691633492708206 -663,0.08992868661880493 -664,0.09049665927886963 -665,0.08821763843297958 -666,0.08787685632705688 -667,0.08977510035037994 -668,0.08779612183570862 -669,0.08491197973489761 -670,0.0931791216135025 -671,0.0905308946967125 -672,0.08604444563388824 -673,0.08998730033636093 -674,0.09185387194156647 -675,0.08518996834754944 -676,0.08689168095588684 -677,0.09092776477336884 -678,0.08466765284538269 -679,0.0902145728468895 -680,0.09126101434230804 -681,0.0858089029788971 -682,0.09424100071191788 -683,0.09532719105482101 -684,0.08432864397764206 -685,0.10185179859399796 -686,0.10653038322925568 -687,0.09856142103672028 -688,0.08512286096811295 -689,0.09156247973442078 -690,0.08609194308519363 -691,0.0902973935008049 -692,0.08830748498439789 -693,0.0884597897529602 -694,0.08624012023210526 -695,0.09113713353872299 -696,0.08919820934534073 -697,0.08801528066396713 -698,0.09101375192403793 -699,0.0886772945523262 -700,0.08878053724765778 -701,0.09391471743583679 -702,0.08850860595703125 -703,0.09911099821329117 -704,0.09608938544988632 -705,0.08999534696340561 -706,0.09683427214622498 -707,0.09144386649131775 -708,0.09548517316579819 -709,0.10073491930961609 -710,0.09208124130964279 -711,0.09430395811796188 -712,0.0986219048500061 -713,0.08745791018009186 -714,0.09892908483743668 -715,0.1030159592628479 -716,0.0936620905995369 -717,0.09813239425420761 -718,0.10277265310287476 -719,0.0961880311369896 -720,0.0893874540925026 -721,0.1039406880736351 -722,0.10142696648836136 -723,0.08617771416902542 -724,0.09545456618070602 -725,0.0950719341635704 -726,0.08763140439987183 -727,0.09818205237388611 -728,0.09944413602352142 -729,0.08773493766784668 -730,0.09002990275621414 -731,0.08557849377393723 -732,0.09223645180463791 -733,0.09270121902227402 -734,0.08723696321249008 -735,0.08755343407392502 -736,0.09088144451379776 -737,0.0887916311621666 -738,0.08665182441473007 -739,0.09104519337415695 -740,0.08948154747486115 -741,0.08628997206687927 -742,0.08881746977567673 -743,0.08790184557437897 -744,0.08548533171415329 -745,0.08473032712936401 -746,0.08932878822088242 -747,0.08494479954242706 -748,0.08706183731555939 -749,0.08437594026327133 -750,0.08658696711063385 -751,0.08529528975486755 -752,0.0853797122836113 -753,0.0855259895324707 -754,0.08463060855865479 -755,0.08600350469350815 -756,0.08487430959939957 -757,0.08940987288951874 -758,0.0867174044251442 -759,0.0849468782544136 -760,0.08749057352542877 -761,0.08528009057044983 -762,0.08525365591049194 -763,0.08374887704849243 -764,0.08586834371089935 -765,0.08699502050876617 -766,0.08816370368003845 -767,0.08358876407146454 -768,0.08909103274345398 -769,0.08842542767524719 -770,0.08645258098840714 -771,0.0911831185221672 -772,0.08816546946763992 -773,0.08910848945379257 -774,0.08713427931070328 -775,0.09085990488529205 -776,0.09304521977901459 -777,0.0877145305275917 -778,0.1011110320687294 -779,0.09782411903142929 -780,0.08911401033401489 -781,0.0989842638373375 -782,0.09491714835166931 -783,0.09141109138727188 -784,0.09419765323400497 -785,0.08579862862825394 -786,0.09833977371454239 -787,0.10161300748586655 -788,0.0918167382478714 -789,0.0954250916838646 -790,0.09853867441415787 
-791,0.09215901792049408 -792,0.09436735510826111 -793,0.08866667002439499 -794,0.08803604543209076 -795,0.09556275606155396 -796,0.08811526745557785 -797,0.09656456112861633 -798,0.09808531403541565 -799,0.08565033227205276 -800,0.09726235270500183 -801,0.09889353066682816 -802,0.08511482924222946 -803,0.09397461265325546 -804,0.09359239786863327 -805,0.0849994570016861 -806,0.09459517151117325 -807,0.08825964480638504 -808,0.09517407417297363 -809,0.09838118404150009 -810,0.0866943895816803 -811,0.10036274790763855 -812,0.10622809082269669 -813,0.0968584194779396 -814,0.09481090307235718 -815,0.10105970501899719 -816,0.09634257107973099 -817,0.08979392051696777 -818,0.09252870827913284 -819,0.08553682267665863 -820,0.09205520898103714 -821,0.09009990096092224 -822,0.08576816320419312 -823,0.08766409754753113 -824,0.090064637362957 -825,0.08814151585102081 -826,0.08913213014602661 -827,0.09084317088127136 -828,0.08585047721862793 -829,0.08978833258152008 -830,0.0869155302643776 -831,0.08837475627660751 -832,0.08429519832134247 -833,0.08506884425878525 -834,0.0846506729722023 -835,0.08712343871593475 -836,0.08609563857316971 -837,0.08698568493127823 -838,0.0855136290192604 -839,0.08359453827142715 -840,0.08503107726573944 -841,0.0850646048784256 -842,0.0849982351064682 -843,0.08688318729400635 -844,0.08554621785879135 -845,0.09004855155944824 -846,0.08752091974020004 -847,0.09109396487474442 -848,0.09244338423013687 -849,0.08561521023511887 -850,0.0918392762541771 -851,0.08881453424692154 -852,0.08857013285160065 -853,0.08668860048055649 -854,0.09003901481628418 -855,0.08761484175920486 -856,0.08938921242952347 -857,0.09465041011571884 -858,0.08652839809656143 -859,0.0950155109167099 -860,0.09830385446548462 -861,0.08618337661027908 -862,0.09724317491054535 -863,0.10209757834672928 -864,0.09282030165195465 -865,0.0896901860833168 -866,0.09349024295806885 -867,0.08635901659727097 -868,0.09212923794984818 -869,0.09221664071083069 -870,0.08578569442033768 -871,0.09193001687526703 -872,0.08706481009721756 -873,0.09164869785308838 -874,0.09133827686309814 -875,0.08707263320684433 -876,0.08592971414327621 -877,0.09073283523321152 -878,0.09055977314710617 -879,0.08603046089410782 -880,0.08579009771347046 -881,0.09004230052232742 -882,0.08808678388595581 -883,0.08674216270446777 -884,0.08573461323976517 -885,0.09045308083295822 -886,0.09049452841281891 -887,0.08566458523273468 -888,0.09294062852859497 -889,0.08637373149394989 -890,0.09491830319166183 -891,0.09514688700437546 -892,0.08792034536600113 -893,0.09787826240062714 -894,0.09635020792484283 -895,0.08793128281831741 -896,0.09048006683588028 -897,0.08575156331062317 -898,0.09218709915876389 -899,0.08936404436826706 -900,0.09150044620037079 -901,0.0911165401339531 -902,0.09036890417337418 -903,0.09103973954916 -904,0.0842069610953331 -905,0.09199418872594833 -906,0.08697866648435593 -907,0.09155645966529846 -908,0.09178519994020462 -909,0.08847738057374954 -910,0.09169350564479828 -911,0.08727744966745377 -912,0.08827808499336243 -913,0.08837872743606567 -914,0.09284615516662598 -915,0.09082074463367462 -916,0.08686237782239914 -917,0.08779308944940567 -918,0.0898914709687233 -919,0.08887982368469238 -920,0.0860331803560257 -921,0.09287270158529282 -922,0.08812744915485382 -923,0.09056100994348526 -924,0.09026503562927246 -925,0.08617832511663437 -926,0.08619802445173264 -927,0.08983300626277924 -928,0.0918060839176178 -929,0.08626344799995422 -930,0.09488756954669952 -931,0.09106642007827759 -932,0.09005692601203918 -933,0.0958123654127121 
-934,0.08749425411224365 -935,0.09453941881656647 -936,0.09852144122123718 -937,0.09084882587194443 -938,0.09724809974431992 -939,0.1008693054318428 -940,0.09035119414329529 -941,0.09494432806968689 -942,0.1018427386879921 -943,0.0924219936132431 -944,0.09333211928606033 -945,0.10258560627698898 -946,0.09809768944978714 -947,0.08688611537218094 -948,0.09890028834342957 -949,0.10252870619297028 -950,0.09643847495317459 -951,0.09037400037050247 -952,0.09618120640516281 -953,0.09807644039392471 -954,0.0987856462597847 -955,0.09205686300992966 -956,0.08851956576108932 -957,0.09541408717632294 -958,0.09092716872692108 -959,0.09306355565786362 -960,0.09229444712400436 -961,0.09079907089471817 -962,0.09134029597043991 -963,0.08500976860523224 -964,0.08905602246522903 -965,0.08757957071065903 -966,0.08509646356105804 -967,0.08605407923460007 -968,0.08579733222723007 -969,0.08617521822452545 -970,0.0859123021364212 -971,0.08931072056293488 -972,0.08534834533929825 -973,0.08715841174125671 -974,0.08506163209676743 -975,0.08760447800159454 -976,0.08389415591955185 -977,0.08963233232498169 -978,0.08636623620986938 -979,0.09195214509963989 -980,0.0850883275270462 -981,0.09511573612689972 -982,0.09647275507450104 -983,0.08724037557840347 -984,0.09421275556087494 -985,0.09225244075059891 -986,0.08963071554899216 -987,0.08830036222934723 -988,0.08840072900056839 -989,0.08673693984746933 -990,0.08840671926736832 -991,0.0932331532239914 -992,0.08733344078063965 -993,0.08841769397258759 -994,0.08624839037656784 -995,0.08838313072919846 -996,0.08517496287822723 -997,0.08558677136898041 -998,0.08654356002807617 -999,0.0854133665561676 -1000,0.09224486351013184
diff --git a/training/base_loss/one_third/training_config.txt b/training/base_loss/one_third/training_config.txt
deleted file mode 100644
index 1cb6811..0000000
--- a/training/base_loss/one_third/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function Name: one_third
-Loss Function Exponent: 0.3333333333333333
-
-Current Loss Function (wrapper) Source Code:
-    def current_loss_fn(state_traj):
-        theta = state_traj[:, :, 0] # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # [batch_size, t_points]
-        return torch.mean(loss_fn(theta, desired_theta))
-
-Specific Loss Function Source Code:
-def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-    loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                       / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-    - When error = 0: loss = min_val
-    - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss/one_third/training_log.csv b/training/base_loss/one_third/training_log.csv
deleted file mode 100644
index dbd8520..0000000
--- a/training/base_loss/one_third/training_log.csv
+++ /dev/null
@@ -1,1002 +0,0 @@
-Epoch,Loss -0,1.8500734567642212 -1,1.992972493171692 -2,1.583356499671936 -3,1.2822308540344238 -4,1.2744250297546387 -5,1.8244584798812866 -6,1.9915246963500977 -7,1.90507173538208 -8,1.5858758687973022 -9,1.4944287538528442 -10,1.3345308303833008 -11,1.2213300466537476 -12,1.132070541381836 -13,0.9231880307197571 -14,0.7349575757980347 -15,0.7017775177955627 -16,0.6156940460205078 -17,0.596169114112854 -18,0.6319488883018494 -19,0.6300190091133118 -20,0.5995364189147949 -21,0.5700337290763855 -22,0.5253716707229614 -23,0.49458593130111694 -24,0.4639160633087158 -25,0.46890732645988464 -26,0.4698893427848816 -27,0.45200657844543457 -28,0.4312981069087982 -29,0.4118771255016327 -30,0.39792245626449585 -31,0.3845624625682831 -32,0.3725946843624115 -33,0.34971049427986145 -34,0.34106990694999695 -35,0.3309486508369446 -36,0.32319483160972595 -37,0.3071179986000061 -38,0.2838596999645233 -39,0.24386121332645416 -40,0.20276932418346405 -41,0.21755445003509521 -42,0.22806714475154877 -43,0.22222572565078735 -44,0.20148420333862305 -45,0.19588685035705566 -46,0.19650541245937347 -47,0.19573993980884552 -48,0.1944306194782257 -49,0.19368885457515717 -50,0.1926516592502594 -51,0.18866482377052307 -52,0.1817432940006256 -53,0.17253614962100983 -54,0.15935726463794708 -55,0.1482243537902832 -56,0.1473385989665985 -57,0.15746699273586273 -58,0.1469765454530716 -59,0.15016470849514008 -60,0.14962562918663025 -61,0.141968235373497 -62,0.13402274250984192 -63,0.13209892809391022 -64,0.1264854073524475 -65,0.13247829675674438 -66,0.1334579437971115 -67,0.12788155674934387 -68,0.12247855961322784 -69,0.12478284537792206 -70,0.1232563704252243 -71,0.12040062248706818 -72,0.11434593796730042 -73,0.1157166063785553
-74,0.11702128499746323 -75,0.11491663753986359 -76,0.11379499733448029 -77,0.11272184550762177 -78,0.11186322569847107 -79,0.1097307801246643 -80,0.10719379037618637 -81,0.10942176729440689 -82,0.11142036318778992 -83,0.11001326143741608 -84,0.10862196236848831 -85,0.10572490096092224 -86,0.10611898452043533 -87,0.10797567665576935 -88,0.10744979232549667 -89,0.10476995259523392 -90,0.1037522405385971 -91,0.1060289666056633 -92,0.10625165700912476 -93,0.1051378846168518 -94,0.10414135456085205 -95,0.10589528828859329 -96,0.10459107905626297 -97,0.10650108009576797 -98,0.10725948959589005 -99,0.10403476655483246 -100,0.10659579187631607 -101,0.10454709827899933 -102,0.10571218281984329 -103,0.10113769769668579 -104,0.10874108970165253 -105,0.10283954441547394 -106,0.10740552097558975 -107,0.10159118473529816 -108,0.10436558723449707 -109,0.10205211490392685 -110,0.10259027779102325 -111,0.10019517689943314 -112,0.10123086720705032 -113,0.10106956958770752 -114,0.1009039431810379 -115,0.10294726490974426 -116,0.10380042344331741 -117,0.10465715080499649 -118,0.10406818985939026 -119,0.09924906492233276 -120,0.10539379715919495 -121,0.10233420878648758 -122,0.10762971639633179 -123,0.10319650173187256 -124,0.10473226755857468 -125,0.09909086674451828 -126,0.10045117139816284 -127,0.10333842039108276 -128,0.10020627826452255 -129,0.10405154526233673 -130,0.10256611555814743 -131,0.10422560572624207 -132,0.09893374145030975 -133,0.11135852336883545 -134,0.10448107868432999 -135,0.10400428622961044 -136,0.10559538006782532 -137,0.1048390343785286 -138,0.10384581238031387 -139,0.10227879881858826 -140,0.10408157855272293 -141,0.10074891149997711 -142,0.10503613203763962 -143,0.1044822409749031 -144,0.10413957387208939 -145,0.10154113918542862 -146,0.10049742460250854 -147,0.09896412491798401 -148,0.09922495484352112 -149,0.09856640547513962 -150,0.10505637526512146 -151,0.10285720229148865 -152,0.10017483681440353 -153,0.10142631083726883 -154,0.10413454473018646 -155,0.10159770399332047 -156,0.10535518079996109 -157,0.10341019928455353 -158,0.10508455336093903 -159,0.10892055183649063 -160,0.10994512587785721 -161,0.1040862575173378 -162,0.10362064093351364 -163,0.10926572233438492 -164,0.10045751184225082 -165,0.10705938935279846 -166,0.10574431717395782 -167,0.10722965002059937 -168,0.11050453037023544 -169,0.10355319827795029 -170,0.09845374524593353 -171,0.10633336007595062 -172,0.10889257490634918 -173,0.10886044055223465 -174,0.10136368125677109 -175,0.11603458970785141 -176,0.10650015622377396 -177,0.11585497856140137 -178,0.12743134796619415 -179,0.12215818464756012 -180,0.11038094758987427 -181,0.111416295170784 -182,0.10920536518096924 -183,0.1030055582523346 -184,0.1095496267080307 -185,0.10561402142047882 -186,0.10789994895458221 -187,0.10354054719209671 -188,0.11267396062612534 -189,0.1122356429696083 -190,0.10252605378627777 -191,0.11568444967269897 -192,0.11091652512550354 -193,0.11501789093017578 -194,0.11571213603019714 -195,0.11094401776790619 -196,0.10985250771045685 -197,0.11476144194602966 -198,0.10637418925762177 -199,0.10945317149162292 -200,0.11796994507312775 -201,0.10974244028329849 -202,0.10556599497795105 -203,0.10949758440256119 -204,0.09897559881210327 -205,0.10495360195636749 -206,0.10351376980543137 -207,0.10633014142513275 -208,0.10465944558382034 -209,0.10629234462976456 -210,0.1085788831114769 -211,0.1003982350230217 -212,0.1101573184132576 -213,0.11064217984676361 -214,0.10032787173986435 -215,0.10593036562204361 -216,0.10490959882736206 
-217,0.0993816927075386 -218,0.10165613889694214 -219,0.10396943986415863 -220,0.10099973529577255 -221,0.10092822462320328 -222,0.0989375114440918 -223,0.09861666709184647 -224,0.09955202043056488 -225,0.10362653434276581 -226,0.1022309884428978 -227,0.10385815799236298 -228,0.09839285910129547 -229,0.10056401789188385 -230,0.0995795801281929 -231,0.09766797721385956 -232,0.10123102366924286 -233,0.09818368405103683 -234,0.09822934865951538 -235,0.09933499246835709 -236,0.09930658340454102 -237,0.09877404570579529 -238,0.10232838243246078 -239,0.09781421720981598 -240,0.09708759933710098 -241,0.09744632244110107 -242,0.09681924432516098 -243,0.09657686948776245 -244,0.10458303242921829 -245,0.09838756173849106 -246,0.10169599950313568 -247,0.09655197709798813 -248,0.09865947812795639 -249,0.09559579193592072 -250,0.09853377193212509 -251,0.09900741279125214 -252,0.10072837024927139 -253,0.10083542764186859 -254,0.09808935225009918 -255,0.09759587794542313 -256,0.09776798635721207 -257,0.09712398052215576 -258,0.10064806044101715 -259,0.1009712815284729 -260,0.09932029247283936 -261,0.09641843289136887 -262,0.10178624093532562 -263,0.09732791781425476 -264,0.09788664430379868 -265,0.1062893494963646 -266,0.10193648934364319 -267,0.10061626881361008 -268,0.10001487284898758 -269,0.09779813885688782 -270,0.1001533642411232 -271,0.10999063402414322 -272,0.1067686676979065 -273,0.10625611245632172 -274,0.10926124453544617 -275,0.1059865951538086 -276,0.10316289961338043 -277,0.10598158091306686 -278,0.09792415797710419 -279,0.109563909471035 -280,0.10237522423267365 -281,0.11220412701368332 -282,0.11652278155088425 -283,0.11231663078069687 -284,0.10668931901454926 -285,0.11456045508384705 -286,0.1009332612156868 -287,0.11833198368549347 -288,0.12722942233085632 -289,0.1174909919500351 -290,0.10446736216545105 -291,0.11354833841323853 -292,0.1063522920012474 -293,0.10691799223423004 -294,0.11060882359743118 -295,0.10395445674657822 -296,0.10227664560079575 -297,0.10942225158214569 -298,0.10054875910282135 -299,0.10026618093252182 -300,0.10236123949289322 -301,0.10125613957643509 -302,0.09835075587034225 -303,0.10296136140823364 -304,0.10117019712924957 -305,0.10823968052864075 -306,0.10179882496595383 -307,0.10940170288085938 -308,0.11807660758495331 -309,0.11414986103773117 -310,0.10143907368183136 -311,0.11526591330766678 -312,0.10756424069404602 -313,0.10311159491539001 -314,0.10982440412044525 -315,0.09761796146631241 -316,0.10825631767511368 -317,0.11059634387493134 -318,0.09740335494279861 -319,0.10818590223789215 -320,0.10651027411222458 -321,0.09774494171142578 -322,0.11271161586046219 -323,0.10795250535011292 -324,0.10438419133424759 -325,0.10871995985507965 -326,0.10081976652145386 -327,0.10366909950971603 -328,0.09759998321533203 -329,0.10682155191898346 -330,0.1108989268541336 -331,0.09683318436145782 -332,0.10836633294820786 -333,0.10823240131139755 -334,0.09722258895635605 -335,0.10418186336755753 -336,0.09914075583219528 -337,0.10286659002304077 -338,0.1066819280385971 -339,0.09485915303230286 -340,0.10811552405357361 -341,0.10420362651348114 -342,0.09702719748020172 -343,0.1006091758608818 -344,0.09887058287858963 -345,0.09555640816688538 -346,0.09525977820158005 -347,0.09690253436565399 -348,0.10095709562301636 -349,0.09567106515169144 -350,0.10141666978597641 -351,0.10168838500976562 -352,0.09630744159221649 -353,0.10144000500440598 -354,0.10274320840835571 -355,0.09752372652292252 -356,0.10595472157001495 -357,0.10742311924695969 -358,0.1049397885799408 
-359,0.1062144860625267 -360,0.10414883494377136 -361,0.10158197581768036 -362,0.09784878045320511 -363,0.10054174810647964 -364,0.09536811709403992 -365,0.09781156480312347 -366,0.10023479908704758 -367,0.1003466323018074 -368,0.09845741093158722 -369,0.10265955328941345 -370,0.09813949465751648 -371,0.10539140552282333 -372,0.10356979072093964 -373,0.09751603752374649 -374,0.09689577668905258 -375,0.10255052894353867 -376,0.10493439435958862 -377,0.09677892178297043 -378,0.10236430168151855 -379,0.09472611546516418 -380,0.09938713908195496 -381,0.09747305512428284 -382,0.10164045542478561 -383,0.09630342572927475 -384,0.10904547572135925 -385,0.10951423645019531 -386,0.0991285964846611 -387,0.10562854260206223 -388,0.10407943278551102 -389,0.11295865476131439 -390,0.11329641193151474 -391,0.10932811349630356 -392,0.10586515814065933 -393,0.1161167323589325 -394,0.10934749990701675 -395,0.1031053438782692 -396,0.11006486415863037 -397,0.10605942457914352 -398,0.1029936745762825 -399,0.1074867695569992 -400,0.10233067721128464 -401,0.10613004863262177 -402,0.11895466595888138 -403,0.11803729832172394 -404,0.10615702718496323 -405,0.10848736017942429 -406,0.10233892500400543 -407,0.10719569027423859 -408,0.11891797184944153 -409,0.11415201425552368 -410,0.10160140693187714 -411,0.11241111159324646 -412,0.10776536166667938 -413,0.09935561567544937 -414,0.10227757692337036 -415,0.09640639275312424 -416,0.10255976766347885 -417,0.10411545634269714 -418,0.10627429187297821 -419,0.09758670628070831 -420,0.10310393571853638 -421,0.10304336994886398 -422,0.09575369209051132 -423,0.10508811473846436 -424,0.10533151030540466 -425,0.09308028966188431 -426,0.10578975081443787 -427,0.10325035452842712 -428,0.09652277082204819 -429,0.10026683658361435 -430,0.10253790020942688 -431,0.09700910747051239 -432,0.09640668332576752 -433,0.10055333375930786 -434,0.0941166803240776 -435,0.10145344585180283 -436,0.09641510248184204 -437,0.10060283541679382 -438,0.09694337844848633 -439,0.1025814414024353 -440,0.10291697084903717 -441,0.09626108407974243 -442,0.10511017590761185 -443,0.09658303111791611 -444,0.09790249913930893 -445,0.0976870134472847 -446,0.0986001193523407 -447,0.09881547093391418 -448,0.09612889587879181 -449,0.09598644077777863 -450,0.09336458891630173 -451,0.09652546048164368 -452,0.10754289478063583 -453,0.11351044476032257 -454,0.10593675822019577 -455,0.10393931716680527 -456,0.10958452522754669 -457,0.09639126807451248 -458,0.10459888726472855 -459,0.09899360686540604 -460,0.10295654088258743 -461,0.10359126329421997 -462,0.09646996855735779 -463,0.10134056210517883 -464,0.09769776463508606 -465,0.10205361247062683 -466,0.09798165410757065 -467,0.1015583798289299 -468,0.09778502583503723 -469,0.0978616252541542 -470,0.09899405390024185 -471,0.09552732110023499 -472,0.09585050493478775 -473,0.09609570354223251 -474,0.0945451557636261 -475,0.09479791671037674 -476,0.0945838987827301 -477,0.09819983690977097 -478,0.09477793425321579 -479,0.09438478946685791 -480,0.10086547583341599 -481,0.0923386737704277 -482,0.09827220439910889 -483,0.09343525022268295 -484,0.10306304693222046 -485,0.09904708713293076 -486,0.1008712500333786 -487,0.09775782376527786 -488,0.10285085439682007 -489,0.09915544837713242 -490,0.10585817694664001 -491,0.11462828516960144 -492,0.10243623703718185 -493,0.10276704281568527 -494,0.10989858955144882 -495,0.09639979898929596 -496,0.11031151562929153 -497,0.11403795331716537 -498,0.10437335819005966 -499,0.10095743834972382 -500,0.11612169444561005 
-501,0.11235613375902176 -502,0.09663362056016922 -503,0.10777811706066132 -504,0.11166692525148392 -505,0.09830527752637863 -506,0.10678764432668686 -507,0.1118379682302475 -508,0.09801109880208969 -509,0.11120442301034927 -510,0.11918902397155762 -511,0.11250925809144974 -512,0.09878269582986832 -513,0.10993104428052902 -514,0.11623399704694748 -515,0.10305280983448029 -516,0.10284636914730072 -517,0.10063827037811279 -518,0.09522368758916855 -519,0.1056865006685257 -520,0.10179808735847473 -521,0.10347048193216324 -522,0.1056278645992279 -523,0.09533782303333282 -524,0.10487271100282669 -525,0.10219623148441315 -526,0.09472792595624924 -527,0.10272591561079025 -528,0.09754777699708939 -529,0.09954137355089188 -530,0.10418424010276794 -531,0.09417849034070969 -532,0.09465382993221283 -533,0.09738880395889282 -534,0.09489583969116211 -535,0.0998154729604721 -536,0.09485344588756561 -537,0.09737657755613327 -538,0.0940297544002533 -539,0.09466852247714996 -540,0.09609833359718323 -541,0.09745804220438004 -542,0.09833858162164688 -543,0.09320807456970215 -544,0.1025073379278183 -545,0.09758583456277847 -546,0.09705757349729538 -547,0.0955190658569336 -548,0.09265483170747757 -549,0.10199321806430817 -550,0.10020961612462997 -551,0.10561461001634598 -552,0.10195988416671753 -553,0.10430808365345001 -554,0.10589377582073212 -555,0.0963672548532486 -556,0.1138511523604393 -557,0.11678747832775116 -558,0.11612232774496078 -559,0.10312243551015854 -560,0.09743081778287888 -561,0.10796312242746353 -562,0.10199218988418579 -563,0.10134205967187881 -564,0.10280346870422363 -565,0.09608498215675354 -566,0.10644403845071793 -567,0.10537781566381454 -568,0.10631673038005829 -569,0.10892707109451294 -570,0.11043670028448105 -571,0.09983193129301071 -572,0.1060132086277008 -573,0.11413726210594177 -574,0.10235729813575745 -575,0.10607556998729706 -576,0.1110524907708168 -577,0.10394633561372757 -578,0.09770219027996063 -579,0.10300372540950775 -580,0.103421650826931 -581,0.0998820811510086 -582,0.09659934043884277 -583,0.10480709373950958 -584,0.09927747398614883 -585,0.09335656464099884 -586,0.09798356145620346 -587,0.09736587852239609 -588,0.09498909115791321 -589,0.09512472152709961 -590,0.09353263676166534 -591,0.09305290132761002 -592,0.09401579201221466 -593,0.09884355962276459 -594,0.09363912791013718 -595,0.09646448493003845 -596,0.094216488301754 -597,0.09653124958276749 -598,0.09408626705408096 -599,0.09793085604906082 -600,0.09417255967855453 -601,0.0943179801106453 -602,0.10213759541511536 -603,0.09717325121164322 -604,0.09315397590398788 -605,0.10295438021421432 -606,0.09452266991138458 -607,0.10768956691026688 -608,0.11325182020664215 -609,0.10618269443511963 -610,0.10064858198165894 -611,0.10417302697896957 -612,0.09925295412540436 -613,0.10175623744726181 -614,0.10999371111392975 -615,0.10150398313999176 -616,0.10386485606431961 -617,0.104522205889225 -618,0.09863711893558502 -619,0.10637788474559784 -620,0.1088588610291481 -621,0.10958018898963928 -622,0.1044822633266449 -623,0.10976491123437881 -624,0.10018584877252579 -625,0.09983718395233154 -626,0.10190631449222565 -627,0.09475044161081314 -628,0.10281135886907578 -629,0.10139242559671402 -630,0.0950988307595253 -631,0.09909921884536743 -632,0.09613794833421707 -633,0.09690449386835098 -634,0.10132253170013428 -635,0.09588008373975754 -636,0.09969419986009598 -637,0.09660590440034866 -638,0.09506132453680038 -639,0.09601907432079315 -640,0.10026708990335464 -641,0.09844716638326645 -642,0.0935208648443222 -643,0.10054533183574677 
-644,0.09305979311466217 -645,0.09915561974048615 -646,0.10256824642419815 -647,0.09218956530094147 -648,0.09828970581293106 -649,0.094027079641819 -650,0.09046217054128647 -651,0.09069246798753738 -652,0.09262094646692276 -653,0.09439925104379654 -654,0.08978655934333801 -655,0.09462623298168182 -656,0.1000613272190094 -657,0.09109900891780853 -658,0.09996724128723145 -659,0.09765205532312393 -660,0.09313052892684937 -661,0.10183044523000717 -662,0.09577928483486176 -663,0.09520495682954788 -664,0.09588124603033066 -665,0.0963270366191864 -666,0.09572341293096542 -667,0.09320474416017532 -668,0.09846598654985428 -669,0.09709357470273972 -670,0.09346645325422287 -671,0.09463823586702347 -672,0.09259199351072311 -673,0.09355779737234116 -674,0.09327208250761032 -675,0.09320273995399475 -676,0.09806223213672638 -677,0.09404134005308151 -678,0.09500584751367569 -679,0.09889735281467438 -680,0.09525176137685776 -681,0.09401847422122955 -682,0.09919262677431107 -683,0.0904468297958374 -684,0.10282745957374573 -685,0.10101828724145889 -686,0.09531676024198532 -687,0.11065082252025604 -688,0.10606309771537781 -689,0.09966004639863968 -690,0.10795953124761581 -691,0.10394593328237534 -692,0.10851508378982544 -693,0.11018208414316177 -694,0.09624627977609634 -695,0.10587016493082047 -696,0.11342794448137283 -697,0.10825623571872711 -698,0.09441660344600677 -699,0.10128716379404068 -700,0.09808122366666794 -701,0.09664520621299744 -702,0.09933317452669144 -703,0.09418705850839615 -704,0.10133834928274155 -705,0.09437466412782669 -706,0.10020319372415543 -707,0.10432370007038116 -708,0.0929187685251236 -709,0.0993427261710167 -710,0.1003146767616272 -711,0.09141690284013748 -712,0.09922046959400177 -713,0.09423093497753143 -714,0.09307568520307541 -715,0.09121738374233246 -716,0.09341566264629364 -717,0.09335632622241974 -718,0.09225942939519882 -719,0.09373567253351212 -720,0.0903000682592392 -721,0.09366103261709213 -722,0.09003204107284546 -723,0.08954481035470963 -724,0.0888693630695343 -725,0.08935777842998505 -726,0.09550575911998749 -727,0.08921303600072861 -728,0.10368066281080246 -729,0.1037496030330658 -730,0.08940467238426208 -731,0.10393142700195312 -732,0.10209126770496368 -733,0.08926544338464737 -734,0.10254687815904617 -735,0.09760237485170364 -736,0.09269998222589493 -737,0.0949963852763176 -738,0.09265365451574326 -739,0.09218426048755646 -740,0.09863362461328506 -741,0.09236530214548111 -742,0.09640873223543167 -743,0.09442349523305893 -744,0.09906872361898422 -745,0.102012038230896 -746,0.0956125408411026 -747,0.10111137479543686 -748,0.09995192289352417 -749,0.09811204671859741 -750,0.10153815150260925 -751,0.09869403392076492 -752,0.09239251166582108 -753,0.10230693221092224 -754,0.10214944928884506 -755,0.09935355931520462 -756,0.09736934304237366 -757,0.1010756641626358 -758,0.09254062175750732 -759,0.09723285585641861 -760,0.09554970264434814 -761,0.09446058422327042 -762,0.09170226752758026 -763,0.0906982496380806 -764,0.0908598005771637 -765,0.0923399105668068 -766,0.09425649046897888 -767,0.0971270203590393 -768,0.0975436344742775 -769,0.0911695584654808 -770,0.09062537550926208 -771,0.09564753621816635 -772,0.09431002289056778 -773,0.0905277207493782 -774,0.09977942705154419 -775,0.09525766968727112 -776,0.09781702607870102 -777,0.10062944889068604 -778,0.09400006383657455 -779,0.09248290210962296 -780,0.09098630398511887 -781,0.09269406646490097 -782,0.09101542830467224 -783,0.08988242596387863 -784,0.08900132030248642 -785,0.09039848297834396 -786,0.08925729990005493 
-787,0.09234998375177383 -788,0.08858956396579742 -789,0.09371182322502136 -790,0.08868519961833954 -791,0.09952704608440399 -792,0.09848577529191971 -793,0.0917368084192276 -794,0.09897687286138535 -795,0.09126663953065872 -796,0.09543199837207794 -797,0.0974469855427742 -798,0.0951068252325058 -799,0.09378030896186829 -800,0.0941193625330925 -801,0.0870770514011383 -802,0.09862051904201508 -803,0.09421730786561966 -804,0.09871690720319748 -805,0.10086490213871002 -806,0.09252005070447922 -807,0.10151337087154388 -808,0.09723712503910065 -809,0.09686701744794846 -810,0.09678316116333008 -811,0.09585054218769073 -812,0.09267128258943558 -813,0.0908295214176178 -814,0.0942511111497879 -815,0.09545382112264633 -816,0.09366502612829208 -817,0.0905621126294136 -818,0.10086597502231598 -819,0.09775286167860031 -820,0.10129952430725098 -821,0.10780645161867142 -822,0.10034447908401489 -823,0.09601963311433792 -824,0.10631582140922546 -825,0.10053566843271255 -826,0.09694835543632507 -827,0.10335655510425568 -828,0.10197741538286209 -829,0.09278085827827454 -830,0.10344434529542923 -831,0.10362712293863297 -832,0.09302518516778946 -833,0.10528450459241867 -834,0.10607357323169708 -835,0.09599374979734421 -836,0.10032093524932861 -837,0.10086249560117722 -838,0.09984412789344788 -839,0.09479491412639618 -840,0.10074219852685928 -841,0.10329867154359818 -842,0.09755027294158936 -843,0.09562796354293823 -844,0.10370653122663498 -845,0.10290806740522385 -846,0.09420132637023926 -847,0.09488770365715027 -848,0.09658156335353851 -849,0.09424760192632675 -850,0.09635535627603531 -851,0.09734971076250076 -852,0.09330404549837112 -853,0.09037736803293228 -854,0.09361349046230316 -855,0.09024336189031601 -856,0.09012269973754883 -857,0.09193692356348038 -858,0.09169667214155197 -859,0.08997884392738342 -860,0.09119491279125214 -861,0.09194093942642212 -862,0.08899727463722229 -863,0.09608647227287292 -864,0.09475867450237274 -865,0.09287890791893005 -866,0.0957975834608078 -867,0.09388134628534317 -868,0.09237957000732422 -869,0.09635548293590546 -870,0.09509041905403137 -871,0.09076973050832748 -872,0.09496936947107315 -873,0.09196388721466064 -874,0.09866541624069214 -875,0.10166264325380325 -876,0.08962225914001465 -877,0.10256389528512955 -878,0.10118252784013748 -879,0.09106425940990448 -880,0.1053883358836174 -881,0.10260775685310364 -882,0.0932576134800911 -883,0.11075186729431152 -884,0.11006271839141846 -885,0.09208186715841293 -886,0.1004561185836792 -887,0.10121256858110428 -888,0.09218056499958038 -889,0.09458109736442566 -890,0.09403849393129349 -891,0.0922168493270874 -892,0.09254360944032669 -893,0.09494330734014511 -894,0.08941283077001572 -895,0.09694518148899078 -896,0.09261094778776169 -897,0.09769291430711746 -898,0.09496679902076721 -899,0.09698962420225143 -900,0.09878621250391006 -901,0.0886169970035553 -902,0.09237775951623917 -903,0.09319429099559784 -904,0.09240169823169708 -905,0.08957933634519577 -906,0.09268063306808472 -907,0.09369860589504242 -908,0.09505987167358398 -909,0.09527012705802917 -910,0.09284576773643494 -911,0.09061513096094131 -912,0.09257001429796219 -913,0.09024561941623688 -914,0.09365944564342499 -915,0.08960198611021042 -916,0.08994600921869278 -917,0.08986985683441162 -918,0.09309609979391098 -919,0.08755294233560562 -920,0.09315424412488937 -921,0.08804930746555328 -922,0.0949721485376358 -923,0.08706925809383392 -924,0.09265260398387909 -925,0.09126508980989456 -926,0.08695035427808762 -927,0.09376859664916992 -928,0.08967585116624832 
-929,0.09375408291816711 -930,0.08927465975284576 -931,0.09592633694410324 -932,0.09416448324918747 -933,0.09361732006072998 -934,0.09468398988246918 -935,0.09071820974349976 -936,0.09654383361339569 -937,0.09687849879264832 -938,0.09139066934585571 -939,0.09688900411128998 -940,0.10573608428239822 -941,0.09439313411712646 -942,0.10118918865919113 -943,0.10670489817857742 -944,0.0958651453256607 -945,0.09582192450761795 -946,0.09313151240348816 -947,0.09758126735687256 -948,0.10190409421920776 -949,0.09063483029603958 -950,0.10381560772657394 -951,0.1042841225862503 -952,0.08981510996818542 -953,0.09685283899307251 -954,0.0943433940410614 -955,0.09779233485460281 -956,0.09590835869312286 -957,0.09934330731630325 -958,0.09784964472055435 -959,0.091598279774189 -960,0.10158346593379974 -961,0.09390149265527725 -962,0.09950032085180283 -963,0.10153553634881973 -964,0.0920366570353508 -965,0.09881175309419632 -966,0.09477642178535461 -967,0.10292976349592209 -968,0.10566858947277069 -969,0.09747478365898132 -970,0.10078216344118118 -971,0.10126890987157822 -972,0.09508948028087616 -973,0.0980617105960846 -974,0.09465488791465759 -975,0.1010512113571167 -976,0.10007986426353455 -977,0.08978164941072464 -978,0.09690512716770172 -979,0.0960882380604744 -980,0.08996697515249252 -981,0.09249133616685867 -982,0.09283368289470673 -983,0.09127673506736755 -984,0.09408814460039139 -985,0.09449879080057144 -986,0.0918799415230751 -987,0.0908675342798233 -988,0.09410698711872101 -989,0.0925537645816803 -990,0.0929882749915123 -991,0.0900004580616951 -992,0.09062408655881882 -993,0.0897524356842041 -994,0.09180423617362976 -995,0.09032952040433884 -996,0.09214714169502258 -997,0.08934348821640015 -998,0.09962314367294312 -999,0.10002415627241135 -1000,0.09235816448926926
diff --git a/training/base_loss/three/training_config.txt b/training/base_loss/three/training_config.txt
deleted file mode 100644
index f17065e..0000000
--- a/training/base_loss/three/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function Name: three
-Loss Function Exponent: 3
-
-Current Loss Function (wrapper) Source Code:
-    def current_loss_fn(state_traj):
-        theta = state_traj[:, :, 0] # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # [batch_size, t_points]
-        return torch.mean(loss_fn(theta, desired_theta))
-
-Specific Loss Function Source Code:
-def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-    loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                       / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-    - When error = 0: loss = min_val
-    - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss/three/training_log.csv b/training/base_loss/three/training_log.csv
deleted file mode 100644
index 70b508c..0000000
--- a/training/base_loss/three/training_log.csv
+++ /dev/null
@@ -1,1002 +0,0 @@
-Epoch,Loss -0,147.53147888183594 -1,21.261375427246094 -2,5.891673564910889 -3,2.6077992916107178 -4,1.1370291709899902 -5,0.6385924816131592 -6,0.6915555596351624 -7,0.6061262488365173 -8,0.6074151396751404 -9,0.6083714365959167 -10,0.605050802230835 -11,0.5985455513000488 -12,0.6010023951530457 -13,0.5952361226081848 -14,0.5857062935829163 -15,0.5748509168624878 -16,0.5646540522575378 -17,0.5512927770614624 -18,0.5332582592964172 -19,0.5170798301696777 -20,0.4973803758621216 -21,0.4802533686161041 -22,0.46568670868873596 -23,0.4525829255580902 -24,0.4407518208026886 -25,0.43000274896621704 -26,0.42019084095954895 -27,0.4111976623535156 -28,0.4040480852127075 -29,0.39871758222579956 -30,0.3923768699169159 -31,0.38596847653388977 -32,0.38170596957206726 -33,0.37672656774520874 -34,0.37251004576683044 -35,0.3706360459327698 -36,0.3665635585784912 -37,0.3643368184566498 -38,0.3616892397403717 -39,0.35870951414108276 -40,0.35657182335853577 -41,0.35321974754333496 -42,0.35056939721107483 -43,0.3478899896144867 -44,0.3449389338493347 -45,0.3426477313041687 -46,0.3401634097099304 -47,0.3378303647041321 -48,0.33628085255622864 -49,0.33464860916137695 -50,0.33279597759246826 -51,0.33076775074005127 -52,0.328666090965271 -53,0.3264012336730957 -54,0.3239538371562958 -55,0.32139408588409424 -56,0.31895747780799866 -57,0.3170231282711029 -58,0.3149743676185608 -59,0.31262704730033875 -60,0.3100486099720001 -61,0.30748704075813293 -62,0.30523303151130676 -63,0.3030840754508972 -64,0.300724059343338 -65,0.2983125150203705 -66,0.29592016339302063 -67,0.2934330701828003 -68,0.2910674214363098 -69,0.2887977361679077 -70,0.28616946935653687 -71,0.28358131647109985 -72,0.28116247057914734 -73,0.27880653738975525 -74,0.27658432722091675
-75,0.2743116617202759 -76,0.2719093859195709 -77,0.2695898413658142 -78,0.2672993242740631 -79,0.2649315893650055 -80,0.2625272870063782 -81,0.26015040278434753 -82,0.25789791345596313 -83,0.25559601187705994 -84,0.25310903787612915 -85,0.2504628598690033 -86,0.24768434464931488 -87,0.24490614235401154 -88,0.24186447262763977 -89,0.23844817280769348 -90,0.23509971797466278 -91,0.23214495182037354 -92,0.22933851182460785 -93,0.22656965255737305 -94,0.2238079458475113 -95,0.22107939422130585 -96,0.21855473518371582 -97,0.2162478268146515 -98,0.21391157805919647 -99,0.21160833537578583 -100,0.2093716859817505 -101,0.20715609192848206 -102,0.20492880046367645 -103,0.2028047740459442 -104,0.20066344738006592 -105,0.19845257699489594 -106,0.19616585969924927 -107,0.19381804764270782 -108,0.19128037989139557 -109,0.17494212090969086 -110,0.10716380923986435 -111,0.10715986788272858 -112,0.10993078351020813 -113,0.1130639836192131 -114,0.11589814722537994 -115,0.11807447671890259 -116,0.11957936733961105 -117,0.12002908438444138 -118,0.11941218376159668 -119,0.11795084178447723 -120,0.11595441401004791 -121,0.11374032497406006 -122,0.11151764541864395 -123,0.10938114672899246 -124,0.10738934576511383 -125,0.10557354986667633 -126,0.10392256081104279 -127,0.10243283957242966 -128,0.10108807682991028 -129,0.09987969696521759 -130,0.09879843890666962 -131,0.09784293174743652 -132,0.09701330214738846 -133,0.0963020771741867 -134,0.09574238955974579 -135,0.09532471746206284 -136,0.09506046026945114 -137,0.09487618505954742 -138,0.09475447237491608 -139,0.09467997401952744 -140,0.09464657306671143 -141,0.09465795755386353 -142,0.09467548131942749 -143,0.0946844145655632 -144,0.09468094259500504 -145,0.0946597009897232 -146,0.09461823105812073 -147,0.09455692023038864 -148,0.09447645395994186 -149,0.09437862038612366 -150,0.09426578879356384 -151,0.09414089471101761 -152,0.09400674700737 -153,0.09386694431304932 -154,0.0937243402004242 -155,0.09358269721269608 -156,0.09345539659261703 -157,0.09333407133817673 -158,0.0932118371129036 -159,0.09308682382106781 -160,0.09296078979969025 -161,0.09283531457185745 -162,0.09270970523357391 -163,0.09258268028497696 -164,0.09245548397302628 -165,0.09232783317565918 -166,0.09220302850008011 -167,0.09208209812641144 -168,0.09196651726961136 -169,0.09185603260993958 -170,0.09175104647874832 -171,0.0916556641459465 -172,0.09157522767782211 -173,0.0915113240480423 -174,0.09145496785640717 -175,0.09140581637620926 -176,0.09136401116847992 -177,0.09132834523916245 -178,0.09129789471626282 -179,0.09127190709114075 -180,0.09124946594238281 -181,0.09122975915670395 -182,0.09121190011501312 -183,0.09119515866041183 -184,0.09117889404296875 -185,0.09116298705339432 -186,0.09114784002304077 -187,0.09113328903913498 -188,0.09111907333135605 -189,0.09110541641712189 -190,0.09109289199113846 -191,0.09108127653598785 -192,0.0910702496767044 -193,0.09105963259935379 -194,0.09104940295219421 -195,0.09103924036026001 -196,0.0910288393497467 -197,0.09101791679859161 -198,0.09100648015737534 -199,0.09099466353654861 -200,0.09098269045352936 -201,0.09097060561180115 -202,0.09095878899097443 -203,0.090947724878788 -204,0.0909373089671135 -205,0.09092709422111511 -206,0.09091712534427643 -207,0.09090735763311386 -208,0.09089777618646622 -209,0.09088844060897827 -210,0.09087927639484406 -211,0.09087029844522476 -212,0.09086146950721741 -213,0.09085298329591751 -214,0.0908447727560997 -215,0.0908367782831192 -216,0.09082897007465363 -217,0.09082125127315521 -218,0.09081362187862396 
-219,0.09080605953931808 -220,0.09079856425523758 -221,0.09079115092754364 -222,0.09078378230333328 -223,0.0907764732837677 -224,0.0907692238688469 -225,0.09076201915740967 -226,0.0907549038529396 -227,0.09074797481298447 -228,0.09074125438928604 -229,0.09073446691036224 -230,0.09072758257389069 -231,0.09072062373161316 -232,0.09071363508701324 -233,0.09070684015750885 -234,0.09070020914077759 -235,0.09069361537694931 -236,0.09068705141544342 -237,0.09068053215742111 -238,0.09067408740520477 -239,0.09066767990589142 -240,0.09066132456064224 -241,0.09065509587526321 -242,0.09064894914627075 -243,0.09064283221960068 -244,0.09063675999641418 -245,0.09063073992729187 -246,0.09062477946281433 -247,0.09061893820762634 -248,0.0906132161617279 -249,0.09060746431350708 -250,0.09060171246528625 -251,0.09059610217809677 -252,0.09059057384729385 -253,0.09058508276939392 -254,0.09057959914207458 -255,0.09057415276765823 -256,0.09056872874498367 -257,0.09056329727172852 -258,0.09055790305137634 -259,0.09055256098508835 -260,0.09054729342460632 -261,0.0905420258641243 -262,0.09053678065538406 -263,0.0905316025018692 -264,0.09052647650241852 -265,0.09052136540412903 -266,0.09051629155874252 -267,0.09051123261451721 -268,0.09050624817609787 -269,0.09050127863883972 -270,0.09049632400274277 -271,0.09049143642187119 -272,0.09048658609390259 -273,0.09048176556825638 -274,0.09047697484493256 -275,0.09047223627567291 -276,0.09046754986047745 -277,0.09046291559934616 -278,0.09045828878879547 -279,0.09045377373695374 -280,0.0904492735862732 -281,0.09044481813907623 -282,0.09044039994478226 -283,0.09043603390455246 -284,0.09043172746896744 -285,0.090427465736866 -286,0.09042324125766754 -287,0.09041909128427505 -288,0.09041496366262436 -289,0.09041087329387665 -290,0.09040683507919312 -291,0.09040285646915436 -292,0.09039893746376038 -293,0.09039504081010818 -294,0.09039124846458435 -295,0.0903875082731247 -296,0.09038380533456802 -297,0.09038016945123672 -298,0.09037656337022781 -299,0.0903729796409607 -300,0.09036945551633835 -301,0.09036596864461899 -302,0.09036257117986679 -303,0.09035926312208176 -304,0.0903560146689415 -305,0.09035279601812363 -306,0.09034962952136993 -307,0.09034651517868042 -308,0.09034344553947449 -309,0.09034043550491333 -310,0.09033747762441635 -311,0.09033451974391937 -312,0.09033158421516418 -313,0.09032870084047318 -314,0.09032584726810455 -315,0.09032303094863892 -316,0.09032021462917328 -317,0.09031743556261063 -318,0.09031469374895096 -319,0.09031200408935547 -320,0.09030933678150177 -321,0.09030667692422867 -322,0.09030400216579437 -323,0.09030134975910187 -324,0.09029871970415115 -325,0.09029611200094223 -326,0.0902935266494751 -327,0.09029096364974976 -328,0.0902884230017662 -329,0.09028589725494385 -330,0.09028340131044388 -331,0.0902809128165245 -332,0.09027843177318573 -333,0.09027594327926636 -334,0.09027346968650818 -335,0.09027101844549179 -336,0.0902685821056366 -337,0.0902661681175232 -338,0.09026378393173218 -339,0.09026141464710236 -340,0.09025907516479492 -341,0.09025678783655167 -342,0.09025456011295319 -343,0.09025240689516068 -344,0.09025029093027115 -345,0.09024834632873535 -346,0.09024642407894135 -347,0.09024457633495331 -348,0.09024275094270706 -349,0.090240977704525 -350,0.09023922681808472 -351,0.09023751318454742 -352,0.09023579210042953 -353,0.09023413807153702 -354,0.09023244678974152 -355,0.09023081511259079 -356,0.09022918343544006 -357,0.09022758901119232 -358,0.09022599458694458 -359,0.09022443741559982 -360,0.09022299200296402 
-361,0.09022154659032822 -362,0.09022018313407898 -363,0.09021882712841034 -364,0.09021754562854767 -365,0.09021621942520142 -366,0.09021498262882233 -367,0.09021375328302383 -368,0.0902126207947731 -369,0.09021157026290894 -370,0.09021079540252686 -371,0.09021008014678955 -372,0.09020940959453583 -373,0.09020864963531494 -374,0.09020794928073883 -375,0.09020723402500153 -376,0.0902065709233284 -377,0.09020588546991348 -378,0.09020522981882095 -379,0.09020452201366425 -380,0.09020388126373291 -381,0.0902031660079956 -382,0.09020249545574188 -383,0.09020178020000458 -384,0.09020110219717026 -385,0.09020042419433594 -386,0.09019976109266281 -387,0.09019908308982849 -388,0.09019842743873596 -389,0.09019780158996582 -390,0.09019725769758224 -391,0.09019678831100464 -392,0.09019637107849121 -393,0.09019593149423599 -394,0.09019546210765839 -395,0.0901949554681778 -396,0.0901944562792778 -397,0.09019391238689423 -398,0.09019338339567184 -399,0.09019286185503006 -400,0.09019242972135544 -401,0.09019199758768082 -402,0.09019160270690918 -403,0.09019120782613754 -404,0.09019080549478531 -405,0.09019047766923904 -406,0.09019005298614502 -407,0.09018969535827637 -408,0.09018930792808533 -409,0.09018892049789429 -410,0.09018860012292862 -411,0.09018819779157639 -412,0.09018786251544952 -413,0.09018754214048386 -414,0.09018721431493759 -415,0.0901869460940361 -416,0.09018661081790924 -417,0.09018632769584656 -418,0.09018601477146149 -419,0.09018567949533463 -420,0.09018542617559433 -421,0.09018505364656448 -422,0.0901847630739212 -423,0.09018447250127792 -424,0.09018415957689285 -425,0.09018390625715256 -426,0.09018363058567047 -427,0.09018337726593018 -428,0.09018312394618988 -429,0.0901825875043869 -430,0.09018260985612869 -431,0.0901826024055481 -432,0.09018294513225555 -433,0.09018116444349289 -434,0.09018038958311081 -435,0.09018279612064362 -436,0.09018052369356155 -437,0.0901782438158989 -438,0.09017901867628098 -439,0.0901767835021019 -440,0.0901780053973198 -441,0.0901755839586258 -442,0.09017618745565414 -443,0.09017468243837357 -444,0.09017445892095566 -445,0.09017378836870193 -446,0.09017224609851837 -447,0.09017357230186462 -448,0.09017208963632584 -449,0.09017203748226166 -450,0.09017205238342285 -451,0.09016969799995422 -452,0.09017084538936615 -453,0.09016906470060349 -454,0.09016869962215424 -455,0.09016852080821991 -456,0.09016738831996918 -457,0.09016871452331543 -458,0.0901670902967453 -459,0.09016674757003784 -460,0.09016644954681396 -461,0.09016599506139755 -462,0.09016638994216919 -463,0.09016511589288712 -464,0.09016522765159607 -465,0.09016439318656921 -466,0.09016463905572891 -467,0.09016475826501846 -468,0.09016432613134384 -469,0.09016373753547668 -470,0.09016396105289459 -471,0.09016339480876923 -472,0.0901632159948349 -473,0.09016252309083939 -474,0.09016291052103043 -475,0.09016193449497223 -476,0.09016286581754684 -477,0.09016238898038864 -478,0.09016165137290955 -479,0.09016130864620209 -480,0.09016123414039612 -481,0.09016136825084686 -482,0.09016167372465134 -483,0.09016051143407822 -484,0.09016024321317673 -485,0.0901598259806633 -486,0.09016019850969315 -487,0.09015965461730957 -488,0.09015914052724838 -489,0.09015912562608719 -490,0.09015867114067078 -491,0.09015854448080063 -492,0.09015808254480362 -493,0.09015782922506332 -494,0.090157650411129 -495,0.09015760570764542 -496,0.09015738219022751 -497,0.09015703946352005 -498,0.09015706926584244 -499,0.09015704691410065 -500,0.09015682339668274 -501,0.09015678614377975 -502,0.09015638381242752 
-503,0.09015651792287827 -504,0.0901562049984932 -505,0.0901564285159111 -506,0.09015586972236633 -507,0.09015578031539917 -508,0.09015591442584991 -509,0.09015540033578873 -510,0.09015528112649918 -511,0.09015508741140366 -512,0.09015536308288574 -513,0.09015515446662903 -514,0.09015484154224396 -515,0.09015505760908127 -516,0.09015452861785889 -517,0.09015459567308426 -518,0.09015414118766785 -519,0.09015441685914993 -520,0.09015391767024994 -521,0.09015373885631561 -522,0.0901537761092186 -523,0.09015348553657532 -524,0.0901532992720604 -525,0.09015306830406189 -526,0.09015311300754547 -527,0.0901530534029007 -528,0.09015282988548279 -529,0.09015276283025742 -530,0.09015214443206787 -531,0.0901520848274231 -532,0.09015180915594101 -533,0.09015200287103653 -534,0.09015160799026489 -535,0.09015122056007385 -536,0.09015121310949326 -537,0.09015098959207535 -538,0.09015064686536789 -539,0.09015031903982162 -540,0.09015030413866043 -541,0.09015023708343506 -542,0.09015002101659775 -543,0.09014954417943954 -544,0.09014897793531418 -545,0.09014870226383209 -546,0.09014860540628433 -547,0.09014865010976791 -548,0.0901479423046112 -549,0.09014765918254852 -550,0.09014766663312912 -551,0.09014735370874405 -552,0.09014686942100525 -553,0.09014656394720078 -554,0.09014631807804108 -555,0.09014635533094406 -556,0.09014631807804108 -557,0.09014609456062317 -558,0.09014598280191422 -559,0.09014606475830078 -560,0.09014579653739929 -561,0.09014594554901123 -562,0.09014597535133362 -563,0.09014616161584854 -564,0.09014595299959183 -565,0.09014611691236496 -566,0.0901462733745575 -567,0.0901462659239769 -568,0.09014604240655899 -569,0.09014616906642914 -570,0.09014658629894257 -571,0.09014641493558884 -572,0.09014643728733063 -573,0.090146504342556 -574,0.09014680236577988 -575,0.090147003531456 -576,0.09014680981636047 -577,0.09014696627855301 -578,0.09014719724655151 -579,0.09014730155467987 -580,0.09014743566513062 -581,0.09014774858951569 -582,0.09014788269996643 -583,0.09014804661273956 -584,0.0901481881737709 -585,0.09014841914176941 -586,0.09014904499053955 -587,0.0901491641998291 -588,0.09014987200498581 -589,0.09014999866485596 -590,0.09015057235956192 -591,0.09015098959207535 -592,0.09015116840600967 -593,0.09015180170536041 -594,0.09015185385942459 -595,0.0901530459523201 -596,0.09015291184186935 -597,0.0901532843708992 -598,0.09015367180109024 -599,0.09015391767024994 -600,0.09015437215566635 -601,0.09015455096960068 -602,0.0901554524898529 -603,0.0901557207107544 -604,0.09015604108572006 -605,0.09015647321939468 -606,0.09015641361474991 -607,0.09015686064958572 -608,0.0901573896408081 -609,0.09015754610300064 -610,0.0901581197977066 -611,0.09015833586454391 -612,0.09015922993421555 -613,0.09015974402427673 -614,0.0901598185300827 -615,0.09016074240207672 -616,0.09016039222478867 -617,0.09016094356775284 -618,0.09016103297472 -619,0.09016218781471252 -620,0.0901624783873558 -621,0.09016270935535431 -622,0.09016350656747818 -623,0.0901629775762558 -624,0.09016358107328415 -625,0.09016379714012146 -626,0.09016404300928116 -627,0.09016434848308563 -628,0.09016469866037369 -629,0.09016548842191696 -630,0.09016577899456024 -631,0.09016589820384979 -632,0.09016669541597366 -633,0.09016681462526321 -634,0.0901671051979065 -635,0.0901678130030632 -636,0.09016793221235275 -637,0.09016833454370499 -638,0.09016916900873184 -639,0.09016922116279602 -640,0.09016965329647064 -641,0.0901702269911766 -642,0.0901704952120781 -643,0.09017080813646317 -644,0.0901711955666542 -645,0.09017150849103928 
-646,0.09017201513051987 -647,0.09017234295606613 -648,0.09017293900251389 -649,0.09017340838909149 -650,0.09017373621463776 -651,0.09017442911863327 -652,0.09017480909824371 -653,0.09017512202262878 -654,0.09017620980739594 -655,0.09017619490623474 -656,0.09017668664455414 -657,0.0901774987578392 -658,0.09017782658338547 -659,0.09017828106880188 -660,0.09017903357744217 -661,0.09017936885356903 -662,0.09017985314130783 -663,0.09018054604530334 -664,0.09018084406852722 -665,0.09018135815858841 -666,0.09018202126026154 -667,0.0901823416352272 -668,0.09018278121948242 -669,0.09018351882696152 -670,0.09018391370773315 -671,0.09018430858850479 -672,0.09018506854772568 -673,0.09018541872501373 -674,0.09018591791391373 -675,0.09018668532371521 -676,0.09018706530332565 -677,0.09018751233816147 -678,0.09018857777118683 -679,0.09018883109092712 -680,0.09018924832344055 -681,0.09019025415182114 -682,0.09019038081169128 -683,0.09019092470407486 -684,0.09019164741039276 -685,0.09019190818071365 -686,0.09019245207309723 -687,0.09019327908754349 -688,0.09019366651773453 -689,0.09019404649734497 -690,0.09019522368907928 -691,0.09019532799720764 -692,0.09019579738378525 -693,0.09019673615694046 -694,0.09019698947668076 -695,0.0901976004242897 -696,0.09019852429628372 -697,0.09019877761602402 -698,0.09019928425550461 -699,0.09020029753446579 -700,0.09020058065652847 -701,0.09020102769136429 -702,0.09020204842090607 -703,0.09020224213600159 -704,0.09020281583070755 -705,0.09020377695560455 -706,0.09020395576953888 -707,0.09020449221134186 -708,0.09020540863275528 -709,0.09020578116178513 -710,0.09020622074604034 -711,0.0902073085308075 -712,0.09020743519067764 -713,0.090208038687706 -714,0.09020894765853882 -715,0.09020917862653732 -716,0.09020967781543732 -717,0.09021064639091492 -718,0.09021099656820297 -719,0.0902114287018776 -720,0.09021251648664474 -721,0.09021259844303131 -722,0.09021317958831787 -723,0.09021403640508652 -724,0.09021429717540741 -725,0.0902147889137268 -726,0.0902157872915268 -727,0.09021613001823425 -728,0.09021655470132828 -729,0.09021763503551483 -730,0.0902177095413208 -731,0.09021827578544617 -732,0.09021914005279541 -733,0.09021936357021332 -734,0.09021986275911331 -735,0.09022083878517151 -736,0.09022106230258942 -737,0.09022147208452225 -738,0.09022262692451477 -739,0.09022260457277298 -740,0.09022311866283417 -741,0.0902240127325058 -742,0.09022410213947296 -743,0.09022451192140579 -744,0.09022542834281921 -745,0.0902254730463028 -746,0.09022583812475204 -747,0.09022681415081024 -748,0.09022671729326248 -749,0.09022708237171173 -750,0.0902278944849968 -751,0.09022785723209381 -752,0.09022799134254456 -753,0.09022878110408783 -754,0.09022844582796097 -755,0.09022855758666992 -756,0.09022919833660126 -757,0.09022864699363708 -758,0.09022863954305649 -759,0.0902290791273117 -760,0.0902286246418953 -761,0.09022840112447739 -762,0.09022876620292664 -763,0.09022795408964157 -764,0.09022804349660873 -765,0.09022902697324753 -766,0.09022849798202515 -767,0.09022882580757141 -768,0.09022954851388931 -769,0.09022948145866394 -770,0.09022969007492065 -771,0.0902305319905281 -772,0.09023050218820572 -773,0.09023073315620422 -774,0.09023161977529526 -775,0.09023143351078033 -776,0.09023188054561615 -777,0.09023252129554749 -778,0.09023264050483704 -779,0.09023301303386688 -780,0.09023385494947433 -781,0.09023403376340866 -782,0.09023430198431015 -783,0.09023541212081909 -784,0.09023526310920715 -785,0.09023568779230118 -786,0.09023670852184296 -787,0.09023653715848923 
-788,0.09023702144622803 -789,0.09023793041706085 -790,0.09023810178041458 -791,0.09023842960596085 -792,0.09023945778608322 -793,0.0902395099401474 -794,0.09023986756801605 -795,0.09024100005626678 -796,0.09024082124233246 -797,0.0902412086725235 -798,0.09024225175380707 -799,0.09024208784103394 -800,0.09024249017238617 -801,0.09024343639612198 -802,0.0902433767914772 -803,0.09024371951818466 -804,0.09024481475353241 -805,0.09024462848901749 -806,0.09024496376514435 -807,0.09024616330862045 -808,0.09024595469236374 -809,0.09024631232023239 -810,0.09024748206138611 -811,0.09024728089570999 -812,0.09024766087532043 -813,0.09024875611066818 -814,0.09024864435195923 -815,0.0902489572763443 -816,0.09025004506111145 -817,0.0902499333024025 -818,0.09025026857852936 -819,0.09025142341852188 -820,0.09025122970342636 -821,0.09025155752897263 -822,0.09025269746780396 -823,0.09025246649980545 -824,0.09025280177593231 -825,0.09025385975837708 -826,0.09025373309850693 -827,0.09025402367115021 -828,0.09025505930185318 -829,0.09025498479604721 -830,0.0902552455663681 -831,0.09025628864765167 -832,0.09025624394416809 -833,0.09025644510984421 -834,0.09025754779577255 -835,0.0902574434876442 -836,0.0902576595544815 -837,0.09025876969099045 -838,0.0902586504817009 -839,0.0902588814496994 -840,0.09025996178388596 -841,0.0902598425745964 -842,0.09026006609201431 -843,0.09026109427213669 -844,0.09026111662387848 -845,0.09026132524013519 -846,0.09026221185922623 -847,0.09026242792606354 -848,0.09026253968477249 -849,0.09026329219341278 -850,0.09026382863521576 -851,0.09026375412940979 -852,0.09026439487934113 -853,0.0902651771903038 -854,0.09026492387056351 -855,0.0902656763792038 -856,0.09026595950126648 -857,0.09026587009429932 -858,0.0902666449546814 -859,0.09026678651571274 -860,0.09026684612035751 -861,0.09026731550693512 -862,0.09026793390512466 -863,0.09026803076267242 -864,0.09026846289634705 -865,0.09026890248060226 -866,0.09026875346899033 -867,0.09026915580034256 -868,0.09026964008808136 -869,0.09026981890201569 -870,0.09027046710252762 -871,0.09027101844549179 -872,0.09027104824781418 -873,0.09027133136987686 -874,0.09027163684368134 -875,0.0902719497680664 -876,0.09027205407619476 -877,0.09027252346277237 -878,0.09027343988418579 -879,0.09027314931154251 -880,0.09027387201786041 -881,0.09027466922998428 -882,0.09027446806430817 -883,0.0902753472328186 -884,0.09027519077062607 -885,0.09027595818042755 -886,0.09027574211359024 -887,0.09027636796236038 -888,0.09027748554944992 -889,0.09027746319770813 -890,0.0902782753109932 -891,0.09027820825576782 -892,0.09027864784002304 -893,0.09027885645627975 -894,0.09028004109859467 -895,0.09028016030788422 -896,0.09028065949678421 -897,0.09028054773807526 -898,0.09028109163045883 -899,0.09028178453445435 -900,0.09028232097625732 -901,0.09028219431638718 -902,0.0902823656797409 -903,0.0902828499674797 -904,0.09028352797031403 -905,0.09028419852256775 -906,0.09028496593236923 -907,0.09028498083353043 -908,0.09028548002243042 -909,0.09028609842061996 -910,0.09028720110654831 -911,0.09028790891170502 -912,0.09028758853673935 -913,0.09028791636228561 -914,0.09028838574886322 -915,0.09028900414705276 -916,0.0902893990278244 -917,0.09029021114110947 -918,0.09029100090265274 -919,0.09029130637645721 -920,0.09029217064380646 -921,0.09029201418161392 -922,0.09029264748096466 -923,0.09029299020767212 -924,0.09029387682676315 -925,0.09029407799243927 -926,0.09029452502727509 -927,0.09029548615217209 -928,0.09029604494571686 -929,0.09029711782932281 -930,0.09029729664325714 
-931,0.0902976393699646 -932,0.09029802680015564 -933,0.09029863029718399 -934,0.09029961377382278 -935,0.09030048549175262 -936,0.09030123054981232 -937,0.09030129015445709 -938,0.09030146896839142 -939,0.09030191600322723 -940,0.09030299633741379 -941,0.090303435921669 -942,0.09030383080244064 -943,0.09030452370643616 -944,0.09030462801456451 -945,0.09030552208423615 -946,0.09030651301145554 -947,0.0903068482875824 -948,0.09030701220035553 -949,0.0903075709939003 -950,0.09030844271183014 -951,0.0903090387582779 -952,0.09030991792678833 -953,0.09030985087156296 -954,0.09030988067388535 -955,0.09031035006046295 -956,0.09031107276678085 -957,0.09031179547309875 -958,0.09031223505735397 -959,0.09031260758638382 -960,0.09031335264444351 -961,0.09031404554843903 -962,0.09031419456005096 -963,0.09031486511230469 -964,0.09031541645526886 -965,0.09031607210636139 -966,0.09031675010919571 -967,0.09031708538532257 -968,0.09031704068183899 -969,0.09031738340854645 -970,0.09031824767589569 -971,0.09031844884157181 -972,0.0903189480304718 -973,0.09031945466995239 -974,0.09031941741704941 -975,0.09032026678323746 -976,0.09032108634710312 -977,0.09032144397497177 -978,0.09032130241394043 -979,0.09032177925109863 -980,0.09032298624515533 -981,0.09032338112592697 -982,0.09032364189624786 -983,0.09032384306192398 -984,0.09032446146011353 -985,0.09032489359378815 -986,0.09032484143972397 -987,0.09032680839300156 -988,0.09032649546861649 -989,0.09032890200614929 -990,0.09032762795686722 -991,0.09032838046550751 -992,0.09032822400331497 -993,0.09032893180847168 -994,0.09033001959323883 -995,0.09032919257879257 -996,0.09033195674419403 -997,0.0903313159942627 -998,0.09033383429050446 -999,0.09033223241567612 -1000,0.09033215045928955 diff --git a/training/base_loss/two/training_config.txt b/training/base_loss/two/training_config.txt deleted file mode 100644 index 5c167de..0000000 --- a/training/base_loss/two/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0.0001 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def current_loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(loss_fn(theta, desired_theta)) - -Specific Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss/two/training_log.csv b/training/base_loss/two/training_log.csv deleted file mode 100644 index b2f36f0..0000000 --- a/training/base_loss/two/training_log.csv +++ /dev/null @@ -1,1002 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,10.234851837158203 -2,6.466392993927002 -3,2.496265172958374 -4,1.4330482482910156 -5,0.6652761101722717 -6,0.4092560410499573 -7,0.34617507457733154 -8,0.3352189064025879 -9,0.29997044801712036 -10,0.28455498814582825 -11,0.2706639766693115 -12,0.256809800863266 -13,0.2449451982975006 -14,0.2332826852798462 -15,0.221483513712883 -16,0.20902703702449799 -17,0.19769549369812012 -18,0.18950577080249786 -19,0.18280331790447235 -20,0.175080806016922 -21,0.16781212389469147 -22,0.16767650842666626 -23,0.1685328185558319 -24,0.16768693923950195 -25,0.1656242161989212 -26,0.162550151348114 -27,0.15855692327022552 -28,0.15373079478740692 -29,0.14830870926380157 -30,0.1436430811882019 -31,0.14141088724136353 -32,0.13972227275371552 -33,0.1373298317193985 -34,0.13407160341739655 -35,0.13039763271808624 -36,0.1274370402097702 -37,0.12534591555595398 -38,0.12401100993156433 -39,0.12209673225879669 -40,0.119998037815094 -41,0.118746817111969 -42,0.11816772818565369 -43,0.11734144389629364 -44,0.11601420491933823 -45,0.11450773477554321 -46,0.11339925974607468 -47,0.11282777041196823 -48,0.11220709979534149 -49,0.11122202128171921 -50,0.1102323979139328 -51,0.10945899784564972 -52,0.10872214287519455 -53,0.10792545974254608 -54,0.10711057484149933 -55,0.10635531693696976 -56,0.10553282499313354 -57,0.10467448085546494 -58,0.10408355295658112 -59,0.10351421684026718 -60,0.10273735225200653 -61,0.10191062092781067 -62,0.10130678117275238 -63,0.10065558552742004 -64,0.09983336180448532 -65,0.0991254448890686 -66,0.09838370233774185 -67,0.09753256291151047 -68,0.09675586968660355 -69,0.09597576409578323 -70,0.09514985978603363 -71,0.09422588348388672 -72,0.09340473264455795 -73,0.09244240820407867 
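For reference, the normalized loss documented in the config above runs standalone. A minimal sketch follows — the function body is verbatim from the config; the endpoint check at the bottom is added here for illustration and is not part of the repository:

import math
import torch

def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
    # Maps the error |theta - desired_theta| on [0, 2*pi] to [min_val, 1].
    # The shift delta keeps the gradient finite at zero error when exponent < 1,
    # since d/de (e + delta)^p behaves like p * e^(p-1) at e = 0 when delta = 0.
    error = torch.abs(theta - desired_theta)
    numerator = (error + delta) ** exponent - delta ** exponent
    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
    return min_val + (1 - min_val) * (numerator / denominator)

# Endpoint check: zero error maps to min_val, an error of 2*pi maps to 1.
theta = torch.tensor([0.0, 2 * math.pi])
desired = torch.zeros(2)
print(normalized_loss(theta, desired, exponent=2))  # tensor([0.0100, 1.0000])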
-74,0.09170093387365341 -75,0.09120742976665497 -76,0.09092261642217636 -77,0.09066799283027649 -78,0.0902705192565918 -79,0.08983185887336731 -80,0.08929625153541565 -81,0.08880485594272614 -82,0.08854542672634125 -83,0.08820799738168716 -84,0.08807382732629776 -85,0.0878276377916336 -86,0.08757469803094864 -87,0.08733223378658295 -88,0.08705996721982956 -89,0.08679627627134323 -90,0.0865522101521492 -91,0.08623664826154709 -92,0.08614487200975418 -93,0.08588608354330063 -94,0.08573922514915466 -95,0.0856480821967125 -96,0.08537930250167847 -97,0.08523824065923691 -98,0.08512590825557709 -99,0.0849238932132721 -100,0.08509727567434311 -101,0.08473098278045654 -102,0.08475121855735779 -103,0.08472488820552826 -104,0.08425026386976242 -105,0.08463004231452942 -106,0.08395949751138687 -107,0.08409921079874039 -108,0.08365374803543091 -109,0.08372105658054352 -110,0.08361837267875671 -111,0.08354061841964722 -112,0.08306435495615005 -113,0.08326923102140427 -114,0.08278565853834152 -115,0.08285008370876312 -116,0.08249189704656601 -117,0.08278565108776093 -118,0.08225379884243011 -119,0.08225330710411072 -120,0.08190350979566574 -121,0.08176734298467636 -122,0.08163733035326004 -123,0.08155843615531921 -124,0.0814240500330925 -125,0.08128814399242401 -126,0.08135181665420532 -127,0.081159308552742 -128,0.0810864120721817 -129,0.08081468939781189 -130,0.0807863101363182 -131,0.08050070703029633 -132,0.08065730333328247 -133,0.08055564761161804 -134,0.08044923841953278 -135,0.08036768436431885 -136,0.08023950457572937 -137,0.08009099960327148 -138,0.08011028170585632 -139,0.08012078702449799 -140,0.07996564358472824 -141,0.08014830201864243 -142,0.07986270636320114 -143,0.07980833947658539 -144,0.07980886846780777 -145,0.07974610477685928 -146,0.07966186106204987 -147,0.07972170412540436 -148,0.07960563153028488 -149,0.07958094030618668 -150,0.07968541234731674 -151,0.07952871173620224 -152,0.07945350557565689 -153,0.0795331597328186 -154,0.07942258566617966 -155,0.07943854480981827 -156,0.07936373353004456 -157,0.07938434183597565 -158,0.0792689397931099 -159,0.07951365411281586 -160,0.07929336279630661 -161,0.07924657315015793 -162,0.07934610545635223 -163,0.07928822189569473 -164,0.07947507500648499 -165,0.07928752899169922 -166,0.07936655730009079 -167,0.07924246788024902 -168,0.07926242053508759 -169,0.07927785813808441 -170,0.07901915907859802 -171,0.07938885688781738 -172,0.07902936637401581 -173,0.07898504287004471 -174,0.07892755419015884 -175,0.0788704976439476 -176,0.07913511246442795 -177,0.0790446549654007 -178,0.0789061039686203 -179,0.0788482204079628 -180,0.07900616526603699 -181,0.07903589308261871 -182,0.0787847489118576 -183,0.07925772666931152 -184,0.078799769282341 -185,0.07893846929073334 -186,0.07898034900426865 -187,0.07865510880947113 -188,0.07887572050094604 -189,0.07868988811969757 -190,0.07871898263692856 -191,0.0786697044968605 -192,0.07875783741474152 -193,0.07867857068777084 -194,0.07886817306280136 -195,0.07862976938486099 -196,0.07871319353580475 -197,0.07847002893686295 -198,0.07864023745059967 -199,0.07850417494773865 -200,0.07869499921798706 -201,0.07850071787834167 -202,0.07891260087490082 -203,0.07890446484088898 -204,0.0785602331161499 -205,0.07918666303157806 -206,0.07880017906427383 -207,0.07888921350240707 -208,0.07916509360074997 -209,0.07914380729198456 -210,0.07849302142858505 -211,0.07935908436775208 -212,0.07926768064498901 -213,0.07887936383485794 -214,0.07909756153821945 -215,0.07907329499721527 -216,0.07905580848455429 -217,0.07841295003890991 
-218,0.07880539447069168 -219,0.07859814167022705 -220,0.07832927256822586 -221,0.07851839065551758 -222,0.07849419116973877 -223,0.07836023718118668 -224,0.07845134288072586 -225,0.07829266041517258 -226,0.07831202447414398 -227,0.0783422514796257 -228,0.0784643217921257 -229,0.07822586596012115 -230,0.07861951738595963 -231,0.07831801474094391 -232,0.07870838791131973 -233,0.07876107841730118 -234,0.07820352911949158 -235,0.07893161475658417 -236,0.07872527837753296 -237,0.07838798314332962 -238,0.07873966544866562 -239,0.07839757949113846 -240,0.07849889248609543 -241,0.07844500988721848 -242,0.07835138589143753 -243,0.07859360426664352 -244,0.07828768342733383 -245,0.07850424945354462 -246,0.07850389927625656 -247,0.07834126800298691 -248,0.07869115471839905 -249,0.07823686301708221 -250,0.07866998016834259 -251,0.07839164137840271 -252,0.07857239246368408 -253,0.07860052585601807 -254,0.0784408375620842 -255,0.07860536128282547 -256,0.07853659987449646 -257,0.07859333604574203 -258,0.07882256805896759 -259,0.0784611627459526 -260,0.0783892571926117 -261,0.07821166515350342 -262,0.07830832898616791 -263,0.07845472544431686 -264,0.07815374433994293 -265,0.078372061252594 -266,0.07836724817752838 -267,0.07834804803133011 -268,0.07854241877794266 -269,0.07817064970731735 -270,0.07836581766605377 -271,0.0781969204545021 -272,0.07825981825590134 -273,0.07862366735935211 -274,0.07828362286090851 -275,0.07828705757856369 -276,0.07859742641448975 -277,0.0780966728925705 -278,0.07838410139083862 -279,0.07798713445663452 -280,0.07821927219629288 -281,0.07802703976631165 -282,0.07816630601882935 -283,0.07803410291671753 -284,0.07828307896852493 -285,0.07787659019231796 -286,0.07808981835842133 -287,0.07795646786689758 -288,0.0779065191745758 -289,0.07812948524951935 -290,0.07794821262359619 -291,0.07797113060951233 -292,0.07817322015762329 -293,0.07800313830375671 -294,0.07860931754112244 -295,0.07864463329315186 -296,0.07801160961389542 -297,0.07900190353393555 -298,0.07866941392421722 -299,0.07808799296617508 -300,0.0785217210650444 -301,0.0782015472650528 -302,0.07813960313796997 -303,0.07813762128353119 -304,0.07802267372608185 -305,0.07811687886714935 -306,0.0780801996588707 -307,0.07805006206035614 -308,0.07812291383743286 -309,0.0780651718378067 -310,0.07786960899829865 -311,0.07792899757623672 -312,0.07798369973897934 -313,0.0778142511844635 -314,0.07786343991756439 -315,0.07783043384552002 -316,0.0778464823961258 -317,0.07776661217212677 -318,0.07779961824417114 -319,0.07793940603733063 -320,0.07785313576459885 -321,0.07820986956357956 -322,0.0779627114534378 -323,0.0782121941447258 -324,0.07823067158460617 -325,0.07792142033576965 -326,0.07844450324773788 -327,0.07789670675992966 -328,0.07814809679985046 -329,0.07803238183259964 -330,0.07802785187959671 -331,0.0779600515961647 -332,0.07835789024829865 -333,0.07828126847743988 -334,0.07780952006578445 -335,0.077938012778759 -336,0.07785438001155853 -337,0.07827029377222061 -338,0.0782247856259346 -339,0.07771893590688705 -340,0.07764863967895508 -341,0.0778261199593544 -342,0.07811377197504044 -343,0.07781931757926941 -344,0.07845232635736465 -345,0.07776925712823868 -346,0.07830584049224854 -347,0.0783783420920372 -348,0.07804635912179947 -349,0.07845665514469147 -350,0.07816731184720993 -351,0.0779450386762619 -352,0.07841788232326508 -353,0.0780324786901474 -354,0.07801465690135956 -355,0.07795173674821854 -356,0.07795317471027374 -357,0.07807818055152893 -358,0.07788532972335815 -359,0.07783545553684235 -360,0.07799635082483292 
-361,0.07787401974201202 -362,0.07777505367994308 -363,0.07770556956529617 -364,0.07758531719446182 -365,0.07772568613290787 -366,0.07782302796840668 -367,0.07783672213554382 -368,0.07778783142566681 -369,0.07761449366807938 -370,0.07764123380184174 -371,0.07811995595693588 -372,0.07781403511762619 -373,0.07773146778345108 -374,0.07824813574552536 -375,0.07750725001096725 -376,0.07766114920377731 -377,0.07770022004842758 -378,0.07764220237731934 -379,0.0777118057012558 -380,0.07758817821741104 -381,0.07780518382787704 -382,0.07810316979885101 -383,0.07771607488393784 -384,0.07827109843492508 -385,0.07764781266450882 -386,0.07840166985988617 -387,0.07835271954536438 -388,0.07757174968719482 -389,0.07838264852762222 -390,0.07777516543865204 -391,0.07827653735876083 -392,0.07888121157884598 -393,0.07807979732751846 -394,0.07816629111766815 -395,0.07823546975851059 -396,0.07789744436740875 -397,0.07814966887235641 -398,0.0778207927942276 -399,0.07787006348371506 -400,0.07775034010410309 -401,0.07764816284179688 -402,0.07773223519325256 -403,0.07789493352174759 -404,0.07768715918064117 -405,0.07754630595445633 -406,0.07775025814771652 -407,0.07800710201263428 -408,0.07781899720430374 -409,0.07780978828668594 -410,0.07754430919885635 -411,0.07810129970312119 -412,0.0781763419508934 -413,0.07765012234449387 -414,0.07773295789957047 -415,0.07801397889852524 -416,0.07773534953594208 -417,0.07756348699331284 -418,0.07763165980577469 -419,0.07761207967996597 -420,0.07764014601707458 -421,0.07753324508666992 -422,0.07793325185775757 -423,0.0775177925825119 -424,0.07815376669168472 -425,0.07793501764535904 -426,0.07753974944353104 -427,0.07766635715961456 -428,0.0775998905301094 -429,0.07766807824373245 -430,0.07748553156852722 -431,0.07748076319694519 -432,0.07762281596660614 -433,0.0775587186217308 -434,0.07766638696193695 -435,0.07746539264917374 -436,0.07759056240320206 -437,0.07732097804546356 -438,0.07763919234275818 -439,0.077333003282547 -440,0.07774917781352997 -441,0.07759808003902435 -442,0.07788386940956116 -443,0.07758037000894547 -444,0.07821808755397797 -445,0.07779224216938019 -446,0.07830558717250824 -447,0.07820100337266922 -448,0.07741688191890717 -449,0.07766695320606232 -450,0.07759763300418854 -451,0.0777016282081604 -452,0.07755806297063828 -453,0.07802053540945053 -454,0.07748392224311829 -455,0.07782064378261566 -456,0.07758387923240662 -457,0.07777909934520721 -458,0.0774981826543808 -459,0.07766455411911011 -460,0.0776214674115181 -461,0.07746551185846329 -462,0.07741735875606537 -463,0.07737931609153748 -464,0.07789219915866852 -465,0.07746762037277222 -466,0.0783943384885788 -467,0.0778588354587555 -468,0.07838001847267151 -469,0.07879404723644257 -470,0.07738208025693893 -471,0.0792321115732193 -472,0.0790657103061676 -473,0.07731588184833527 -474,0.07842248678207397 -475,0.07823590934276581 -476,0.07784873992204666 -477,0.07801975309848785 -478,0.0779976174235344 -479,0.07817177474498749 -480,0.07835644483566284 -481,0.07849223166704178 -482,0.07788130640983582 -483,0.0790223702788353 -484,0.07857086509466171 -485,0.07776235044002533 -486,0.07843781262636185 -487,0.07788969576358795 -488,0.07790926843881607 -489,0.07789408415555954 -490,0.07781615853309631 -491,0.07815239578485489 -492,0.07758340984582901 -493,0.07804016768932343 -494,0.07785045355558395 -495,0.07766494154930115 -496,0.07796507328748703 -497,0.07752441614866257 -498,0.07739675790071487 -499,0.07769210636615753 -500,0.07738480716943741 -501,0.07757333666086197 -502,0.07733433693647385 
-503,0.0774913802742958 -504,0.07755815982818604 -505,0.07737081497907639 -506,0.07787288725376129 -507,0.07728328555822372 -508,0.07749675959348679 -509,0.0772523432970047 -510,0.07750571519136429 -511,0.07764255255460739 -512,0.07727622240781784 -513,0.0779527872800827 -514,0.07757145911455154 -515,0.07795237004756927 -516,0.07812047749757767 -517,0.07739074528217316 -518,0.07815295457839966 -519,0.07749854028224945 -520,0.07757090032100677 -521,0.07748725265264511 -522,0.07740375399589539 -523,0.07742610573768616 -524,0.07728396356105804 -525,0.07755547016859055 -526,0.07743559032678604 -527,0.07747966051101685 -528,0.07746528089046478 -529,0.077168770134449 -530,0.07764826714992523 -531,0.07722141593694687 -532,0.07767059653997421 -533,0.07738788425922394 -534,0.07739314436912537 -535,0.07812267541885376 -536,0.07729532569646835 -537,0.07868628203868866 -538,0.07852603495121002 -539,0.07720029354095459 -540,0.07849115878343582 -541,0.07737015187740326 -542,0.07861601561307907 -543,0.0787702426314354 -544,0.07774397730827332 -545,0.07902292907238007 -546,0.07855603098869324 -547,0.07780173420906067 -548,0.07813630998134613 -549,0.07857212424278259 -550,0.07771133631467819 -551,0.07775668054819107 -552,0.07802583277225494 -553,0.0776585191488266 -554,0.07756981998682022 -555,0.07780229300260544 -556,0.07764904946088791 -557,0.07797572016716003 -558,0.07757841050624847 -559,0.07787610590457916 -560,0.07757481932640076 -561,0.07759986817836761 -562,0.07755639404058456 -563,0.07749602943658829 -564,0.07782194018363953 -565,0.07746461033821106 -566,0.07771524041891098 -567,0.07734934240579605 -568,0.07780241221189499 -569,0.07732534408569336 -570,0.07793125510215759 -571,0.0775894746184349 -572,0.07762835919857025 -573,0.07781776040792465 -574,0.07720082253217697 -575,0.07764345407485962 -576,0.07728128880262375 -577,0.07745814323425293 -578,0.07724366337060928 -579,0.07714198529720306 -580,0.07728918641805649 -581,0.07730301469564438 -582,0.07717891782522202 -583,0.07722999900579453 -584,0.07737161219120026 -585,0.07752946764230728 -586,0.07741480320692062 -587,0.07713388651609421 -588,0.0774182453751564 -589,0.07762029767036438 -590,0.07741548120975494 -591,0.07741130143404007 -592,0.07721700519323349 -593,0.07753854990005493 -594,0.07758532464504242 -595,0.07704051584005356 -596,0.07750561833381653 -597,0.07744419574737549 -598,0.07747531682252884 -599,0.07732686400413513 -600,0.07711467146873474 -601,0.07747431844472885 -602,0.07732097059488297 -603,0.07753695547580719 -604,0.07729538530111313 -605,0.07766323536634445 -606,0.07724883407354355 -607,0.07751292735338211 -608,0.07726884633302689 -609,0.07712952792644501 -610,0.07751064747571945 -611,0.07719003409147263 -612,0.0772145465016365 -613,0.07729683816432953 -614,0.07718561589717865 -615,0.07786203175783157 -616,0.07733356952667236 -617,0.07724830508232117 -618,0.07714023441076279 -619,0.0774247795343399 -620,0.07715866714715958 -621,0.07727108150720596 -622,0.07736317068338394 -623,0.07731032371520996 -624,0.07790445536375046 -625,0.07752326130867004 -626,0.07703299075365067 -627,0.07730133831501007 -628,0.07721460610628128 -629,0.07766829431056976 -630,0.07730051130056381 -631,0.07699597626924515 -632,0.07737796008586884 -633,0.0773567333817482 -634,0.0773405209183693 -635,0.07706809788942337 -636,0.07725993543863297 -637,0.07715144753456116 -638,0.07698094844818115 -639,0.07716073840856552 -640,0.07746575027704239 -641,0.07718625664710999 -642,0.07777667045593262 -643,0.07724624127149582 -644,0.07785753905773163 
-645,0.077679842710495 -646,0.07732248306274414 -647,0.07836584746837616 -648,0.07765412330627441 -649,0.07749534398317337 -650,0.07765927165746689 -651,0.07747204601764679 -652,0.07709351927042007 -653,0.07716109603643417 -654,0.07711350917816162 -655,0.0770353451371193 -656,0.07711736112833023 -657,0.07745004445314407 -658,0.07721468806266785 -659,0.07846622914075851 -660,0.07739973813295364 -661,0.07883437722921371 -662,0.07893641293048859 -663,0.07723811268806458 -664,0.07886245101690292 -665,0.0782509297132492 -666,0.07752887159585953 -667,0.07830008864402771 -668,0.07730824500322342 -669,0.07807133346796036 -670,0.07820645719766617 -671,0.07763952016830444 -672,0.078254833817482 -673,0.07810618728399277 -674,0.07758284360170364 -675,0.07749751210212708 -676,0.07769892364740372 -677,0.07725856453180313 -678,0.07729177176952362 -679,0.07736002653837204 -680,0.07714279741048813 -681,0.07694603502750397 -682,0.07708676159381866 -683,0.07704237848520279 -684,0.07775762677192688 -685,0.07696279883384705 -686,0.07736635208129883 -687,0.0770760029554367 -688,0.0772395059466362 -689,0.07693580538034439 -690,0.07711352407932281 -691,0.0781375914812088 -692,0.07762031257152557 -693,0.07812754064798355 -694,0.07767321914434433 -695,0.0780365839600563 -696,0.07826896011829376 -697,0.07717613875865936 -698,0.07811341434717178 -699,0.07701720297336578 -700,0.07795998454093933 -701,0.07729857414960861 -702,0.07743176072835922 -703,0.07708728313446045 -704,0.07718049734830856 -705,0.07718449831008911 -706,0.07746071368455887 -707,0.07749535143375397 -708,0.07741760462522507 -709,0.07704012095928192 -710,0.07719077169895172 -711,0.07720625400543213 -712,0.07708807289600372 -713,0.07701001316308975 -714,0.0770304948091507 -715,0.07715269923210144 -716,0.07699064910411835 -717,0.07735048234462738 -718,0.07691587507724762 -719,0.07793053984642029 -720,0.07689858973026276 -721,0.07729873806238174 -722,0.07771080732345581 -723,0.07689300924539566 -724,0.0775277242064476 -725,0.07692553102970123 -726,0.0769810602068901 -727,0.07698673009872437 -728,0.07689622044563293 -729,0.07690044492483139 -730,0.07684415578842163 -731,0.07682421058416367 -732,0.07685483247041702 -733,0.07684164494276047 -734,0.07682251930236816 -735,0.07737502455711365 -736,0.07696649432182312 -737,0.07747814059257507 -738,0.07761439681053162 -739,0.077490895986557 -740,0.0776946097612381 -741,0.07753019779920578 -742,0.07857347279787064 -743,0.0784437283873558 -744,0.07749525457620621 -745,0.07734448462724686 -746,0.0770283192396164 -747,0.07737955451011658 -748,0.07694172859191895 -749,0.0780312567949295 -750,0.07695069164037704 -751,0.07770241796970367 -752,0.07707434892654419 -753,0.07821197807788849 -754,0.0775439590215683 -755,0.07809064537286758 -756,0.07866831123828888 -757,0.07730616629123688 -758,0.07839062809944153 -759,0.07759923487901688 -760,0.07859812676906586 -761,0.0788559690117836 -762,0.07716429978609085 -763,0.07853902876377106 -764,0.07858076691627502 -765,0.07738583534955978 -766,0.07786443829536438 -767,0.0778384581208229 -768,0.07744274288415909 -769,0.07780423015356064 -770,0.07826534658670425 -771,0.07757501304149628 -772,0.07751678675413132 -773,0.07795986533164978 -774,0.07781191170215607 -775,0.07736030966043472 -776,0.07718668133020401 -777,0.07762861251831055 -778,0.0775587186217308 -779,0.077121302485466 -780,0.07734430581331253 -781,0.07740939408540726 -782,0.07714717090129852 -783,0.07742167264223099 -784,0.07729412615299225 -785,0.07770136743783951 -786,0.07696334272623062 -787,0.07792266458272934 
-788,0.07735178619623184 -789,0.0774281695485115 -790,0.07693953812122345 -791,0.07710408419370651 -792,0.07690276950597763 -793,0.07682829350233078 -794,0.07678666710853577 -795,0.07676944881677628 -796,0.0767306536436081 -797,0.0768735334277153 -798,0.07746308296918869 -799,0.07681602239608765 -800,0.07712121307849884 -801,0.07768965512514114 -802,0.0771719440817833 -803,0.07784422487020493 -804,0.0770508199930191 -805,0.07836704701185226 -806,0.0775768905878067 -807,0.07764992862939835 -808,0.07720876485109329 -809,0.07742069661617279 -810,0.07700426131486893 -811,0.0773513987660408 -812,0.07745406776666641 -813,0.07702098786830902 -814,0.07736555486917496 -815,0.07736685127019882 -816,0.0767972394824028 -817,0.07735218107700348 -818,0.07821037620306015 -819,0.07722005993127823 -820,0.0775679349899292 -821,0.07811177521944046 -822,0.07689554244279861 -823,0.0780123919248581 -824,0.07743334770202637 -825,0.07813867181539536 -826,0.07724647969007492 -827,0.07851549237966537 -828,0.07828718423843384 -829,0.07721788436174393 -830,0.0789058580994606 -831,0.07740221172571182 -832,0.07746495306491852 -833,0.07737447321414948 -834,0.0773594081401825 -835,0.07706980407238007 -836,0.07719851285219193 -837,0.07709630578756332 -838,0.07713869214057922 -839,0.07693851739168167 -840,0.07774452865123749 -841,0.07696244865655899 -842,0.0769660696387291 -843,0.07750414311885834 -844,0.07675959914922714 -845,0.07701603323221207 -846,0.07673455029726028 -847,0.07687205821275711 -848,0.07722606509923935 -849,0.07671679556369781 -850,0.07708185166120529 -851,0.07728112488985062 -852,0.07699070870876312 -853,0.07803032547235489 -854,0.0773017406463623 -855,0.07848798483610153 -856,0.07857879251241684 -857,0.07696020603179932 -858,0.0784451812505722 -859,0.07725288718938828 -860,0.07793919742107391 -861,0.0782412737607956 -862,0.07741478085517883 -863,0.0777185782790184 -864,0.07726801931858063 -865,0.07741623371839523 -866,0.07697051018476486 -867,0.07723032683134079 -868,0.07708998769521713 -869,0.07684587687253952 -870,0.07686024904251099 -871,0.07673501968383789 -872,0.07668386399745941 -873,0.0768851786851883 -874,0.07765472680330276 -875,0.07700381428003311 -876,0.07691020518541336 -877,0.07769083231687546 -878,0.07715756446123123 -879,0.0771210566163063 -880,0.07740461081266403 -881,0.0772826299071312 -882,0.07716003805398941 -883,0.0773983895778656 -884,0.0770820900797844 -885,0.07706377655267715 -886,0.07725173979997635 -887,0.07696754485368729 -888,0.07682611793279648 -889,0.0769837498664856 -890,0.07754941284656525 -891,0.07676871120929718 -892,0.07686112076044083 -893,0.07693639397621155 -894,0.07683553546667099 -895,0.07678084820508957 -896,0.07676719129085541 -897,0.07723862677812576 -898,0.07675933092832565 -899,0.07689646631479263 -900,0.07704490423202515 -901,0.07678315788507462 -902,0.07672426849603653 -903,0.07692406326532364 -904,0.07757379859685898 -905,0.07726702839136124 -906,0.07659593969583511 -907,0.0773085281252861 -908,0.07697669416666031 -909,0.07672960311174393 -910,0.07715211063623428 -911,0.07748855650424957 -912,0.07716653496026993 -913,0.07761773467063904 -914,0.07722466439008713 -915,0.07699509710073471 -916,0.0774005874991417 -917,0.0768834799528122 -918,0.07687828689813614 -919,0.0769171491265297 -920,0.07659579068422318 -921,0.07703826576471329 -922,0.07754632830619812 -923,0.0770704448223114 -924,0.07675399631261826 -925,0.07757087796926498 -926,0.07759673148393631 -927,0.07722017168998718 -928,0.07810169458389282 -929,0.07678326219320297 -930,0.07870036363601685 
-931,0.07802921533584595 -932,0.07747888565063477 -933,0.07681338489055634 -934,0.0790950357913971 -935,0.0782693400979042 -936,0.07739352434873581 -937,0.07732628285884857 -938,0.07841384410858154 -939,0.07898452132940292 -940,0.07706674188375473 -941,0.07923460006713867 -942,0.07939175516366959 -943,0.07748990505933762 -944,0.07859046012163162 -945,0.07784651219844818 -946,0.07731074094772339 -947,0.07906591892242432 -948,0.07767964154481888 -949,0.07828804850578308 -950,0.07926499098539352 -951,0.07804791629314423 -952,0.0772721916437149 -953,0.07914137840270996 -954,0.07816046476364136 -955,0.07817018777132034 -956,0.07932119816541672 -957,0.07822069525718689 -958,0.077597476541996 -959,0.07935120910406113 -960,0.07801083475351334 -961,0.07818000018596649 -962,0.0797996073961258 -963,0.07861556112766266 -964,0.07704361528158188 -965,0.07895199954509735 -966,0.07825344800949097 -967,0.07767337560653687 -968,0.0793856754899025 -969,0.07861704379320145 -970,0.07701358199119568 -971,0.07862503081560135 -972,0.0781509131193161 -973,0.07753957062959671 -974,0.07867521792650223 -975,0.07807395607233047 -976,0.07694444805383682 -977,0.07820828258991241 -978,0.07749844342470169 -979,0.07779709994792938 -980,0.07911112904548645 -981,0.07810676097869873 -982,0.07736224681138992 -983,0.07828368991613388 -984,0.0775228813290596 -985,0.07792320102453232 -986,0.07858242094516754 -987,0.07722865045070648 -988,0.07739510387182236 -989,0.07948534935712814 -990,0.07784159481525421 -991,0.07782764732837677 -992,0.07907374948263168 -993,0.07736952602863312 -994,0.07771026343107224 -995,0.07820295542478561 -996,0.07725347578525543 -997,0.07806582003831863 -998,0.07820100337266922 -999,0.07721172273159027 -1000,0.07782912999391556 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.003/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.003/training_config.txt deleted file mode 100644 index af09b72..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.003/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.0025 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.003/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.003/training_log.csv deleted file mode 100644 index f192ac1..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.003/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,7770.62841796875 -2,6718.59423828125 -3,5794.0087890625 -4,4935.95947265625 -5,4327.53271484375 -6,3802.377197265625 -7,3329.73046875 -8,2931.816650390625 -9,2564.612060546875 -10,2215.23388671875 -11,1976.3067626953125 -12,1704.258544921875 -13,1438.279052734375 -14,1257.5654296875 -15,1139.8956298828125 -16,995.1824340820312 -17,815.4264526367188 -18,727.8626098632812 -19,632.2618408203125 -20,605.2363891601562 -21,508.25048828125 -22,416.4864196777344 -23,366.84027099609375 -24,369.57958984375 -25,295.07867431640625 -26,248.3028564453125 -27,223.56149291992188 -28,239.32118225097656 -29,202.33860778808594 -30,194.6480255126953 -31,167.9254150390625 -32,156.02577209472656 -33,151.82904052734375 -34,151.10589599609375 -35,144.50389099121094 -36,144.8226776123047 -37,145.7972869873047 -38,148.10838317871094 -39,152.98716735839844 -40,154.3490753173828 -41,156.31729125976562 -42,156.1006622314453 -43,154.32318115234375 -44,149.095947265625 -45,141.8406524658203 -46,132.90611267089844 -47,130.16073608398438 -48,128.01268005371094 -49,127.04995727539062 -50,126.7177963256836 -51,126.15620422363281 -52,125.43661499023438 -53,124.47113800048828 -54,122.81539916992188 -55,123.9261245727539 -56,124.15579223632812 -57,124.08218383789062 -58,123.82746887207031 -59,123.41194152832031 -60,122.7802963256836 -61,121.62992095947266 -62,122.0086441040039 -63,122.00167083740234 -64,121.75686645507812 -65,121.33285522460938 -66,120.52906036376953 -67,121.29076385498047 -68,121.7078857421875 -69,121.7892074584961 -70,121.73064422607422 -71,121.57801055908203 -72,121.35053253173828 -73,121.05697631835938 
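The training cases above reach the wrapper as simulated state trajectories of shape [batch_size, t_points, 4], with theta in column 0 and desired_theta in column 3. A minimal sketch of that plumbing, using a constant stand-in trajectory because the actual pendulum rollout is not part of this diff; the three sample cases are taken from the list above:

import math
import torch

def five_loss(theta, desired_theta, min_val=0.01, delta=1.0, exponent=5):
    # Same normalized loss as in the config, fixed at exponent 5.
    error = torch.abs(theta - desired_theta)
    num = (error + delta) ** exponent - delta ** exponent
    den = (2 * math.pi + delta) ** exponent - delta ** exponent
    return min_val + (1 - min_val) * (num / den)

def loss_fn(state_traj):
    # Wrapper from the config: averages the base loss over batch and time.
    theta = state_traj[:, :, 0]          # [batch_size, t_points]
    desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
    return torch.mean(five_loss(theta, desired_theta))

# Three of the 22 cases: [theta0, omega0, alpha0, desired_theta].
cases = torch.tensor([
    [math.pi / 6, 0.0, 0.0, 0.0],
    [0.0, math.pi / 3, 0.0, 0.0],
    [0.0, 0.0, 0.0, 2 * math.pi],
])

# Stand-in for the rollout: the state is held at its initial value over the
# 1000 time points, which exercises the indexing and reduction end to end.
state_traj = cases.unsqueeze(1).expand(-1, 1000, -1)
print(loss_fn(state_traj))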
-74,120.69910430908203 -75,120.26854705810547 -76,119.73797607421875 -77,119.03357696533203 -78,117.8167724609375 -79,116.83917236328125 -80,116.34820556640625 -81,115.63579559326172 -82,114.36708068847656 -83,115.11323547363281 -84,115.37007904052734 -85,115.34087371826172 -86,115.01927185058594 -87,114.36756896972656 -88,114.035400390625 -89,114.01355743408203 -90,114.08613586425781 -91,113.75379180908203 -92,113.07562255859375 -93,111.48456573486328 -94,111.98307037353516 -95,113.6790542602539 -96,113.85575103759766 -97,113.80955505371094 -98,113.60377502441406 -99,112.89239501953125 -100,113.32893371582031 -101,114.0166244506836 -102,114.2397689819336 -103,114.04379272460938 -104,112.6296157836914 -105,107.9374771118164 -106,107.7266845703125 -107,107.7040786743164 -108,108.00822448730469 -109,108.39141845703125 -110,108.71395111083984 -111,108.90518951416016 -112,108.95988464355469 -113,108.91028594970703 -114,108.78828430175781 -115,108.61094665527344 -116,108.3825454711914 -117,108.09669494628906 -118,107.74114990234375 -119,107.30583190917969 -120,106.79058837890625 -121,106.1917953491211 -122,110.83098602294922 -123,105.59488677978516 -124,105.57504272460938 -125,105.48200225830078 -126,105.35533142089844 -127,105.2125015258789 -128,105.0588150024414 -129,104.88456726074219 -130,106.81533813476562 -131,106.51081848144531 -132,109.82728576660156 -133,115.5789794921875 -134,128.6304473876953 -135,131.7053680419922 -136,121.98052215576172 -137,118.72258758544922 -138,117.06177520751953 -139,116.03680419921875 -140,115.83661651611328 -141,121.03598022460938 -142,120.2808837890625 -143,119.34547424316406 -144,118.78247833251953 -145,118.43366241455078 -146,118.12821960449219 -147,117.74102020263672 -148,117.023193359375 -149,118.93740844726562 -150,119.89824676513672 -151,120.37339782714844 -152,120.54845428466797 -153,120.5039291381836 -154,120.2508773803711 -155,119.74303436279297 -156,118.87834930419922 -157,116.06034088134766 -158,116.97679901123047 -159,117.20667266845703 -160,117.27363586425781 -161,117.27104187011719 -162,117.23406982421875 -163,117.1791000366211 -164,117.11459350585938 -165,117.04459381103516 -166,116.971923828125 -167,116.8958511352539 -168,116.815185546875 -169,116.7279281616211 -170,116.63368225097656 -171,116.53009033203125 -172,116.41732788085938 -173,116.29596710205078 -174,116.16725158691406 -175,116.0330581665039 -176,115.89540100097656 -177,115.7552719116211 -178,115.61406707763672 -179,115.47228240966797 -180,115.32965850830078 -181,115.18595123291016 -182,115.04006958007812 -183,114.89013671875 -184,114.73365783691406 -185,114.56776428222656 -186,114.38726043701172 -187,114.18212127685547 -188,113.93012237548828 -189,113.56849670410156 -190,113.1717300415039 -191,114.64246368408203 -192,115.82766723632812 -193,110.32905578613281 -194,116.33597564697266 -195,111.70674896240234 -196,112.64942932128906 -197,113.70431518554688 -198,114.73318481445312 -199,115.6131820678711 -200,116.27291870117188 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.005/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.005/training_config.txt deleted file mode 100644 index ac61d9b..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.005/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.005 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss 
Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.005/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.005/training_log.csv deleted file mode 100644 index 3edea96..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.005/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,6725.52001953125 -2,4997.44384765625 -3,3804.835693359375 -4,2935.503173828125 -5,2222.25732421875 -6,1693.8331298828125 -7,1256.9798583984375 -8,963.8367309570312 -9,665.0695190429688 -10,535.7227783203125 -11,364.3943786621094 -12,252.67892456054688 -13,221.32859802246094 -14,194.2473602294922 -15,135.03506469726562 -16,110.41338348388672 -17,121.97980499267578 -18,109.03631591796875 -19,108.22425079345703 -20,109.557861328125 -21,104.28150939941406 -22,86.65518951416016 -23,76.22408294677734 -24,73.43879699707031 -25,71.68223571777344 -26,68.87044525146484 -27,62.73584747314453 -28,62.54648971557617 -29,60.324642181396484 -30,55.01743698120117 -31,53.166526794433594 -32,50.682369232177734 -33,48.591407775878906 -34,46.70733642578125 -35,45.11572265625 
-36,42.944496154785156 -37,40.58065414428711 -38,39.167545318603516 -39,38.011505126953125 -40,36.439659118652344 -41,36.005401611328125 -42,35.49430847167969 -43,35.04253387451172 -44,34.596492767333984 -45,32.74248123168945 -46,32.25679397583008 -47,31.893898010253906 -48,31.575838088989258 -49,31.29216957092285 -50,31.034502029418945 -51,30.795312881469727 -52,30.56829071044922 -53,30.349084854125977 -54,30.13558006286621 -55,29.928409576416016 -56,29.730928421020508 -57,29.550201416015625 -58,29.39569091796875 -59,29.28091812133789 -60,29.154844284057617 -61,28.832595825195312 -62,28.593297958374023 -63,28.365314483642578 -64,28.122602462768555 -65,27.836380004882812 -66,27.429363250732422 -67,26.822507858276367 -68,26.477489471435547 -69,25.864377975463867 -70,25.378442764282227 -71,23.984514236450195 -72,23.57914161682129 -73,23.373628616333008 -74,23.214035034179688 -75,23.080238342285156 -76,22.963762283325195 -77,22.859521865844727 -78,22.763877868652344 -79,22.67420196533203 -80,22.587997436523438 -81,22.503862380981445 -82,22.420806884765625 -83,22.338428497314453 -84,22.256061553955078 -85,22.172527313232422 -86,22.08760643005371 -87,22.0006103515625 -88,21.91148567199707 -89,21.82109260559082 -90,21.730979919433594 -91,21.642606735229492 -92,21.556926727294922 -93,21.474634170532227 -94,21.395732879638672 -95,21.319725036621094 -96,21.246471405029297 -97,21.17557144165039 -98,21.10727882385254 -99,21.041545867919922 -100,20.97849464416504 -101,20.918176651000977 -102,20.860525131225586 -103,20.805519104003906 -104,20.753093719482422 -105,20.703218460083008 -106,20.655841827392578 -107,20.610776901245117 -108,20.567886352539062 -109,20.527023315429688 -110,20.487937927246094 -111,20.450258255004883 -112,20.413429260253906 -113,20.376502990722656 -114,20.33868408203125 -115,20.300275802612305 -116,20.262203216552734 -117,20.225011825561523 -118,20.188913345336914 -119,20.15406036376953 -120,20.120267868041992 -121,20.087509155273438 -122,20.055627822875977 -123,20.024587631225586 -124,19.994253158569336 -125,19.964513778686523 -126,19.935354232788086 -127,19.906675338745117 -128,19.878501892089844 -129,19.850749969482422 -130,19.823453903198242 -131,19.796541213989258 -132,19.77004623413086 -133,19.743968963623047 -134,19.718332290649414 -135,19.693124771118164 -136,19.668338775634766 -137,19.64396858215332 -138,19.620038986206055 -139,19.59649658203125 -140,19.573381423950195 -141,19.550628662109375 -142,19.528242111206055 -143,19.506250381469727 -144,19.48459815979004 -145,19.463274002075195 -146,19.442277908325195 -147,19.421581268310547 -148,19.401185989379883 -149,19.381067276000977 -150,19.361270904541016 -151,19.341718673706055 -152,19.322460174560547 -153,19.30341911315918 -154,19.284698486328125 -155,19.26616859436035 -156,19.247894287109375 -157,19.229854583740234 -158,19.2120418548584 -159,19.194433212280273 -160,19.17705726623535 -161,19.15989112854004 -162,19.1429443359375 -163,19.12616539001465 -164,19.10961151123047 -165,19.093191146850586 -166,19.07701301574707 -167,19.060972213745117 -168,19.045129776000977 -169,19.029451370239258 -170,19.013916015625 -171,18.99856185913086 -172,18.98333168029785 -173,18.96823501586914 -174,18.953290939331055 -175,18.938474655151367 -176,18.923778533935547 -177,18.909225463867188 -178,18.894758224487305 -179,18.880416870117188 -180,18.866165161132812 -181,18.852020263671875 -182,18.83796501159668 -183,18.824012756347656 -184,18.81011390686035 -185,18.79629898071289 -186,18.782527923583984 -187,18.768795013427734 
-188,18.7551212310791 -189,18.74149513244629 -190,18.72787857055664 -191,18.71425437927246 -192,18.70063018798828 -193,18.68698501586914 -194,18.673267364501953 -195,18.659528732299805 -196,18.645681381225586 -197,18.631698608398438 -198,18.61760139465332 -199,18.603317260742188 -200,18.58875274658203 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.010/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.010/training_config.txt deleted file mode 100644 index 84796f5..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.010/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.010/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.010/training_log.csv deleted file mode 100644 index 946a12f..0000000 --- 
a/training/base_loss_learning_rate_sweep/five/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,5059.482421875 -2,3010.3740234375 -3,1816.048583984375 -4,1021.2877197265625 -5,545.4293823242188 -6,291.2381591796875 -7,230.19659423828125 -8,125.45794677734375 -9,134.42971801757812 -10,117.55680847167969 -11,102.60958099365234 -12,76.38494873046875 -13,68.34687805175781 -14,62.285850524902344 -15,52.418697357177734 -16,46.43372344970703 -17,44.03841018676758 -18,43.49372100830078 -19,42.42775344848633 -20,40.21412658691406 -21,40.499664306640625 -22,34.71615219116211 -23,32.54283905029297 -24,27.556747436523438 -25,24.67312240600586 -26,23.636560440063477 -27,22.539194107055664 -28,20.3195858001709 -29,18.445499420166016 -30,15.775135040283203 -31,14.405683517456055 -32,13.408025741577148 -33,12.5646390914917 -34,11.871582984924316 -35,11.312950134277344 -36,10.84416389465332 -37,10.433754920959473 -38,9.928411483764648 -39,9.642745018005371 -40,9.40228271484375 -41,9.211402893066406 -42,9.064362525939941 -43,8.936352729797363 -44,8.75970458984375 -45,8.682718276977539 -46,8.610445022583008 -47,8.548540115356445 -48,8.481241226196289 -49,8.38536262512207 -50,8.219938278198242 -51,7.891648292541504 -52,7.5890936851501465 -53,7.376401901245117 -54,6.950267791748047 -55,6.586104393005371 -56,6.322219371795654 -57,6.213955402374268 -58,6.054443359375 -59,5.882267951965332 -60,5.800445556640625 -61,5.714313983917236 -62,5.6333489418029785 -63,5.557476043701172 -64,5.517022132873535 -65,5.424365997314453 -66,5.364746570587158 -67,5.305666446685791 -68,5.246865272521973 -69,5.18797492980957 -70,5.129032611846924 -71,5.070919513702393 -72,5.01667594909668 -73,4.888418197631836 -74,4.858117580413818 -75,4.824503421783447 -76,4.788944721221924 -77,4.752417087554932 -78,4.715500831604004 -79,4.678471088409424 -80,4.6414289474487305 -81,4.604058742523193 -82,4.569658279418945 -83,4.531286239624023 -84,4.4958882331848145 -85,4.4591498374938965 -86,4.421026706695557 -87,4.38107442855835 -88,4.34315299987793 -89,4.300421714782715 -90,4.258784294128418 -91,4.216139316558838 -92,4.173788547515869 -93,4.132620811462402 -94,4.092691898345947 -95,4.0533270835876465 -96,4.0136308670043945 -97,3.972959518432617 -98,3.859011650085449 -99,3.9553351402282715 -100,3.9903764724731445 -101,4.018263339996338 -102,3.9151854515075684 -103,3.882251739501953 -104,3.7630834579467773 -105,3.7000489234924316 -106,3.6120121479034424 -107,3.475703477859497 -108,3.497345447540283 -109,3.395029067993164 -110,3.279219150543213 -111,3.223318099975586 -112,3.2120800018310547 -113,3.209965467453003 -114,3.107729434967041 -115,3.0968451499938965 -116,3.078746795654297 -117,3.0428404808044434 -118,2.9915549755096436 -119,2.9698710441589355 -120,2.9466629028320312 -121,2.918043613433838 -122,2.883138656616211 -123,2.8782236576080322 -124,2.873089551925659 -125,2.86930513381958 -126,2.86629581451416 -127,2.863724708557129 -128,2.861400842666626 -129,2.8591976165771484 -130,2.8570375442504883 -131,2.854865312576294 -132,2.8526415824890137 -133,2.850334882736206 -134,2.847916841506958 -135,2.845351457595825 -136,2.8425850868225098 -137,2.839533805847168 -138,2.8360393047332764 -139,2.8318018913269043 -140,2.826171636581421 -141,2.817456007003784 -142,2.800262689590454 -143,2.759161949157715 -144,2.7409002780914307 -145,2.733471393585205 -146,2.7278575897216797 -147,2.7232511043548584 -148,2.719332456588745 -149,2.715935468673706 -150,2.7129509449005127 -151,2.7102749347686768 -152,2.7075414657592773 
-153,2.7018003463745117 -154,2.666679620742798 -155,2.6628167629241943 -156,2.662264823913574 -157,2.6619603633880615 -158,2.6614482402801514 -159,2.6607513427734375 -160,2.659938335418701 -161,2.65906023979187 -162,2.6581509113311768 -163,2.657233476638794 -164,2.6563141345977783 -165,2.655404567718506 -166,2.6545050144195557 -167,2.653618812561035 -168,2.6527457237243652 -169,2.651887893676758 -170,2.651043176651001 -171,2.650209903717041 -172,2.6493892669677734 -173,2.6485791206359863 -174,2.6477811336517334 -175,2.6469900608062744 -176,2.646209955215454 -177,2.6454365253448486 -178,2.6446681022644043 -179,2.6439082622528076 -180,2.643153667449951 -181,2.6424012184143066 -182,2.6416542530059814 -183,2.6409099102020264 -184,2.6401679515838623 -185,2.639427661895752 -186,2.6386876106262207 -187,2.637948751449585 -188,2.6372103691101074 -189,2.636469841003418 -190,2.635728597640991 -191,2.634984016418457 -192,2.634237289428711 -193,2.633486032485962 -194,2.632730722427368 -195,2.631969451904297 -196,2.631201982498169 -197,2.6304264068603516 -198,2.6296417713165283 -199,2.6288468837738037 -200,2.628040313720703 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.020/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.020/training_config.txt deleted file mode 100644 index fcae999..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.020/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.020/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.020/training_log.csv deleted file mode 100644 index 56bfeb9..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,3088.385009765625 -2,1144.3016357421875 -3,318.79901123046875 -4,140.68502807617188 -5,152.70545959472656 -6,75.14848327636719 -7,73.43697357177734 -8,66.32743072509766 -9,50.1694450378418 -10,33.105369567871094 -11,25.02813148498535 -12,22.472965240478516 -13,18.639480590820312 -14,15.920618057250977 -15,13.850238800048828 -16,13.115677833557129 -17,12.236470222473145 -18,11.852234840393066 -19,11.72622299194336 -20,11.202983856201172 -21,11.095417976379395 -22,11.030611038208008 -23,10.815608024597168 -24,10.718470573425293 -25,10.56022834777832 -26,10.564657211303711 -27,10.425938606262207 -28,10.180031776428223 -29,10.043537139892578 -30,9.891477584838867 -31,9.733704566955566 -32,9.577387809753418 -33,9.462329864501953 -34,9.409721374511719 -35,9.363308906555176 -36,9.301348686218262 -37,9.235352516174316 -38,9.170696258544922 -39,9.110300064086914 -40,9.056041717529297 -41,9.00932788848877 -42,8.970462799072266 -43,8.939508438110352 -44,8.91582202911377 -45,8.898208618164062 -46,8.885146141052246 -47,8.87503433227539 -48,8.866385459899902 -49,8.85802173614502 -50,8.849024772644043 -51,8.838545799255371 -52,8.832979202270508 -53,8.856711387634277 -54,8.841376304626465 -55,8.813333511352539 -56,8.778741836547852 -57,8.743232727050781 -58,8.709452629089355 -59,8.677081108093262 -60,8.644346237182617 -61,8.621743202209473 -62,8.610654830932617 -63,8.603785514831543 -64,8.598999977111816 -65,8.594789505004883 -66,8.590073585510254 -67,8.584161758422852 -68,8.576689720153809 -69,8.56757640838623 -70,8.556938171386719 -71,8.545018196105957 -72,8.532170295715332 -73,8.518823623657227 -74,8.505425453186035 
-75,8.492317199707031 -76,8.479771614074707 -77,8.467961311340332 -78,8.456985473632812 -79,8.446765899658203 -80,8.437174797058105 -81,8.428035736083984 -82,8.419135093688965 -83,8.410305976867676 -84,8.401407241821289 -85,8.392420768737793 -86,8.383390426635742 -87,8.374467849731445 -88,8.365741729736328 -89,8.357316970825195 -90,8.349235534667969 -91,8.341489791870117 -92,8.334039688110352 -93,8.326807022094727 -94,8.319735527038574 -95,8.312769889831543 -96,8.30587100982666 -97,8.299010276794434 -98,8.292182922363281 -99,8.285400390625 -100,8.278678894042969 -101,8.27203369140625 -102,8.265478134155273 -103,8.259018898010254 -104,8.25264835357666 -105,8.24635124206543 -106,8.240104675292969 -107,8.233864784240723 -108,8.227606773376465 -109,8.221291542053223 -110,8.214883804321289 -111,8.208340644836426 -112,8.201622009277344 -113,8.194698333740234 -114,8.187500953674316 -115,8.17996597290039 -116,8.1719970703125 -117,8.163481712341309 -118,8.1542329788208 -119,8.144001007080078 -120,8.13238525390625 -121,8.118897438049316 -122,8.103504180908203 -123,8.099246978759766 -124,8.115421295166016 -125,8.134553909301758 -126,8.147979736328125 -127,8.157594680786133 -128,8.163418769836426 -129,8.165081024169922 -130,8.162735939025879 -131,8.157225608825684 -132,8.149883270263672 -133,8.142067909240723 -134,8.134814262390137 -135,8.128766059875488 -136,8.124173164367676 -137,8.120987892150879 -138,8.118855476379395 -139,8.117076873779297 -140,8.114785194396973 -141,8.111310005187988 -142,8.106478691101074 -143,8.100659370422363 -144,8.094478607177734 -145,8.088531494140625 -146,8.083104133605957 -147,8.078246116638184 -148,8.073824882507324 -149,8.06963062286377 -150,8.065448760986328 -151,8.06110668182373 -152,8.05648136138916 -153,8.051547050476074 -154,8.046350479125977 -155,8.040987968444824 -156,8.035585403442383 -157,8.030244827270508 -158,8.025039672851562 -159,8.019987106323242 -160,8.015042304992676 -161,8.01014518737793 -162,8.005194664001465 -163,8.000125885009766 -164,7.994886875152588 -165,7.989474296569824 -166,7.9839277267456055 -167,7.978292465209961 -168,7.9726057052612305 -169,7.966889381408691 -170,7.961117267608643 -171,7.955262660980225 -172,7.9492669105529785 -173,7.943070411682129 -174,7.936622142791748 -175,7.929859161376953 -176,7.922729969024658 -177,7.915169715881348 -178,7.907082557678223 -179,7.898306846618652 -180,7.8885908126831055 -181,7.877531051635742 -182,7.864451885223389 -183,7.848282814025879 -184,7.827654838562012 -185,7.820808410644531 -186,7.830437660217285 -187,7.848799705505371 -188,7.861062526702881 -189,7.866879463195801 -190,7.861657619476318 -191,7.848287105560303 -192,7.83420467376709 -193,7.8231635093688965 -194,7.8167314529418945 -195,7.815674781799316 -196,7.817105293273926 -197,7.812990188598633 -198,7.801965236663818 -199,7.791414737701416 -200,7.784646511077881 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.040/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.040/training_config.txt deleted file mode 100644 index f2027f8..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.040/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - 
desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.040/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.040/training_log.csv deleted file mode 100644 index c27aa04..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,1289.341552734375 -2,169.74205017089844 -3,80.52271270751953 -4,43.68131637573242 -5,20.937349319458008 -6,9.95045280456543 -7,5.791399002075195 -8,3.4011170864105225 -9,2.2140963077545166 -10,1.3589996099472046 -11,0.8892810940742493 -12,0.7924511432647705 -13,0.6559172868728638 -14,0.5640803575515747 -15,0.5065086483955383 -16,0.46587052941322327 -17,0.43497705459594727 -18,0.40961483120918274 -19,0.390272319316864 -20,0.375575989484787 -21,0.36431777477264404 -22,0.3530777394771576 -23,0.34686341881752014 -24,0.3405311703681946 -25,0.32142800092697144 -26,0.30399277806282043 -27,0.2940421402454376 -28,0.2890926003456116 -29,0.286124587059021 -30,0.28330594301223755 -31,0.2808089852333069 -32,0.27851444482803345 -33,0.2765269875526428 -34,0.27496540546417236 -35,0.2740345299243927 -36,0.27290958166122437 -37,0.27196675539016724 -38,0.2715482711791992 -39,0.2710689604282379 
-40,0.27050724625587463 -41,0.2699800729751587 -42,0.2695007622241974 -43,0.2690669298171997 -44,0.2686779201030731 -45,0.26833468675613403 -46,0.26803550124168396 -47,0.26777052879333496 -48,0.2675304710865021 -49,0.2673094570636749 -50,0.26710379123687744 -51,0.26691052317619324 -52,0.26672792434692383 -53,0.2665548324584961 -54,0.2663900554180145 -55,0.26623332500457764 -56,0.26608356833457947 -57,0.26594021916389465 -58,0.26580268144607544 -59,0.26567041873931885 -60,0.2655431628227234 -61,0.2654201090335846 -62,0.26530104875564575 -63,0.26518580317497253 -64,0.2650740444660187 -65,0.264965295791626 -66,0.2648593783378601 -67,0.26475611329078674 -68,0.26465508341789246 -69,0.2645563781261444 -70,0.26445940136909485 -71,0.2643643617630005 -72,0.2642710506916046 -73,0.2641793191432953 -74,0.2640889883041382 -75,0.2639997899532318 -76,0.26391205191612244 -77,0.2638251781463623 -78,0.2637394070625305 -79,0.2636547088623047 -80,0.2635708153247833 -81,0.26348769664764404 -82,0.26340535283088684 -83,0.26332375407218933 -84,0.26324278116226196 -85,0.2631625533103943 -86,0.2630827724933624 -87,0.26300349831581116 -88,0.2629247307777405 -89,0.2628464102745056 -90,0.2627684772014618 -91,0.26269087195396423 -92,0.2626136839389801 -93,0.2625367343425751 -94,0.2624601423740387 -95,0.2623837888240814 -96,0.26230767369270325 -97,0.2622319161891937 -98,0.2621561586856842 -99,0.26208069920539856 -100,0.26200541853904724 -101,0.26193028688430786 -102,0.26185524463653564 -103,0.2617804706096649 -104,0.26170584559440613 -105,0.26163122057914734 -106,0.2615566849708557 -107,0.2614821791648865 -108,0.2614077627658844 -109,0.2613334655761719 -110,0.26125913858413696 -111,0.26118481159210205 -112,0.26111048460006714 -113,0.26103606820106506 -114,0.260961651802063 -115,0.26088714599609375 -116,0.26081234216690063 -117,0.26073750853538513 -118,0.26066240668296814 -119,0.26058709621429443 -120,0.260511577129364 -121,0.2604355812072754 -122,0.2603593170642853 -123,0.2602824866771698 -124,0.26020514965057373 -125,0.26012709736824036 -126,0.2600480616092682 -127,0.25996801257133484 -128,0.2598864734172821 -129,0.2598028779029846 -130,0.2597167193889618 -131,0.25962668657302856 -132,0.25953125953674316 -133,0.25942733883857727 -134,0.2593097686767578 -135,0.25917068123817444 -136,0.2590011656284332 -137,0.2588154375553131 -138,0.2586815357208252 -139,0.2586136758327484 -140,0.2585947513580322 -141,0.2585603594779968 -142,0.2585074305534363 -143,0.2584458887577057 -144,0.25838011503219604 -145,0.25831231474876404 -146,0.2582433223724365 -147,0.25817352533340454 -148,0.25810328125953674 -149,0.25803276896476746 -150,0.2579619884490967 -151,0.25789105892181396 -152,0.257820188999176 -153,0.257749080657959 -154,0.2576778829097748 -155,0.2576064467430115 -156,0.25753504037857056 -157,0.2574634552001953 -158,0.25739187002182007 -159,0.25732001662254333 -160,0.2572482228279114 -161,0.2571762204170227 -162,0.2571040689945221 -163,0.2570318579673767 -164,0.25695955753326416 -165,0.2568871080875397 -166,0.25681447982788086 -167,0.2567417621612549 -168,0.25666895508766174 -169,0.2565959692001343 -170,0.25652286410331726 -171,0.2564496099948883 -172,0.2563762366771698 -173,0.25630292296409607 -174,0.2562292814254761 -175,0.2561556398868561 -176,0.2560817301273346 -177,0.2560078501701355 -178,0.2559337913990021 -179,0.25585949420928955 -180,0.2557850778102875 -181,0.25571057200431824 -182,0.2556358873844147 -183,0.2555610239505768 -184,0.25548604130744934 -185,0.2554108798503876 -186,0.25533556938171387 -187,0.255260169506073 
-188,0.255184531211853 -189,0.2551087439060211 -190,0.25503283739089966 -191,0.25495678186416626 -192,0.25488048791885376 -193,0.25480416417121887 -194,0.25472769141197205 -195,0.2546510696411133 -196,0.2545742690563202 -197,0.25449734926223755 -198,0.2544202208518982 -199,0.2543429732322693 -200,0.25426557660102844 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.050/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.050/training_config.txt deleted file mode 100644 index 318238f..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.050/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.050/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.050/training_log.csv deleted file mode 100644 index f904d82..0000000 --- 
a/training/base_loss_learning_rate_sweep/five/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,751.8114013671875 -2,130.89430236816406 -3,89.82174682617188 -4,63.68391418457031 -5,43.99032211303711 -6,31.095996856689453 -7,25.97844123840332 -8,23.500003814697266 -9,24.296039581298828 -10,25.648801803588867 -11,22.16640853881836 -12,20.829294204711914 -13,20.836965560913086 -14,19.963972091674805 -15,21.25447654724121 -16,19.66006851196289 -17,21.987844467163086 -18,19.279869079589844 -19,18.898744583129883 -20,19.826873779296875 -21,19.9918212890625 -22,19.450101852416992 -23,18.81133460998535 -24,18.70695686340332 -25,18.14211082458496 -26,17.82823944091797 -27,17.536941528320312 -28,17.246450424194336 -29,16.960630416870117 -30,16.723360061645508 -31,16.592172622680664 -32,16.580820083618164 -33,16.574913024902344 -34,16.361085891723633 -35,16.14403533935547 -36,16.023208618164062 -37,15.987268447875977 -38,16.022022247314453 -39,16.006994247436523 -40,15.889321327209473 -41,16.001598358154297 -42,15.413629531860352 -43,15.293877601623535 -44,14.868927955627441 -45,14.334311485290527 -46,15.300267219543457 -47,13.76059627532959 -48,13.92603588104248 -49,12.991423606872559 -50,14.897271156311035 -51,14.650100708007812 -52,13.899643898010254 -53,13.451325416564941 -54,12.87199592590332 -55,12.015827178955078 -56,11.035057067871094 -57,10.162378311157227 -58,9.626203536987305 -59,9.77097225189209 -60,8.695050239562988 -61,9.787149429321289 -62,10.72116756439209 -63,8.866284370422363 -64,4.62868595123291 -65,4.122593402862549 -66,3.6860625743865967 -67,3.4153401851654053 -68,3.203277349472046 -69,2.9480788707733154 -70,2.772449493408203 -71,2.5858964920043945 -72,2.3219501972198486 -73,1.173472285270691 -74,0.9639328122138977 -75,0.9020412564277649 -76,0.8489874005317688 -77,0.7789888381958008 -78,0.7139028310775757 -79,0.6817778944969177 -80,0.6499652862548828 -81,0.617756724357605 -82,0.5936900973320007 -83,0.5775876045227051 -84,0.5651906728744507 -85,0.5547771453857422 -86,0.5457246899604797 -87,0.5377205014228821 -88,0.5305731892585754 -89,0.5241448283195496 -90,0.5183328986167908 -91,0.5130601525306702 -92,0.5082560181617737 -93,0.5038605332374573 -94,0.49981173872947693 -95,0.49594882130622864 -96,0.4921724498271942 -97,0.48903343081474304 -98,0.4861673414707184 -99,0.4835188686847687 -100,0.481064110994339 -101,0.4787884056568146 -102,0.47666478157043457 -103,0.4746760427951813 -104,0.47280797362327576 -105,0.47104790806770325 -106,0.469384104013443 -107,0.46780672669410706 -108,0.4663074314594269 -109,0.4648783802986145 -110,0.4635125696659088 -111,0.4622034728527069 -112,0.46094557642936707 -113,0.4597340226173401 -114,0.4585638642311096 -115,0.45743075013160706 -116,0.45633068680763245 -117,0.455260306596756 -118,0.4542166590690613 -119,0.453197181224823 -120,0.45219916105270386 -121,0.4512207508087158 -122,0.4502596855163574 -123,0.4493138790130615 -124,0.4483817517757416 -125,0.44746142625808716 -126,0.4465518593788147 -127,0.4456513226032257 -128,0.44475871324539185 -129,0.44387298822402954 -130,0.4429936408996582 -131,0.442119836807251 -132,0.4412505328655243 -133,0.4403845965862274 -134,0.4395216703414917 -135,0.4386606514453888 -136,0.4378011226654053 -137,0.4369426965713501 -138,0.43608441948890686 -139,0.43522512912750244 -140,0.434364914894104 -141,0.43350234627723694 -142,0.43263688683509827 -143,0.4317673146724701 -144,0.43089258670806885 -145,0.430011510848999 -146,0.4291227459907532 -147,0.4282253086566925 
-148,0.4273178279399872 -149,0.4263983964920044 -150,0.4254654347896576 -151,0.4245169460773468 -152,0.4235500693321228 -153,0.42256081104278564 -154,0.421546071767807 -155,0.4205014109611511 -156,0.4194214344024658 -157,0.41830214858055115 -158,0.4171343743801117 -159,0.41590481996536255 -160,0.4145958721637726 -161,0.41318830847740173 -162,0.41165217757225037 -163,0.4099459648132324 -164,0.408006489276886 -165,0.4057483673095703 -166,0.40303128957748413 -167,0.39963987469673157 -168,0.3951798975467682 -169,0.3890199661254883 -170,0.3805030584335327 -171,0.3696395456790924 -172,0.35881492495536804 -173,0.3038080036640167 -174,0.27980467677116394 -175,0.2760487496852875 -176,0.2734012305736542 -177,0.27130740880966187 -178,0.26955366134643555 -179,0.26803523302078247 -180,0.26669636368751526 -181,0.26550835371017456 -182,0.2644420862197876 -183,0.263476699590683 -184,0.262599378824234 -185,0.2618001699447632 -186,0.2610713541507721 -187,0.260405957698822 -188,0.259795218706131 -189,0.25923264026641846 -190,0.2587129771709442 -191,0.25823110342025757 -192,0.2577832341194153 -193,0.25736480951309204 -194,0.256972998380661 -195,0.2566049098968506 -196,0.25625842809677124 -197,0.25593122839927673 -198,0.2556215226650238 -199,0.2553277611732483 -200,0.2550479769706726 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.080/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.080/training_config.txt deleted file mode 100644 index 6d5553b..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.080/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.080/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.080/training_log.csv deleted file mode 100644 index 2e3d5af..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,213.75233459472656 -2,144.91659545898438 -3,84.52567291259766 -4,14.287747383117676 -5,11.56564998626709 -6,5.72916841506958 -7,6.569018363952637 -8,6.6359543800354 -9,9.952731132507324 -10,11.137259483337402 -11,11.742608070373535 -12,7.280147552490234 -13,6.2738566398620605 -14,6.074666976928711 -15,5.9280548095703125 -16,5.809908390045166 -17,5.698787689208984 -18,5.58311653137207 -19,5.45709753036499 -20,5.315023899078369 -21,5.156440258026123 -22,4.9358086585998535 -23,4.7533698081970215 -24,4.514986991882324 -25,4.157462120056152 -26,3.3027751445770264 -27,3.014774799346924 -28,2.828134536743164 -29,2.6535589694976807 -30,2.4275925159454346 -31,1.224988579750061 -32,1.0326144695281982 -33,0.9040429592132568 -34,0.837325930595398 -35,0.7802968621253967 -36,0.7260206937789917 -37,0.5953264832496643 -38,0.5632900595664978 -39,0.5375853776931763 -40,0.5147020816802979 -41,0.49307960271835327 -42,0.47000372409820557 -43,0.4359363317489624 -44,0.3894558846950531 -45,0.37590739130973816 -46,0.36737021803855896 -47,0.36043238639831543 -48,0.3544873595237732 -49,0.3492850363254547 -50,0.3446817994117737 -51,0.3405757546424866 -52,0.33688613772392273 -53,0.3335525691509247 -54,0.3305256962776184 -55,0.3277673125267029 -56,0.32524824142456055 -57,0.32293397188186646 -58,0.3208073377609253 -59,0.3188490867614746 -60,0.3170395493507385 -61,0.3153652250766754 -62,0.31380632519721985 -63,0.3123517334461212 -64,0.31098899245262146 -65,0.30970969796180725 -66,0.3085029721260071 -67,0.3073629140853882 -68,0.30627936124801636 -69,0.3052476942539215 -70,0.3042643070220947 -71,0.3033238649368286 -72,0.30242201685905457 
-73,0.3015551269054413 -74,0.3007185757160187 -75,0.2999109923839569 -76,0.2991292178630829 -77,0.29836970567703247 -78,0.29763063788414 -79,0.29691189527511597 -80,0.2962109446525574 -81,0.2955252230167389 -82,0.29485437273979187 -83,0.294197142124176 -84,0.293551504611969 -85,0.29291680455207825 -86,0.29229286313056946 -87,0.29167768359184265 -88,0.29107043147087097 -89,0.29047027230262756 -90,0.2898769974708557 -91,0.28929048776626587 -92,0.2887097895145416 -93,0.2881343364715576 -94,0.28756362199783325 -95,0.28699785470962524 -96,0.28643614053726196 -97,0.2858782708644867 -98,0.2853240370750427 -99,0.2847738265991211 -100,0.2842266857624054 -101,0.28368210792541504 -102,0.2831399738788605 -103,0.2826009690761566 -104,0.28206512331962585 -105,0.28153112530708313 -106,0.28099873661994934 -107,0.28046849370002747 -108,0.2799402177333832 -109,0.27941322326660156 -110,0.278888076543808 -111,0.2783638536930084 -112,0.27784091234207153 -113,0.2773183584213257 -114,0.2767961919307709 -115,0.27627477049827576 -116,0.2757546305656433 -117,0.27523505687713623 -118,0.27471596002578735 -119,0.2741974890232086 -120,0.27368006110191345 -121,0.27316227555274963 -122,0.27264121174812317 -123,0.2721223831176758 -124,0.2716078758239746 -125,0.27109768986701965 -126,0.27059072256088257 -127,0.27008548378944397 -128,0.26957958936691284 -129,0.269069105386734 -130,0.26854315400123596 -131,0.26795637607574463 -132,0.2667262852191925 -133,0.2658275365829468 -134,0.26519909501075745 -135,0.26457369327545166 -136,0.26396873593330383 -137,0.26340386271476746 -138,0.2628770172595978 -139,0.26237374544143677 -140,0.2618885338306427 -141,0.26142144203186035 -142,0.26097092032432556 -143,0.26053521037101746 -144,0.26011255383491516 -145,0.25970011949539185 -146,0.2592967748641968 -147,0.25890031456947327 -148,0.25850993394851685 -149,0.25812387466430664 -150,0.25774142146110535 -151,0.25736165046691895 -152,0.256981760263443 -153,0.256600022315979 -154,0.25621405243873596 -155,0.2558208405971527 -156,0.2554178237915039 -157,0.2550014853477478 -158,0.25456923246383667 -159,0.2541246712207794 -160,0.2536747455596924 -161,0.2532365620136261 -162,0.25284162163734436 -163,0.25247806310653687 -164,0.25211435556411743 -165,0.25173473358154297 -166,0.2513110041618347 -167,0.2507563829421997 -168,0.25014299154281616 -169,0.2501814365386963 -170,0.2501217722892761 -171,0.24999010562896729 -172,0.24982847273349762 -173,0.24965296685695648 -174,0.24947039783000946 -175,0.24928438663482666 -176,0.24909627437591553 -177,0.24890722334384918 -178,0.24871771037578583 -179,0.24852831661701202 -180,0.2483389526605606 -181,0.24814994633197784 -182,0.24796098470687866 -183,0.24777227640151978 -184,0.24758365750312805 -185,0.24739517271518707 -186,0.24720735847949982 -187,0.24702028930187225 -188,0.24683350324630737 -189,0.24664688110351562 -190,0.24646036326885223 -191,0.24627414345741272 -192,0.24608822166919708 -193,0.24590298533439636 -194,0.24571843445301056 -195,0.24553422629833221 -196,0.24535034596920013 -197,0.245167076587677 -198,0.24498440325260162 -199,0.2448023110628128 -200,0.24462100863456726 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.100/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.100/training_config.txt deleted file mode 100644 index 4aec9b8..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.100/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 
to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.100/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.100/training_log.csv deleted file mode 100644 index 07c09ff..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,100.43730926513672 -2,96.93311309814453 -3,89.14500427246094 -4,16.861927032470703 -5,6.2114176750183105 -6,9.358972549438477 -7,4.834885597229004 -8,5.445126533508301 -9,8.018295288085938 -10,7.717260837554932 -11,8.220857620239258 -12,8.761358261108398 -13,11.86694049835205 -14,8.257389068603516 -15,8.167379379272461 -16,7.9784135818481445 -17,7.780478477478027 -18,7.464355945587158 -19,7.343413352966309 -20,7.376784324645996 -21,7.297830104827881 -22,7.13732385635376 -23,7.109685897827148 -24,7.030025005340576 -25,6.798166751861572 -26,6.609936237335205 -27,6.644596099853516 -28,6.62838077545166 -29,6.435997009277344 -30,6.391785621643066 
-31,6.423974990844727 -32,6.252047061920166 -33,6.118603706359863 -34,6.134428977966309 -35,6.014663219451904 -36,5.922581672668457 -37,5.935040473937988 -38,5.789401054382324 -39,5.7784552574157715 -40,5.686356544494629 -41,5.6193108558654785 -42,5.561056613922119 -43,5.4459943771362305 -44,5.4203901290893555 -45,5.370626926422119 -46,5.343603610992432 -47,5.662219047546387 -48,5.549167156219482 -49,6.598168849945068 -50,5.829538345336914 -51,6.1988091468811035 -52,6.1207146644592285 -53,5.266809463500977 -54,6.596978664398193 -55,6.610266208648682 -56,5.96524715423584 -57,6.112117290496826 -58,5.745918273925781 -59,5.308660507202148 -60,5.324173927307129 -61,5.464718818664551 -62,5.423999786376953 -63,5.597532749176025 -64,5.491841793060303 -65,5.571497440338135 -66,5.510494232177734 -67,5.525457859039307 -68,5.578790664672852 -69,5.486977577209473 -70,5.637714862823486 -71,5.647332191467285 -72,5.562199592590332 -73,5.6814799308776855 -74,5.481347560882568 -75,5.489483833312988 -76,5.390561580657959 -77,5.346833229064941 -78,5.345126152038574 -79,5.304792404174805 -80,5.242499351501465 -81,5.252551078796387 -82,5.173880100250244 -83,5.17497444152832 -84,5.146238327026367 -85,5.095942974090576 -86,5.073659896850586 -87,5.0636467933654785 -88,5.031016826629639 -89,5.002904415130615 -90,4.988232612609863 -91,4.967968940734863 -92,4.927240371704102 -93,4.880908966064453 -94,4.84502649307251 -95,4.813478469848633 -96,4.7735466957092285 -97,4.7547287940979 -98,4.7235846519470215 -99,4.695253372192383 -100,4.66761589050293 -101,4.631405353546143 -102,4.601306915283203 -103,4.559226989746094 -104,4.5140380859375 -105,4.4657511711120605 -106,4.394598960876465 -107,4.199791431427002 -108,3.578441858291626 -109,3.502746343612671 -110,3.124742269515991 -111,2.6868035793304443 -112,2.3495395183563232 -113,1.6503485441207886 -114,1.3978327512741089 -115,2.0735440254211426 -116,2.037876844406128 -117,1.96929931640625 -118,1.8223828077316284 -119,0.888815701007843 -120,0.733945369720459 -121,0.6840583682060242 -122,0.6493815779685974 -123,0.6209799647331238 -124,0.597114622592926 -125,0.5767983794212341 -126,0.5591034293174744 -127,0.5435870885848999 -128,0.5298057794570923 -129,0.5173324942588806 -130,0.5059248208999634 -131,0.4954167604446411 -132,0.4856302738189697 -133,0.47635146975517273 -134,0.46737954020500183 -135,0.45856526494026184 -136,0.4496859014034271 -137,0.44047781825065613 -138,0.43099892139434814 -139,0.4213165044784546 -140,0.41163626313209534 -141,0.4023117125034332 -142,0.39364612102508545 -143,0.3854162395000458 -144,0.3773326873779297 -145,0.3681708872318268 -146,0.35411253571510315 -147,0.33172619342803955 -148,0.3144674301147461 -149,0.3061102330684662 -150,0.30091962218284607 -151,0.29698294401168823 -152,0.29382845759391785 -153,0.2913872003555298 -154,0.28941747546195984 -155,0.28778699040412903 -156,0.2863949239253998 -157,0.28517863154411316 -158,0.2841072082519531 -159,0.28314682841300964 -160,0.28228095173835754 -161,0.2814924418926239 -162,0.28076842427253723 -163,0.2801024317741394 -164,0.2794859707355499 -165,0.2789105474948883 -166,0.2783709466457367 -167,0.27786359190940857 -168,0.27738362550735474 -169,0.2769278287887573 -170,0.27649322152137756 -171,0.2760770320892334 -172,0.27567800879478455 -173,0.2752940058708191 -174,0.2749234139919281 -175,0.27456581592559814 -176,0.2742190957069397 -177,0.2738820016384125 -178,0.27355334162712097 -179,0.27323243021965027 -180,0.27291837334632874 -181,0.2726115882396698 -182,0.272310733795166 -183,0.27201494574546814 
-184,0.2717238664627075 -185,0.2714369595050812 -186,0.2711535692214966 -187,0.2708740532398224 -188,0.27059799432754517 -189,0.27032479643821716 -190,0.27005380392074585 -191,0.2697846591472626 -192,0.26951709389686584 -193,0.2692510783672333 -194,0.26898691058158875 -195,0.2687237560749054 -196,0.26846104860305786 -197,0.26819872856140137 -198,0.2679362893104553 -199,0.2676732838153839 -200,0.267410010099411 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.125/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.125/training_config.txt deleted file mode 100644 index 5ecab3a..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.125/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.125/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.125/training_log.csv deleted file mode 100644 index e9bfd43..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,95.41421508789062 -2,9.637655258178711 -3,2.501572370529175 -4,2.2572340965270996 -5,2.5352203845977783 -6,2.6880838871002197 -7,2.7544963359832764 -8,2.744837999343872 -9,2.7000091075897217 -10,2.6246747970581055 -11,2.4920523166656494 -12,2.3269684314727783 -13,2.1334569454193115 -14,1.8318654298782349 -15,1.207951545715332 -16,0.9834228754043579 -17,0.8784857988357544 -18,0.809000551700592 -19,0.7551987767219543 -20,0.7101470232009888 -21,0.6710558533668518 -22,0.6355773210525513 -23,0.6024050712585449 -24,0.5707368850708008 -25,0.5396313667297363 -26,0.5083053708076477 -27,0.47475922107696533 -28,0.4371282756328583 -29,0.4041551947593689 -30,0.3802357017993927 -31,0.362160861492157 -32,0.34823542833328247 -33,0.33688151836395264 -34,0.32754936814308167 -35,0.31982263922691345 -36,0.3132243752479553 -37,0.3075513243675232 -38,0.3026157021522522 -39,0.2982998490333557 -40,0.29447075724601746 -41,0.2910462021827698 -42,0.28796643018722534 -43,0.28517985343933105 -44,0.28264713287353516 -45,0.2803274691104889 -46,0.2781863212585449 -47,0.27620476484298706 -48,0.27436304092407227 -49,0.2726420760154724 -50,0.27102431654930115 -51,0.26950252056121826 -52,0.26807183027267456 -53,0.2667229473590851 -54,0.2654457986354828 -55,0.26423582434654236 -56,0.2630847692489624 -57,0.26198649406433105 -58,0.2609376907348633 -59,0.25993213057518005 -60,0.25896966457366943 -61,0.2580448091030121 -62,0.2571549713611603 -63,0.2562972903251648 -64,0.2554702162742615 -65,0.25466760993003845 -66,0.2538895606994629 -67,0.25313419103622437 -68,0.2523998022079468 -69,0.2516857385635376 -70,0.25099021196365356 
-71,0.25031396746635437 -72,0.24965296685695648 -73,0.24900583922863007 -74,0.24837158620357513 -75,0.2477491945028305 -76,0.2471380978822708 -77,0.2465362399816513 -78,0.24594222009181976 -79,0.24535711109638214 -80,0.24478262662887573 -81,0.2442171275615692 -82,0.24366053938865662 -83,0.24311400949954987 -84,0.2425750195980072 -85,0.2420436292886734 -86,0.2415219247341156 -87,0.24101051688194275 -88,0.24050457775592804 -89,0.24000443518161774 -90,0.2395101934671402 -91,0.23902547359466553 -92,0.23854941129684448 -93,0.23808063566684723 -94,0.23761539161205292 -95,0.2371557205915451 -96,0.23670300841331482 -97,0.23626047372817993 -98,0.23582488298416138 -99,0.23539681732654572 -100,0.234974205493927 -101,0.23455804586410522 -102,0.2341471016407013 -103,0.23374170064926147 -104,0.23334072530269623 -105,0.23294417560100555 -106,0.23255202174186707 -107,0.23216494917869568 -108,0.23178164660930634 -109,0.23140251636505127 -110,0.23102767765522003 -111,0.2306574434041977 -112,0.2302904576063156 -113,0.2299269735813141 -114,0.22956769168376923 -115,0.22921140491962433 -116,0.2288571149110794 -117,0.22850479185581207 -118,0.22815434634685516 -119,0.22780588269233704 -120,0.22746078670024872 -121,0.22711844742298126 -122,0.22677931189537048 -123,0.22644400596618652 -124,0.22611108422279358 -125,0.2257806658744812 -126,0.22545281052589417 -127,0.22512859106063843 -128,0.22480659186840057 -129,0.22448676824569702 -130,0.22416941821575165 -131,0.22385519742965698 -132,0.22354330122470856 -133,0.22323349118232727 -134,0.22292596101760864 -135,0.22262100875377655 -136,0.22231870889663696 -137,0.22201837599277496 -138,0.22172003984451294 -139,0.2214239239692688 -140,0.22112996876239777 -141,0.22083812952041626 -142,0.22054846584796906 -143,0.22026120126247406 -144,0.21997550129890442 -145,0.2196919322013855 -146,0.21941110491752625 -147,0.21913203597068787 -148,0.21885502338409424 -149,0.218580961227417 -150,0.21830910444259644 -151,0.2180401086807251 -152,0.21777303516864777 -153,0.21750803291797638 -154,0.21724630892276764 -155,0.21698610484600067 -156,0.2167278528213501 -157,0.21647123992443085 -158,0.21621641516685486 -159,0.21596387028694153 -160,0.21571239829063416 -161,0.21546289324760437 -162,0.21521472930908203 -163,0.21496836841106415 -164,0.2147236168384552 -165,0.2144802212715149 -166,0.21423733234405518 -167,0.21399334073066711 -168,0.21374747157096863 -169,0.21349436044692993 -170,0.21321013569831848 -171,0.21281449496746063 -172,0.21257062256336212 -173,0.21238145232200623 -174,0.21218504011631012 -175,0.21199607849121094 -176,0.21183383464813232 -177,0.21167518198490143 -178,0.2115168273448944 -179,0.21135972440242767 -180,0.21120436489582062 -181,0.21105122566223145 -182,0.21090054512023926 -183,0.21075186133384705 -184,0.2106052041053772 -185,0.21046045422554016 -186,0.2103177160024643 -187,0.2101764976978302 -188,0.21003688871860504 -189,0.2098984569311142 -190,0.20976176857948303 -191,0.2096264660358429 -192,0.20949237048625946 -193,0.20935948193073273 -194,0.2092277854681015 -195,0.20909738540649414 -196,0.20896796882152557 -197,0.20883971452713013 -198,0.20871224999427795 -199,0.2085854709148407 -200,0.20845991373062134 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.160/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.160/training_config.txt deleted file mode 100644 index db08521..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.160/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.160/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.160/training_log.csv deleted file mode 100644 index 7c3d2ea..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,53.14285659790039 -2,8.485350608825684 -3,4.047055244445801 -4,4.3218560218811035 -5,11.188774108886719 -6,169230544.0 -7,7.47413444519043 -8,104.8115234375 -9,97289.515625 -10,55537.1796875 -11,16373.12890625 -12,7047.0087890625 -13,3719.85791015625 -14,2398.782470703125 -15,1655.3016357421875 -16,1262.3192138671875 -17,1046.24072265625 -18,864.222900390625 -19,736.0709838867188 -20,661.6405639648438 -21,591.5280151367188 -22,523.3145751953125 -23,483.91607666015625 -24,453.1597595214844 -25,427.9263610839844 -26,406.2738037109375 
-27,386.4601745605469 -28,366.847900390625 -29,349.10198974609375 -30,335.71240234375 -31,324.9439697265625 -32,315.41522216796875 -33,306.9287414550781 -34,299.3354797363281 -35,292.5215148925781 -36,286.3797607421875 -37,280.8400573730469 -38,275.8109130859375 -39,271.21514892578125 -40,266.9905090332031 -41,263.1186828613281 -42,259.5670471191406 -43,256.2671203613281 -44,253.18511962890625 -45,250.30577087402344 -46,247.60382080078125 -47,245.04502868652344 -48,242.6671142578125 -49,240.43145751953125 -50,238.3055877685547 -51,236.28091430664062 -52,234.33877563476562 -53,232.4892120361328 -54,230.72315979003906 -55,229.021728515625 -56,227.37222290039062 -57,225.77609252929688 -58,224.21893310546875 -59,222.6989288330078 -60,221.21461486816406 -61,219.77049255371094 -62,218.36122131347656 -63,216.98497009277344 -64,215.6407470703125 -65,214.32313537597656 -66,213.0328369140625 -67,211.7731170654297 -68,210.54345703125 -69,209.34364318847656 -70,208.17645263671875 -71,207.0418243408203 -72,205.93911743164062 -73,204.8711395263672 -74,203.8440399169922 -75,202.83505249023438 -76,201.8434295654297 -77,200.8687744140625 -78,199.9086151123047 -79,198.9639129638672 -80,198.03208923339844 -81,197.11354064941406 -82,196.2053680419922 -83,195.30914306640625 -84,194.42245483398438 -85,193.54763793945312 -86,192.6814727783203 -87,191.8249969482422 -88,190.9762725830078 -89,190.13815307617188 -90,189.30726623535156 -91,188.48464965820312 -92,187.66995239257812 -93,186.86358642578125 -94,186.0642547607422 -95,185.27232360839844 -96,184.4871368408203 -97,183.70884704589844 -98,182.93748474121094 -99,182.17227172851562 -100,181.41390991210938 -101,180.66070556640625 -102,179.9159698486328 -103,179.1767120361328 -104,178.44412231445312 -105,177.7157745361328 -106,176.99464416503906 -107,176.28028869628906 -108,175.5712890625 -109,174.86798095703125 -110,174.1688232421875 -111,173.47544860839844 -112,172.7867431640625 -113,172.10333251953125 -114,171.42430114746094 -115,170.75015258789062 -116,170.08148193359375 -117,169.41793823242188 -118,168.75991821289062 -119,168.1067352294922 -120,167.4604034423828 -121,166.81764221191406 -122,166.1800994873047 -123,165.54652404785156 -124,164.91824340820312 -125,164.294189453125 -126,163.67312622070312 -127,163.0565185546875 -128,162.443115234375 -129,161.83541870117188 -130,161.2306671142578 -131,160.62896728515625 -132,160.0313720703125 -133,159.43797302246094 -134,158.8486785888672 -135,158.26145935058594 -136,157.6769256591797 -137,157.0966796875 -138,156.52029418945312 -139,155.94744873046875 -140,155.3775634765625 -141,154.81069946289062 -142,154.24832153320312 -143,153.68838500976562 -144,153.1308135986328 -145,152.57681274414062 -146,152.02664184570312 -147,151.47885131835938 -148,150.9330291748047 -149,150.3901824951172 -150,149.84988403320312 -151,149.31214904785156 -152,148.77688598632812 -153,148.25265502929688 -154,147.735595703125 -155,147.22531127929688 -156,146.71868896484375 -157,146.21725463867188 -158,145.71971130371094 -159,145.22613525390625 -160,144.73741149902344 -161,144.25096130371094 -162,143.76693725585938 -163,143.28530883789062 -164,142.8070068359375 -165,142.33102416992188 -166,141.85716247558594 -167,141.38552856445312 -168,140.91590881347656 -169,140.4486846923828 -170,139.98268127441406 -171,139.5181884765625 -172,139.0554962158203 -173,138.59500122070312 -174,138.135498046875 -175,137.6772003173828 -176,137.22055053710938 -177,136.76617431640625 -178,136.31605529785156 -179,135.86781311035156 -180,135.42324829101562 
-181,134.98043823242188 -182,134.54022216796875 -183,134.10147094726562 -184,133.66441345214844 -185,133.2293243408203 -186,132.7957000732422 -187,132.3634033203125 -188,131.93292236328125 -189,131.503662109375 -190,131.07534790039062 -191,130.6481475830078 -192,130.22186279296875 -193,129.79640197753906 -194,129.37232971191406 -195,128.94927978515625 -196,128.52684020996094 -197,128.10629272460938 -198,127.68697357177734 -199,127.2684097290039 -200,126.85059356689453 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.200/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.200/training_config.txt deleted file mode 100644 index 9b9a598..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.200/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.200/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.200/training_log.csv deleted file mode 100644 index 6871722..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,30.786584854125977 -2,1.681357979774475 -3,1.2378876209259033 -4,0.6146263480186462 -5,0.5548313856124878 -6,0.5137755274772644 -7,0.4818757176399231 -8,0.4549444317817688 -9,0.4294472634792328 -10,0.4036228358745575 -11,0.37501829862594604 -12,0.3448524475097656 -13,0.32043248414993286 -14,0.3036501705646515 -15,0.2926180958747864 -16,0.284405916929245 -17,0.2779163122177124 -18,0.2725856304168701 -19,0.2679991126060486 -20,0.264038622379303 -21,0.2606087625026703 -22,0.2576262652873993 -23,0.2550634443759918 -24,0.25279951095581055 -25,0.25077083706855774 -26,0.24894018471240997 -27,0.24727317690849304 -28,0.24574851989746094 -29,0.2443452626466751 -30,0.2430475354194641 -31,0.24184155464172363 -32,0.2407168447971344 -33,0.23966296017169952 -34,0.2386685162782669 -35,0.23772922158241272 -36,0.2368369996547699 -37,0.23598597943782806 -38,0.23517362773418427 -39,0.2343970686197281 -40,0.23365278542041779 -41,0.23293717205524445 -42,0.23224800825119019 -43,0.2315862476825714 -44,0.2309478223323822 -45,0.23033185303211212 -46,0.22973735630512238 -47,0.22916392982006073 -48,0.2286134660243988 -49,0.2280857115983963 -50,0.2275809943675995 -51,0.22709544003009796 -52,0.22663012146949768 -53,0.22618259489536285 -54,0.22575268149375916 -55,0.2253376692533493 -56,0.22493690252304077 -57,0.22454865276813507 -58,0.22417187690734863 -59,0.2238062173128128 -60,0.22345033288002014 -61,0.22310476005077362 -62,0.22276675701141357 -63,0.2224380522966385 -64,0.22211626172065735 -65,0.22180162370204926 -66,0.22149285674095154 -67,0.22119003534317017 -68,0.22089259326457977 -69,0.22059887647628784 -70,0.2203090786933899 
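Aside: the normalized_loss recorded verbatim in each of these training_config.txt files runs standalone once math and torch are imported. A minimal sketch follows (the imports and the final boundary check are the only additions), confirming the docstring's two endpoint claims:

    import math
    import torch

    def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor,
                        exponent: float, min_val: float = 0.01,
                        delta: float = 1) -> torch.Tensor:
        # Maps |theta - desired_theta| on [0, 2*pi] into [min_val, 1]; the
        # shift 'delta' keeps the gradient finite at zero error when exponent < 1.
        error = torch.abs(theta - desired_theta)
        numerator = (error + delta) ** exponent - delta ** exponent
        denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
        return min_val + (1 - min_val) * (numerator / denominator)

    def five_loss(theta, desired_theta, min_val=0.01):
        return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val)

    zero = torch.tensor(0.0)
    print(five_loss(zero, zero))                       # tensor(0.0100) == min_val
    print(five_loss(torch.tensor(2 * math.pi), zero))  # ~tensor(1.)

The deleted five/lr_0.200 training_log.csv resumes below.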
-71,0.22002220153808594 -72,0.21974101662635803 -73,0.21946243941783905 -74,0.21918843686580658 -75,0.21891681849956512 -76,0.21864725649356842 -77,0.21838174760341644 -78,0.21812690794467926 -79,0.21787941455841064 -80,0.2176399528980255 -81,0.21740728616714478 -82,0.21718090772628784 -83,0.21695969998836517 -84,0.21674367785453796 -85,0.21653267741203308 -86,0.2163265496492386 -87,0.2161242961883545 -88,0.21592600643634796 -89,0.21573171019554138 -90,0.21554075181484222 -91,0.21535338461399078 -92,0.21516874432563782 -93,0.2149873673915863 -94,0.21480873227119446 -95,0.2146330177783966 -96,0.21445977687835693 -97,0.21428894996643066 -98,0.21411976218223572 -99,0.2139524519443512 -100,0.21378664672374725 -101,0.21362271904945374 -102,0.21346044540405273 -103,0.21330036222934723 -104,0.2131413072347641 -105,0.21298328042030334 -106,0.21282383799552917 -107,0.21266339719295502 -108,0.21250155568122864 -109,0.2123381346464157 -110,0.21217478811740875 -111,0.2120106816291809 -112,0.21184618771076202 -113,0.2116815447807312 -114,0.211516872048378 -115,0.21135272085666656 -116,0.21118858456611633 -117,0.2110253870487213 -118,0.21086272597312927 -119,0.2107010781764984 -120,0.21053968369960785 -121,0.21037939190864563 -122,0.21021981537342072 -123,0.21006116271018982 -124,0.2099035680294037 -125,0.2097470760345459 -126,0.20959144830703735 -127,0.2094370275735855 -128,0.2092839777469635 -129,0.20913194119930267 -130,0.20898133516311646 -131,0.20883159339427948 -132,0.20868340134620667 -133,0.2085360735654831 -134,0.20838990807533264 -135,0.20824511349201202 -136,0.20810116827487946 -137,0.2079588919878006 -138,0.2078179568052292 -139,0.20767800509929657 -140,0.20753930509090424 -141,0.20740143954753876 -142,0.2072649598121643 -143,0.20712950825691223 -144,0.20699545741081238 -145,0.20686306059360504 -146,0.20673134922981262 -147,0.20660097897052765 -148,0.2064712792634964 -149,0.2063431292772293 -150,0.20621612668037415 -151,0.20609070360660553 -152,0.2059663087129593 -153,0.2058427929878235 -154,0.20572030544281006 -155,0.20559877157211304 -156,0.20547857880592346 -157,0.2053588181734085 -158,0.2052401453256607 -159,0.20512184500694275 -160,0.20500479638576508 -161,0.204888254404068 -162,0.20477284491062164 -163,0.20465780794620514 -164,0.20454367995262146 -165,0.20443008840084076 -166,0.20431707799434662 -167,0.20420458912849426 -168,0.20409280061721802 -169,0.20398202538490295 -170,0.20387178659439087 -171,0.20376236736774445 -172,0.20365354418754578 -173,0.20354589819908142 -174,0.2034396380186081 -175,0.20333422720432281 -176,0.20323002338409424 -177,0.20312710106372833 -178,0.20302511751651764 -179,0.20292435586452484 -180,0.20282457768917084 -181,0.2027253359556198 -182,0.20262713730335236 -183,0.2025294303894043 -184,0.20243266224861145 -185,0.20233644545078278 -186,0.20224088430404663 -187,0.2021460384130478 -188,0.20205163955688477 -189,0.20195838809013367 -190,0.20186589658260345 -191,0.20177403092384338 -192,0.20168283581733704 -193,0.20159222185611725 -194,0.20150259137153625 -195,0.20141348242759705 -196,0.20132528245449066 -197,0.2012374997138977 -198,0.20115014910697937 -199,0.20106373727321625 -200,0.2009778916835785 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.250/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.250/training_config.txt deleted file mode 100644 index b0540f4..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.250/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.250/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.250/training_log.csv deleted file mode 100644 index 0b9e353..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,9.433856964111328 -2,2.5299301147460938 -3,3.142794370651245 -4,3.465233564376831 -5,3.6106789112091064 -6,3.592188835144043 -7,3.4246888160705566 -8,3.159924030303955 -9,2.843125104904175 -10,2.502859354019165 -11,2.1569066047668457 -12,1.8219718933105469 -13,1.519317865371704 -14,1.2742528915405273 -15,1.0896445512771606 -16,0.9504628777503967 -17,0.8406355381011963 -18,0.7525423765182495 -19,0.6805659532546997 -20,0.6205721497535706 -21,0.5700027346611023 -22,0.5271663665771484 -23,0.49078914523124695 -24,0.4597688317298889 
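Aside: the 22 training cases repeated in each config are [theta0, omega0, alpha0, desired_theta] rows, and the float literals are familiar angles in radians (0.5235987... = pi/6, 2.0943951... = 2*pi/3, 6.2831854... = 2*pi, and so on). A small illustrative sketch, showing the first four cases only, of how they stack into a [batch, 4] tensor of initial conditions:

    import math
    import torch

    # First four cases above: initial angles of +/-30 and +/-120 degrees,
    # zero initial rates, target angle 0.
    cases = torch.tensor([
        [ math.pi / 6,     0.0, 0.0, 0.0],
        [-math.pi / 6,     0.0, 0.0, 0.0],
        [ 2 * math.pi / 3, 0.0, 0.0, 0.0],
        [-2 * math.pi / 3, 0.0, 0.0, 0.0],
    ])
    print(cases.shape)  # torch.Size([4, 4])

The deleted five/lr_0.250 training_log.csv resumes below.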
-25,0.43347999453544617 -26,0.41160839796066284 -27,0.39356037974357605 -28,0.3781180679798126 -29,0.3649483621120453 -30,0.35368606448173523 -31,0.34402111172676086 -32,0.33566585183143616 -33,0.328347772359848 -34,0.32193684577941895 -35,0.3162860572338104 -36,0.3112390339374542 -37,0.3067215085029602 -38,0.30259305238723755 -39,0.29889318346977234 -40,0.29553481936454773 -41,0.29249563813209534 -42,0.28975072503089905 -43,0.28725308179855347 -44,0.28496432304382324 -45,0.2828529477119446 -46,0.2808634042739868 -47,0.2789967358112335 -48,0.27724477648735046 -49,0.27560004591941833 -50,0.27405259013175964 -51,0.27259358763694763 -52,0.2712130546569824 -53,0.2699030935764313 -54,0.268657922744751 -55,0.26746630668640137 -56,0.2663276791572571 -57,0.2652367353439331 -58,0.26419228315353394 -59,0.2631900906562805 -60,0.2622276842594147 -61,0.2613007426261902 -62,0.2604082524776459 -63,0.25954726338386536 -64,0.25871461629867554 -65,0.2579105496406555 -66,0.2571322023868561 -67,0.2563781440258026 -68,0.2556454539299011 -69,0.25493475794792175 -70,0.2542436420917511 -71,0.2535701394081116 -72,0.2529148757457733 -73,0.2522762715816498 -74,0.2516525387763977 -75,0.25104472041130066 -76,0.25045084953308105 -77,0.24986939132213593 -78,0.24930095672607422 -79,0.24874401092529297 -80,0.24819909036159515 -81,0.24766623973846436 -82,0.24714381992816925 -83,0.24663110077381134 -84,0.24612957239151 -85,0.24563758075237274 -86,0.24515391886234283 -87,0.24467957019805908 -88,0.2442140281200409 -89,0.24375654757022858 -90,0.24330686032772064 -91,0.242864727973938 -92,0.24243015050888062 -93,0.24200324714183807 -94,0.2415838986635208 -95,0.24117112159729004 -96,0.24076442420482635 -97,0.24036407470703125 -98,0.2399684637784958 -99,0.2395772635936737 -100,0.23919102549552917 -101,0.23880943655967712 -102,0.23843106627464294 -103,0.23805592954158783 -104,0.2376854121685028 -105,0.23731909692287445 -106,0.2369561493396759 -107,0.2365967035293579 -108,0.2362401932477951 -109,0.2358865588903427 -110,0.2355341762304306 -111,0.23518341779708862 -112,0.23483504354953766 -113,0.23449067771434784 -114,0.23415066301822662 -115,0.2338140308856964 -116,0.2334805130958557 -117,0.23315145075321198 -118,0.23282645642757416 -119,0.23250454664230347 -120,0.23218612372875214 -121,0.23187173902988434 -122,0.23156072199344635 -123,0.23125240206718445 -124,0.23094765841960907 -125,0.230645552277565 -126,0.23034662008285522 -127,0.2300506979227066 -128,0.22975826263427734 -129,0.22946886718273163 -130,0.22918203473091125 -131,0.22889865934848785 -132,0.22861698269844055 -133,0.22833673655986786 -134,0.2280593067407608 -135,0.2277848720550537 -136,0.2275126576423645 -137,0.2272426337003708 -138,0.226975679397583 -139,0.22671128809452057 -140,0.22644953429698944 -141,0.2261902093887329 -142,0.22593337297439575 -143,0.22567854821681976 -144,0.22542645037174225 -145,0.22517727315425873 -146,0.22493018209934235 -147,0.2246852070093155 -148,0.22444269061088562 -149,0.2242031842470169 -150,0.22396551072597504 -151,0.22372961044311523 -152,0.22349587082862854 -153,0.22326447069644928 -154,0.2230348289012909 -155,0.22280701994895935 -156,0.22258184850215912 -157,0.2223588228225708 -158,0.22213812172412872 -159,0.22191961109638214 -160,0.221703439950943 -161,0.22148913145065308 -162,0.22127680480480194 -163,0.2210668921470642 -164,0.22085902094841003 -165,0.22065305709838867 -166,0.2204488217830658 -167,0.2202477604150772 -168,0.2200484722852707 -169,0.2198508232831955 -170,0.21965506672859192 -171,0.2194613665342331 
-172,0.21926961839199066 -173,0.21907933056354523 -174,0.21889100968837738 -175,0.2187042087316513 -176,0.2185191512107849 -177,0.21833576261997223 -178,0.2181544005870819 -179,0.21797414124011993 -180,0.2177950143814087 -181,0.21761807799339294 -182,0.21744289994239807 -183,0.21726882457733154 -184,0.21709583699703217 -185,0.2169242948293686 -186,0.21675477921962738 -187,0.21658632159233093 -188,0.21641886234283447 -189,0.21625308692455292 -190,0.21608847379684448 -191,0.2159251868724823 -192,0.2157626450061798 -193,0.21560148894786835 -194,0.21544122695922852 -195,0.2152819186449051 -196,0.2151239961385727 -197,0.21496735513210297 -198,0.21481168270111084 -199,0.21465693414211273 -200,0.2145029902458191 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.300/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.300/training_config.txt deleted file mode 100644 index 6596ed8..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.300/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.300/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.300/training_log.csv deleted file mode 100644 index c931da9..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.300/training_log.csv +++ /dev/null @@ -1,47 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,6.646026134490967 -2,0.6921998858451843 -3,0.5801647901535034 -4,0.5539664030075073 -5,0.5352448225021362 -6,0.516973078250885 -7,0.49768322706222534 -8,0.47827211022377014 -9,0.45960789918899536 -10,0.44195565581321716 -11,0.425246924161911 -12,0.4094034433364868 -13,0.3944760262966156 -14,0.3805064260959625 -15,0.3674965798854828 -16,0.35542601346969604 -17,0.3442697525024414 -18,0.33402371406555176 -19,0.3246816098690033 -20,0.31616491079330444 -21,0.30842363834381104 -22,0.3014298379421234 -23,0.29512548446655273 -24,0.2894197404384613 -25,0.28433334827423096 -26,0.279852956533432 -27,0.2758702337741852 -28,0.2723054885864258 -29,0.2691437005996704 -30,0.26628929376602173 -31,0.2636895775794983 -32,0.2613082230091095 -33,0.2591358721256256 -34,0.25713443756103516 -35,546871.1875 -36,2501842176.0 -37,4865546752.0 -38,4806522880.0 -39,4581817856.0 -40,4479803392.0 -41,4458418688.0 -42,nan -43,nan -44,nan -45,nan diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.400/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.400/training_config.txt deleted file mode 100644 index 75dc71a..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.400/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = 
state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.400/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.400/training_log.csv deleted file mode 100644 index 7a58b9c..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.400/training_log.csv +++ /dev/null @@ -1,32 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,2.362724542617798 -2,2.392826795578003 -3,2.2736353874206543 -4,1.9006738662719727 -5,1.394894003868103 -6,0.9551928043365479 -7,0.7115665674209595 -8,0.5500522255897522 -9,0.45250430703163147 -10,0.38881200551986694 -11,0.34427231550216675 -12,0.31002360582351685 -13,0.2798919677734375 -14,0.255569726228714 -15,14.465670585632324 -16,99704.53125 -17,963.663818359375 -18,1015.0718383789062 -19,975.5271606445312 -20,928.832275390625 -21,921.9561767578125 -22,968.0391845703125 -23,908.3063354492188 -24,907.8488159179688 -25,891.1127319335938 -26,879.9981079101562 -27,nan -28,nan -29,nan -30,nan diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.500/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.500/training_config.txt deleted file mode 100644 index 416e933..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.500/training_config.txt +++ /dev/null @@ -1,63 
+0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.500/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.500/training_log.csv deleted file mode 100644 index 77a5ece..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.500/training_log.csv +++ /dev/null @@ -1,17 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,2.133845090866089 -2,2.3031303882598877 -3,1.9755902290344238 -4,1.4275221824645996 -5,0.941654622554779 -6,0.8686284422874451 -7,0.6571183800697327 -8,0.6279698014259338 -9,4022965248.0 -10,4916342272.0 -11,4916342272.0 -12,4916342272.0 -13,4916342272.0 -14,4916342272.0 -15,4916342272.0 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.600/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.600/training_config.txt deleted file mode 100644 index 6604767..0000000 --- 
a/training/base_loss_learning_rate_sweep/five/lr_0.600/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.600/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.600/training_log.csv deleted file mode 100644 index 6155c24..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.600/training_log.csv +++ /dev/null @@ -1,15 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,2.6557106971740723 -2,2.378608226776123 -3,1.9489076137542725 -4,1.388096809387207 -5,0.9401108026504517 -6,0.6727041006088257 -7,2153934848.0 -8,4916342272.0 -9,4916342272.0 -10,4916342272.0 -11,4916342272.0 -12,4916342272.0 -13,4916342272.0 diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.700/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.700/training_config.txt deleted file mode 
100644 index abbb175..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.700/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.700/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.700/training_log.csv deleted file mode 100644 index 203459d..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.700/training_log.csv +++ /dev/null @@ -1,11 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,2.9760751724243164 -2,2.6220152378082275 -3,2.1410739421844482 -4,1.5488426685333252 -5,1.053941249847412 -6,nan -7,nan -8,nan -9,nan diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.800/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.800/training_config.txt deleted file mode 100644 index 4533510..0000000 --- 
a/training/base_loss_learning_rate_sweep/five/lr_0.800/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.800/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.800/training_log.csv deleted file mode 100644 index 2bf5f3e..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.800/training_log.csv +++ /dev/null @@ -1,11 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,3.3278462886810303 -2,2.7362890243530273 -3,2.1506998538970947 -4,1.5317654609680176 -5,2.213674783706665 -6,nan -7,nan -8,nan -9,nan diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.900/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_0.900/training_config.txt deleted file mode 100644 index a90b4a1..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.900/training_config.txt +++ 
/dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_0.900/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_0.900/training_log.csv deleted file mode 100644 index 60dbdd8..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_0.900/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,3.765124559402466 -2,2.760067939758301 -3,2.0699892044067383 -4,1.454463005065918 -5,nan -6,nan -7,nan -8,nan diff --git a/training/base_loss_learning_rate_sweep/five/lr_1.000/training_config.txt b/training/base_loss_learning_rate_sweep/five/lr_1.000/training_config.txt deleted file mode 100644 index ee30e36..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_1.000/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function Name: five -Loss Function Exponent: 5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def five_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/five/lr_1.000/training_log.csv b/training/base_loss_learning_rate_sweep/five/lr_1.000/training_log.csv deleted file mode 100644 index 60f9197..0000000 --- a/training/base_loss_learning_rate_sweep/five/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,9022.2587890625 -1,4.307502746582031 -2,2.659949779510498 -3,1.898849606513977 -4,nan -5,nan -6,nan -7,nan diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.003/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.003/training_config.txt deleted file mode 100644 index 496bf1e..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.003/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning 
Rate: 0.0025 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.003/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.003/training_log.csv deleted file mode 100644 index 5f664db..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.003/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,997.6203002929688 -2,891.60498046875 -3,792.384521484375 -4,699.4647827148438 -5,632.1798095703125 -6,569.7451171875 -7,513.91748046875 -8,464.5724182128906 -9,416.0574035644531 -10,373.46307373046875 -11,338.5057067871094 -12,292.76019287109375 -13,264.6914367675781 -14,246.33180236816406 -15,215.95249938964844 -16,170.56326293945312 -17,152.89686584472656 -18,151.65560913085938 -19,147.84130859375 -20,132.6151885986328 -21,106.1927719116211 -22,83.6707992553711 -23,73.62540435791016 -24,72.54122924804688 -25,62.63677215576172 -26,63.071067810058594 -27,54.98676300048828 -28,51.694881439208984 -29,42.68155288696289 -30,42.36272048950195 -31,42.05136489868164 
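Aside: the five sweep above shows a clean stability boundary: every run at lr >= 0.3 blows up within a few dozen epochs (losses either plateau near 4.9e9 or go NaN), while lr <= 0.25 decays smoothly. A hedged sketch of one way to scan logs like these and separate converged from diverged runs; it assumes only the Epoch,Loss CSV format and the lr_*/training_log.csv layout visible in these paths, run against a checkout that still contains the files:

    import csv
    import math
    from pathlib import Path

    def last_loss(log_path):
        # training_log.csv rows are "Epoch,Loss"; float() parses "nan" as NaN.
        with open(log_path, newline="") as f:
            rows = list(csv.DictReader(f))
        return float(rows[-1]["Loss"])

    sweep = Path("training/base_loss_learning_rate_sweep/five")
    for log in sorted(sweep.glob("lr_*/training_log.csv")):
        loss = last_loss(log)
        # 1e6 is an arbitrary "clearly diverged" cutoff: some runs plateau at
        # a huge finite loss (~4.9e9) instead of ever reaching NaN.
        diverged = not math.isfinite(loss) or loss > 1e6
        print(log.parent.name, "diverged" if diverged else f"{loss:.4f}")

The deleted four/lr_0.003 training_log.csv resumes below.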
-32,41.26948547363281 -33,39.32139587402344 -34,39.47389602661133 -35,39.981895446777344 -36,41.160247802734375 -37,42.251258850097656 -38,40.95119094848633 -39,38.07765579223633 -40,36.905296325683594 -41,35.187828063964844 -42,35.23639678955078 -43,37.2154655456543 -44,36.79240798950195 -45,34.37611770629883 -46,33.77314758300781 -47,33.56240463256836 -48,33.355770111083984 -49,32.858489990234375 -50,36.315799713134766 -51,34.758338928222656 -52,35.0203857421875 -53,35.225364685058594 -54,35.16332244873047 -55,34.92576599121094 -56,34.686973571777344 -57,34.509300231933594 -58,34.39813995361328 -59,34.343021392822266 -60,34.32703399658203 -61,34.332740783691406 -62,34.34229278564453 -63,34.34109878540039 -64,34.32158279418945 -65,34.287845611572266 -66,34.251495361328125 -67,34.22208786010742 -68,34.19154357910156 -69,34.13505554199219 -70,34.065643310546875 -71,34.006343841552734 -72,33.959083557128906 -73,33.91813659667969 -74,33.874229431152344 -75,33.80865478515625 -76,33.63778305053711 -77,31.760251998901367 -78,31.786039352416992 -79,31.786176681518555 -80,31.76287841796875 -81,31.71902847290039 -82,31.659523010253906 -83,31.590347290039062 -84,31.516937255859375 -85,31.44348907470703 -86,31.373315811157227 -87,31.308292388916016 -88,31.249252319335938 -89,31.1966609954834 -90,31.150043487548828 -91,31.108301162719727 -92,31.06918716430664 -93,31.021446228027344 -94,32.83933639526367 -95,31.007394790649414 -96,31.02997398376465 -97,31.052167892456055 -98,31.032508850097656 -99,31.001428604125977 -100,30.98272705078125 -101,30.973342895507812 -102,30.967823028564453 -103,30.96207618713379 -104,30.953495025634766 -105,30.940954208374023 -106,30.924341201782227 -107,30.904329299926758 -108,30.88216209411621 -109,30.859107971191406 -110,30.8365478515625 -111,30.815479278564453 -112,30.79663848876953 -113,30.779943466186523 -114,30.765037536621094 -115,30.750843048095703 -116,30.73650550842285 -117,30.721729278564453 -118,30.706817626953125 -119,30.69219398498535 -120,30.678071975708008 -121,30.664165496826172 -122,30.649667739868164 -123,30.633665084838867 -124,30.615341186523438 -125,30.593040466308594 -126,30.56435775756836 -127,30.52349090576172 -128,30.44930076599121 -129,30.4921875 -130,30.823917388916016 -131,31.503942489624023 -132,32.437129974365234 -133,33.56292724609375 -134,35.21110916137695 -135,32.644065856933594 -136,32.01215744018555 -137,31.906841278076172 -138,31.555864334106445 -139,31.15477752685547 -140,31.122241973876953 -141,31.224224090576172 -142,31.729516983032227 -143,31.429323196411133 -144,31.475847244262695 -145,31.560016632080078 -146,31.65003776550293 -147,31.727834701538086 -148,31.78221893310547 -149,31.806644439697266 -150,31.798969268798828 -151,31.760112762451172 -152,31.693601608276367 -153,31.604965209960938 -154,31.50230598449707 -155,31.394044876098633 -156,31.287628173828125 -157,31.18984603881836 -158,31.10569953918457 -159,31.037321090698242 -160,30.98438262939453 -161,30.944093704223633 -162,30.911590576171875 -163,30.880977630615234 -164,30.846569061279297 -165,30.806386947631836 -166,30.7630672454834 -167,30.721088409423828 -168,30.68382453918457 -169,30.652652740478516 -170,30.627349853515625 -171,30.606979370117188 -172,30.590471267700195 -173,30.577104568481445 -174,30.5659236907959 -175,30.55608558654785 -176,30.54660987854004 -177,30.536556243896484 -178,30.52549934387207 -179,30.513559341430664 -180,30.50126075744629 -181,30.489152908325195 -182,30.47825813293457 -183,30.469242095947266 -184,30.462278366088867 -185,30.45747184753418 
-186,30.4539794921875 -187,30.450651168823242 -188,30.446460723876953 -189,30.440563201904297 -190,30.433095932006836 -191,30.424863815307617 -192,30.41676902770996 -193,30.40935707092285 -194,30.4028263092041 -195,30.39715003967285 -196,30.39190673828125 -197,30.386859893798828 -198,30.381738662719727 -199,30.376243591308594 -200,30.37030601501465 diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.005/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.005/training_config.txt deleted file mode 100644 index 2503ade..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.005/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.005 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.005/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.005/training_log.csv deleted file mode 100644 index d2f9416..0000000 
--- a/training/base_loss_learning_rate_sweep/four/lr_0.005/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,892.8590087890625 -2,698.3191528320312 -3,570.3776245117188 -4,476.5622863769531 -5,380.6253967285156 -6,305.2460632324219 -7,252.75254821777344 -8,190.08253479003906 -9,148.2053680419922 -10,137.71133422851562 -11,87.31128692626953 -12,69.86664581298828 -13,68.2911605834961 -14,55.73902130126953 -15,42.16077423095703 -16,45.81683349609375 -17,30.689926147460938 -18,33.03388977050781 -19,37.50985336303711 -20,39.31280517578125 -21,40.91934585571289 -22,37.541080474853516 -23,25.80324363708496 -24,25.349077224731445 -25,26.116735458374023 -26,23.89807891845703 -27,24.020008087158203 -28,23.74786376953125 -29,21.63871192932129 -30,21.43347930908203 -31,20.521602630615234 -32,19.806589126586914 -33,18.716327667236328 -34,18.2205810546875 -35,17.85334587097168 -36,17.59905242919922 -37,17.431428909301758 -38,17.315353393554688 -39,17.12322998046875 -40,16.512434005737305 -41,16.426111221313477 -42,16.37677764892578 -43,16.352676391601562 -44,16.342693328857422 -45,16.33089256286621 -46,16.28411102294922 -47,16.118549346923828 -48,15.43973445892334 -49,15.269601821899414 -50,14.699694633483887 -51,14.50003433227539 -52,14.53187084197998 -53,12.874610900878906 -54,12.370750427246094 -55,12.284062385559082 -56,12.245644569396973 -57,12.21902084350586 -58,12.199322700500488 -59,12.180035591125488 -60,12.014286994934082 -61,11.787736892700195 -62,11.723413467407227 -63,11.649492263793945 -64,11.540241241455078 -65,11.39716911315918 -66,11.24026870727539 -67,11.086420059204102 -68,10.940152168273926 -69,10.798566818237305 -70,10.65703010559082 -71,10.500772476196289 -72,10.298833847045898 -73,10.177816390991211 -74,10.07143497467041 -75,9.983602523803711 -76,9.909494400024414 -77,9.843381881713867 -78,9.781015396118164 -79,9.719908714294434 -80,9.658785820007324 -81,9.59704303741455 -82,9.53458309173584 -83,9.471447944641113 -84,9.407761573791504 -85,9.343637466430664 -86,9.279317855834961 -87,9.214919090270996 -88,9.150506019592285 -89,9.086077690124512 -90,9.021574020385742 -91,8.95689868927002 -92,8.891914367675781 -93,8.8264741897583 -94,8.760379791259766 -95,8.69336223602295 -96,8.625228881835938 -97,8.555773735046387 -98,8.484535217285156 -99,8.411334991455078 -100,8.336674690246582 -101,8.263079643249512 -102,8.202085494995117 -103,8.188687324523926 -104,8.081355094909668 -105,8.020689964294434 -106,7.97079610824585 -107,7.921357154846191 -108,7.946727275848389 -109,7.912241458892822 -110,7.872714996337891 -111,7.8333516120910645 -112,7.794707298278809 -113,7.756875038146973 -114,7.719827651977539 -115,7.683511257171631 -116,7.6478962898254395 -117,7.612988471984863 -118,7.578941822052002 -119,7.546220302581787 -120,7.515961170196533 -121,7.491048812866211 -122,7.466612815856934 -123,7.433409214019775 -124,7.408999919891357 -125,7.386017799377441 -126,7.363176345825195 -127,7.340090751647949 -128,7.316715240478516 -129,7.293026924133301 -130,7.269060134887695 -131,7.244910717010498 -132,7.2207932472229 -133,7.197107791900635 -134,7.174717903137207 -135,7.155032634735107 -136,7.135009288787842 -137,7.110218524932861 -138,7.088065147399902 -139,7.066854953765869 -140,7.045318126678467 -141,7.022775650024414 -142,6.998859882354736 -143,6.97316312789917 -144,6.945396423339844 -145,6.916000843048096 -146,6.893879413604736 -147,6.8827223777771 -148,6.887653350830078 -149,6.8826165199279785 -150,6.876326084136963 -151,6.870234966278076 -152,6.864363670349121 
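Aside: the loss_fn wrapper recorded in every config reduces a whole batch of simulated trajectories to one scalar per epoch, which is evidently what each Loss column in these logs records. A minimal self-contained sketch of that reduction: the original closes over base_loss_fn, so it is passed explicitly here, and the trajectory is dummy data with the batch/time sizes taken from these configs (22 cases, 1000 time points).

    import torch

    def loss_fn(state_traj, base_loss_fn):
        # State layout per the configs: [theta, omega, alpha, desired_theta].
        theta = state_traj[:, :, 0]          # [batch_size, t_points]
        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
        return torch.mean(base_loss_fn(theta, desired_theta))

    state_traj = torch.randn(22, 1000, 4)    # dummy stand-in for a rollout
    print(loss_fn(state_traj, lambda t, d: (t - d) ** 2))

The deleted four/lr_0.005 training_log.csv resumes below.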
-153,6.858572959899902 -154,6.852811813354492 -155,6.8471221923828125 -156,6.841633319854736 -157,6.83650016784668 -158,6.831880569458008 -159,6.827744483947754 -160,6.823757171630859 -161,6.819786071777344 -162,6.815982341766357 -163,6.812315464019775 -164,6.808562278747559 -165,6.804400444030762 -166,6.797222137451172 -167,6.701035499572754 -168,6.696537017822266 -169,6.694585800170898 -170,6.69248104095459 -171,6.689929008483887 -172,6.686838150024414 -173,6.6828742027282715 -174,6.676235675811768 -175,6.748085021972656 -176,6.679407596588135 -177,6.681601524353027 -178,6.68332052230835 -179,6.684406280517578 -180,6.684821128845215 -181,6.684565544128418 -182,6.683947563171387 -183,6.683539867401123 -184,6.683383464813232 -185,6.681382179260254 -186,6.676774024963379 -187,6.60246467590332 -188,6.707942962646484 -189,6.65845251083374 -190,6.695793151855469 -191,6.731545448303223 -192,6.762855052947998 -193,6.789050579071045 -194,6.809808254241943 -195,6.824751377105713 -196,6.833522796630859 -197,6.836491584777832 -198,6.837913513183594 -199,6.87576150894165 -200,6.828190326690674 diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.010/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.010/training_config.txt deleted file mode 100644 index 3a14c5b..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.010/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.010/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.010/training_log.csv deleted file mode 100644 index f8deaa3..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,709.073974609375 -2,495.8191223144531 -3,341.93377685546875 -4,222.57504272460938 -5,139.26637268066406 -6,74.83617401123047 -7,53.98528289794922 -8,38.16682052612305 -9,27.8438777923584 -10,23.520118713378906 -11,22.530841827392578 -12,19.72782325744629 -13,17.38500213623047 -14,14.899020195007324 -15,14.787221908569336 -16,10.725893020629883 -17,9.341848373413086 -18,8.074580192565918 -19,7.725108623504639 -20,7.458004474639893 -21,6.513791084289551 -22,6.004675388336182 -23,5.53383731842041 -24,5.154810905456543 -25,4.863739967346191 -26,4.55768346786499 -27,4.2035627365112305 -28,4.016801357269287 -29,3.918255567550659 -30,3.719083547592163 -31,3.2709083557128906 -32,2.9124770164489746 -33,2.701054096221924 -34,2.493518352508545 -35,2.4270212650299072 -36,2.3753654956817627 -37,2.3193585872650146 -38,2.225131034851074 -39,2.1728315353393555 -40,2.090550184249878 -41,2.053687572479248 -42,2.02075457572937 -43,1.9915975332260132 -44,1.9656181335449219 -45,1.9419896602630615 -46,1.9166289567947388 -47,1.8342567682266235 -48,1.8140790462493896 -49,1.7414928674697876 -50,1.7183444499969482 -51,1.6922093629837036 -52,1.656137228012085 -53,1.564334511756897 -54,1.5517386198043823 -55,1.5408828258514404 -56,1.5310156345367432 -57,1.5218757390975952 -58,1.5132888555526733 -59,1.5051143169403076 -60,1.497243046760559 -61,1.4895693063735962 -62,1.4819893836975098 -63,1.4744113683700562 -64,1.4667556285858154 -65,1.4589650630950928 -66,1.4509804248809814 -67,1.442771077156067 -68,1.4343284368515015 -69,1.425663948059082 -70,1.4167985916137695 -71,1.4077568054199219 -72,1.39854097366333 -73,1.3891140222549438 
-74,1.3793730735778809 -75,1.3691059350967407 -76,1.3579398393630981 -77,1.3455252647399902 -78,1.331896185874939 -79,1.3189271688461304 -80,1.3150438070297241 -81,1.3551465272903442 -82,1.3525034189224243 -83,1.3481627702713013 -84,1.3436731100082397 -85,1.3391659259796143 -86,1.3346431255340576 -87,1.330066204071045 -88,1.3254109621047974 -89,1.3206435441970825 -90,1.3157306909561157 -91,1.3106341361999512 -92,1.305282711982727 -93,1.2995468378067017 -94,1.2931654453277588 -95,1.285457968711853 -96,1.27390718460083 -97,1.241784930229187 -98,1.2112523317337036 -99,1.197953224182129 -100,1.1868702173233032 -101,1.1793657541275024 -102,1.174189805984497 -103,1.1699280738830566 -104,1.1660338640213013 -105,1.1622047424316406 -106,1.1581628322601318 -107,1.1534538269042969 -108,1.1470062732696533 -109,1.13511323928833 -110,1.1066371202468872 -111,1.09613835811615 -112,1.0915520191192627 -113,1.0877546072006226 -114,1.0836739540100098 -115,1.0782710313796997 -116,1.069420576095581 -117,1.051637887954712 -118,1.0333954095840454 -119,1.0285404920578003 -120,1.025154948234558 -121,1.0219345092773438 -122,1.0183597803115845 -123,1.0139137506484985 -124,1.0078046321868896 -125,0.998563289642334 -126,0.9844892621040344 -127,0.9735323190689087 -128,0.9691516160964966 -129,0.9641470313072205 -130,0.9285060167312622 -131,0.9258484840393066 -132,0.9253765344619751 -133,0.9247449636459351 -134,0.9239556193351746 -135,0.9230836033821106 -136,0.9221816658973694 -137,0.9212821125984192 -138,0.9204072952270508 -139,0.9195725917816162 -140,0.9187923669815063 -141,0.9180744886398315 -142,0.9174267649650574 -143,0.9168514609336853 -144,0.9163491725921631 -145,0.9159142374992371 -146,0.9155377149581909 -147,0.9152078032493591 -148,0.914908230304718 -149,0.9146233201026917 -150,0.9143359661102295 -151,0.9140377044677734 -152,0.9137194752693176 -153,0.9133816361427307 -154,0.9130290150642395 -155,0.912667453289032 -156,0.9123040437698364 -157,0.9119446873664856 -158,0.9115933775901794 -159,0.9112532138824463 -160,0.910923182964325 -161,0.9106049537658691 -162,0.9102975130081177 -163,0.9099986553192139 -164,0.9097082614898682 -165,0.9094234704971313 -166,0.9091455936431885 -167,0.9088707566261292 -168,0.9086005091667175 -169,0.9083322882652283 -170,0.9080663919448853 -171,0.9078022837638855 -172,0.9075387120246887 -173,0.9072778820991516 -174,0.907016396522522 -175,0.9067560434341431 -176,0.9064957499504089 -177,0.9062370657920837 -178,0.905977725982666 -179,0.9057194590568542 -180,0.9054611921310425 -181,0.9052043557167053 -182,0.9049480557441711 -183,0.9046922326087952 -184,0.9044370651245117 -185,0.9041823744773865 -186,0.9039294123649597 -187,0.9036769270896912 -188,0.9034252762794495 -189,0.9031746983528137 -190,0.902924656867981 -191,0.9026755094528198 -192,0.9024269580841064 -193,0.9021785259246826 -194,0.9019310474395752 -195,0.9016833901405334 -196,0.901436448097229 -197,0.9011895656585693 -198,0.9009423851966858 -199,0.9006948471069336 -200,0.9004477262496948 diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.020/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.020/training_config.txt deleted file mode 100644 index 9b6df4e..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.020/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - 
-Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.020/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.020/training_log.csv deleted file mode 100644 index 06b43df..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,493.4759521484375 -2,231.8644256591797 -3,74.98271942138672 -4,34.731101989746094 -5,40.91058349609375 -6,23.155973434448242 -7,17.50538444519043 -8,13.624457359313965 -9,9.656967163085938 -10,8.718084335327148 -11,5.820390701293945 -12,4.073130130767822 -13,2.8194162845611572 -14,2.123771905899048 -15,1.8268510103225708 -16,1.6188355684280396 -17,1.4526609182357788 -18,1.2036542892456055 -19,1.1610134840011597 -20,1.1221102476119995 -21,1.111304521560669 -22,1.059097170829773 -23,1.0939955711364746 -24,1.0240399837493896 -25,0.9261047840118408 -26,0.7364070415496826 -27,0.7431311011314392 -28,0.7233320474624634 -29,0.7144321203231812 -30,0.7045190930366516 -31,0.6955621242523193 -32,0.685667097568512 -33,0.6775506138801575 -34,0.6531374454498291 
-35,0.62312251329422 -36,0.6128858327865601 -37,0.6042770743370056 -38,0.5960917472839355 -39,0.5880089402198792 -40,0.5804252028465271 -41,0.5773665904998779 -42,0.5167471766471863 -43,0.498338907957077 -44,0.4847373068332672 -45,0.47214746475219727 -46,0.4642353653907776 -47,0.448978066444397 -48,0.444645494222641 -49,0.440988153219223 -50,0.4376847445964813 -51,0.43460506200790405 -52,0.4317190945148468 -53,0.42901602387428284 -54,0.42648711800575256 -55,0.42412224411964417 -56,0.4219107925891876 -57,0.4198407232761383 -58,0.4179006516933441 -59,0.4160793721675873 -60,0.4143664836883545 -61,0.41275227069854736 -62,0.4112277626991272 -63,0.40978431701660156 -64,0.40841400623321533 -65,0.40710973739624023 -66,0.4058656692504883 -67,0.4046768844127655 -68,0.40353813767433167 -69,0.4024451673030853 -70,0.4013938307762146 -71,0.40038156509399414 -72,0.3994040787220001 -73,0.3984582722187042 -74,0.3975414037704468 -75,0.3966507017612457 -76,0.39578351378440857 -77,0.3949383497238159 -78,0.39411264657974243 -79,0.3933041989803314 -80,0.39251118898391724 -81,0.39173153042793274 -82,0.3909638226032257 -83,0.3902067244052887 -84,0.38945937156677246 -85,0.3887210190296173 -86,0.3879912197589874 -87,0.38727036118507385 -88,0.38655948638916016 -89,0.3858600854873657 -90,0.38517510890960693 -91,0.3845070004463196 -92,0.3838585615158081 -93,0.3832317590713501 -94,0.38262757658958435 -95,0.3820451498031616 -96,0.38148272037506104 -97,0.38093799352645874 -98,0.3804081976413727 -99,0.3798912763595581 -100,0.37938645482063293 -101,0.37833061814308167 -102,0.37949138879776 -103,0.3789972960948944 -104,0.37853196263313293 -105,0.3780079782009125 -106,0.3799726665019989 -107,0.3797537386417389 -108,0.37934213876724243 -109,0.3789252042770386 -110,0.3785046935081482 -111,0.3780812621116638 -112,0.3776549994945526 -113,0.3772265315055847 -114,0.3767963945865631 -115,0.376365065574646 -116,0.37593284249305725 -117,0.37550023198127747 -118,0.37506720423698425 -119,0.3746345341205597 -120,0.37420201301574707 -121,0.37377023696899414 -122,0.3733391761779785 -123,0.3729095757007599 -124,0.3724810779094696 -125,0.37205418944358826 -126,0.3716287314891815 -127,0.3712047338485718 -128,0.37078243494033813 -129,0.37036195397377014 -130,0.3699432611465454 -131,0.36952653527259827 -132,0.36911168694496155 -133,0.3686988055706024 -134,0.36828792095184326 -135,0.367879182100296 -136,0.36747244000434875 -137,0.3670676648616791 -138,0.3666650950908661 -139,0.3662647008895874 -140,0.3658662438392639 -141,0.36546987295150757 -142,0.3650754392147064 -143,0.36468300223350525 -144,0.3642923831939697 -145,0.36390364170074463 -146,0.36351659893989563 -147,0.36313116550445557 -148,0.3627473711967468 -149,0.36236515641212463 -150,0.3619844317436218 -151,0.3616049289703369 -152,0.36122655868530273 -153,0.360848993062973 -154,0.3604722023010254 -155,0.3600959777832031 -156,0.35971975326538086 -157,0.35934358835220337 -158,0.3589669167995453 -159,0.3585893213748932 -160,0.3582102358341217 -161,0.3578289747238159 -162,0.3574448227882385 -163,0.35705697536468506 -164,0.3566639721393585 -165,0.3562643229961395 -166,0.35585635900497437 -167,0.35543739795684814 -168,0.35500410199165344 -169,0.35455214977264404 -170,0.35407522320747375 -171,0.35356444120407104 -172,0.35300639271736145 -173,0.35237836837768555 -174,0.3516445457935333 -175,0.3507348895072937 -176,0.3495071828365326 -177,0.34760916233062744 -178,0.3438992500305176 -179,0.33222779631614685 -180,0.3015848398208618 -181,0.3012305200099945 -182,0.30088508129119873 
-183,0.30004850029945374 -184,0.2983735203742981 -185,0.29803985357284546 -186,0.29748770594596863 -187,0.2976519465446472 -188,0.2999178171157837 -189,0.3013850450515747 -190,0.3018149435520172 -191,0.3014383912086487 -192,0.30032309889793396 -193,0.2984141409397125 -194,0.2959742844104767 -195,0.29493051767349243 -196,0.29480159282684326 -197,0.2946833074092865 -198,0.29449525475502014 -199,0.29426562786102295 -200,0.29401499032974243 diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.040/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.040/training_config.txt deleted file mode 100644 index f511c60..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.040/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.040/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.040/training_log.csv deleted file mode 100644 index b36155f..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,253.33795166015625 -2,45.80424499511719 -3,25.825145721435547 -4,16.4755916595459 -5,9.410402297973633 -6,3.9728331565856934 -7,1.800004482269287 -8,1.3182741403579712 -9,1.1249911785125732 -10,1.0045852661132812 -11,0.9323318600654602 -12,0.8709807991981506 -13,0.6613484025001526 -14,0.590243935585022 -15,0.5436501502990723 -16,0.4970184564590454 -17,0.47161486744880676 -18,0.4561002254486084 -19,0.4436229467391968 -20,0.43597620725631714 -21,0.42784276604652405 -22,0.41964825987815857 -23,0.41253232955932617 -24,0.40620720386505127 -25,0.40055468678474426 -26,0.3952847421169281 -27,0.390365868806839 -28,0.385780930519104 -29,0.3814968466758728 -30,0.3774702250957489 -31,0.37366336584091187 -32,0.37004172801971436 -33,0.36657091975212097 -34,0.3632228672504425 -35,0.3599718511104584 -36,0.35679373145103455 -37,0.3536655306816101 -38,0.35055750608444214 -39,0.34745293855667114 -40,0.3443319499492645 -41,0.341170072555542 -42,0.33794111013412476 -43,0.3346177339553833 -44,0.3311908543109894 -45,0.3276398181915283 -46,0.32393792271614075 -47,0.32005396485328674 -48,0.31597650051116943 -49,0.311715692281723 -50,0.30731064081192017 -51,0.30287235975265503 -52,0.2982768416404724 -53,0.2933236062526703 -54,0.28700676560401917 -55,0.2744968831539154 -56,0.2504507899284363 -57,0.24037237465381622 -58,0.23697751760482788 -59,0.23493534326553345 -60,0.23335000872612 -61,0.23200269043445587 -62,0.23080512881278992 -63,0.229712575674057 -64,0.228705033659935 -65,0.22776801884174347 -66,0.2268800437450409 -67,0.22603340446949005 -68,0.22522996366024017 -69,0.22446167469024658 -70,0.22372108697891235 
-71,0.22301088273525238 -72,0.22233040630817413 -73,0.22167536616325378 -74,0.22104525566101074 -75,0.2204381823539734 -76,0.21985098719596863 -77,0.21927841007709503 -78,0.21871766448020935 -79,0.21816550195217133 -80,0.21761885285377502 -81,0.217072993516922 -82,0.21652279794216156 -83,0.21596549451351166 -84,0.21540065109729767 -85,0.21483899652957916 -86,0.2143053114414215 -87,0.21380974352359772 -88,0.21335522830486298 -89,0.2129397690296173 -90,0.2125503420829773 -91,0.2121739238500595 -92,0.21180400252342224 -93,0.21143756806850433 -94,0.21107426285743713 -95,0.21071504056453705 -96,0.21035990118980408 -97,0.21000896394252777 -98,0.20966216921806335 -99,0.20931969583034515 -100,0.20898093283176422 -101,0.2086455076932907 -102,0.2083137035369873 -103,0.2079847753047943 -104,0.20765823125839233 -105,0.20733380317687988 -106,0.20701144635677338 -107,0.2066904902458191 -108,0.2063707858324051 -109,0.20605218410491943 -110,0.20573419332504272 -111,0.2054162621498108 -112,0.20509827136993408 -113,0.2047804445028305 -114,0.2044622302055359 -115,0.20414365828037262 -116,0.20382466912269592 -117,0.20350508391857147 -118,0.20318488776683807 -119,0.20286431908607483 -120,0.2025442272424698 -121,0.2022247016429901 -122,0.2019062489271164 -123,0.20158982276916504 -124,0.20127613842487335 -125,0.20096565783023834 -126,0.20065905153751373 -127,0.20035697519779205 -128,0.2000596970319748 -129,0.19976764917373657 -130,0.19948124885559082 -131,0.19920064508914948 -132,0.19892551004886627 -133,0.19865578413009644 -134,0.19839173555374146 -135,0.19813266396522522 -136,0.1978784203529358 -137,0.19762872159481049 -138,0.19738328456878662 -139,0.1971416175365448 -140,0.19690367579460144 -141,0.19666928052902222 -142,0.19643785059452057 -143,0.19620905816555023 -144,0.1959826946258545 -145,0.1957588940858841 -146,0.1955370157957077 -147,0.19531691074371338 -148,0.1950983703136444 -149,0.1948813945055008 -150,0.19466543197631836 -151,0.1944502592086792 -152,0.19423557817935944 -153,0.19402137398719788 -154,0.19380685687065125 -155,0.19359144568443298 -156,0.19337455928325653 -157,0.19315576553344727 -158,0.19293376803398132 -159,0.19270721077919006 -160,0.19247455894947052 -161,0.19223438203334808 -162,0.19198423624038696 -163,0.19172175228595734 -164,0.19144487380981445 -165,0.19115254282951355 -166,0.19084583222866058 -167,0.19053016602993011 -168,0.19021639227867126 -169,0.18991774320602417 -170,0.18964335322380066 -171,0.18939486145973206 -172,0.1891680359840393 -173,0.18895632028579712 -174,0.18875430524349213 -175,0.1885584592819214 -176,0.1883668154478073 -177,0.18817731738090515 -178,0.18798872828483582 -179,0.1877996027469635 -180,0.18760891258716583 -181,0.1874145269393921 -182,0.18721400201320648 -183,0.18700382113456726 -184,0.18677860498428345 -185,0.18652983009815216 -186,0.18624606728553772 -187,0.18591801822185516 -188,0.18555717170238495 -189,0.18522000312805176 -190,0.1849629431962967 -191,0.1847710907459259 -192,0.18460716307163239 -193,0.1844528317451477 -194,0.1843007504940033 -195,0.18414883315563202 -196,0.18399596214294434 -197,0.1838420033454895 -198,0.18368744850158691 -199,0.18353231251239777 -200,0.1833767145872116 diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.050/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.050/training_config.txt deleted file mode 100644 index 2569c9e..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.050/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.050/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.050/training_log.csv deleted file mode 100644 index 1076050..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,168.8037567138672 -2,37.446983337402344 -3,15.201050758361816 -4,6.206844329833984 -5,1.900891900062561 -6,1.2928937673568726 -7,0.53959721326828 -8,0.39234036207199097 -9,0.3335712254047394 -10,0.303561270236969 -11,0.29242849349975586 -12,0.2858237624168396 -13,0.2743768095970154 -14,0.2610970139503479 -15,0.23385579884052277 -16,0.2145366668701172 -17,0.2070813626050949 -18,0.20200279355049133 -19,0.19795219600200653 -20,0.19437052309513092 -21,0.18969160318374634 -22,0.18701449036598206 -23,0.18504182994365692 -24,0.183305025100708 
-25,0.18177363276481628 -26,0.18041014671325684 -27,0.17918525636196136 -28,0.1780770868062973 -29,0.17706739902496338 -30,0.17614296078681946 -31,0.17529171705245972 -32,0.17450518906116486 -33,0.17377537488937378 -34,0.17309515178203583 -35,0.17245927453041077 -36,0.17186272144317627 -37,0.17130157351493835 -38,0.17077195644378662 -39,0.17027027904987335 -40,0.16979408264160156 -41,0.16934029757976532 -42,0.1689072549343109 -43,0.16849283874034882 -44,0.16809505224227905 -45,0.1677125096321106 -46,0.16734285652637482 -47,0.1669856160879135 -48,0.16663943231105804 -49,0.16630303859710693 -50,0.1659761220216751 -51,0.16565768420696259 -52,0.16534684598445892 -53,0.1650434285402298 -54,0.16474653780460358 -55,0.16445572674274445 -56,0.16417063772678375 -57,0.1638907790184021 -58,0.16361567378044128 -59,0.16334481537342072 -60,0.1630783975124359 -61,0.16281579434871674 -62,0.1625569611787796 -63,0.1623014211654663 -64,0.16204912960529327 -65,0.16179987788200378 -66,0.16155338287353516 -67,0.16130955517292023 -68,0.1610681116580963 -69,0.16082876920700073 -70,0.16059139370918274 -71,0.16035576164722443 -72,0.16012166440486908 -73,0.15988866984844208 -74,0.15965639054775238 -75,0.15942464768886566 -76,0.1591930389404297 -77,0.15896111726760864 -78,0.15872836112976074 -79,0.15849418938159943 -80,0.15825818479061127 -81,0.15801993012428284 -82,0.1577788144350052 -83,0.15753473341464996 -84,0.1572876274585724 -85,0.15703721344470978 -86,0.15678274631500244 -87,0.15652316808700562 -88,0.15625640749931335 -89,0.15597723424434662 -90,0.15567433834075928 -91,0.1553262621164322 -92,0.154906764626503 -93,0.1544761061668396 -94,0.15419036149978638 -95,0.15401294827461243 -96,0.15387068688869476 -97,0.1537317931652069 -98,0.15358847379684448 -99,0.15344171226024628 -100,0.15329362452030182 -101,0.153145432472229 -102,0.15299825370311737 -103,0.15285244584083557 -104,0.1527082920074463 -105,0.15256595611572266 -106,0.1524258702993393 -107,0.15228796005249023 -108,0.15215222537517548 -109,0.15201859176158905 -110,0.1518874317407608 -111,0.15175843238830566 -112,0.15163178741931915 -113,0.15150722861289978 -114,0.15138481557369232 -115,0.1512645035982132 -116,0.15114623308181763 -117,0.1510297805070877 -118,0.1509149819612503 -119,0.15080200135707855 -120,0.15069064497947693 -121,0.15058085322380066 -122,0.1504722386598587 -123,0.15036490559577942 -124,0.15025866031646729 -125,0.1501535177230835 -126,0.15004925429821014 -127,0.14994587004184723 -128,0.14984329044818878 -129,0.1497412621974945 -130,0.14963987469673157 -131,0.14953893423080444 -132,0.14943856000900269 -133,0.149338498711586 -134,0.14923866093158722 -135,0.14913934469223022 -136,0.14904026687145233 -137,0.14894142746925354 -138,0.14884281158447266 -139,0.14874443411827087 -140,0.1486465334892273 -141,0.14854909479618073 -142,0.14845190942287445 -143,0.1483551263809204 -144,0.14825879037380219 -145,0.14816325902938843 -146,0.14806832373142242 -147,0.14797411859035492 -148,0.1478807032108307 -149,0.14778819680213928 -150,0.14769680798053741 -151,0.1476065218448639 -152,0.14751732349395752 -153,0.1474292278289795 -154,0.14734241366386414 -155,0.14725685119628906 -156,0.14717264473438263 -157,0.14708977937698364 -158,0.14700815081596375 -159,0.14692777395248413 -160,0.14684851467609406 -161,0.14677031338214874 -162,0.14669303596019745 -163,0.14661668241024017 -164,0.14654117822647095 -165,0.14646634459495544 -166,0.14639215171337128 -167,0.14631864428520203 -168,0.14624586701393127 -169,0.14617347717285156 -170,0.14610148966312408 
-171,0.14602980017662048 -172,0.14595860242843628 -173,0.14588791131973267 -174,0.14581742882728577 -175,0.14574715495109558 -176,0.14567704498767853 -177,0.14560718834400177 -178,0.14553765952587128 -179,0.14546822011470795 -180,0.14539891481399536 -181,0.14532974362373352 -182,0.14526067674160004 -183,0.1451919674873352 -184,0.14512339234352112 -185,0.1450549215078354 -186,0.14498651027679443 -187,0.14491812884807587 -188,0.144849956035614 -189,0.14478208124637604 -190,0.14471425116062164 -191,0.14464643597602844 -192,0.14457865059375763 -193,0.1445109248161316 -194,0.14444345235824585 -195,0.1443759948015213 -196,0.14430856704711914 -197,0.14424116909503937 -198,0.14417381584644318 -199,0.1441066712141037 -200,0.14403969049453735 diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.080/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.080/training_config.txt deleted file mode 100644 index 69d845c..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.080/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.080/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.080/training_log.csv deleted file mode 100644 index c926f6e..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,55.68273162841797 -2,9.872925758361816 -3,1.963661551475525 -4,0.9787135124206543 -5,0.7916083335876465 -6,0.4721930921077728 -7,0.42302462458610535 -8,0.3919275104999542 -9,0.36464861035346985 -10,0.3468245267868042 -11,0.33433470129966736 -12,0.32403257489204407 -13,0.3147295415401459 -14,0.3060360848903656 -15,0.29778382182121277 -16,0.28982260823249817 -17,0.2820647656917572 -18,0.27448129653930664 -19,0.26704537868499756 -20,0.2596017122268677 -21,0.2515541613101959 -22,0.24218204617500305 -23,0.2301894724369049 -24,0.2158924639225006 -25,0.20453421771526337 -26,0.19752803444862366 -27,0.1927802562713623 -28,0.1891675740480423 -29,0.1861967146396637 -30,0.18366797268390656 -31,0.18144959211349487 -32,0.1794964224100113 -33,0.17777636647224426 -34,0.17623917758464813 -35,0.17485374212265015 -36,0.1735938936471939 -37,0.17243877053260803 -38,0.17137150466442108 -39,0.1703803688287735 -40,0.16945573687553406 -41,0.1685897260904312 -42,0.16777782142162323 -43,0.1670132428407669 -44,0.16629073023796082 -45,0.16560514271259308 -46,0.1649521440267563 -47,0.16432896256446838 -48,0.1637326180934906 -49,0.1631588190793991 -50,0.16260544955730438 -51,0.16207125782966614 -52,0.16155461966991425 -53,0.16105377674102783 -54,0.1605674922466278 -55,0.1600952297449112 -56,0.15963557362556458 -57,0.15918754041194916 -58,0.1587504744529724 -59,0.1583232581615448 -60,0.15790531039237976 -61,0.15749642252922058 -62,0.15709561109542847 -63,0.1567022055387497 -64,0.15631519258022308 -65,0.15593452751636505 -66,0.15556052327156067 -67,0.15519264340400696 -68,0.15483036637306213 -69,0.15447358787059784 
-70,0.15412193536758423 -71,0.15377554297447205 -72,0.1534338891506195 -73,0.15309658646583557 -74,0.15276409685611725 -75,0.1524360477924347 -76,0.1521124392747879 -77,0.15179307758808136 -78,0.15147767961025238 -79,0.1511661559343338 -80,0.15085995197296143 -81,0.1505594551563263 -82,0.1502632051706314 -83,0.1499709188938141 -84,0.14968234300613403 -85,0.1493975669145584 -86,0.14911627769470215 -87,0.14883846044540405 -88,0.14856404066085815 -89,0.14829295873641968 -90,0.1480255275964737 -91,0.14776113629341125 -92,0.1474996954202652 -93,0.14724165201187134 -94,0.14698658883571625 -95,0.1467345654964447 -96,0.146484836935997 -97,0.1462372988462448 -98,0.14599229395389557 -99,0.14574968814849854 -100,0.14550957083702087 -101,0.14527229964733124 -102,0.1450374275445938 -103,0.1448051929473877 -104,0.14457577466964722 -105,0.14434877038002014 -106,0.1441236287355423 -107,0.14390061795711517 -108,0.14367987215518951 -109,0.14346177875995636 -110,0.14324592053890228 -111,0.14303214848041534 -112,0.14282050728797913 -113,0.1426113247871399 -114,0.14240434765815735 -115,0.1421993225812912 -116,0.1419966220855713 -117,0.1417957991361618 -118,0.14159731566905975 -119,0.14140087366104126 -120,0.14120642840862274 -121,0.14101392030715942 -122,0.1408233791589737 -123,0.14063473045825958 -124,0.14044782519340515 -125,0.1402626931667328 -126,0.14007918536663055 -127,0.1398979276418686 -128,0.1397186666727066 -129,0.13954110443592072 -130,0.13936544954776764 -131,0.1391913741827011 -132,0.13901928067207336 -133,0.13884882628917694 -134,0.1386798620223999 -135,0.13851258158683777 -136,0.1383471041917801 -137,0.13818322122097015 -138,0.13802076876163483 -139,0.13785958290100098 -140,0.13770020008087158 -141,0.13754227757453918 -142,0.13738587498664856 -143,0.13723087310791016 -144,0.137077197432518 -145,0.13692516088485718 -146,0.1367741972208023 -147,0.13662445545196533 -148,0.13647589087486267 -149,0.13632865250110626 -150,0.13618281483650208 -151,0.13603803515434265 -152,0.1358942985534668 -153,0.13575167953968048 -154,0.13561028242111206 -155,0.1354701817035675 -156,0.13533125817775726 -157,0.13519375026226044 -158,0.1350572407245636 -159,0.13492171466350555 -160,0.13478708267211914 -161,0.13465379178524017 -162,0.13452289998531342 -163,0.13439339399337769 -164,0.13426481187343597 -165,0.1341373473405838 -166,0.13401108980178833 -167,0.13388602435588837 -168,0.13376210629940033 -169,0.1336398869752884 -170,0.13351888954639435 -171,0.1333991289138794 -172,0.13328048586845398 -173,0.13316282629966736 -174,0.13304634392261505 -175,0.13293097913265228 -176,0.13281670212745667 -177,0.13270321488380432 -178,0.13259081542491913 -179,0.13247935473918915 -180,0.13236908614635468 -181,0.13225960731506348 -182,0.13215100765228271 -183,0.13204315304756165 -184,0.13193675875663757 -185,0.1318313479423523 -186,0.13172699511051178 -187,0.13162346184253693 -188,0.13152112066745758 -189,0.13141986727714539 -190,0.1313198059797287 -191,0.1312207281589508 -192,0.1311229020357132 -193,0.13102643191814423 -194,0.13093139231204987 -195,0.1308375895023346 -196,0.1307452768087387 -197,0.13065454363822937 -198,0.13056552410125732 -199,0.13047799468040466 -200,0.13039186596870422 diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.100/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.100/training_config.txt deleted file mode 100644 index 202f039..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.100/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.100/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.100/training_log.csv deleted file mode 100644 index 4c4d0d8..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,46.96703338623047 -2,215.8382568359375 -3,4.094510078430176 -4,1.8183082342147827 -5,1.04926598072052 -6,0.6929357051849365 -7,0.6246272921562195 -8,0.5809897184371948 -9,0.5476904511451721 -10,0.5209845304489136 -11,0.49953627586364746 -12,0.48101580142974854 -13,0.4639415442943573 -14,0.4483540952205658 -15,0.43413591384887695 -16,0.421217679977417 -17,0.40988436341285706 -18,0.4000408947467804 -19,0.3915589153766632 -20,0.38419127464294434 -21,0.3777240216732025 -22,0.37203750014305115 -23,0.3669728636741638 -24,0.3623976707458496 
-25,0.35819607973098755 -26,0.3543192148208618 -27,0.3507266640663147 -28,0.3474043905735016 -29,0.34432777762413025 -30,0.34145602583885193 -31,0.3387616276741028 -32,0.3362254202365875 -33,0.3338276147842407 -34,0.3315518796443939 -35,0.3293822407722473 -36,0.3273060917854309 -37,0.3253139555454254 -38,0.32339656352996826 -39,0.3215467631816864 -40,0.3197569251060486 -41,0.31802159547805786 -42,0.316334992647171 -43,0.3146916925907135 -44,0.31308823823928833 -45,0.31151989102363586 -46,0.30998361110687256 -47,0.3084765374660492 -48,0.3069947063922882 -49,0.30553609132766724 -50,0.30409857630729675 -51,0.3026788532733917 -52,0.30127477645874023 -53,0.29988518357276917 -54,0.29850825667381287 -55,0.2971433401107788 -56,0.29578885436058044 -57,0.29444408416748047 -58,0.2931082546710968 -59,0.2917797267436981 -60,0.2904578149318695 -61,0.28914275765419006 -62,0.28783300518989563 -63,0.286528080701828 -64,0.2852277457714081 -65,0.2839314043521881 -66,0.282638818025589 -67,0.28134945034980774 -68,0.2800632119178772 -69,0.2787805497646332 -70,0.27750059962272644 -71,0.2762226164340973 -72,0.274946928024292 -73,0.2736726999282837 -74,0.27239856123924255 -75,0.27112266421318054 -76,0.26984456181526184 -77,0.2685655653476715 -78,0.26728516817092896 -79,0.2660021185874939 -80,0.2647096812725067 -81,0.26340290904045105 -82,0.2620942294597626 -83,0.26078423857688904 -84,0.259472519159317 -85,0.25816041231155396 -86,0.2568482458591461 -87,0.2555355727672577 -88,0.25422364473342896 -89,0.25291234254837036 -90,0.2516021728515625 -91,0.2502938210964203 -92,0.24898703396320343 -93,0.24768275022506714 -94,0.2463807314634323 -95,0.24508239328861237 -96,0.24378810822963715 -97,0.24249695241451263 -98,0.2412099391222 -99,0.23992682993412018 -100,0.23864810168743134 -101,0.23737378418445587 -102,0.23610378801822662 -103,0.23483656346797943 -104,0.23357287049293518 -105,0.23231376707553864 -106,0.23105895519256592 -107,0.2298084795475006 -108,0.2285614311695099 -109,0.22731627523899078 -110,0.22607190907001495 -111,0.2248329222202301 -112,0.2236006110906601 -113,0.22237347066402435 -114,0.22115246951580048 -115,0.21993736922740936 -116,0.21872633695602417 -117,0.2175176441669464 -118,0.21631740033626556 -119,0.2151271104812622 -120,0.2139468491077423 -121,0.2127772569656372 -122,0.21161945164203644 -123,0.2104736566543579 -124,0.2093401700258255 -125,0.20821693539619446 -126,0.2071075737476349 -127,0.20601247251033783 -128,0.20493170619010925 -129,0.20386061072349548 -130,0.20280055701732635 -131,0.20175863802433014 -132,0.20073530077934265 -133,0.19973139464855194 -134,0.19874787330627441 -135,0.19778387248516083 -136,0.1968403309583664 -137,0.1959167867898941 -138,0.19501352310180664 -139,0.19413042068481445 -140,0.19326692819595337 -141,0.1924235224723816 -142,0.19159796833992004 -143,0.19079041481018066 -144,0.19000135362148285 -145,0.18922974169254303 -146,0.1884726583957672 -147,0.18772822618484497 -148,0.1869969218969345 -149,0.18627792596817017 -150,0.18557040393352509 -151,0.18487697839736938 -152,0.18419566750526428 -153,0.18352806568145752 -154,0.1828753799200058 -155,0.18223674595355988 -156,0.18161168694496155 -157,0.18100093305110931 -158,0.18040356040000916 -159,0.17981959879398346 -160,0.1792488396167755 -161,0.1786903738975525 -162,0.17814412713050842 -163,0.17760956287384033 -164,0.17708641290664673 -165,0.1765754669904709 -166,0.17607606947422028 -167,0.17558734118938446 -168,0.1751091331243515 -169,0.17464062571525574 -170,0.17418168485164642 -171,0.17373202741146088 -172,0.17329108715057373 
-173,0.17285823822021484 -174,0.17243355512619019 -175,0.17201721668243408 -176,0.1716080754995346 -177,0.17120610177516937 -178,0.1708107888698578 -179,0.17042167484760284 -180,0.17003877460956573 -181,0.1696620136499405 -182,0.16929090023040771 -183,0.16892534494400024 -184,0.16856539249420166 -185,0.16821065545082092 -186,0.16786113381385803 -187,0.1675170511007309 -188,0.16717761754989624 -189,0.1668427586555481 -190,0.16651247441768646 -191,0.1661866009235382 -192,0.16586506366729736 -193,0.16554787755012512 -194,0.16523492336273193 -195,0.1649257242679596 -196,0.16462019085884094 -197,0.16431882977485657 -198,0.16402101516723633 -199,0.1637265682220459 -200,0.16343551874160767 diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.125/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.125/training_config.txt deleted file mode 100644 index d722061..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.125/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.125/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.125/training_log.csv deleted file mode 100644 index d0870d2..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,25.416610717773438 -2,12.460535049438477 -3,3.301673412322998 -4,2.035637140274048 -5,1.9072792530059814 -6,1.8045721054077148 -7,1.2616894245147705 -8,1.383488416671753 -9,1.3897730112075806 -10,1.327128291130066 -11,0.5599731802940369 -12,0.4427085518836975 -13,0.35550200939178467 -14,0.32063814997673035 -15,0.2932041883468628 -16,0.27575379610061646 -17,0.2632509469985962 -18,0.2566462755203247 -19,0.2506917417049408 -20,0.2465008944272995 -21,0.2448531985282898 -22,0.25037911534309387 -23,0.257413387298584 -24,0.24231451749801636 -25,0.23797905445098877 -26,0.23620785772800446 -27,0.23359674215316772 -28,0.22942906618118286 -29,0.22509388625621796 -30,0.22137677669525146 -31,0.21825669705867767 -32,0.2156987488269806 -33,0.21356263756752014 -34,0.21173253655433655 -35,0.2101239562034607 -36,0.20868539810180664 -37,0.20737895369529724 -38,0.206176295876503 -39,0.20505648851394653 -40,0.20400212705135345 -41,0.203004390001297 -42,0.20205388963222504 -43,0.20114275813102722 -44,0.20026379823684692 -45,0.1994129717350006 -46,0.19858486950397491 -47,0.19777712225914001 -48,0.19698774814605713 -49,0.19621434807777405 -50,0.19545570015907288 -51,0.19470971822738647 -52,0.1939750462770462 -53,0.19325202703475952 -54,0.192539244890213 -55,0.1918366551399231 -56,0.19114284217357635 -57,0.1904570311307907 -58,0.18978029489517212 -59,0.18911223113536835 -60,0.18845421075820923 -61,0.1878041923046112 -62,0.18716353178024292 -63,0.18653032183647156 -64,0.1859055757522583 -65,0.1852886825799942 -66,0.1846802532672882 -67,0.18407940864562988 -68,0.1834857016801834 -69,0.18289890885353088 -70,0.18231847882270813 
-71,0.18174611032009125 -72,0.18118013441562653 -73,0.18062226474285126 -74,0.18007032573223114 -75,0.17952516674995422 -76,0.17898808419704437 -77,0.17845849692821503 -78,0.17793715000152588 -79,0.17742258310317993 -80,0.17691533267498016 -81,0.17641441524028778 -82,0.17591972649097443 -83,0.1754307895898819 -84,0.1749468892812729 -85,0.1744685024023056 -86,0.17399463057518005 -87,0.17352597415447235 -88,0.17306190729141235 -89,0.1726016104221344 -90,0.17214526236057281 -91,0.17169179022312164 -92,0.17124241590499878 -93,0.1707962155342102 -94,0.17035266757011414 -95,0.1699114441871643 -96,0.16947190463542938 -97,0.16903504729270935 -98,0.1685996651649475 -99,0.1681666523218155 -100,0.16773536801338196 -101,0.16730543971061707 -102,0.1668776422739029 -103,0.16645094752311707 -104,0.16602490842342377 -105,0.16559816896915436 -106,0.1651712954044342 -107,0.16474437713623047 -108,0.16431687772274017 -109,0.16388902068138123 -110,0.1634603589773178 -111,0.16303059458732605 -112,0.16259914636611938 -113,0.16216568648815155 -114,0.16172975301742554 -115,0.16129212081432343 -116,0.16085131466388702 -117,0.16040660440921783 -118,0.15995921194553375 -119,0.15950998663902283 -120,0.15906113386154175 -121,0.15861767530441284 -122,0.1581854522228241 -123,0.1577717661857605 -124,0.15738065540790558 -125,0.15701137483119965 -126,0.15666204690933228 -127,0.1563282310962677 -128,0.1560068428516388 -129,0.15569107234477997 -130,0.15537510812282562 -131,0.1550557017326355 -132,0.1547318994998932 -133,0.15440315008163452 -134,0.15407121181488037 -135,0.15373623371124268 -136,0.15340030193328857 -137,0.15306580066680908 -138,0.15273365378379822 -139,0.15240582823753357 -140,0.15208397805690765 -141,0.1517682820558548 -142,0.15145832300186157 -143,0.15115343034267426 -144,0.1508520245552063 -145,0.15055416524410248 -146,0.15025962889194489 -147,0.14996697008609772 -148,0.1496761590242386 -149,0.14938677847385406 -150,0.14909863471984863 -151,0.1488126963376999 -152,0.1485283076763153 -153,0.14824539422988892 -154,0.14796419441699982 -155,0.14768461883068085 -156,0.1474074423313141 -157,0.14713236689567566 -158,0.146859809756279 -159,0.14658954739570618 -160,0.1463223248720169 -161,0.14605723321437836 -162,0.14579488337039948 -163,0.1455349326133728 -164,0.14527782797813416 -165,0.1450224667787552 -166,0.14476902782917023 -167,0.14451739192008972 -168,0.14426788687705994 -169,0.14401759207248688 -170,0.14376720786094666 -171,0.14351677894592285 -172,0.14326635003089905 -173,0.14301647245883942 -174,0.1427665799856186 -175,0.14251752197742462 -176,0.14226900041103363 -177,0.14202111959457397 -178,0.14177405834197998 -179,0.14152806997299194 -180,0.14128316938877106 -181,0.14103861153125763 -182,0.14079482853412628 -183,0.14055180549621582 -184,0.1403103470802307 -185,0.1400698870420456 -186,0.13983117043972015 -187,0.13959400355815887 -188,0.1393582820892334 -189,0.1391250044107437 -190,0.13889408111572266 -191,0.13866513967514038 -192,0.13843762874603271 -193,0.1382121741771698 -194,0.1379885971546173 -195,0.13776741921901703 -196,0.13754916191101074 -197,0.13733389973640442 -198,0.13712115585803986 -199,0.13691113889217377 -200,0.13670364022254944 diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.160/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.160/training_config.txt deleted file mode 100644 index 11b5e97..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.160/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.160/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.160/training_log.csv deleted file mode 100644 index 0d8b411..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,17.944499969482422 -2,2.47705340385437 -3,0.484020471572876 -4,0.24442158639431 -5,0.2179873287677765 -6,0.2021477073431015 -7,0.19135542213916779 -8,0.185320183634758 -9,0.18135514855384827 -10,0.17861361801624298 -11,0.17687556147575378 -12,0.1755312979221344 -13,0.17443758249282837 -14,0.1735261231660843 -15,0.17275245487689972 -16,0.17208434641361237 -17,0.1714925915002823 -18,0.17095743119716644 -19,0.17046169936656952 -20,0.16999274492263794 -21,0.16954094171524048 -22,0.1691007912158966 -23,0.16866475343704224 -24,0.16822782158851624 
-25,0.16779044270515442 -26,0.1673501431941986 -27,0.1669057011604309 -28,0.16645866632461548 -29,0.16600798070430756 -30,0.16555380821228027 -31,0.1650974452495575 -32,0.16464105248451233 -33,0.1641860455274582 -34,0.16373208165168762 -35,0.16328014433383942 -36,0.16283167898654938 -37,0.16238801181316376 -38,0.1619482785463333 -39,0.161519393324852 -40,0.16110296547412872 -41,0.1607038825750351 -42,0.16031385958194733 -43,0.15993204712867737 -44,0.15955865383148193 -45,0.15919306874275208 -46,0.15883561968803406 -47,0.15848466753959656 -48,0.1581413447856903 -49,0.15780547261238098 -50,0.15747694671154022 -51,0.15715625882148743 -52,0.1568424552679062 -53,0.15653584897518158 -54,0.15623602271080017 -55,0.15594322979450226 -56,0.15565718710422516 -57,0.15537692606449127 -58,0.15510223805904388 -59,0.15483269095420837 -60,0.15456868708133698 -61,0.1543092429637909 -62,0.15405379235744476 -63,0.15380138158798218 -64,0.1535535752773285 -65,0.15331099927425385 -66,0.15307343006134033 -67,0.15284107625484467 -68,0.1526138037443161 -69,0.15239115059375763 -70,0.15217235684394836 -71,0.1519571989774704 -72,0.15174569189548492 -73,0.15153814852237701 -74,0.15133386850357056 -75,0.15113313496112823 -76,0.15093545615673065 -77,0.15074068307876587 -78,0.15054841339588165 -79,0.1503596305847168 -80,0.15017245709896088 -81,0.14998677372932434 -82,0.149802103638649 -83,0.14961914718151093 -84,0.14943701028823853 -85,0.14925558865070343 -86,0.1490735560655594 -87,0.14889158308506012 -88,0.1487090289592743 -89,0.14852556586265564 -90,0.14834226667881012 -91,0.14815910160541534 -92,0.14797575771808624 -93,0.14779269695281982 -94,0.1476098895072937 -95,0.1474272906780243 -96,0.1472456455230713 -97,0.14706560969352722 -98,0.14688701927661896 -99,0.1467091143131256 -100,0.1465328186750412 -101,0.1463572233915329 -102,0.14618365466594696 -103,0.146011620759964 -104,0.14584070444107056 -105,0.14567124843597412 -106,0.14550313353538513 -107,0.14533625543117523 -108,0.14517037570476532 -109,0.14500568807125092 -110,0.14484179019927979 -111,0.14467979967594147 -112,0.1445179581642151 -113,0.14435721933841705 -114,0.14419730007648468 -115,0.14403806626796722 -116,0.14387960731983185 -117,0.14372208714485168 -118,0.14356476068496704 -119,0.14340879023075104 -120,0.14325331151485443 -121,0.14309942722320557 -122,0.14294670522212982 -123,0.14279527962207794 -124,0.1426447480916977 -125,0.1424955427646637 -126,0.14234718680381775 -127,0.1422002613544464 -128,0.14205405116081238 -129,0.14190930128097534 -130,0.14176583290100098 -131,0.14162404835224152 -132,0.14148327708244324 -133,0.14134378731250763 -134,0.14120493829250336 -135,0.14106784760951996 -136,0.14093123376369476 -137,0.14079616963863373 -138,0.14066173136234283 -139,0.14052800834178925 -140,0.14039483666419983 -141,0.14026221632957458 -142,0.1401299089193344 -143,0.13999822735786438 -144,0.1398668885231018 -145,0.13973625004291534 -146,0.1396058350801468 -147,0.1394759565591812 -148,0.13934676349163055 -149,0.13921797275543213 -150,0.13909010589122772 -151,0.13896234333515167 -152,0.13883568346500397 -153,0.13870911300182343 -154,0.13858340680599213 -155,0.1384579837322235 -156,0.13833338022232056 -157,0.13820935785770416 -158,0.13808588683605194 -159,0.13796310126781464 -160,0.13784083724021912 -161,0.13771948218345642 -162,0.13759863376617432 -163,0.13747896254062653 -164,0.1373596042394638 -165,0.13724073767662048 -166,0.13712146878242493 -167,0.13700245320796967 -168,0.13688287138938904 -169,0.13676390051841736 -170,0.13664504885673523 
-171,0.13652671873569489 -172,0.13640792667865753 -173,0.1362890750169754 -174,0.13617002964019775 -175,0.13605129718780518 -176,0.13593222200870514 -177,0.1358131468296051 -178,0.1356937438249588 -179,0.13557419180870056 -180,0.13545438647270203 -181,0.13533441722393036 -182,0.13521386682987213 -183,0.1350928097963333 -184,0.13497158885002136 -185,0.13484954833984375 -186,0.13472768664360046 -187,0.13460521399974823 -188,0.1344829797744751 -189,0.13436035811901093 -190,0.13423778116703033 -191,0.1341153383255005 -192,0.1339929699897766 -193,0.13387112319469452 -194,0.13374927639961243 -195,0.1336275041103363 -196,0.13350629806518555 -197,0.13338521122932434 -198,0.13326533138751984 -199,0.13314510881900787 -200,0.13302506506443024 diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.200/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.200/training_config.txt deleted file mode 100644 index edd76af..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.200/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.200/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.200/training_log.csv deleted file mode 100644 index 44c52ae..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,10.523073196411133 -2,1.0487546920776367 -3,0.8225482702255249 -4,0.35030558705329895 -5,0.264265239238739 -6,0.19754540920257568 -7,0.19256769120693207 -8,0.19020164012908936 -9,0.1887328028678894 -10,0.1883114129304886 -11,0.1884777545928955 -12,0.1881391555070877 -13,0.18750280141830444 -14,0.18667548894882202 -15,0.18571577966213226 -16,0.1846942901611328 -17,0.18363597989082336 -18,0.182551771402359 -19,0.18145303428173065 -20,0.18034888803958893 -21,0.1792447566986084 -22,0.17814350128173828 -23,0.1770513355731964 -24,0.1759728342294693 -25,0.174910306930542 -26,0.17386794090270996 -27,0.1728464514017105 -28,0.17184147238731384 -29,0.17085741460323334 -30,0.1698959320783615 -31,0.16895736753940582 -32,0.16804185509681702 -33,0.1671496033668518 -34,0.16628000140190125 -35,0.1654323786497116 -36,0.1646023988723755 -37,0.16378958523273468 -38,0.16299575567245483 -39,0.16222333908081055 -40,0.1614740490913391 -41,0.16074825823307037 -42,0.16003820300102234 -43,0.1593407243490219 -44,0.1586601734161377 -45,0.15799680352210999 -46,0.1573510318994522 -47,0.15672443807125092 -48,0.15611961483955383 -49,0.15554890036582947 -50,0.15500310063362122 -51,0.15447908639907837 -52,0.15397536754608154 -53,0.1534905880689621 -54,0.15302379429340363 -55,0.15257319808006287 -56,0.1521410048007965 -57,0.1517259031534195 -58,0.1513252854347229 -59,0.15093742311000824 -60,0.15056192874908447 -61,0.15019799768924713 -62,0.1498444825410843 -63,0.1495017558336258 -64,0.1491684764623642 -65,0.14884807169437408 -66,0.14854004979133606 -67,0.14824135601520538 -68,0.1479509472846985 -69,0.14766792953014374 -70,0.147392138838768 
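
The docstring's note about delta is easy to verify: for exponents below 1, the derivative of error**exponent grows without bound as the error approaches zero, while the shifted form (error + delta)**exponent keeps a finite slope. A short autograd check (a sketch, not repo code; the exponent-4 runs in this sweep are unaffected by the issue, but the shared normalized_loss keeps delta for the fractional-exponent runs the docstring mentions):

import torch

e = torch.tensor([1e-6], requires_grad=True)
(e ** 0.5).backward()
print(e.grad)   # ~500, growing without bound as e -> 0

e2 = torch.tensor([1e-6], requires_grad=True)
((e2 + 1.0) ** 0.5).backward()
print(e2.grad)  # ~0.5, bounded thanks to the shift
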
-71,0.1471225768327713 -72,0.14685960114002228 -73,0.14660316705703735 -74,0.14635297656059265 -75,0.14610791206359863 -76,0.14586761593818665 -77,0.1456320732831955 -78,0.14540094137191772 -79,0.14517422020435333 -80,0.1449516862630844 -81,0.1447330117225647 -82,0.14451785385608673 -83,0.14430657029151917 -84,0.1440984606742859 -85,0.14389431476593018 -86,0.14369367063045502 -87,0.14349649846553802 -88,0.14330197870731354 -89,0.14311029016971588 -90,0.14292126893997192 -91,0.14273469150066376 -92,0.14255082607269287 -93,0.14236922562122345 -94,0.14218996465206146 -95,0.1420130878686905 -96,0.14183850586414337 -97,0.1416657716035843 -98,0.14149489998817444 -99,0.14132565259933472 -100,0.14115816354751587 -101,0.14099234342575073 -102,0.1408277153968811 -103,0.14066441357135773 -104,0.14050231873989105 -105,0.14034222066402435 -106,0.14018329977989197 -107,0.14002615213394165 -108,0.13986995816230774 -109,0.1397157460451126 -110,0.13956262171268463 -111,0.13941112160682678 -112,0.13926084339618683 -113,0.13911210000514984 -114,0.1389646977186203 -115,0.13881848752498627 -116,0.13867396116256714 -117,0.1385304480791092 -118,0.1383887231349945 -119,0.1382482647895813 -120,0.13810965418815613 -121,0.1379726231098175 -122,0.13783687353134155 -123,0.1377028375864029 -124,0.1375698745250702 -125,0.13743841648101807 -126,0.13730791211128235 -127,0.13717877864837646 -128,0.13705067336559296 -129,0.13692374527454376 -130,0.1367981731891632 -131,0.1366734355688095 -132,0.13654987514019012 -133,0.1364271491765976 -134,0.1363050788640976 -135,0.1361834853887558 -136,0.13606294989585876 -137,0.13594330847263336 -138,0.1358238011598587 -139,0.13570469617843628 -140,0.13558584451675415 -141,0.13546763360500336 -142,0.1353498250246048 -143,0.1352328658103943 -144,0.1351166069507599 -145,0.13500121235847473 -146,0.13488689064979553 -147,0.1347731649875641 -148,0.13466036319732666 -149,0.13454805314540863 -150,0.1344371885061264 -151,0.13432690501213074 -152,0.1342175006866455 -153,0.13410913944244385 -154,0.1340017467737198 -155,0.13389530777931213 -156,0.133789524435997 -157,0.13368487358093262 -158,0.13358107209205627 -159,0.13347816467285156 -160,0.1333763301372528 -161,0.13327497243881226 -162,0.13317418098449707 -163,0.13307379186153412 -164,0.13297398388385773 -165,0.1328737586736679 -166,0.1327730268239975 -167,0.1326727569103241 -168,0.13257251679897308 -169,0.1324726790189743 -170,0.1323729306459427 -171,0.1322728991508484 -172,0.13217271864414215 -173,0.13207149505615234 -174,0.13196925818920135 -175,0.13186684250831604 -176,0.1317642778158188 -177,0.13166141510009766 -178,0.13155800104141235 -179,0.1314544528722763 -180,0.1313505470752716 -181,0.1312459409236908 -182,0.13114115595817566 -183,0.1310356855392456 -184,0.1309296190738678 -185,0.13082298636436462 -186,0.13071583211421967 -187,0.13060900568962097 -188,0.13050265610218048 -189,0.13039728999137878 -190,0.1302928775548935 -191,0.1301892250776291 -192,0.13008688390254974 -193,0.1299852579832077 -194,0.12988491356372833 -195,0.12978608906269073 -196,0.12968870997428894 -197,0.12959307432174683 -198,0.12949855625629425 -199,0.12940570712089539 -200,0.1293140947818756 diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.250/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.250/training_config.txt deleted file mode 100644 index ec9e8f8..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.250/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.250/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.250/training_log.csv deleted file mode 100644 index 2362c85..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,4.735718250274658 -2,1.1212695837020874 -3,1.0804215669631958 -4,0.9687809944152832 -5,0.664789617061615 -6,0.4559202194213867 -7,0.3644809424877167 -8,0.30109620094299316 -9,0.25103065371513367 -10,0.2182338833808899 -11,0.20096541941165924 -12,0.1904691457748413 -13,0.1832406371831894 -14,0.17790460586547852 -15,0.17378538846969604 -16,0.1705252081155777 -17,0.1678985208272934 -18,0.1657484918832779 -19,0.1639525592327118 -20,0.16242505609989166 -21,0.1611054688692093 -22,0.15997186303138733 -23,0.15900669991970062 -24,0.15817378461360931 
-25,0.15744054317474365 -26,0.15679077804088593 -27,0.15620975196361542 -28,0.15568807721138 -29,0.15521635115146637 -30,0.15478578209877014 -31,0.15439066290855408 -32,0.1540256291627884 -33,0.1536850482225418 -34,0.15336422622203827 -35,0.1530608981847763 -36,0.1527712196111679 -37,0.15249258279800415 -38,0.1522228717803955 -39,0.1519608199596405 -40,0.1517060250043869 -41,0.1514573097229004 -42,0.15121351182460785 -43,0.15097670257091522 -44,0.15074703097343445 -45,0.15052597224712372 -46,0.15030896663665771 -47,0.1500956118106842 -48,0.14988499879837036 -49,0.14967674016952515 -50,0.14947077631950378 -51,0.14926649630069733 -52,0.14906525611877441 -53,0.14886687695980072 -54,0.14867021143436432 -55,0.14847566187381744 -56,0.14828374981880188 -57,0.14809352159500122 -58,0.14790494740009308 -59,0.14771825075149536 -60,0.1475338190793991 -61,0.14735087752342224 -62,0.14716871082782745 -63,0.14698754251003265 -64,0.14680695533752441 -65,0.1466272473335266 -66,0.14644865691661835 -67,0.14627180993556976 -68,0.14609526097774506 -69,0.1459190398454666 -70,0.14574378728866577 -71,0.14556874334812164 -72,0.14539436995983124 -73,0.1452210396528244 -74,0.1450481414794922 -75,0.14487643539905548 -76,0.14470753073692322 -77,0.1445402204990387 -78,0.14437434077262878 -79,0.14421044290065765 -80,0.14404833316802979 -81,0.14388754963874817 -82,0.14372828602790833 -83,0.14357063174247742 -84,0.14341437816619873 -85,0.14325998723506927 -86,0.14310836791992188 -87,0.14295753836631775 -88,0.14280842244625092 -89,0.14266178011894226 -90,0.14251698553562164 -91,0.1423739790916443 -92,0.142233744263649 -93,0.14209556579589844 -94,0.14195914566516876 -95,0.1418246626853943 -96,0.14169295132160187 -97,0.14156298339366913 -98,0.14143547415733337 -99,0.14130999147891998 -100,0.14118598401546478 -101,0.141063392162323 -102,0.14094212651252747 -103,0.1408226490020752 -104,0.1407041847705841 -105,0.140586718916893 -106,0.14047104120254517 -107,0.14035674929618835 -108,0.14024321734905243 -109,0.14013051986694336 -110,0.14001908898353577 -111,0.13990870118141174 -112,0.13979917764663696 -113,0.1396905481815338 -114,0.13958308100700378 -115,0.13947640359401703 -116,0.13937024772167206 -117,0.13926489651203156 -118,0.1391608566045761 -119,0.13905777037143707 -120,0.13895466923713684 -121,0.13885191082954407 -122,0.13874773681163788 -123,0.13864268362522125 -124,0.1385367065668106 -125,0.1384298950433731 -126,0.13832204043865204 -127,0.13821221888065338 -128,0.13809983432292938 -129,0.1379866898059845 -130,0.13787369430065155 -131,0.13776060938835144 -132,0.13764768838882446 -133,0.13753435015678406 -134,0.13742102682590485 -135,0.13730810582637787 -136,0.1371951550245285 -137,0.13708244264125824 -138,0.13696986436843872 -139,0.1368577927350998 -140,0.1367463320493698 -141,0.13663510978221893 -142,0.13652357459068298 -143,0.13641045987606049 -144,0.13629834353923798 -145,0.13618749380111694 -146,0.1360766738653183 -147,0.13596634566783905 -148,0.13585664331912994 -149,0.1357475370168686 -150,0.13563914597034454 -151,0.13552996516227722 -152,0.13542059063911438 -153,0.13531316816806793 -154,0.13520589470863342 -155,0.13509878516197205 -156,0.13499261438846588 -157,0.13488854467868805 -158,0.1347859501838684 -159,0.1346846967935562 -160,0.1345834732055664 -161,0.13448265194892883 -162,0.13438403606414795 -163,0.1342869997024536 -164,0.13419078290462494 -165,0.1340949684381485 -166,0.13400046527385712 -167,0.1339072436094284 -168,0.13381564617156982 -169,0.13372503221035004 -170,0.13363485038280487 -171,0.1335456520318985 
-172,0.13345743715763092 -173,0.13337014615535736 -174,0.1332840770483017 -175,0.13319852948188782 -176,0.13311375677585602 -177,0.1330300122499466 -178,0.13294754922389984 -179,0.1328665018081665 -180,0.1327866017818451 -181,0.13270726799964905 -182,0.13262872397899628 -183,0.1325518637895584 -184,0.13247625529766083 -185,0.13240094482898712 -186,0.13232623040676117 -187,0.13225337862968445 -188,0.1321813017129898 -189,0.1321110725402832 -190,0.13204190135002136 -191,0.1319749802350998 -192,0.13190904259681702 -193,0.13184469938278198 -194,0.13178123533725739 -195,0.13171963393688202 -196,0.13165932893753052 -197,0.13159993290901184 -198,0.13154126703739166 -199,0.13148349523544312 -200,0.13142673671245575 diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.300/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.300/training_config.txt deleted file mode 100644 index bce164c..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.300/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.300/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.300/training_log.csv deleted file mode 100644 index 83d3892..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.300/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,2.4868006706237793 -2,1.484640121459961 -3,1.7096853256225586 -4,1.775104284286499 -5,1.6283886432647705 -6,1.4071059226989746 -7,1.179467797279358 -8,0.9630540013313293 -9,0.7795644402503967 -10,0.6399717926979065 -11,0.5351330041885376 -12,0.45858535170555115 -13,0.40104109048843384 -14,0.35629913210868835 -15,0.32037678360939026 -16,0.2911825478076935 -17,0.26756322383880615 -18,0.24803200364112854 -19,0.23176150023937225 -20,0.2182576060295105 -21,0.20708318054676056 -22,0.1860899031162262 -23,0.20702745020389557 -24,0.2633511424064636 -25,0.3418039381504059 -26,0.43667033314704895 -27,4.755534648895264 -28,29.954187393188477 -29,2.504987955093384 -30,1.2452938556671143 -31,1.1811820268630981 -32,1.23821222782135 -33,1.493226408958435 -34,1.491268277168274 -35,1.348840594291687 -36,1.3181228637695312 -37,1.4254050254821777 -38,1.4296796321868896 -39,1.311000108718872 -40,1.2047390937805176 -41,1.554042100906372 -42,1.4558824300765991 -43,1.762275218963623 -44,2.4600048065185547 -45,3.2620596885681152 -46,3.060593366622925 -47,5.07982873916626 -48,4.794989109039307 -49,4.253286361694336 -50,4.180200099945068 -51,3.853510618209839 -52,3.646782398223877 -53,3.3116555213928223 -54,3.248952865600586 -55,3.0244901180267334 -56,2.785672664642334 -57,2.552765369415283 -58,2.5126535892486572 -59,1.7299529314041138 -60,1.810992956161499 -61,1.6813421249389648 -62,2.373183488845825 -63,1.8572572469711304 -64,1.490265130996704 -65,1.2071611881256104 -66,1.0321846008300781 -67,0.8953080773353577 -68,0.7624420523643494 -69,0.6590163707733154 -70,0.5752772688865662 -71,0.5047502517700195 -72,0.44731876254081726 
-73,0.4040822982788086 -74,0.37129536271095276 -75,0.34268179535865784 -76,0.32143375277519226 -77,0.3045608699321747 -78,0.2937592566013336 -79,0.3098408579826355 -80,0.30327191948890686 -81,0.2762918472290039 -82,0.2588250935077667 -83,0.25327014923095703 -84,0.2487478256225586 -85,0.24490554630756378 -86,0.2415083944797516 -87,0.23874922096729279 -88,0.23610429465770721 -89,0.2335314005613327 -90,0.23100532591342926 -91,0.2285926342010498 -92,0.22634120285511017 -93,0.22421899437904358 -94,0.22224673628807068 -95,0.22036027908325195 -96,0.21853965520858765 -97,0.2168452888727188 -98,0.21525098383426666 -99,0.21378274261951447 -100,0.2123057246208191 -101,0.21090421080589294 -102,0.20945139229297638 -103,0.20808307826519012 -104,0.20677639544010162 -105,0.20548024773597717 -106,0.20427177846431732 -107,0.20310868322849274 -108,0.2020944207906723 -109,0.20108744502067566 -110,0.20010478794574738 -111,0.1991451531648636 -112,0.19820468127727509 -113,0.19726260006427765 -114,0.19636264443397522 -115,0.1954805552959442 -116,0.19456946849822998 -117,0.1936563104391098 -118,0.19273792207241058 -119,0.19179967045783997 -120,0.19086560606956482 -121,0.1898810863494873 -122,0.18887849152088165 -123,0.18788135051727295 -124,0.1870756596326828 -125,0.18631485104560852 -126,0.18553754687309265 -127,0.18471305072307587 -128,0.18383847177028656 -129,0.18288269639015198 -130,0.18213161826133728 -131,0.18164408206939697 -132,0.18112273514270782 -133,0.18054257333278656 -134,0.17988277971744537 -135,0.17918159067630768 -136,0.1784527450799942 -137,0.1776980310678482 -138,0.17701567709445953 -139,0.17636188864707947 -140,0.17572693526744843 -141,0.1751149743795395 -142,0.17449145019054413 -143,0.1738605797290802 -144,0.17329174280166626 -145,0.17266027629375458 -146,0.17202697694301605 -147,0.17143873870372772 -148,0.1708587408065796 -149,0.17026464641094208 -150,0.16967269778251648 -151,0.16906973719596863 -152,0.16848166286945343 -153,0.167888343334198 -154,0.1672850400209427 -155,0.16668175160884857 -156,0.1660834401845932 -157,0.1655428558588028 -158,0.1649676114320755 -159,0.16443903744220734 -160,0.16393868625164032 -161,0.16343052685260773 -162,0.16290929913520813 -163,0.16239312291145325 -164,0.16184574365615845 -165,0.16130831837654114 -166,0.16077281534671783 -167,0.16025584936141968 -168,0.15976187586784363 -169,0.1592087298631668 -170,0.15869498252868652 -171,0.15818679332733154 -172,0.15768297016620636 -173,0.1571572870016098 -174,0.15662449598312378 -175,0.15607595443725586 -176,0.15556268393993378 -177,0.15505540370941162 -178,0.15450039505958557 -179,0.1539914309978485 -180,0.15351274609565735 -181,0.15303245186805725 -182,0.1525363326072693 -183,0.15202881395816803 -184,0.15154071152210236 -185,0.1510714292526245 -186,0.1505616307258606 -187,0.15005679428577423 -188,0.14956863224506378 -189,0.14911112189292908 -190,0.14865951240062714 -191,0.14824065566062927 -192,0.14778512716293335 -193,0.14734184741973877 -194,0.1469673365354538 -195,0.14652669429779053 -196,0.14614005386829376 -197,0.14572632312774658 -198,0.14531955122947693 -199,0.14494429528713226 -200,0.1445523053407669 diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.400/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.400/training_config.txt deleted file mode 100644 index fe48665..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.400/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.400/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.400/training_log.csv deleted file mode 100644 index c6cd2b2..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.400/training_log.csv +++ /dev/null @@ -1,36 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,1.301098108291626 -2,1.2400227785110474 -3,0.4081510901451111 -4,0.2377406358718872 -5,0.16962634027004242 -6,0.15084166824817657 -7,0.14229442179203033 -8,0.13815578818321228 -9,0.13637560606002808 -10,0.13574624061584473 -11,0.1355505883693695 -12,0.13551384210586548 -13,0.13551504909992218 -14,0.1354827880859375 -15,0.13595089316368103 -16,0.1362253725528717 -17,0.13604842126369476 -18,345.6454772949219 -19,3698461.5 -20,0.8034685850143433 -21,0.5383837819099426 -22,0.602199137210846 -23,0.6575281620025635 -24,0.7057322859764099 
-25,2.505042552947998 -26,340.17291259765625 -27,14390.5888671875 -28,41889896.0 -29,41895364.0 -30,41895364.0 -31,41895364.0 -32,41895364.0 -33,41895364.0 -34,41895364.0 diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.500/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.500/training_config.txt deleted file mode 100644 index 2536c3d..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.500/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.500/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.500/training_log.csv deleted file mode 100644 index b72b6f9..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.500/training_log.csv +++ /dev/null @@ -1,16 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,0.9015329480171204 
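
The pattern across these exponent-4 logs is a familiar learning-rate cliff: rates up to 0.25 converge smoothly, lr_0.300 oscillates before recovering, lr_0.400 saturates around 4.19e7, and the lr_0.500 through lr_1.000 logs hit nan within a handful of epochs. A minimal sketch for summarizing such a sweep from this directory layout (the summarize helper is illustrative, not part of the repo):

import csv
import math
from pathlib import Path

def summarize(sweep_dir):
    for log in sorted(Path(sweep_dir).glob("lr_*/training_log.csv")):
        with log.open() as f:
            losses = [float(row["Loss"]) for row in csv.DictReader(f)]
        diverged = any(math.isnan(x) for x in losses)
        final = "nan" if diverged else f"{losses[-1]:.4f}"
        print(f"{log.parent.name}: final loss {final} after {len(losses)} epochs")

summarize("training/base_loss_learning_rate_sweep/four")
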
-2,0.5356652140617371 -3,0.3600023090839386 -4,0.2878721058368683 -5,0.23070190846920013 -6,0.1845388561487198 -7,0.16549819707870483 -8,0.15569016337394714 -9,0.14944268763065338 -10,0.14969000220298767 -11,nan -12,nan -13,nan -14,nan diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.600/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.600/training_config.txt deleted file mode 100644 index 3d391b4..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.600/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.600/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.600/training_log.csv deleted file mode 100644 index 470c7a2..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.600/training_log.csv +++ /dev/null @@ -1,13 +0,0 @@ 
-Epoch,Loss -0,1117.280029296875 -1,0.7408900260925293 -2,0.5513259172439575 -3,0.32663169503211975 -4,0.21077245473861694 -5,0.17094270884990692 -6,0.1536352038383484 -7,0.14601771533489227 -8,nan -9,nan -10,nan -11,nan diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.700/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.700/training_config.txt deleted file mode 100644 index 4a3ecd9..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.700/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.700/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.700/training_log.csv deleted file mode 100644 index 26138bd..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.700/training_log.csv +++ /dev/null @@ -1,11 +0,0 @@ -Epoch,Loss 
-0,1117.280029296875 -1,0.7520824074745178 -2,0.3878028988838196 -3,0.31070250272750854 -4,0.26424312591552734 -5,0.23525479435920715 -6,nan -7,nan -8,nan -9,nan diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.800/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.800/training_config.txt deleted file mode 100644 index 8e134d5..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.800/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.800/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.800/training_log.csv deleted file mode 100644 index 27f52fe..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.800/training_log.csv +++ /dev/null @@ -1,39 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,0.9854243397712708 -2,0.3447346091270447 
-3,3.1059296131134033 -4,2.4283204078674316 -5,0.8883350491523743 -6,0.5983593463897705 -7,0.3887821435928345 -8,0.26367807388305664 -9,0.21425235271453857 -10,0.19075612723827362 -11,0.18758946657180786 -12,0.19773417711257935 -13,0.5665905475616455 -14,1.0985172986984253 -15,0.6045243144035339 -16,0.4887182116508484 -17,0.3533113896846771 -18,0.3073558509349823 -19,0.2879619002342224 -20,0.27278897166252136 -21,0.26235079765319824 -22,0.25458696484565735 -23,0.24976076185703278 -24,0.24581611156463623 -25,0.24198336899280548 -26,0.401131272315979 -27,7.404294967651367 -28,0.43706411123275757 -29,0.38758718967437744 -30,0.3742772042751312 -31,0.3644647002220154 -32,0.3567458689212799 -33,0.3482499420642853 -34,nan -35,nan -36,nan -37,nan diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.900/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_0.900/training_config.txt deleted file mode 100644 index ce2ff0d..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.900/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_0.900/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_0.900/training_log.csv deleted file mode 100644 index 762ea8b..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_0.900/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,1.283570647239685 -2,0.4022349417209625 -3,0.1734003722667694 -4,0.16121838986873627 -5,nan -6,nan -7,nan -8,nan diff --git a/training/base_loss_learning_rate_sweep/four/lr_1.000/training_config.txt b/training/base_loss_learning_rate_sweep/four/lr_1.000/training_config.txt deleted file mode 100644 index a8a4e19..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_1.000/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function Name: four -Loss Function Exponent: 4 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/four/lr_1.000/training_log.csv b/training/base_loss_learning_rate_sweep/four/lr_1.000/training_log.csv deleted file mode 100644 index fc96c31..0000000 --- a/training/base_loss_learning_rate_sweep/four/lr_1.000/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,1117.280029296875 -1,1.4419468641281128 -2,0.40070053935050964 -3,6.798883438110352 -4,0.3186725080013275 -5,nan -6,nan -7,nan -8,nan diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.003/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.003/training_config.txt deleted file mode 100644 index dad7bc3..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.003/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.0025 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.003/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.003/training_log.csv deleted file mode 100644 index 3b957a5..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.003/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,3.963522434234619 -2,3.9888622760772705 -3,3.9225497245788574 -4,3.9240901470184326 -5,3.9003593921661377 -6,3.864915132522583 -7,3.819655656814575 -8,3.764117956161499 -9,3.683419704437256 -10,3.601982355117798 -11,3.53326416015625 -12,3.5419116020202637 -13,3.446972608566284 -14,3.4784533977508545 -15,3.4375782012939453 -16,3.409879207611084 -17,3.385035753250122 -18,3.339855194091797 -19,3.2395052909851074 -20,3.187840223312378 -21,3.1527905464172363 -22,3.1107726097106934 -23,3.0820472240448 -24,3.0843427181243896 -25,3.0640883445739746 -26,3.0212812423706055 -27,2.9712183475494385 -28,2.927699327468872 -29,2.8170905113220215 -30,2.7729506492614746 -31,2.7596752643585205 -32,2.773958683013916 -33,2.7662241458892822 -34,2.7288053035736084 -35,2.6627039909362793 -36,2.579456090927124 -37,2.4478323459625244 -38,2.34645676612854 -39,2.2739596366882324 -40,2.253542900085449 -41,2.2439379692077637 -42,2.235929012298584 -43,2.2421483993530273 -44,2.233704090118408 -45,2.172818422317505 -46,2.1210594177246094 -47,2.076456308364868 -48,2.0373549461364746 -49,2.0229034423828125 -50,1.9868849515914917 -51,1.9337939023971558 -52,1.901742696762085 -53,1.8927990198135376 -54,1.9397913217544556 -55,1.9442741870880127 -56,1.9489076137542725 -57,1.9536356925964355 -58,1.9871755838394165 -59,1.982784628868103 -60,1.944566011428833 -61,1.9353190660476685 -62,1.930168867111206 -63,1.9257471561431885 -64,1.9208651781082153 -65,1.9132553339004517 -66,1.9168190956115723 -67,1.9204704761505127 -68,1.91783607006073 -69,1.9109975099563599 -70,1.8929510116577148 -71,1.9369525909423828 -72,1.9560388326644897 -73,1.9438265562057495 
-74,1.9413725137710571 -75,1.9405161142349243 -76,1.9403035640716553 -77,1.9402594566345215 -78,1.9386273622512817 -79,1.9042456150054932 -80,1.9048928022384644 -81,1.929923415184021 -82,1.9079842567443848 -83,1.9095087051391602 -84,1.9071956872940063 -85,1.9027982950210571 -86,1.8966580629348755 -87,1.8872095346450806 -88,1.8877772092819214 -89,1.8855053186416626 -90,1.8682308197021484 -91,1.8860795497894287 -92,1.919615387916565 -93,1.9722322225570679 -94,1.966689109802246 -95,1.9541494846343994 -96,1.9644767045974731 -97,1.9660719633102417 -98,1.926274299621582 -99,1.9324336051940918 -100,1.9339964389801025 -101,1.9339215755462646 -102,1.9330699443817139 -103,1.9318578243255615 -104,1.9306535720825195 -105,1.9295986890792847 -106,1.9287693500518799 -107,1.928287148475647 -108,1.9288476705551147 -109,1.9374741315841675 -110,1.92831552028656 -111,1.939621090888977 -112,1.947784423828125 -113,1.9060616493225098 -114,1.9051066637039185 -115,1.907423973083496 -116,1.9109662771224976 -117,1.9066026210784912 -118,1.7930749654769897 -119,1.7972123622894287 -120,1.7959470748901367 -121,1.8178856372833252 -122,1.8666131496429443 -123,1.8748290538787842 -124,1.871961236000061 -125,1.8632855415344238 -126,1.843876600265503 -127,1.8531075716018677 -128,1.8433283567428589 -129,1.8424128293991089 -130,1.8327503204345703 -131,1.796920657157898 -132,1.7893810272216797 -133,1.7686102390289307 -134,1.8229548931121826 -135,1.8487292528152466 -136,1.9277045726776123 -137,1.9510546922683716 -138,1.9479349851608276 -139,1.9565191268920898 -140,1.9679232835769653 -141,1.9770493507385254 -142,1.979770541191101 -143,1.9732258319854736 -144,1.9214292764663696 -145,1.9217631816864014 -146,1.9297049045562744 -147,1.903525710105896 -148,1.8224226236343384 -149,1.7629413604736328 -150,1.747879981994629 -151,1.7367205619812012 -152,1.7589328289031982 -153,1.7826303243637085 -154,1.7969175577163696 -155,1.7851141691207886 -156,1.7617847919464111 -157,1.7641550302505493 -158,1.7651972770690918 -159,1.749405026435852 -160,1.7381927967071533 -161,1.7504769563674927 -162,1.7568849325180054 -163,1.758018136024475 -164,1.7558397054672241 -165,1.7521754503250122 -166,1.7479863166809082 -167,1.7434741258621216 -168,1.73854398727417 -169,1.7330549955368042 -170,1.7268606424331665 -171,1.7197084426879883 -172,1.7109545469284058 -173,1.732032299041748 -174,1.748680591583252 -175,1.7511513233184814 -176,1.7508295774459839 -177,1.7493915557861328 -178,1.7456213235855103 -179,1.7291630506515503 -180,1.7300832271575928 -181,1.6990926265716553 -182,1.705142617225647 -183,1.7081230878829956 -184,1.7074358463287354 -185,1.7031837701797485 -186,1.6935193538665771 -187,1.7453819513320923 -188,1.749239206314087 -189,1.7505894899368286 -190,1.751473069190979 -191,1.7521042823791504 -192,1.752482295036316 -193,1.7525955438613892 -194,1.752454161643982 -195,1.7520745992660522 -196,1.7514790296554565 -197,1.7506929636001587 -198,1.7497503757476807 -199,1.7486625909805298 -200,1.7473865747451782 diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.005/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.005/training_config.txt deleted file mode 100644 index a2eae12..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.005/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.005 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current 
Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.005/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.005/training_log.csv deleted file mode 100644 index f5fdaf2..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.005/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,3.990422248840332 -2,3.8911526203155518 -3,3.7174606323242188 -4,3.652510643005371 -5,3.585259199142456 -6,3.4993817806243896 -7,3.403716564178467 -8,3.2982420921325684 -9,3.1660006046295166 -10,3.042965888977051 -11,2.830322504043579 -12,2.738447904586792 -13,2.5458619594573975 -14,2.5553841590881348 -15,2.620288610458374 -16,2.631861925125122 -17,2.3002169132232666 -18,2.172130584716797 -19,2.1420843601226807 -20,2.1320247650146484 -21,2.0987677574157715 -22,2.1253466606140137 -23,2.0775904655456543 -24,1.9781895875930786 -25,2.0294933319091797 -26,1.9838149547576904 -27,1.944596767425537 -28,1.9291807413101196 -29,1.898640751838684 -30,1.8370060920715332 -31,1.8186086416244507 -32,1.7536468505859375 -33,1.6476868391036987 -34,1.6607187986373901 
-35,1.6271039247512817 -36,1.5889735221862793 -37,1.5847842693328857 -38,1.5820635557174683 -39,1.5798313617706299 -40,1.5787181854248047 -41,1.581514596939087 -42,1.5558497905731201 -43,1.5450695753097534 -44,1.5264945030212402 -45,1.518525242805481 -46,1.5148359537124634 -47,1.4706188440322876 -48,1.4477322101593018 -49,1.44329833984375 -50,1.4433972835540771 -51,1.4457560777664185 -52,1.446202278137207 -53,1.4456604719161987 -54,1.4441941976547241 -55,1.4417288303375244 -56,1.4378013610839844 -57,1.4294685125350952 -58,1.427802562713623 -59,1.424027442932129 -60,1.4206206798553467 -61,1.417088270187378 -62,1.4134273529052734 -63,1.4096781015396118 -64,1.4058828353881836 -65,1.40206778049469 -66,1.3982571363449097 -67,1.3944652080535889 -68,1.3907110691070557 -69,1.3870209455490112 -70,1.3834198713302612 -71,1.379911184310913 -72,1.376500129699707 -73,1.3731722831726074 -74,1.3699066638946533 -75,1.3666348457336426 -76,1.363201379776001 -77,1.3592071533203125 -78,1.3524600267410278 -79,1.3729199171066284 -80,1.3734666109085083 -81,1.3730088472366333 -82,1.3719255924224854 -83,1.3701916933059692 -84,1.367869257926941 -85,1.365092158317566 -86,1.3620266914367676 -87,1.35886812210083 -88,1.355776071548462 -89,1.3527889251708984 -90,1.3495936393737793 -91,1.3420227766036987 -92,1.3313560485839844 -93,1.3355408906936646 -94,1.3381829261779785 -95,1.338296890258789 -96,1.3358906507492065 -97,1.3310972452163696 -98,1.3244893550872803 -99,1.3158305883407593 -100,1.3077188730239868 -101,1.301912546157837 -102,1.297008752822876 -103,1.2901585102081299 -104,1.3497705459594727 -105,1.3541955947875977 -106,1.3561590909957886 -107,1.3570154905319214 -108,1.3569484949111938 -109,1.3560750484466553 -110,1.3545352220535278 -111,1.352437973022461 -112,1.3498576879501343 -113,1.3468689918518066 -114,1.3435770273208618 -115,1.340105414390564 -116,1.33656907081604 -117,1.3330093622207642 -118,1.329271674156189 -119,1.3243111371994019 -120,1.2912635803222656 -121,1.2718510627746582 -122,1.2848191261291504 -123,1.2989050149917603 -124,1.2962759733200073 -125,1.2609903812408447 -126,1.2550674676895142 -127,1.2492940425872803 -128,1.2518370151519775 -129,1.249082326889038 -130,1.209307074546814 -131,1.203815221786499 -132,1.201616883277893 -133,1.1995816230773926 -134,1.1978845596313477 -135,1.1967235803604126 -136,1.1960667371749878 -137,1.1957756280899048 -138,1.1955246925354004 -139,1.194878101348877 -140,1.1934188604354858 -141,1.1904188394546509 -142,1.1741851568222046 -143,1.171349287033081 -144,1.1702061891555786 -145,1.169299840927124 -146,1.1684727668762207 -147,1.1675102710723877 -148,1.1654918193817139 -149,1.1632205247879028 -150,1.1610933542251587 -151,1.1591583490371704 -152,1.1574671268463135 -153,1.156077265739441 -154,1.1549975872039795 -155,1.1541920900344849 -156,1.153581976890564 -157,1.1531051397323608 -158,1.1528023481369019 -159,1.1522853374481201 -160,1.151012659072876 -161,1.149850606918335 -162,1.1489225625991821 -163,1.1481417417526245 -164,1.1474554538726807 -165,1.14682936668396 -166,1.1462383270263672 -167,1.1456626653671265 -168,1.1450865268707275 -169,1.1445029973983765 -170,1.143903374671936 -171,1.1432857513427734 -172,1.1426478624343872 -173,1.1419930458068848 -174,1.1413257122039795 -175,1.1406499147415161 -176,1.1399685144424438 -177,1.1392863988876343 -178,1.138607382774353 -179,1.1379330158233643 -180,1.1372624635696411 -181,1.136594295501709 -182,1.1359261274337769 -183,1.13525390625 -184,1.134574294090271 -185,1.1338845491409302 -186,1.1331833600997925 
-187,1.1324715614318848 -188,1.131750226020813 -189,1.131020426750183 -190,1.130283236503601 -191,1.129539132118225 -192,1.1287888288497925 -193,1.1280345916748047 -194,1.1272753477096558 -195,1.1265130043029785 -196,1.1257472038269043 -197,1.1249797344207764 -198,1.124213695526123 -199,1.123448133468628 -200,1.1226873397827148 diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.010/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.010/training_config.txt deleted file mode 100644 index 9e862ca..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.010/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.010/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.010/training_log.csv deleted file mode 100644 index dadb534..0000000 --- 
a/training/base_loss_learning_rate_sweep/one/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,3.953225612640381 -2,3.729419469833374 -3,3.415592670440674 -4,3.3805315494537354 -5,3.2633025646209717 -6,2.9987971782684326 -7,2.743154764175415 -8,2.3982350826263428 -9,2.295786142349243 -10,2.022376298904419 -11,1.861076831817627 -12,1.6363632678985596 -13,1.6290702819824219 -14,1.597785472869873 -15,1.6138927936553955 -16,1.5713011026382446 -17,1.54221773147583 -18,1.5123796463012695 -19,1.5132865905761719 -20,1.530835747718811 -21,1.495173454284668 -22,1.463236689567566 -23,1.42014741897583 -24,1.36419677734375 -25,1.3310626745224 -26,1.29025399684906 -27,1.1904032230377197 -28,1.1319605112075806 -29,1.1426321268081665 -30,1.0810388326644897 -31,1.040626883506775 -32,1.0207668542861938 -33,1.0342671871185303 -34,1.0421315431594849 -35,1.0247066020965576 -36,0.9710699319839478 -37,0.9511175155639648 -38,0.9294475317001343 -39,0.9080752730369568 -40,0.8484463691711426 -41,0.8211091756820679 -42,0.7971150279045105 -43,0.7843632698059082 -44,0.7840893268585205 -45,0.7762402296066284 -46,0.765717625617981 -47,0.7550165057182312 -48,0.7452750205993652 -49,0.748799204826355 -50,0.7524365186691284 -51,0.7298861145973206 -52,0.7182402610778809 -53,0.7062907814979553 -54,0.6823190450668335 -55,0.6746886968612671 -56,0.6683070659637451 -57,0.6625221371650696 -58,0.657468855381012 -59,0.646770179271698 -60,0.6500394940376282 -61,0.6441409587860107 -62,0.6307448744773865 -63,0.6276013851165771 -64,0.6262722611427307 -65,0.6282514929771423 -66,0.6289167404174805 -67,0.6286797523498535 -68,0.6279221177101135 -69,0.6108086109161377 -70,0.6095709204673767 -71,0.6079769730567932 -72,0.606041431427002 -73,0.6036718487739563 -74,0.6006910800933838 -75,0.5969040989875793 -76,0.5920751094818115 -77,0.6047831773757935 -78,0.5990642309188843 -79,0.5796857476234436 -80,0.5765700936317444 -81,0.5733601450920105 -82,0.570292055606842 -83,0.5659050345420837 -84,0.5583539605140686 -85,0.5536695122718811 -86,0.5553829669952393 -87,0.5533574819564819 -88,0.5530272722244263 -89,0.5529417991638184 -90,0.5509293079376221 -91,0.5496436953544617 -92,0.5491540431976318 -93,0.545516312122345 -94,0.5444504022598267 -95,0.5464065074920654 -96,0.5446236729621887 -97,0.5438387989997864 -98,0.5433920621871948 -99,0.5409575700759888 -100,0.5399165749549866 -101,0.5385094285011292 -102,0.5349714756011963 -103,0.5322896838188171 -104,0.5325157046318054 -105,0.5282973051071167 -106,0.5154492259025574 -107,0.5278825163841248 -108,0.5346413850784302 -109,0.5369623303413391 -110,0.5333026051521301 -111,0.5335693955421448 -112,0.5351334810256958 -113,0.5312021970748901 -114,0.5332398414611816 -115,0.5330787301063538 -116,0.5314154624938965 -117,0.5289480686187744 -118,0.525689959526062 -119,0.5208154916763306 -120,0.514001190662384 -121,0.5033302307128906 -122,0.513274073600769 -123,0.5169450044631958 -124,0.5223499536514282 -125,0.5240121483802795 -126,0.5241529941558838 -127,0.5223808884620667 -128,0.5253286361694336 -129,0.525793194770813 -130,0.5242817997932434 -131,0.5160840153694153 -132,0.5145168900489807 -133,0.5147525072097778 -134,0.5121307969093323 -135,0.5052525401115417 -136,0.5069926977157593 -137,0.5060696005821228 -138,0.5013381838798523 -139,0.5021734833717346 -140,0.5009047985076904 -141,0.49894165992736816 -142,0.4921840727329254 -143,0.48661237955093384 -144,0.48625704646110535 -145,0.49435192346572876 -146,0.4966621398925781 -147,0.49599534273147583 -148,0.49652099609375 
-149,0.4961308538913727 -150,0.49352166056632996 -151,0.4832078814506531 -152,0.5009403228759766 -153,0.5075869560241699 -154,0.5110575556755066 -155,0.5138604044914246 -156,0.5163310766220093 -157,0.5184594988822937 -158,0.5196236968040466 -159,0.5185276865959167 -160,0.5213249921798706 -161,0.520356297492981 -162,0.5251315236091614 -163,0.52463299036026 -164,0.5196038484573364 -165,0.518244206905365 -166,0.5187790989875793 -167,0.5135602355003357 -168,0.5117872953414917 -169,0.5118716955184937 -170,0.5086294412612915 -171,0.5064350962638855 -172,0.5054977536201477 -173,0.5021916627883911 -174,0.5000315308570862 -175,0.4976111054420471 -176,0.494938462972641 -177,0.4965144395828247 -178,0.4962211549282074 -179,0.49395206570625305 -180,0.49221694469451904 -181,0.4919939935207367 -182,0.48997998237609863 -183,0.4863494634628296 -184,0.4831198453903198 -185,0.4776991307735443 -186,0.4747084081172943 -187,0.47319379448890686 -188,0.4582948386669159 -189,0.48094797134399414 -190,0.4854058027267456 -191,0.48871222138404846 -192,0.4898836016654968 -193,0.49208882451057434 -194,0.48885440826416016 -195,0.4947928786277771 -196,0.49657076597213745 -197,0.4966678023338318 -198,0.49812108278274536 -199,0.4983540177345276 -200,0.498261421918869 diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.020/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.020/training_config.txt deleted file mode 100644 index d0ade76..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.020/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.020/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.020/training_log.csv deleted file mode 100644 index 6cf391a..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,3.903593063354492 -2,3.457392692565918 -3,2.8471570014953613 -4,2.2594919204711914 -5,1.9589691162109375 -6,1.7301255464553833 -7,1.556784987449646 -8,1.5196400880813599 -9,1.4173146486282349 -10,1.3284329175949097 -11,1.2010862827301025 -12,1.115606665611267 -13,1.0725276470184326 -14,1.0550142526626587 -15,1.0825785398483276 -16,1.0792574882507324 -17,1.0643119812011719 -18,1.0694539546966553 -19,1.0935721397399902 -20,1.0710829496383667 -21,1.0770384073257446 -22,1.1486220359802246 -23,1.1024301052093506 -24,1.0073139667510986 -25,0.9336976408958435 -26,0.8498474955558777 -27,0.8628523349761963 -28,0.8168367147445679 -29,0.7230870723724365 -30,0.6681948900222778 -31,0.6455386281013489 -32,0.5867748856544495 -33,0.534885048866272 -34,0.5200978517532349 -35,0.5071628093719482 -36,0.5284988284111023 -37,0.5020185708999634 -38,0.470261812210083 -39,0.44255730509757996 -40,0.42054811120033264 -41,0.41687634587287903 -42,0.4181579053401947 -43,0.4218485951423645 -44,0.42645224928855896 -45,0.42786023020744324 -46,0.42653897404670715 -47,0.42150717973709106 -48,0.4177984297275543 -49,0.40791869163513184 -50,0.405196875333786 -51,0.4024365544319153 -52,0.39885061979293823 -53,0.3950836658477783 -54,0.38980522751808167 -55,0.3744969964027405 -56,0.37628471851348877 -57,0.37609225511550903 -58,0.3785718083381653 -59,0.3763442039489746 -60,0.36417925357818604 -61,0.35784274339675903 -62,0.3609679639339447 -63,0.35621339082717896 -64,0.3545798063278198 -65,0.35251232981681824 -66,0.35075950622558594 -67,0.3510352671146393 -68,0.3526144325733185 -69,0.3525196313858032 -70,0.3499819040298462 -71,0.34949228167533875 
-72,0.348470538854599 -73,0.34641245007514954 -74,0.3456190526485443 -75,0.3476206064224243 -76,0.3478429615497589 -77,0.34823039174079895 -78,0.34891098737716675 -79,0.3466486632823944 -80,0.34205445647239685 -81,0.34201061725616455 -82,0.34226876497268677 -83,0.3426741361618042 -84,0.3413786292076111 -85,0.3414434790611267 -86,0.33753836154937744 -87,0.3360321521759033 -88,0.33408597111701965 -89,0.34024104475975037 -90,0.3685630261898041 -91,0.37072837352752686 -92,0.3730284869670868 -93,0.37688276171684265 -94,0.3783247470855713 -95,0.3780849575996399 -96,0.3770989775657654 -97,0.3688984811306 -98,0.3679825961589813 -99,0.36646315455436707 -100,0.3589998185634613 -101,0.3626347482204437 -102,0.3625587522983551 -103,0.3622335195541382 -104,0.36173099279403687 -105,0.3610178530216217 -106,0.3601015508174896 -107,0.3589678108692169 -108,0.35764700174331665 -109,0.35620754957199097 -110,0.354758620262146 -111,0.3528125286102295 -112,0.3475327491760254 -113,0.35184165835380554 -114,0.3519858121871948 -115,0.35154202580451965 -116,0.35065922141075134 -117,0.3495197296142578 -118,0.34821659326553345 -119,0.346695214509964 -120,0.3448866307735443 -121,0.3426973223686218 -122,0.3400258719921112 -123,0.33672139048576355 -124,0.3325802683830261 -125,0.3241865038871765 -126,0.32279953360557556 -127,0.31918150186538696 -128,0.31427669525146484 -129,0.31152889132499695 -130,0.3135307729244232 -131,0.31139999628067017 -132,0.3087184727191925 -133,0.3082326650619507 -134,0.30731022357940674 -135,0.30328625440597534 -136,0.3009006977081299 -137,0.2995359003543854 -138,0.29771527647972107 -139,0.2946333885192871 -140,0.2914969027042389 -141,0.2893349528312683 -142,0.2870011031627655 -143,0.2842211425304413 -144,0.2799043357372284 -145,0.2754595875740051 -146,0.27020981907844543 -147,0.26025626063346863 -148,0.25030216574668884 -149,0.24394233524799347 -150,0.23713278770446777 -151,0.22878000140190125 -152,0.22412285208702087 -153,0.22126446664333344 -154,0.21852028369903564 -155,0.21551300585269928 -156,0.20951133966445923 -157,0.20702922344207764 -158,0.20450370013713837 -159,0.20143473148345947 -160,0.19526521861553192 -161,0.1886628270149231 -162,0.1862521767616272 -163,0.18269959092140198 -164,0.1766459047794342 -165,0.17294150590896606 -166,0.17058232426643372 -167,0.1727607250213623 -168,0.17368654906749725 -169,0.17290137708187103 -170,0.17103491723537445 -171,0.16801248490810394 -172,0.16325724124908447 -173,0.15818828344345093 -174,0.1564728319644928 -175,0.1559436172246933 -176,0.1546533703804016 -177,0.15394389629364014 -178,0.15245582163333893 -179,0.1512802094221115 -180,0.15105676651000977 -181,0.15085680782794952 -182,0.15021206438541412 -183,0.14886076748371124 -184,0.14727556705474854 -185,0.14576615393161774 -186,0.14593151211738586 -187,0.14526157081127167 -188,0.1439370959997177 -189,0.1430584341287613 -190,0.1431015133857727 -191,0.14261670410633087 -192,0.14199700951576233 -193,0.14132755994796753 -194,0.1404089629650116 -195,0.1397162526845932 -196,0.13957329094409943 -197,0.13869528472423553 -198,0.1382524073123932 -199,0.1379198580980301 -200,0.1374301314353943 diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.040/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.040/training_config.txt deleted file mode 100644 index 7ba5a04..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.040/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth 
-Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.040/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.040/training_log.csv deleted file mode 100644 index d7ae8e6..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,3.7323923110961914 -2,2.535491943359375 -3,1.9318350553512573 -4,1.7511979341506958 -5,2.1690571308135986 -6,2.4020678997039795 -7,2.1512770652770996 -8,1.9578406810760498 -9,1.533918857574463 -10,1.1911944150924683 -11,1.1371313333511353 -12,1.0130773782730103 -13,0.9704388976097107 -14,0.8430973291397095 -15,0.7890205979347229 -16,0.7486925721168518 -17,0.719261646270752 -18,0.7045726180076599 -19,0.6954882144927979 -20,0.6738880276679993 -21,0.6805222034454346 -22,0.6813308596611023 -23,0.6774165034294128 -24,0.6863090395927429 -25,0.6881459355354309 -26,0.6746810674667358 -27,0.6371486783027649 -28,0.6098301410675049 
-29,0.5693537592887878 -30,0.5578895807266235 -31,0.5161844491958618 -32,0.5069202780723572 -33,0.4975198805332184 -34,0.46632227301597595 -35,0.43865323066711426 -36,0.44608911871910095 -37,0.44431427121162415 -38,0.4363994896411896 -39,0.4195713698863983 -40,0.39992254972457886 -41,0.39257729053497314 -42,0.3871491551399231 -43,0.37698668241500854 -44,0.3725813329219818 -45,0.3699132204055786 -46,0.36317387223243713 -47,0.3669027090072632 -48,0.36222848296165466 -49,0.3600107431411743 -50,0.35313329100608826 -51,0.34890255331993103 -52,0.3478706479072571 -53,0.34593167901039124 -54,0.34433144330978394 -55,0.3403784930706024 -56,0.3357291519641876 -57,0.3435056805610657 -58,0.34427475929260254 -59,0.3417699337005615 -60,0.3356377184391022 -61,0.3247806429862976 -62,0.30405184626579285 -63,0.28724318742752075 -64,0.2839128077030182 -65,0.28240710496902466 -66,0.2911953926086426 -67,0.28949156403541565 -68,0.2861670255661011 -69,0.284209281206131 -70,0.283344030380249 -71,0.27478402853012085 -72,0.2619704306125641 -73,0.2541889548301697 -74,0.25541356205940247 -75,0.25741100311279297 -76,0.2530311048030853 -77,0.2405872941017151 -78,0.23808951675891876 -79,0.23762695491313934 -80,0.23514015972614288 -81,0.23093122243881226 -82,0.22021260857582092 -83,0.21490025520324707 -84,0.2121829092502594 -85,0.2041694074869156 -86,0.1976567804813385 -87,0.19195161759853363 -88,0.18867337703704834 -89,0.18607300519943237 -90,0.1813371479511261 -91,0.17789635062217712 -92,0.17570316791534424 -93,0.17048987746238708 -94,0.19037120044231415 -95,0.19848370552062988 -96,0.20286518335342407 -97,0.20698657631874084 -98,0.2080669403076172 -99,0.2041770964860916 -100,0.19749651849269867 -101,0.1867242157459259 -102,0.17758451402187347 -103,0.181895911693573 -104,0.18310768902301788 -105,0.1801558881998062 -106,0.17647026479244232 -107,0.1720864474773407 -108,0.17373637855052948 -109,0.17469589412212372 -110,0.1719128042459488 -111,0.168778195977211 -112,0.1654103398323059 -113,0.16195717453956604 -114,0.16235513985157013 -115,0.16282199323177338 -116,0.15783871710300446 -117,0.15492889285087585 -118,0.1554182767868042 -119,0.15519629418849945 -120,0.1540839672088623 -121,0.15326859056949615 -122,0.15138642489910126 -123,0.1528278887271881 -124,0.15359368920326233 -125,0.15055398643016815 -126,0.14706489443778992 -127,0.144182026386261 -128,0.14131054282188416 -129,0.13980793952941895 -130,0.1402544230222702 -131,0.1399432122707367 -132,0.14178597927093506 -133,0.1389961987733841 -134,0.13778844475746155 -135,0.13590852916240692 -136,0.13537034392356873 -137,0.13357701897621155 -138,0.13363997638225555 -139,0.13163959980010986 -140,0.13196343183517456 -141,0.1292020082473755 -142,0.12873287498950958 -143,0.1279287040233612 -144,0.128829225897789 -145,0.1258808821439743 -146,0.12526078522205353 -147,0.12432391941547394 -148,0.12494143843650818 -149,0.12408635020256042 -150,0.1234072595834732 -151,0.12245821952819824 -152,0.12335696816444397 -153,0.12277809530496597 -154,0.12301969528198242 -155,0.1219860166311264 -156,0.12183218449354172 -157,0.1213250532746315 -158,0.12139493972063065 -159,0.12111556529998779 -160,0.12083634734153748 -161,0.12069443613290787 -162,0.12038543075323105 -163,0.12005776166915894 -164,0.12009329348802567 -165,0.1197114810347557 -166,0.11948554217815399 -167,0.119443379342556 -168,0.11892090737819672 -169,0.1189330592751503 -170,0.11887889355421066 -171,0.1183595061302185 -172,0.11892096698284149 -173,0.11866891384124756 -174,0.11849001049995422 -175,0.11815447360277176 
-176,0.11769348382949829 -177,0.11803056299686432 -178,0.11751874536275864 -179,0.1180587038397789 -180,0.11726740002632141 -181,0.11730358004570007 -182,0.11669653654098511 -183,0.11651931703090668 -184,0.11635690182447433 -185,0.11610626429319382 -186,0.11642970144748688 -187,0.11705024540424347 -188,0.11645334959030151 -189,0.11717185378074646 -190,0.11558307707309723 -191,0.1171857938170433 -192,0.11674302816390991 -193,0.11604871600866318 -194,0.11547132581472397 -195,0.11701779067516327 -196,0.11766012012958527 -197,0.11580950021743774 -198,0.11688949167728424 -199,0.11622247844934464 -200,0.11573082208633423 diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.050/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.050/training_config.txt deleted file mode 100644 index 412f0f6..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.050/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.050/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.050/training_log.csv deleted file mode 100644 index c2f8d8b..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,3.612351179122925 -2,2.4963791370391846 -3,1.5934194326400757 -4,1.3473695516586304 -5,1.0536189079284668 -6,1.207385540008545 -7,1.4886800050735474 -8,1.5244691371917725 -9,1.5979914665222168 -10,1.5880401134490967 -11,1.3085048198699951 -12,1.0820896625518799 -13,0.8495988249778748 -14,0.6851304769515991 -15,0.5563355088233948 -16,0.4888017475605011 -17,0.4441712498664856 -18,0.43495917320251465 -19,0.4161086678504944 -20,0.39155179262161255 -21,0.37342289090156555 -22,0.3663838803768158 -23,0.36003032326698303 -24,0.3548450767993927 -25,0.3487318754196167 -26,0.3430083990097046 -27,0.3372791111469269 -28,0.3311948776245117 -29,0.32491546869277954 -30,0.3184869587421417 -31,0.3120383620262146 -32,0.305783748626709 -33,0.299933522939682 -34,0.2945919632911682 -35,0.28979113698005676 -36,0.2856196165084839 -37,0.2819584012031555 -38,0.2786679267883301 -39,0.27559876441955566 -40,0.27263885736465454 -41,0.26969394087791443 -42,0.2667207419872284 -43,0.263704776763916 -44,0.26064518094062805 -45,0.2575497031211853 -46,0.2543642818927765 -47,0.251034677028656 -48,0.24748113751411438 -49,0.2435804307460785 -50,0.23917432129383087 -51,0.2342233508825302 -52,0.22939155995845795 -53,0.22088442742824554 -54,0.21597003936767578 -55,0.21850410103797913 -56,0.21301710605621338 -57,0.21454815566539764 -58,0.21403948962688446 -59,0.2117394357919693 -60,0.20813842117786407 -61,0.2027948498725891 -62,0.19392265379428864 -63,0.19500872492790222 -64,0.1904148906469345 -65,0.18076753616333008 -66,0.17997804284095764 -67,0.17600734531879425 -68,0.16898822784423828 -69,0.169520303606987 -70,0.1655246764421463 
-71,0.161051407456398 -72,0.15791374444961548 -73,0.15446937084197998 -74,0.15111662447452545 -75,0.14824753999710083 -76,0.1471020132303238 -77,0.14320814609527588 -78,0.14078088104724884 -79,0.1411173939704895 -80,0.13654057681560516 -81,0.13534009456634521 -82,0.13482461869716644 -83,0.1320399045944214 -84,0.129648819565773 -85,0.13146133720874786 -86,0.12660743296146393 -87,0.12682580947875977 -88,0.1276531219482422 -89,0.12553197145462036 -90,0.12479197233915329 -91,0.12621046602725983 -92,0.12354817241430283 -93,0.11885293573141098 -94,0.12059293687343597 -95,0.1183353066444397 -96,0.11708249896764755 -97,0.11691765487194061 -98,0.11633419245481491 -99,0.114724300801754 -100,0.11590023338794708 -101,0.11416757106781006 -102,0.11356769502162933 -103,0.11307063698768616 -104,0.11084793508052826 -105,0.11127885431051254 -106,0.11044444888830185 -107,0.10970709472894669 -108,0.10868123173713684 -109,0.10912954807281494 -110,0.10865961015224457 -111,0.10705497860908508 -112,0.10713979601860046 -113,0.10667764395475388 -114,0.10584284365177155 -115,0.10571305453777313 -116,0.10517314821481705 -117,0.10473707318305969 -118,0.10456312447786331 -119,0.10414014011621475 -120,0.10349155217409134 -121,0.10337328165769577 -122,0.1027732715010643 -123,0.10241185128688812 -124,0.1021328866481781 -125,0.10154236853122711 -126,0.10160288214683533 -127,0.10080257058143616 -128,0.10048000514507294 -129,0.1000688225030899 -130,0.09988997876644135 -131,0.09919799864292145 -132,0.09953206032514572 -133,0.09884369373321533 -134,0.0984199121594429 -135,0.0989515483379364 -136,0.09793195873498917 -137,0.09793736785650253 -138,0.09725731611251831 -139,0.09765831381082535 -140,0.09675636142492294 -141,0.0971052497625351 -142,0.096748948097229 -143,0.09620185941457748 -144,0.09683864563703537 -145,0.0954737737774849 -146,0.09718823432922363 -147,0.09675006568431854 -148,0.09521942585706711 -149,0.09646908193826675 -150,0.09604519605636597 -151,0.09497801959514618 -152,0.09552798420190811 -153,0.09537788480520248 -154,0.09392866492271423 -155,0.09484797716140747 -156,0.09461963921785355 -157,0.09315896779298782 -158,0.09347132593393326 -159,0.09255224466323853 -160,0.0922960564494133 -161,0.0926903486251831 -162,0.09212515503168106 -163,0.09216354042291641 -164,0.09153395891189575 -165,0.09385261684656143 -166,0.09191253036260605 -167,0.09269707649946213 -168,0.09266095608472824 -169,0.09229622781276703 -170,0.0926264226436615 -171,0.0927659273147583 -172,0.09157516807317734 -173,0.09060820937156677 -174,0.09063369035720825 -175,0.09170613437891006 -176,0.09117858111858368 -177,0.08982545137405396 -178,0.09046147763729095 -179,0.09107673168182373 -180,0.09127630293369293 -181,0.09078624099493027 -182,0.08967884629964828 -183,0.08966311067342758 -184,0.08999454230070114 -185,0.08928848803043365 -186,0.08959274739027023 -187,0.09018062800168991 -188,0.08925081789493561 -189,0.08935074508190155 -190,0.08906304091215134 -191,0.09023429453372955 -192,0.08910657465457916 -193,0.08969481289386749 -194,0.09024147689342499 -195,0.08869876712560654 -196,0.08844279497861862 -197,0.08778057247400284 -198,0.09011486917734146 -199,0.08913744240999222 -200,0.08792754262685776 diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.080/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.080/training_config.txt deleted file mode 100644 index b39a1fd..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.080/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.080/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.080/training_log.csv deleted file mode 100644 index 4db99cc..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,3.419178009033203 -2,1.6703035831451416 -3,1.5943325757980347 -4,1.3731248378753662 -5,1.2581024169921875 -6,0.7770068645477295 -7,0.5376855134963989 -8,0.44320228695869446 -9,0.4741661846637726 -10,0.46590104699134827 -11,0.4292841851711273 -12,0.42441171407699585 -13,0.41203707456588745 -14,0.403369665145874 -15,0.3874838054180145 -16,0.3632272183895111 -17,0.341977596282959 -18,0.3218798041343689 -19,0.29715925455093384 -20,0.2715689241886139 -21,0.24421830475330353 -22,0.20931683480739594 -23,0.19255177676677704 -24,0.1932240128517151 
-25,0.19109086692333221 -26,0.1840924471616745 -27,0.17264306545257568 -28,0.15701936185359955 -29,0.14165431261062622 -30,0.15022306144237518 -31,0.15214675664901733 -32,0.1488388329744339 -33,0.14111526310443878 -34,0.13370223343372345 -35,0.12935326993465424 -36,0.13122469186782837 -37,0.12937693297863007 -38,0.12413480877876282 -39,0.12038340419530869 -40,0.11884630471467972 -41,0.11589612811803818 -42,0.11254197359085083 -43,0.10926966369152069 -44,0.1099669486284256 -45,0.10920098423957825 -46,0.10661851614713669 -47,0.10421338677406311 -48,0.10373781621456146 -49,0.10373051464557648 -50,0.10126226395368576 -51,0.10046708583831787 -52,0.10086172819137573 -53,0.09999646246433258 -54,0.09788379818201065 -55,0.09857109189033508 -56,0.0983087569475174 -57,0.09587684273719788 -58,0.09539543837308884 -59,0.09548558294773102 -60,0.09365972131490707 -61,0.0910557433962822 -62,0.09255766123533249 -63,0.09259849041700363 -64,0.0907040685415268 -65,0.09065978974103928 -66,0.09133996814489365 -67,0.09018048644065857 -68,0.08972930163145065 -69,0.09086407721042633 -70,0.08970893174409866 -71,0.08723355084657669 -72,0.09015130251646042 -73,0.08945140242576599 -74,0.08691415190696716 -75,0.08701160550117493 -76,0.08565939962863922 -77,0.08675390481948853 -78,0.08576995879411697 -79,0.08620280027389526 -80,0.08660783618688583 -81,0.08483967185020447 -82,0.08695647865533829 -83,0.08651058375835419 -84,0.08501064032316208 -85,0.08598237484693527 -86,0.08455117791891098 -87,0.08502265810966492 -88,0.0844205766916275 -89,0.08471369743347168 -90,0.08517123758792877 -91,0.08349757641553879 -92,0.08532831072807312 -93,0.08375466614961624 -94,0.08477264642715454 -95,0.08563568443059921 -96,0.08423199504613876 -97,0.08424539864063263 -98,0.08407414704561234 -99,0.08438196778297424 -100,0.08522052317857742 -101,0.0838756188750267 -102,0.08310531079769135 -103,0.08343721926212311 -104,0.08265400677919388 -105,0.08265747129917145 -106,0.08226631581783295 -107,0.08221769332885742 -108,0.08211681991815567 -109,0.08192244917154312 -110,0.0825042799115181 -111,0.08210139721632004 -112,0.08338610827922821 -113,0.08158882707357407 -114,0.08257275819778442 -115,0.08207886666059494 -116,0.08241172879934311 -117,0.08157205581665039 -118,0.08346231281757355 -119,0.08304435014724731 -120,0.08184739947319031 -121,0.08118521422147751 -122,0.08128386735916138 -123,0.08117905259132385 -124,0.08115368336439133 -125,0.0822071060538292 -126,0.08130569756031036 -127,0.08106941729784012 -128,0.08311822265386581 -129,0.0815405622124672 -130,0.08507606387138367 -131,0.08598286658525467 -132,0.08258435875177383 -133,0.08469512313604355 -134,0.08565117418766022 -135,0.08181801438331604 -136,0.08671247214078903 -137,0.08923918753862381 -138,0.0866539403796196 -139,0.08165432512760162 -140,0.08639572560787201 -141,0.08847962319850922 -142,0.08434965461492538 -143,0.08487610518932343 -144,0.08743375539779663 -145,0.08647074550390244 -146,0.08297106623649597 -147,0.0856141448020935 -148,0.08484329283237457 -149,0.08262341469526291 -150,0.08498645573854446 -151,0.08396445214748383 -152,0.08196330815553665 -153,0.08271375298500061 -154,0.08222024887800217 -155,0.08178400248289108 -156,0.08206485956907272 -157,0.08109264075756073 -158,0.08239196985960007 -159,0.08093208819627762 -160,0.0811598151922226 -161,0.08209806680679321 -162,0.08052172511816025 -163,0.08060465008020401 -164,0.08053484559059143 -165,0.08168024569749832 -166,0.08145564049482346 -167,0.08068739622831345 -168,0.08110011368989944 -169,0.08066826313734055 
-170,0.08090054988861084 -171,0.08180723339319229 -172,0.08034897595643997 -173,0.0817285031080246 -174,0.08211279660463333 -175,0.08394096791744232 -176,0.08237791061401367 -177,0.08226580172777176 -178,0.08249939978122711 -179,0.08049460500478745 -180,0.0807402953505516 -181,0.08140530437231064 -182,0.07998945564031601 -183,0.08424723893404007 -184,0.08434179425239563 -185,0.08039935678243637 -186,0.0843963474035263 -187,0.08455488830804825 -188,0.07949654012918472 -189,0.08512463420629501 -190,0.08596425503492355 -191,0.0825038030743599 -192,0.0839724987745285 -193,0.08546009659767151 -194,0.0825190469622612 -195,0.08164364099502563 -196,0.08284784108400345 -197,0.08185198158025742 -198,0.0824718028306961 -199,0.08305442333221436 -200,0.08130885660648346 diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.100/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.100/training_config.txt deleted file mode 100644 index a3af097..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.100/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.100/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.100/training_log.csv deleted file mode 100644 index f84f933..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,3.6563639640808105 -2,1.682649850845337 -3,3.4530885219573975 -4,3.297663688659668 -5,2.4659841060638428 -6,1.4573732614517212 -7,1.1257236003875732 -8,0.8302134275436401 -9,0.7848021984100342 -10,0.6974580883979797 -11,0.6942741274833679 -12,0.6835929155349731 -13,0.6698393225669861 -14,0.6610105633735657 -15,0.6446107029914856 -16,0.6318349838256836 -17,0.6142277717590332 -18,0.5916954874992371 -19,0.5538696646690369 -20,0.5157958269119263 -21,0.4476734399795532 -22,0.38439303636550903 -23,0.3543162941932678 -24,0.32728636264801025 -25,0.3019813299179077 -26,0.2668279707431793 -27,0.2381678819656372 -28,0.2171739637851715 -29,0.2038930058479309 -30,0.18824225664138794 -31,0.16914013028144836 -32,0.1517806500196457 -33,0.16028626263141632 -34,0.1604776680469513 -35,0.15491406619548798 -36,0.1435731202363968 -37,0.12615780532360077 -38,0.11881227046251297 -39,0.123050756752491 -40,0.1270367056131363 -41,0.1277918517589569 -42,0.12527599930763245 -43,0.11988456547260284 -44,0.11227276921272278 -45,0.11011403799057007 -46,0.11099041998386383 -47,0.10786668956279755 -48,0.10110985487699509 -49,0.09372589737176895 -50,0.09446537494659424 -51,0.09799593687057495 -52,0.09879957139492035 -53,0.09691852331161499 -54,0.09478973597288132 -55,0.09243771433830261 -56,0.09068377315998077 -57,0.09205537289381027 -58,0.09315748512744904 -59,0.09273409098386765 -60,0.0910736545920372 -61,0.08990073204040527 -62,0.09028158336877823 -63,0.08986573666334152 -64,0.0883973240852356 -65,0.08861357718706131 -66,0.08822938054800034 -67,0.08719094097614288 -68,0.08702532947063446 -69,0.08745010942220688 -70,0.08737198263406754 
-71,0.08673824369907379 -72,0.08641985058784485 -73,0.08620484173297882 -74,0.08530744165182114 -75,0.08553629368543625 -76,0.08638608455657959 -77,0.08521383255720139 -78,0.08438096195459366 -79,0.08529548346996307 -80,0.08406579494476318 -81,0.08407793194055557 -82,0.08381840586662292 -83,0.0834948867559433 -84,0.08356400579214096 -85,0.08407215774059296 -86,0.08393196016550064 -87,0.08359077572822571 -88,0.08312152326107025 -89,0.08401144295930862 -90,0.08396704494953156 -91,0.08270830661058426 -92,0.08400212973356247 -93,0.0822480320930481 -94,0.08257357776165009 -95,0.08164355158805847 -96,0.08444415032863617 -97,0.08386583626270294 -98,0.0818147361278534 -99,0.08271916955709457 -100,0.0816514641046524 -101,0.08491350710391998 -102,0.08320003002882004 -103,0.08342207968235016 -104,0.08450698852539062 -105,0.08210506290197372 -106,0.08522220700979233 -107,0.08543099462985992 -108,0.08263371884822845 -109,0.08464286476373672 -110,0.08620654046535492 -111,0.08387161791324615 -112,0.08270334452390671 -113,0.08348360657691956 -114,0.08215546607971191 -115,0.08267930150032043 -116,0.08148252218961716 -117,0.0821424275636673 -118,0.0811886265873909 -119,0.08364802598953247 -120,0.08386580646038055 -121,0.08076338469982147 -122,0.08543073385953903 -123,0.08584306389093399 -124,0.08058430999517441 -125,0.08575408160686493 -126,0.08797390758991241 -127,0.08544366806745529 -128,0.08145052194595337 -129,0.08715581893920898 -130,0.08794734627008438 -131,0.08614236861467361 -132,0.08407282829284668 -133,0.08628077059984207 -134,0.08570221811532974 -135,0.08158799260854721 -136,0.08651290833950043 -137,0.08809245377779007 -138,0.0839899331331253 -139,0.08309141546487808 -140,0.08661476522684097 -141,0.08573730289936066 -142,0.08144127577543259 -143,0.08549036085605621 -144,0.08749020099639893 -145,0.08346635103225708 -146,0.08298013359308243 -147,0.0861562043428421 -148,0.08461710065603256 -149,0.0820460394024849 -150,0.08204265683889389 -151,0.08139491081237793 -152,0.08339357376098633 -153,0.08450419455766678 -154,0.08380396664142609 -155,0.08135084062814713 -156,0.08393177390098572 -157,0.08362013101577759 -158,0.08154136687517166 -159,0.08453211188316345 -160,0.08516505360603333 -161,0.08193029463291168 -162,0.08255625516176224 -163,0.08213027566671371 -164,0.08103007078170776 -165,0.08251512795686722 -166,0.08086477965116501 -167,0.08073725551366806 -168,0.0798107236623764 -169,0.08006145060062408 -170,0.07959020137786865 -171,0.0806116908788681 -172,0.0803106501698494 -173,0.08107250928878784 -174,0.07939854264259338 -175,0.08164120465517044 -176,0.08086729049682617 -177,0.08170024305582047 -178,0.08242808282375336 -179,0.07999443262815475 -180,0.08161386102437973 -181,0.08189528435468674 -182,0.08112075924873352 -183,0.08104882389307022 -184,0.0792253166437149 -185,0.08157899230718613 -186,0.08093738555908203 -187,0.08134761452674866 -188,0.080221027135849 -189,0.08185800164937973 -190,0.08303073048591614 -191,0.08053598552942276 -192,0.08169656991958618 -193,0.08080348372459412 -194,0.08252923935651779 -195,0.08373855799436569 -196,0.08070754259824753 -197,0.08094974607229233 -198,0.08147668093442917 -199,0.08025535196065903 -200,0.07867201417684555 diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.125/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.125/training_config.txt deleted file mode 100644 index 792bed0..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.125/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.125/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.125/training_log.csv deleted file mode 100644 index fa72d75..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,4.095664978027344 -2,1.7805324792861938 -3,4.8860554695129395 -4,3.7339413166046143 -5,2.415883779525757 -6,1.7577773332595825 -7,0.8458391427993774 -8,0.546190619468689 -9,0.4876641035079956 -10,0.5163665413856506 -11,0.4821946918964386 -12,0.44759100675582886 -13,0.4117186665534973 -14,0.3771078586578369 -15,0.3472077548503876 -16,0.3192114531993866 -17,0.2910940945148468 -18,0.26645803451538086 -19,0.2429882138967514 -20,0.21521273255348206 -21,0.20455597341060638 -22,0.20095090568065643 -23,0.19963766634464264 -24,0.1991938054561615 
-25,0.19895242154598236 -26,0.19611525535583496 -27,0.18973997235298157 -28,0.18013685941696167 -29,0.17008231580257416 -30,0.16373799741268158 -31,0.15996141731739044 -32,0.1580546498298645 -33,0.157029926776886 -34,0.1560744196176529 -35,0.1546565145254135 -36,0.15241767466068268 -37,0.14925771951675415 -38,0.14536581933498383 -39,0.14111949503421783 -40,0.13695374131202698 -41,0.1333199143409729 -42,0.13116823136806488 -43,0.13072367012500763 -44,0.12893648445606232 -45,0.12533695995807648 -46,0.12176293879747391 -47,0.12043493986129761 -48,0.11957075446844101 -49,0.11843836307525635 -50,0.11648479849100113 -51,0.11359257251024246 -52,0.11058980226516724 -53,0.1092883050441742 -54,0.1087360605597496 -55,0.1064937487244606 -56,0.10518262535333633 -57,0.1048884466290474 -58,0.10262016206979752 -59,0.1014794260263443 -60,0.10127326101064682 -61,0.10010677576065063 -62,0.09834180027246475 -63,0.09850849956274033 -64,0.09665509313344955 -65,0.0963427871465683 -66,0.09580275416374207 -67,0.09472264349460602 -68,0.09436897188425064 -69,0.09328451007604599 -70,0.09256957471370697 -71,0.09220980852842331 -72,0.09161019325256348 -73,0.09111683070659637 -74,0.09071487933397293 -75,0.08997463434934616 -76,0.08891329914331436 -77,0.08962425589561462 -78,0.08849736303091049 -79,0.08829433470964432 -80,0.08829182386398315 -81,0.08743100613355637 -82,0.08753017336130142 -83,0.0870525911450386 -84,0.08672817051410675 -85,0.08706063777208328 -86,0.08645568042993546 -87,0.08645258843898773 -88,0.08587493002414703 -89,0.08635514229536057 -90,0.08618350327014923 -91,0.08525090664625168 -92,0.0851074755191803 -93,0.08487974107265472 -94,0.08452791720628738 -95,0.08445735275745392 -96,0.08448575437068939 -97,0.08442241698503494 -98,0.08441808819770813 -99,0.08386020362377167 -100,0.08441787958145142 -101,0.08351758122444153 -102,0.08477132767438889 -103,0.08470868319272995 -104,0.08346886932849884 -105,0.08509516716003418 -106,0.08380863815546036 -107,0.08503671735525131 -108,0.08555049449205399 -109,0.08333656191825867 -110,0.08497819304466248 -111,0.0850677415728569 -112,0.08266882598400116 -113,0.08386129140853882 -114,0.08297715336084366 -115,0.0840027779340744 -116,0.08345711976289749 -117,0.08384381234645844 -118,0.08450520783662796 -119,0.08263351023197174 -120,0.08434387296438217 -121,0.08387687802314758 -122,0.08248091489076614 -123,0.08355055004358292 -124,0.08232168108224869 -125,0.08303870260715485 -126,0.08258876204490662 -127,0.08344534039497375 -128,0.0832255557179451 -129,0.08242885768413544 -130,0.08185485005378723 -131,0.08333670347929001 -132,0.08279857039451599 -133,0.08299145847558975 -134,0.08227155357599258 -135,0.08392583578824997 -136,0.08448246866464615 -137,0.08211087435483932 -138,0.0849820226430893 -139,0.08501696586608887 -140,0.08201368153095245 -141,0.08504879474639893 -142,0.08547443896532059 -143,0.08456989377737045 -144,0.0836540162563324 -145,0.08568496257066727 -146,0.08434681594371796 -147,0.08319008350372314 -148,0.08356692641973495 -149,0.08422017842531204 -150,0.08402426540851593 -151,0.08198509365320206 -152,0.08188465237617493 -153,0.08367147296667099 -154,0.08254946023225784 -155,0.08294893801212311 -156,0.08270503580570221 -157,0.08305253833532333 -158,0.08361338078975677 -159,0.08125696331262589 -160,0.08437199145555496 -161,0.08407364040613174 -162,0.08170302957296371 -163,0.08222191780805588 -164,0.08136448264122009 -165,0.081566222012043 -166,0.08104061335325241 -167,0.08155842870473862 -168,0.08205188065767288 -169,0.08096210658550262 -170,0.08148784935474396 
-171,0.08082762360572815 -172,0.08308339864015579 -173,0.08255477994680405 -174,0.0813416838645935 -175,0.08058138191699982 -176,0.08181150257587433 -177,0.08177192509174347 -178,0.08364127576351166 -179,0.0824742242693901 -180,0.0824945867061615 -181,0.08277931809425354 -182,0.08263665437698364 -183,0.0838431864976883 -184,0.08253896236419678 -185,0.08207124471664429 -186,0.08378276228904724 -187,0.08200693875551224 -188,0.08278229087591171 -189,0.08216235041618347 -190,0.08214545994997025 -191,0.08522124588489532 -192,0.08356976509094238 -193,0.0819060206413269 -194,0.08331896364688873 -195,0.08081706613302231 -196,0.08484630286693573 -197,0.08550697565078735 -198,0.08254247903823853 -199,0.08319857716560364 -200,0.0836077481508255 diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.160/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.160/training_config.txt deleted file mode 100644 index 62d38af..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.160/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.160/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.160/training_log.csv deleted file mode 100644 index 4f84e4f..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,5.170135021209717 -2,1.0293694734573364 -3,0.9425896406173706 -4,1.215986967086792 -5,0.6653750538825989 -6,0.5899087190628052 -7,0.5179368257522583 -8,0.4776137173175812 -9,0.41760683059692383 -10,0.34499818086624146 -11,0.2714305818080902 -12,0.2313881814479828 -13,0.21957607567310333 -14,0.20803435146808624 -15,0.19097904860973358 -16,0.17757150530815125 -17,0.1661844551563263 -18,0.15965117514133453 -19,0.14548811316490173 -20,0.15920399129390717 -21,0.164178729057312 -22,0.1655188649892807 -23,0.1638672947883606 -24,0.1594279259443283 -25,0.15340009331703186 -26,0.14569176733493805 -27,0.13654036819934845 -28,0.1257438361644745 -29,0.11296765506267548 -30,0.11470222473144531 -31,0.11984436213970184 -32,0.12217012047767639 -33,0.12323692440986633 -34,0.12330091744661331 -35,0.12199980020523071 -36,0.11946611851453781 -37,0.11583562940359116 -38,0.1113339364528656 -39,0.10598286241292953 -40,0.09987228363752365 -41,0.09719900786876678 -42,0.10071973502635956 -43,0.10259098559617996 -44,0.1022416204214096 -45,0.09990973770618439 -46,0.0958491787314415 -47,0.09111975878477097 -48,0.0915803611278534 -49,0.09306570887565613 -50,0.09370657801628113 -51,0.09322476387023926 -52,0.09198407828807831 -53,0.08994118869304657 -54,0.08759744465351105 -55,0.08733464777469635 -56,0.08898240327835083 -57,0.08915816992521286 -58,0.08665178716182709 -59,0.08824999630451202 -60,0.08844725787639618 -61,0.0866357833147049 -62,0.08702484518289566 -63,0.08576900511980057 -64,0.08495878428220749 -65,0.08552172034978867 -66,0.08546292036771774 -67,0.08494245260953903 -68,0.0852101594209671 -69,0.08613257110118866 
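[Editor's aside: the "Training Cases" table repeated in each deleted config lists rows of [theta0, omega0, alpha0, desired_theta]. The repository's actual loader is not shown in this diff; as a hedged illustration, such rows map onto a batched state tensor roughly as follows.]

import math
import torch

# Illustrative only: row values mirror entries from the configs above, e.g.
# pi/6 initial offsets, full-turn setpoints, and initial-spin cases.
cases = torch.tensor([
    [math.pi / 6, 0.0, 0.0, 0.0],   # 30 degree initial offset, upright setpoint
    [0.0, 2 * math.pi, 0.0, 0.0],   # initial spin of one revolution per second
    [0.0, 0.0, 0.0, 2 * math.pi],   # commanded full revolution from rest
])
theta0, omega0, alpha0 = cases[:, 0], cases[:, 1], cases[:, 2]
desired_theta = cases[:, 3]
# A simulated trajectory shaped [batch_size, t_points, state_dim] would carry
# theta at index 0 and desired_theta at index 3, matching the loss_fn wrapper
# reproduced in the configs above.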
-70,0.08517161756753922 -71,0.08417832106351852 -72,0.0854429081082344 -73,0.08498700708150864 -74,0.08439096063375473 -75,0.08420442044734955 -76,0.0843198373913765 -77,0.08361461013555527 -78,0.08365509659051895 -79,0.08357475697994232 -80,0.08337731659412384 -81,0.08330214023590088 -82,0.08368325233459473 -83,0.08307887613773346 -84,0.08293303102254868 -85,0.08324781060218811 -86,0.08335412293672562 -87,0.08284494280815125 -88,0.08370473980903625 -89,0.08281046897172928 -90,0.08279009163379669 -91,0.08319325000047684 -92,0.0829467698931694 -93,0.08283808827400208 -94,0.08378253132104874 -95,0.08219970762729645 -96,0.08301408588886261 -97,0.08224016427993774 -98,0.08296972513198853 -99,0.08228636533021927 -100,0.08337663114070892 -101,0.08370713144540787 -102,0.08216194063425064 -103,0.08466129004955292 -104,0.08427223563194275 -105,0.08281224220991135 -106,0.0840863287448883 -107,0.0859018936753273 -108,0.084769606590271 -109,0.08192166686058044 -110,0.08393175899982452 -111,0.08305332064628601 -112,0.08280471712350845 -113,0.08308098465204239 -114,0.08228963613510132 -115,0.08285187184810638 -116,0.08290364593267441 -117,0.08188975602388382 -118,0.08312123268842697 -119,0.08151405304670334 -120,0.08335414528846741 -121,0.08301755040884018 -122,0.08277341723442078 -123,0.08383654803037643 -124,0.08327271044254303 -125,0.08128082007169724 -126,0.08255466818809509 -127,0.08135668188333511 -128,0.08407092094421387 -129,0.08399634808301926 -130,0.08370433002710342 -131,0.0843421220779419 -132,0.0823436975479126 -133,0.0830005630850792 -134,0.08178491145372391 -135,0.08385555446147919 -136,0.08364633470773697 -137,0.08277593553066254 -138,0.08283772319555283 -139,0.08110911399126053 -140,0.08189273625612259 -141,0.08246374130249023 -142,0.081562340259552 -143,0.08260967582464218 -144,0.08145260065793991 -145,0.08325411379337311 -146,0.08280689269304276 -147,0.08187534660100937 -148,0.08474510163068771 -149,0.08221542835235596 -150,0.08393722772598267 -151,0.08544410765171051 -152,0.08253446966409683 -153,0.0813356339931488 -154,0.0807698518037796 -155,0.08103430271148682 -156,0.08040080964565277 -157,0.08089447766542435 -158,0.08160009980201721 -159,0.0825074166059494 -160,0.0797625482082367 -161,0.08492795377969742 -162,0.08520867675542831 -163,0.08144430071115494 -164,0.08360860496759415 -165,0.08313537389039993 -166,0.08081336319446564 -167,0.08543164283037186 -168,0.08359238505363464 -169,0.08117134869098663 -170,0.08305046707391739 -171,0.08072303235530853 -172,0.08046098053455353 -173,0.08076484501361847 -174,0.08199708163738251 -175,0.07991287112236023 -176,0.08462245762348175 -177,0.08390161395072937 -178,0.07984484732151031 -179,0.0799265205860138 -180,0.080972820520401 -181,0.08029784262180328 -182,0.08052351325750351 -183,0.08099893480539322 -184,0.07929995656013489 -185,0.08107292652130127 -186,0.08063376694917679 -187,0.08074472844600677 -188,0.0800761803984642 -189,0.08187062293291092 -190,0.07948295772075653 -191,0.08372602611780167 -192,0.08460218459367752 -193,0.07978836447000504 -194,0.08406941592693329 -195,0.08603247255086899 -196,0.08119571208953857 -197,0.08307749032974243 -198,0.08612953126430511 -199,0.0832892432808876 -200,0.07968327403068542 diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.200/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.200/training_config.txt deleted file mode 100644 index 2d63dca..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.200/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base 
controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.200/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.200/training_log.csv deleted file mode 100644 index 1807e37..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,7.8574395179748535 -2,1.7517822980880737 -3,3.0827555656433105 -4,0.9985743761062622 -5,0.537251889705658 -6,0.45048391819000244 -7,0.41220593452453613 -8,0.3774106502532959 -9,0.33838921785354614 -10,0.2991445064544678 -11,0.259488046169281 -12,0.22028027474880219 -13,0.19860662519931793 -14,0.18364223837852478 -15,0.173497274518013 -16,0.16669949889183044 -17,0.16886435449123383 -18,0.17144830524921417 -19,0.17199839651584625 -20,0.17004689574241638 -21,0.16573494672775269 -22,0.159626767039299 -23,0.15255172550678253 
-24,0.1452314704656601 -25,0.14160597324371338 -26,0.14066946506500244 -27,0.13976651430130005 -28,0.1385127454996109 -29,0.13683269917964935 -30,0.1347236931324005 -31,0.13217873871326447 -32,0.12916257977485657 -33,0.12606607377529144 -34,0.12343083322048187 -35,0.12219846248626709 -36,0.1209983378648758 -37,0.11881925910711288 -38,0.1157289370894432 -39,0.11300835758447647 -40,0.11218798905611038 -41,0.11200474947690964 -42,0.11132209748029709 -43,0.11001067608594894 -44,0.10832607001066208 -45,0.10683754831552505 -46,0.10490458458662033 -47,0.10266505181789398 -48,0.10157846659421921 -49,0.1014140397310257 -50,0.09981905668973923 -51,0.09789860248565674 -52,0.09709680080413818 -53,0.09681165963411331 -54,0.0960020199418068 -55,0.09497261047363281 -56,0.09387852996587753 -57,0.09276121109724045 -58,0.0924435406923294 -59,0.09239214658737183 -60,0.09118716418743134 -61,0.09124886989593506 -62,0.09134574979543686 -63,0.09109623730182648 -64,0.08996996283531189 -65,0.08986356109380722 -66,0.08951681852340698 -67,0.08989456295967102 -68,0.08874586969614029 -69,0.08870668709278107 -70,0.08850348740816116 -71,0.08821079134941101 -72,0.08740676939487457 -73,0.08731550723314285 -74,0.08704742789268494 -75,0.08669685572385788 -76,0.08689377456903458 -77,0.0863192230463028 -78,0.08605615794658661 -79,0.08669045567512512 -80,0.08681733906269073 -81,0.08689762651920319 -82,0.08628346771001816 -83,0.08557022362947464 -84,0.08600917458534241 -85,0.08504058420658112 -86,0.0850776880979538 -87,0.08553317934274673 -88,0.08464568853378296 -89,0.08429964631795883 -90,0.0844673216342926 -91,0.08380802720785141 -92,0.08399877697229385 -93,0.08398895710706711 -94,0.08481849730014801 -95,0.08484237641096115 -96,0.08359505236148834 -97,0.08503653854131699 -98,0.08431482315063477 -99,0.08400445431470871 -100,0.0849563404917717 -101,0.0843823179602623 -102,0.08406459540128708 -103,0.08360961824655533 -104,0.08409148454666138 -105,0.08383096009492874 -106,0.08326707035303116 -107,0.08531635999679565 -108,0.08356159925460815 -109,0.08655279129743576 -110,0.08768072724342346 -111,0.08467179536819458 -112,0.08642642945051193 -113,0.08757064491510391 -114,0.08357997983694077 -115,0.08574635535478592 -116,0.08638464659452438 -117,0.08494942635297775 -118,0.08359209448099136 -119,0.08533767610788345 -120,0.08567076921463013 -121,0.08344317227602005 -122,0.08516974002122879 -123,0.08498547971248627 -124,0.08329635858535767 -125,0.0846729651093483 -126,0.08336611837148666 -127,0.0848851129412651 -128,0.08549007028341293 -129,0.0832807645201683 -130,0.08368661999702454 -131,0.08326932042837143 -132,0.08358433097600937 -133,0.08312097191810608 -134,0.08507795631885529 -135,0.0852065160870552 -136,0.08391615003347397 -137,0.08420705050230026 -138,0.08532629162073135 -139,0.0851413682103157 -140,0.08470432460308075 -141,0.08246320486068726 -142,0.08597779273986816 -143,0.08481024205684662 -144,0.08408600091934204 -145,0.0831417366862297 -146,0.08440092951059341 -147,0.08348771184682846 -148,0.08328810334205627 -149,0.08459571748971939 -150,0.08339346945285797 -151,0.08327370882034302 -152,0.08364953100681305 -153,0.08257367461919785 -154,0.08348064869642258 -155,0.08306638896465302 -156,0.08269140869379044 -157,0.08342785388231277 -158,0.08349200338125229 -159,0.08348407596349716 -160,0.08230955898761749 -161,0.08239839226007462 -162,0.08227246254682541 -163,0.08277023583650589 -164,0.08228759467601776 -165,0.08276895433664322 -166,0.08355323225259781 -167,0.08336442708969116 -168,0.08337832987308502 -169,0.0835428237915039 
-170,0.08310714364051819 -171,0.08352722972631454 -172,0.08210236579179764 -173,0.08380849659442902 -174,0.0837876945734024 -175,0.08269309997558594 -176,0.08321716636419296 -177,0.08236087113618851 -178,0.0824437066912651 -179,0.08223946392536163 -180,0.08375191688537598 -181,0.08132091164588928 -182,0.08285864442586899 -183,0.08247839659452438 -184,0.08301738649606705 -185,0.08337469398975372 -186,0.08540235459804535 -187,0.08459045737981796 -188,0.08233901113271713 -189,0.08627553284168243 -190,0.0847293809056282 -191,0.08348724246025085 -192,0.08538008481264114 -193,0.08324899524450302 -194,0.08425655215978622 -195,0.08623168617486954 -196,0.08289453387260437 -197,0.0829869732260704 -198,0.08364758640527725 -199,0.08267616480588913 -200,0.08205847442150116 diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.250/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.250/training_config.txt deleted file mode 100644 index 3f6c5ed..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.250/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.250/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.250/training_log.csv deleted file mode 100644 index 5e2becc..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,16.569746017456055 -2,0.70541912317276 -3,9.735209465026855 -4,0.5907891392707825 -5,0.5510286092758179 -6,0.5148029327392578 -7,0.4975604712963104 -8,0.4466138482093811 -9,0.4301788806915283 -10,0.415568083524704 -11,0.39722996950149536 -12,0.3808116912841797 -13,0.3661763668060303 -14,0.35164669156074524 -15,0.336710125207901 -16,0.3208846151828766 -17,0.3072963356971741 -18,0.29763197898864746 -19,0.2917107343673706 -20,0.2898629307746887 -21,0.2875921428203583 -22,0.2847115993499756 -23,0.2812332510948181 -24,0.2770935595035553 -25,0.27228978276252747 -26,0.2668255567550659 -27,0.2606792449951172 -28,0.2538316547870636 -29,0.2463974803686142 -30,0.23833394050598145 -31,0.22977261245250702 -32,0.22078144550323486 -33,0.21131078898906708 -34,0.20129935443401337 -35,0.19068066775798798 -36,0.17955909669399261 -37,0.16869358718395233 -38,0.15972881019115448 -39,0.15195825695991516 -40,0.14587143063545227 -41,0.1451452225446701 -42,0.14832808077335358 -43,0.15386654436588287 -44,0.1584208905696869 -45,0.1610894501209259 -46,0.16175322234630585 -47,0.16045615077018738 -48,0.15742944180965424 -49,0.15302446484565735 -50,0.14819298684597015 -51,0.14467768371105194 -52,0.14312267303466797 -53,0.14237961173057556 -54,0.14182619750499725 -55,0.14172981679439545 -56,0.14180317521095276 -57,0.14218918979167938 -58,0.14272277057170868 -59,0.14274536073207855 -60,0.14215803146362305 -61,0.14099940657615662 -62,0.13943399488925934 -63,0.13789102435112 -64,0.13674920797348022 -65,0.13587898015975952 -66,0.13508868217468262 -67,0.134379044175148 -68,0.13402102887630463 -69,0.13408616185188293 -70,0.134229838848114 
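[Editor's aside: each sweep directory ends with a 202-row training_log.csv (a header plus epochs 0 through 200). A small standalone script along the following lines can rank learning rates by final-epoch loss while skipping runs whose tail diverged to nan; the root path and layout are taken from the deleted files in this diff, and the script itself is a sketch rather than repository code.]

import csv
import math
from pathlib import Path

# Layout from the deleted files: <sweep>/<loss_name>/lr_<value>/training_log.csv
root = Path("training/base_loss_learning_rate_sweep/one")

final_losses = {}
for log in sorted(root.glob("lr_*/training_log.csv")):
    with log.open() as f:
        rows = list(csv.DictReader(f))   # columns: Epoch, Loss
    final = float(rows[-1]["Loss"])
    if math.isfinite(final):             # drop runs that ended in nan
        final_losses[log.parent.name] = final

best = min(final_losses, key=final_losses.get)
print(best, final_losses[best])          # lr_0.100 wins this sweep (final loss ~0.0787)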
-71,0.1339368224143982 -72,0.13307268917560577 -73,0.13189555704593658 -74,0.13088181614875793 -75,0.13030537962913513 -76,0.1300944834947586 -77,0.13000112771987915 -78,0.12979938089847565 -79,0.12941089272499084 -80,0.12890951335430145 -81,0.12835977971553802 -82,0.12778781354427338 -83,0.12721966207027435 -84,0.12665510177612305 -85,0.1261354386806488 -86,0.12574748694896698 -87,0.1254482865333557 -88,0.12510013580322266 -89,0.12459905445575714 -90,0.12398651987314224 -91,0.1234021857380867 -92,0.12295260280370712 -93,0.12261173874139786 -94,0.1222444698214531 -95,0.12178327888250351 -96,0.12130574136972427 -97,0.12082775682210922 -98,0.1203407421708107 -99,0.11984921246767044 -100,0.11944516003131866 -101,0.11909487843513489 -102,0.11864257603883743 -103,0.11815985292196274 -104,0.11771049350500107 -105,0.11725996434688568 -106,0.11686119437217712 -107,0.1164616271853447 -108,0.11602501571178436 -109,0.1156032383441925 -110,0.11513824760913849 -111,0.11471877992153168 -112,0.11429334431886673 -113,0.11389357596635818 -114,0.11345499753952026 -115,0.1130450963973999 -116,0.11261176317930222 -117,0.11229532212018967 -118,0.11189880222082138 -119,0.11155560612678528 -120,0.11109194904565811 -121,0.1107662096619606 -122,0.11040439456701279 -123,0.11015961319208145 -124,0.10969690978527069 -125,0.10911346226930618 -126,0.10929371416568756 -127,0.10840576142072678 -128,0.10817263275384903 -129,0.10775525122880936 -130,0.10735537856817245 -131,0.10711212456226349 -132,0.10682740062475204 -133,0.10625461488962173 -134,0.10637039691209793 -135,0.10545326769351959 -136,0.10554678738117218 -137,0.10534925758838654 -138,0.10458224266767502 -139,0.10473336279392242 -140,0.10421288758516312 -141,0.1037839874625206 -142,0.1037934422492981 -143,0.10292982310056686 -144,0.10310819745063782 -145,0.10277441143989563 -146,0.10180072486400604 -147,0.10170041769742966 -148,0.10084004700183868 -149,0.10071027278900146 -150,0.10004439949989319 -151,0.1000431701540947 -152,0.09924867749214172 -153,0.09967898577451706 -154,0.09897034615278244 -155,0.09865702688694 -156,0.09862220287322998 -157,0.09759517014026642 -158,0.09835178405046463 -159,0.09810569137334824 -160,0.09658686071634293 -161,0.09674268960952759 -162,0.09613707661628723 -163,0.09674949198961258 -164,0.09622407704591751 -165,0.09593760222196579 -166,0.09603849798440933 -167,0.09531818330287933 -168,0.09534592181444168 -169,0.09479149430990219 -170,0.09484545141458511 -171,0.0949842631816864 -172,0.09416720271110535 -173,0.0937424898147583 -174,0.09352843463420868 -175,0.0930289626121521 -176,0.0929173156619072 -177,0.09211251139640808 -178,0.09236233681440353 -179,0.09155915677547455 -180,0.0916643738746643 -181,0.09102364629507065 -182,0.09162549674510956 -183,0.09113489091396332 -184,0.09046327322721481 -185,0.09039407968521118 -186,0.0897187814116478 -187,0.09028080850839615 -188,0.08970122784376144 -189,0.08928564935922623 -190,0.08924933522939682 -191,0.08835882693529129 -192,0.08913490176200867 -193,0.08878065645694733 -194,0.08762074261903763 -195,0.0879184752702713 -196,0.08736513555049896 -197,0.0874432921409607 -198,0.08725513517856598 -199,0.08642156422138214 -200,0.08713392168283463 diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.300/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.300/training_config.txt deleted file mode 100644 index 70322c8..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.300/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.300/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.300/training_log.csv deleted file mode 100644 index eb69cf4..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.300/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,26.561803817749023 -2,0.5867254734039307 -3,24.300159454345703 -4,0.566983699798584 -5,0.5101915001869202 -6,0.5028000473976135 -7,0.48691070079803467 -8,0.4758921265602112 -9,0.4625037610530853 -10,0.4443182647228241 -11,0.40885594487190247 -12,0.3916442096233368 -13,0.37546277046203613 -14,0.357437402009964 -15,0.33941683173179626 -16,0.32265299558639526 -17,0.3051711916923523 -18,0.2860953211784363 -19,0.2677481770515442 -20,0.2589857578277588 -21,0.2545440196990967 -22,0.24954549968242645 -23,0.24354985356330872 -24,0.23677845299243927 
-25,0.22915269434452057 -26,0.22059281170368195 -27,0.21064865589141846 -28,0.19883500039577484 -29,0.18442127108573914 -30,0.1699577122926712 -31,0.1625010371208191 -32,0.16389399766921997 -33,0.17024914920330048 -34,0.1711592823266983 -35,0.16774509847164154 -36,0.16072304546833038 -37,0.15170255303382874 -38,0.14607380330562592 -39,0.1447693556547165 -40,0.1470157951116562 -41,0.1484570950269699 -42,0.1483246386051178 -43,0.1462797373533249 -44,0.14368334412574768 -45,0.14141952991485596 -46,0.13991396129131317 -47,0.13899855315685272 -48,0.13798125088214874 -49,0.1365251988172531 -50,0.13485106825828552 -51,0.13317789137363434 -52,0.13230806589126587 -53,0.13212747871875763 -54,0.13073968887329102 -55,0.12919476628303528 -56,0.1284719854593277 -57,0.1290193349123001 -58,0.12958553433418274 -59,0.12964485585689545 -60,0.12912020087242126 -61,0.12816259264945984 -62,0.12699013948440552 -63,0.1258636862039566 -64,0.12515874207019806 -65,0.1253788322210312 -66,0.1258149892091751 -67,0.1256508231163025 -68,0.12452854216098785 -69,0.12340418249368668 -70,0.12329661101102829 -71,0.12340830266475677 -72,0.12336494773626328 -73,0.12305396795272827 -74,0.12248983234167099 -75,0.1218051016330719 -76,0.12126104533672333 -77,0.12094952911138535 -78,0.12070183455944061 -79,0.1204904094338417 -80,0.12001524865627289 -81,0.1195463016629219 -82,0.11935465037822723 -83,0.11914228647947311 -84,0.11882305145263672 -85,0.11839085817337036 -86,0.1179615706205368 -87,0.11769262701272964 -88,0.11738494038581848 -89,0.11704006046056747 -90,0.11655107140541077 -91,0.11632180213928223 -92,0.11607834696769714 -93,0.11572983860969543 -94,0.11529089510440826 -95,0.11495418101549149 -96,0.11473781615495682 -97,0.11438800394535065 -98,0.11395329236984253 -99,0.1136789321899414 -100,0.11338783800601959 -101,0.11298958212137222 -102,0.1126185953617096 -103,0.11236300319433212 -104,0.1119958683848381 -105,0.11162704974412918 -106,0.11135410517454147 -107,0.11100463569164276 -108,0.11065074801445007 -109,0.11035437136888504 -110,0.1100202202796936 -111,0.10966523736715317 -112,0.10936103761196136 -113,0.10902141034603119 -114,0.10868830978870392 -115,0.10839410871267319 -116,0.1080455556511879 -117,0.10770880430936813 -118,0.10746244341135025 -119,0.10712526738643646 -120,0.10690280050039291 -121,0.10651350766420364 -122,0.10607466101646423 -123,0.10612857341766357 -124,0.10546021908521652 -125,0.10528337955474854 -126,0.10493534803390503 -127,0.1044468879699707 -128,0.10445509105920792 -129,0.10377341508865356 -130,0.10354458540678024 -131,0.10318972915410995 -132,0.10277317464351654 -133,0.1026875451207161 -134,0.10209202766418457 -135,0.10196544229984283 -136,0.10176915675401688 -137,0.10120748728513718 -138,0.10086290538311005 -139,0.10047529637813568 -140,0.10026377439498901 -141,0.0999305322766304 -142,0.09960191696882248 -143,0.09903892874717712 -144,0.09891288727521896 -145,0.09847858548164368 -146,0.09815060347318649 -147,0.0976778194308281 -148,0.09740784019231796 -149,0.09682437777519226 -150,0.09648513793945312 -151,0.09593745321035385 -152,0.0956859216094017 -153,0.09535915404558182 -154,0.09479263424873352 -155,0.0945654958486557 -156,0.09440574049949646 -157,0.09396648406982422 -158,0.0929063931107521 -159,0.09324003756046295 -160,0.09247418493032455 -161,0.09211673587560654 -162,0.09103968739509583 -163,0.09072986990213394 -164,0.09011536836624146 -165,0.09051544964313507 -166,0.08909214287996292 -167,0.08934027701616287 -168,0.08845429867506027 -169,0.08892277628183365 -170,0.08849996328353882 
-171,0.08808182924985886 -172,0.08829429745674133 -173,0.08825750648975372 -174,0.08694233000278473 -175,0.08712185174226761 -176,0.08642880618572235 -177,0.08581958711147308 -178,0.08619074523448944 -179,0.0852394551038742 -180,0.08484179526567459 -181,0.0859464630484581 -182,0.08480525761842728 -183,0.08453582972288132 -184,0.08370526880025864 -185,0.08296359330415726 -186,0.08301705867052078 -187,0.08325115591287613 -188,0.0828530415892601 -189,0.08245117217302322 -190,0.08246126770973206 -191,0.08257320523262024 -192,0.08330798894166946 -193,0.08217421919107437 -194,0.08424980938434601 -195,0.08455304801464081 -196,0.08292844146490097 -197,0.08317992836236954 -198,0.08164729923009872 -199,0.08419931679964066 -200,0.08407948911190033 diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.400/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.400/training_config.txt deleted file mode 100644 index 1216317..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.400/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.400/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.400/training_log.csv deleted file mode 100644 index 78827c9..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.400/training_log.csv +++ /dev/null @@ -1,32 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,40.39252853393555 -2,1.1029574871063232 -3,1.4618662595748901 -4,6.307432651519775 -5,0.5955013632774353 -6,0.5186156630516052 -7,0.4576726257801056 -8,0.41843381524086 -9,0.3928395211696625 -10,0.37179142236709595 -11,0.35298722982406616 -12,0.335514634847641 -13,0.3187231123447418 -14,0.3023539185523987 -15,0.2862251102924347 -16,0.2701539397239685 -17,0.25409844517707825 -18,0.2376135289669037 -19,0.22208499908447266 -20,0.2089492231607437 -21,0.198738232254982 -22,0.1935775876045227 -23,0.18836846947669983 -24,0.17949914932250977 -25,0.16927434504032135 -26,0.1589018702507019 -27,nan -28,nan -29,nan -30,nan diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.500/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.500/training_config.txt deleted file mode 100644 index 4de677e..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.500/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, 
min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.500/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.500/training_log.csv deleted file mode 100644 index 074c30e..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.500/training_log.csv +++ /dev/null @@ -1,21 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,46.655487060546875 -2,0.7201232314109802 -3,0.4432002902030945 -4,0.39316973090171814 -5,0.3517614006996155 -6,0.2982473373413086 -7,0.2745814025402069 -8,0.24704137444496155 -9,0.22268591821193695 -10,0.19943509995937347 -11,0.1806628257036209 -12,0.16632463037967682 -13,0.16399165987968445 -14,0.16294510662555695 -15,0.15936443209648132 -16,nan -17,nan -18,nan -19,nan diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.600/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.600/training_config.txt deleted file mode 100644 index a2f842b..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.600/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def 
abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.600/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.600/training_log.csv deleted file mode 100644 index 65097aa..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.600/training_log.csv +++ /dev/null @@ -1,17 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,49.223384857177734 -2,0.4357685446739197 -3,0.5372183918952942 -4,0.22322911024093628 -5,0.2352856695652008 -6,0.21626491844654083 -7,0.213080033659935 -8,0.1583859920501709 -9,0.15527108311653137 -10,0.1560240536928177 -11,0.18459126353263855 -12,nan -13,nan -14,nan -15,nan diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.700/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.700/training_config.txt deleted file mode 100644 index c4e09c5..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.700/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, 
desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.700/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.700/training_log.csv deleted file mode 100644 index 80b0469..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.700/training_log.csv +++ /dev/null @@ -1,12 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,49.897552490234375 -2,0.5284045338630676 -3,0.4135121703147888 -4,0.431703120470047 -5,0.3725558817386627 -6,0.3514736294746399 -7,nan -8,nan -9,nan -10,nan diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.800/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.800/training_config.txt deleted file mode 100644 index c16cf9b..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.800/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: 
-def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.800/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.800/training_log.csv deleted file mode 100644 index 23c142a..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.800/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,50.10381317138672 -2,2.6007003784179688 -3,0.5629014372825623 -4,0.514721691608429 -5,nan -6,nan -7,nan -8,nan diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.900/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_0.900/training_config.txt deleted file mode 100644 index 1ee147a..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.900/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - 
return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_0.900/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_0.900/training_log.csv deleted file mode 100644 index fac7d40..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_0.900/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,50.1434440612793 -2,37.67593002319336 -3,0.6318652629852295 -4,0.5756456851959229 -5,nan -6,nan -7,nan -8,nan diff --git a/training/base_loss_learning_rate_sweep/one/lr_1.000/training_config.txt b/training/base_loss_learning_rate_sweep/one/lr_1.000/training_config.txt deleted file mode 100644 index f6ea964..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_1.000/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function Name: one -Loss Function Exponent: 1 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def abs_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1, min_val=min_val) - -Normalized Loss Function Source 
Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one/lr_1.000/training_log.csv b/training/base_loss_learning_rate_sweep/one/lr_1.000/training_log.csv deleted file mode 100644 index 961a3ef..0000000 --- a/training/base_loss_learning_rate_sweep/one/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,4.094233512878418 -1,50.15916061401367 -2,48.36441421508789 -3,0.8812976479530334 -4,nan -5,nan -6,nan -7,nan diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.003/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.003/training_config.txt deleted file mode 100644 index 02d2922..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.003/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.0025 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, 
min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.003/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.003/training_log.csv deleted file mode 100644 index b7cc242..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.003/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,1.641305923461914 -2,1.5900251865386963 -3,1.6172038316726685 -4,1.613431453704834 -5,1.623598337173462 -6,1.6107110977172852 -7,1.618196725845337 -8,1.6092618703842163 -9,1.612236738204956 -10,1.6094019412994385 -11,1.5992329120635986 -12,1.6097997426986694 -13,1.607957363128662 -14,1.59490966796875 -15,1.5722979307174683 -16,1.5748666524887085 -17,1.5701560974121094 -18,1.557390570640564 -19,1.5368387699127197 -20,1.5245200395584106 -21,1.5495884418487549 -22,1.5561894178390503 -23,1.5559803247451782 -24,1.5522915124893188 -25,1.5454641580581665 -26,1.5515507459640503 -27,1.5441633462905884 -28,1.5257773399353027 -29,1.5061843395233154 -30,1.5328022241592407 -31,1.5299444198608398 -32,1.523077368736267 -33,1.5174994468688965 -34,1.5027304887771606 -35,1.4997022151947021 -36,1.4831252098083496 -37,1.4573460817337036 -38,1.4310129880905151 -39,1.4236667156219482 -40,1.4589884281158447 -41,1.4711823463439941 -42,1.4814789295196533 -43,1.4701485633850098 -44,1.4612420797348022 -45,1.4464504718780518 -46,1.433473825454712 -47,1.433813214302063 -48,1.4274630546569824 -49,1.4186679124832153 -50,1.414894461631775 -51,1.404853105545044 -52,1.3943818807601929 -53,1.3784419298171997 -54,1.3498607873916626 -55,1.3426837921142578 -56,1.342220664024353 -57,1.3247162103652954 -58,1.3270792961120605 -59,1.3124210834503174 
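
As a sanity check on the normalized_loss definition quoted in these configs, the endpoint claims in its docstring (loss equals min_val at zero error and 1 at an error of 2π) can be verified directly. This is a minimal standalone sketch, re-declaring the function exactly as the configs quote it; it is not a file from the repo:

import math
import torch

def normalized_loss(theta, desired_theta, exponent, min_val=0.01, delta=1):
    # Same body as quoted in the deleted training_config.txt files above.
    error = torch.abs(theta - desired_theta)
    numerator = (error + delta) ** exponent - delta ** exponent
    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
    return min_val + (1 - min_val) * (numerator / denominator)

zero = torch.tensor(0.0)
full = torch.tensor(2 * math.pi)
for exponent in (1.0, 0.2):  # the 'one' and 'one_fifth' sweeps in this diff
    lo = normalized_loss(zero, zero, exponent)   # error = 0
    hi = normalized_loss(full, zero, exponent)   # error = 2*pi
    assert torch.isclose(lo, torch.tensor(0.01)) and torch.isclose(hi, torch.tensor(1.0))

At error = 0 the numerator vanishes, so the loss is exactly min_val; at error = 2π the numerator equals the denominator, so the loss is min_val + (1 - min_val) = 1.
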
-60,1.3008174896240234 -61,1.270581841468811 -62,1.2459354400634766 -63,1.2242001295089722 -64,1.211704969406128 -65,1.2294487953186035 -66,1.236870527267456 -67,1.2272251844406128 -68,1.229359745979309 -69,1.2334424257278442 -70,1.2431726455688477 -71,1.234955906867981 -72,1.2351316213607788 -73,1.2306421995162964 -74,1.2216496467590332 -75,1.1958153247833252 -76,1.206743836402893 -77,1.1959943771362305 -78,1.2035911083221436 -79,1.2072421312332153 -80,1.2089567184448242 -81,1.2100286483764648 -82,1.2105488777160645 -83,1.2096381187438965 -84,1.2075165510177612 -85,1.2049109935760498 -86,1.2009170055389404 -87,1.186955451965332 -88,1.1890053749084473 -89,1.1858770847320557 -90,1.1841830015182495 -91,1.195876121520996 -92,1.2046706676483154 -93,1.214917778968811 -94,1.213905930519104 -95,1.2046995162963867 -96,1.198334813117981 -97,1.1880887746810913 -98,1.1878782510757446 -99,1.1878111362457275 -100,1.1878983974456787 -101,1.188112735748291 -102,1.1887534856796265 -103,1.1900686025619507 -104,1.1883008480072021 -105,1.1870495080947876 -106,1.1860431432724 -107,1.1849961280822754 -108,1.1837483644485474 -109,1.1821796894073486 -110,1.1800135374069214 -111,1.176060676574707 -112,1.1895979642868042 -113,1.1860476732254028 -114,1.189887523651123 -115,1.1875884532928467 -116,1.1944401264190674 -117,1.1958967447280884 -118,1.1961919069290161 -119,1.1965644359588623 -120,1.1978228092193604 -121,1.196687936782837 -122,1.1954023838043213 -123,1.1943751573562622 -124,1.1928441524505615 -125,1.1873767375946045 -126,1.1868444681167603 -127,1.1882339715957642 -128,1.1915770769119263 -129,1.182468056678772 -130,1.2118899822235107 -131,1.2289471626281738 -132,1.2073160409927368 -133,1.2126848697662354 -134,1.2029670476913452 -135,1.199741244316101 -136,1.1885066032409668 -137,1.1881028413772583 -138,1.1882095336914062 -139,1.188374638557434 -140,1.1865650415420532 -141,1.208829641342163 -142,1.1977941989898682 -143,1.1970148086547852 -144,1.1972609758377075 -145,1.1977651119232178 -146,1.19833242893219 -147,1.1988874673843384 -148,1.1993707418441772 -149,1.199741005897522 -150,1.1999506950378418 -151,1.1999647617340088 -152,1.1997498273849487 -153,1.1992286443710327 -154,1.1981172561645508 -155,1.1939561367034912 -156,1.2105050086975098 -157,1.2137519121170044 -158,1.2153029441833496 -159,1.2158359289169312 -160,1.2348634004592896 -161,1.214935064315796 -162,1.2336137294769287 -163,1.2136095762252808 -164,1.2377907037734985 -165,1.239959478378296 -166,1.2410365343093872 -167,1.2415560483932495 -168,1.2417068481445312 -169,1.2415876388549805 -170,1.241251826286316 -171,1.2407371997833252 -172,1.2400665283203125 -173,1.2392501831054688 -174,1.2382885217666626 -175,1.237173318862915 -176,1.2358849048614502 -177,1.2343803644180298 -178,1.2325855493545532 -179,1.2303481101989746 -180,1.2271713018417358 -181,1.20693838596344 -182,1.2052844762802124 -183,1.2015581130981445 -184,1.1953991651535034 -185,1.18805992603302 -186,1.1869990825653076 -187,1.1737784147262573 -188,1.1843106746673584 -189,1.1852399110794067 -190,1.1853774785995483 -191,1.1857887506484985 -192,1.183579683303833 -193,1.164002537727356 -194,1.2205950021743774 -195,1.2664272785186768 -196,1.2816182374954224 -197,1.305185317993164 -198,1.3122978210449219 -199,1.3211674690246582 -200,1.3256176710128784 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.005/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.005/training_config.txt deleted file mode 100644 index 97a3fdf..0000000 --- 
a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.005/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.005 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.005/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.005/training_log.csv deleted file mode 100644 index de94f9e..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.005/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,1.6273162364959717 -2,1.6364121437072754 -3,1.625051498413086 -4,1.588850498199463 -5,1.5752395391464233 -6,1.567718744277954 -7,1.565665602684021 -8,1.5370062589645386 -9,1.5401359796524048 -10,1.528342843055725 -11,1.5451645851135254 -12,1.5592191219329834 -13,1.5527088642120361 -14,1.542201042175293 -15,1.5383301973342896 -16,1.5253227949142456 -17,1.4977017641067505 
-18,1.4634264707565308 -19,1.4358917474746704 -20,1.4128127098083496 -21,1.425528883934021 -22,1.3921757936477661 -23,1.3208709955215454 -24,1.303941249847412 -25,1.2870078086853027 -26,1.2890022993087769 -27,1.2719476222991943 -28,1.2490234375 -29,1.2355221509933472 -30,1.2264305353164673 -31,1.188551664352417 -32,1.1507799625396729 -33,1.1636607646942139 -34,1.15106201171875 -35,1.1488889455795288 -36,1.1502913236618042 -37,1.134558916091919 -38,1.1350916624069214 -39,1.1233266592025757 -40,1.1204683780670166 -41,1.1154959201812744 -42,1.1113954782485962 -43,1.1106103658676147 -44,1.1075611114501953 -45,1.0988882780075073 -46,1.0973362922668457 -47,1.1045209169387817 -48,1.1067349910736084 -49,1.1082671880722046 -50,1.1085467338562012 -51,1.1082210540771484 -52,1.1077876091003418 -53,1.1050843000411987 -54,1.1026115417480469 -55,1.1005762815475464 -56,1.0990296602249146 -57,1.106938123703003 -58,1.0958245992660522 -59,1.0941241979599 -60,1.0886820554733276 -61,1.0987439155578613 -62,1.0991166830062866 -63,1.0980585813522339 -64,1.095791220664978 -65,1.0850189924240112 -66,1.1120023727416992 -67,1.1084258556365967 -68,1.0992577075958252 -69,1.1127641201019287 -70,1.1170729398727417 -71,1.1358680725097656 -72,1.1386386156082153 -73,1.1311366558074951 -74,1.122660756111145 -75,1.1091443300247192 -76,1.1179263591766357 -77,1.0884844064712524 -78,1.0511083602905273 -79,1.051797866821289 -80,1.0520389080047607 -81,1.0661506652832031 -82,1.0638043880462646 -83,1.056640863418579 -84,1.05266535282135 -85,1.0501130819320679 -86,1.0425236225128174 -87,1.040152907371521 -88,1.0375587940216064 -89,1.0345697402954102 -90,1.0317342281341553 -91,1.0291311740875244 -92,1.0267211198806763 -93,1.0242055654525757 -94,1.0216292142868042 -95,1.0196425914764404 -96,1.0376158952713013 -97,1.0185911655426025 -98,1.0207045078277588 -99,1.0216355323791504 -100,1.0216277837753296 -101,1.0210609436035156 -102,1.02000892162323 -103,1.0183358192443848 -104,1.0158716440200806 -105,1.0140100717544556 -106,1.0127214193344116 -107,1.0119327306747437 -108,1.012411117553711 -109,1.0110766887664795 -110,1.0096795558929443 -111,1.0087682008743286 -112,1.0080287456512451 -113,1.007324457168579 -114,1.0065672397613525 -115,1.005686640739441 -116,1.0046250820159912 -117,1.0033513307571411 -118,1.0018879175186157 -119,1.0000756978988647 -120,0.996504008769989 -121,0.9938016533851624 -122,0.9920912384986877 -123,0.9911277294158936 -124,0.990293562412262 -125,0.9894237518310547 -126,0.9883860945701599 -127,0.987042248249054 -128,0.9852285385131836 -129,0.9839868545532227 -130,0.9835692644119263 -131,0.9838313460350037 -132,0.9827507734298706 -133,0.9812488555908203 -134,0.980378270149231 -135,0.9799690246582031 -136,0.9796861410140991 -137,0.9790266156196594 -138,0.9778978228569031 -139,0.9767558574676514 -140,0.9758115410804749 -141,0.9749875664710999 -142,0.9740992784500122 -143,0.9741153120994568 -144,0.9740455150604248 -145,0.9739214181900024 -146,0.9737744927406311 -147,0.9735954403877258 -148,0.9733501672744751 -149,0.9730344414710999 -150,0.9726876616477966 -151,0.9723451733589172 -152,0.9720023274421692 -153,0.9716289043426514 -154,0.9711982607841492 -155,0.9707410335540771 -156,0.9702888131141663 -157,0.9698177576065063 -158,0.9692877531051636 -159,0.968669593334198 -160,0.9680259227752686 -161,0.968339741230011 -162,0.9682193994522095 -163,0.967724621295929 -164,0.9669626355171204 -165,0.9671398997306824 -166,0.967118501663208 -167,0.9665513038635254 -168,0.9657617807388306 -169,0.9651477336883545 -170,0.9658750891685486 
-171,0.9649342894554138 -172,0.9642198085784912 -173,0.9642741680145264 -174,0.9641088247299194 -175,0.9635240435600281 -176,0.9626334309577942 -177,0.9629553556442261 -178,0.962859570980072 -179,0.9623056650161743 -180,0.9612108469009399 -181,0.9615015983581543 -182,0.9616615772247314 -183,0.9613626599311829 -184,0.9607539176940918 -185,0.959989070892334 -186,0.9592446088790894 -187,0.9601885676383972 -188,0.9587420225143433 -189,0.957757294178009 -190,0.9578480124473572 -191,0.9572086930274963 -192,0.9574750661849976 -193,0.9572041630744934 -194,0.9565179347991943 -195,0.9552291631698608 -196,0.9559460282325745 -197,0.9558562636375427 -198,0.9551531672477722 -199,0.9542058706283569 -200,0.9534108638763428 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.010/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.010/training_config.txt deleted file mode 100644 index c752aaf..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.010/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.010/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.010/training_log.csv deleted file mode 100644 index db03436..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,1.6512227058410645 -2,1.6322399377822876 -3,1.5995244979858398 -4,1.5866289138793945 -5,1.5607682466506958 -6,1.5234488248825073 -7,1.4971790313720703 -8,1.4425532817840576 -9,1.4153223037719727 -10,1.398521065711975 -11,1.358423113822937 -12,1.363187313079834 -13,1.338208794593811 -14,1.3123852014541626 -15,1.2705804109573364 -16,1.2668262720108032 -17,1.2548803091049194 -18,1.2469391822814941 -19,1.169582724571228 -20,1.1655949354171753 -21,1.1746087074279785 -22,1.160980463027954 -23,1.165627121925354 -24,1.1663469076156616 -25,1.163887858390808 -26,1.1596136093139648 -27,1.1505953073501587 -28,1.1537114381790161 -29,1.1521248817443848 -30,1.1390104293823242 -31,1.139570713043213 -32,1.1417012214660645 -33,1.136596918106079 -34,1.123494029045105 -35,1.1090041399002075 -36,1.0810117721557617 -37,1.0953043699264526 -38,1.0742342472076416 -39,1.0753588676452637 -40,1.0628869533538818 -41,1.056282639503479 -42,1.0300543308258057 -43,1.0142652988433838 -44,0.9919458627700806 -45,0.9741153120994568 -46,0.9664507508277893 -47,0.9699262976646423 -48,0.9737330675125122 -49,0.9682859778404236 -50,0.9542985558509827 -51,0.948043167591095 -52,0.9517790079116821 -53,0.9475159049034119 -54,0.9503787159919739 -55,0.951149582862854 -56,0.9499524235725403 -57,0.9473782181739807 -58,0.9436960220336914 -59,0.9398351311683655 -60,0.9472770094871521 -61,0.959125280380249 -62,0.9747881889343262 -63,0.9917530417442322 -64,0.9913091063499451 -65,0.9979321956634521 -66,1.019960880279541 -67,1.0190988779067993 -68,1.022321343421936 -69,1.0241219997406006 -70,1.0221565961837769 -71,1.0188956260681152 
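
The Training Cases blocks above list rows of [theta0, omega0, alpha0, desired_theta], and the loss_fn wrapper slices state indices 0 and 3 out of the rolled-out trajectory. The following sketch shows how such rows could be packed into a batch tensor with matching indices; the rollout that turns these initial states into state_traj is not part of this diff, so only the shape convention is illustrated:

import math
import torch

# A few of the [theta0, omega0, alpha0, desired_theta] rows listed above.
training_cases = [
    [math.pi / 6, 0.0, 0.0, 0.0],
    [-math.pi / 6, 0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0, 2 * math.pi],
]
initial_states = torch.tensor(training_cases)  # shape [batch_size, 4]

# After simulation, state_traj has shape [batch_size, t_points, 4], so
# index 0 is theta and index 3 is desired_theta, the same indices the
# quoted loss_fn wrapper slices with state_traj[:, :, 0] and [:, :, 3].
theta0 = initial_states[:, 0]
desired_theta = initial_states[:, 3]
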
-72,1.0140659809112549 -73,1.0028074979782104 -74,1.0009037256240845 -75,0.9972639679908752 -76,0.9943658709526062 -77,0.9924213290214539 -78,0.9901096224784851 -79,0.9953734278678894 -80,0.9945358633995056 -81,0.9924521446228027 -82,0.986567497253418 -83,0.9854756593704224 -84,0.983444094657898 -85,0.9791045188903809 -86,0.9785827398300171 -87,0.9777540564537048 -88,0.9837590456008911 -89,0.9750737547874451 -90,0.9680313467979431 -91,0.9601575136184692 -92,0.9609605669975281 -93,0.9637589454650879 -94,0.9676647782325745 -95,0.960102379322052 -96,0.9587392807006836 -97,0.9595332145690918 -98,0.9622029662132263 -99,0.9631024599075317 -100,0.9602185487747192 -101,0.9575866460800171 -102,0.9552538394927979 -103,0.9555241465568542 -104,0.9556044936180115 -105,0.9548184275627136 -106,0.9542003870010376 -107,0.949712872505188 -108,0.946268618106842 -109,0.9503349661827087 -110,0.9561575651168823 -111,0.9468002319335938 -112,0.927402138710022 -113,0.8973875045776367 -114,0.8873655796051025 -115,0.8925773501396179 -116,0.9019726514816284 -117,0.9032853245735168 -118,0.9011204838752747 -119,0.8839423060417175 -120,0.8580743670463562 -121,0.8321598172187805 -122,0.8353549242019653 -123,0.8255967497825623 -124,0.8183194398880005 -125,0.7903240323066711 -126,0.7737199664115906 -127,0.7285094261169434 -128,0.7175226211547852 -129,0.7067896127700806 -130,0.6966803669929504 -131,0.6866130828857422 -132,0.6797703504562378 -133,0.6693608164787292 -134,0.6606093049049377 -135,0.6567363142967224 -136,0.6618087291717529 -137,0.6645094156265259 -138,0.6667860150337219 -139,0.6658257842063904 -140,0.6623283624649048 -141,0.6557410955429077 -142,0.649384081363678 -143,0.6459650993347168 -144,0.6452180743217468 -145,0.645178496837616 -146,0.6430527567863464 -147,0.6393420100212097 -148,0.6425104737281799 -149,0.6496487855911255 -150,0.64775151014328 -151,0.6366769075393677 -152,0.6455910801887512 -153,0.6463124752044678 -154,0.6432802081108093 -155,0.6360915303230286 -156,0.6638985872268677 -157,0.6732724905014038 -158,0.6795377135276794 -159,0.6834025382995605 -160,0.6826817989349365 -161,0.682664692401886 -162,0.6936141848564148 -163,0.6970431208610535 -164,0.6978861689567566 -165,0.6971088647842407 -166,0.6880555152893066 -167,0.685530424118042 -168,0.6826947331428528 -169,0.6794865131378174 -170,0.6759467124938965 -171,0.672026515007019 -172,0.6597915291786194 -173,0.6576979756355286 -174,0.6508421301841736 -175,0.6469857096672058 -176,0.6426334977149963 -177,0.638283371925354 -178,0.6335828900337219 -179,0.6282439827919006 -180,0.6218481659889221 -181,0.6113656759262085 -182,0.5847345590591431 -183,0.5827882289886475 -184,0.5978504419326782 -185,0.6020200848579407 -186,0.6004992723464966 -187,0.5937893986701965 -188,0.5893838405609131 -189,0.5726014971733093 -190,0.579797089099884 -191,0.6041467785835266 -192,0.6257359385490417 -193,0.6347194314002991 -194,0.6380216479301453 -195,0.6447299122810364 -196,0.641959547996521 -197,0.6430046558380127 -198,0.6430095434188843 -199,0.6419850587844849 -200,0.6396645307540894 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.020/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.020/training_config.txt deleted file mode 100644 index e077c14..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.020/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight 
Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.020/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.020/training_log.csv deleted file mode 100644 index 8274638..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,1.6677532196044922 -2,1.6255738735198975 -3,1.5415006875991821 -4,1.4413533210754395 -5,1.3378545045852661 -6,1.2186338901519775 -7,1.214963674545288 -8,1.1638178825378418 -9,1.1656248569488525 -10,1.1653136014938354 -11,1.1539641618728638 -12,1.1393293142318726 -13,1.1491657495498657 -14,1.1436967849731445 -15,1.1358295679092407 -16,1.1233086585998535 -17,1.1028908491134644 -18,1.1196790933609009 -19,1.1187866926193237 -20,1.1257601976394653 -21,1.1647775173187256 -22,1.161597490310669 -23,1.2055211067199707 -24,1.2368160486221313 -25,1.1939128637313843 -26,1.1679973602294922 -27,1.1442991495132446 -28,1.1152849197387695 -29,1.0965187549591064 
-30,1.0953096151351929 -31,1.097537875175476 -32,1.094205379486084 -33,1.0945733785629272 -34,1.0950897932052612 -35,1.0995490550994873 -36,1.098240613937378 -37,1.0977264642715454 -38,1.0938680171966553 -39,1.0962148904800415 -40,1.0961164236068726 -41,1.0882322788238525 -42,1.0959454774856567 -43,1.104204535484314 -44,1.0548216104507446 -45,1.0269135236740112 -46,0.9821216464042664 -47,0.935343861579895 -48,0.888431966304779 -49,0.8894188404083252 -50,0.8814705014228821 -51,0.804743230342865 -52,0.7116991281509399 -53,0.6647509932518005 -54,0.6613137722015381 -55,0.6363576650619507 -56,0.5818822979927063 -57,0.6000834703445435 -58,0.6029173135757446 -59,0.6166421175003052 -60,0.6199735999107361 -61,0.6291251182556152 -62,0.618159294128418 -63,0.6122064590454102 -64,0.5995673537254333 -65,0.5985409617424011 -66,0.5812270045280457 -67,0.5740554928779602 -68,0.5619866251945496 -69,0.5526401400566101 -70,0.567690372467041 -71,0.5728273987770081 -72,0.5687457323074341 -73,0.5640209317207336 -74,0.5616559386253357 -75,0.5531882643699646 -76,0.5710815191268921 -77,0.5675150752067566 -78,0.5951424241065979 -79,0.5951895713806152 -80,0.6114627122879028 -81,0.6041780710220337 -82,0.586231529712677 -83,0.5520951747894287 -84,0.539106011390686 -85,0.5217911005020142 -86,0.5112457275390625 -87,0.5087457895278931 -88,0.504572331905365 -89,0.5097288489341736 -90,0.4980928599834442 -91,0.5097248554229736 -92,0.5070976614952087 -93,0.5143601894378662 -94,0.5284360647201538 -95,0.5386337637901306 -96,0.5428728461265564 -97,0.5640044212341309 -98,0.5717318058013916 -99,0.5756427049636841 -100,0.5773664712905884 -101,0.5773065686225891 -102,0.5755136013031006 -103,0.571292519569397 -104,0.567021906375885 -105,0.5675342082977295 -106,0.5648835897445679 -107,0.5580986738204956 -108,0.5429087281227112 -109,0.5413979887962341 -110,0.5193430185317993 -111,0.5163629055023193 -112,0.5061143636703491 -113,0.494139164686203 -114,0.48400408029556274 -115,0.48290202021598816 -116,0.47717875242233276 -117,0.46538442373275757 -118,0.47161129117012024 -119,0.47423678636550903 -120,0.47296956181526184 -121,0.47083133459091187 -122,0.46801531314849854 -123,0.46400678157806396 -124,0.4583907723426819 -125,0.44523775577545166 -126,0.43620729446411133 -127,0.42961442470550537 -128,0.4232198894023895 -129,0.4070360064506531 -130,0.40283745527267456 -131,0.39678817987442017 -132,0.3951560854911804 -133,0.3969760239124298 -134,0.3920533061027527 -135,0.38894540071487427 -136,0.38326388597488403 -137,0.3775498867034912 -138,0.370589941740036 -139,0.3603892922401428 -140,0.35153335332870483 -141,0.34568339586257935 -142,0.3484565317630768 -143,0.35423028469085693 -144,0.3415726125240326 -145,0.3446289598941803 -146,0.3438115119934082 -147,0.3396689295768738 -148,0.32688307762145996 -149,0.3128059506416321 -150,0.3114140033721924 -151,0.3277035653591156 -152,0.3400592505931854 -153,0.314687043428421 -154,0.31977397203445435 -155,0.3274259567260742 -156,0.3289061188697815 -157,0.3263154625892639 -158,0.3267013728618622 -159,0.31697335839271545 -160,0.31392931938171387 -161,0.3054920434951782 -162,0.29841670393943787 -163,0.2971726059913635 -164,0.30105674266815186 -165,0.31591087579727173 -166,0.3183286488056183 -167,0.30637219548225403 -168,0.2970917522907257 -169,0.30742374062538147 -170,0.30542853474617004 -171,0.30174002051353455 -172,0.30698132514953613 -173,0.30075061321258545 -174,0.30291035771369934 -175,0.3055758476257324 -176,0.306600421667099 -177,0.3104933202266693 -178,0.30713191628456116 -179,0.30263370275497437 
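
The deleted logs follow a consistent pattern: the high-rate runs (lr 0.6 through 1.0) collapse to nan within roughly a dozen epochs, while the low-rate one_fifth runs descend steadily. A small scan over the two-column Epoch,Loss layout shown in these files can recover both facts per run; this is a sketch assuming the directory layout visible in the diff headers, not a script from the repo:

import csv
import math
from pathlib import Path

def summarize_log(path):
    # Returns (best_loss, best_epoch, first_nan_epoch) for one training_log.csv.
    best_loss, best_epoch, first_nan = math.inf, None, None
    with open(path, newline="") as f:
        for row in csv.DictReader(f):
            epoch, loss = int(row["Epoch"]), float(row["Loss"])
            if math.isnan(loss):
                if first_nan is None:
                    first_nan = epoch
            elif loss < best_loss:
                best_loss, best_epoch = loss, epoch
    return best_loss, best_epoch, first_nan

sweep_root = Path("training/base_loss_learning_rate_sweep/one_fifth")
for log in sorted(sweep_root.glob("lr_*/training_log.csv")):
    print(log.parent.name, summarize_log(log))

A run that diverged reports the epoch of its first nan; a run that converged reports where its minimum occurred.
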
-180,0.2996687591075897 -181,0.2973376512527466 -182,0.2957579791545868 -183,0.2943185567855835 -184,0.2923468053340912 -185,0.2905573546886444 -186,0.28971776366233826 -187,0.2891962230205536 -188,0.2858985364437103 -189,0.28548377752304077 -190,0.28378427028656006 -191,0.28032582998275757 -192,0.27744412422180176 -193,0.2739223837852478 -194,0.2701258659362793 -195,0.26679670810699463 -196,0.2630458474159241 -197,0.26205161213874817 -198,0.2617625296115875 -199,0.2577894330024719 -200,0.255835622549057 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.040/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.040/training_config.txt deleted file mode 100644 index c681337..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.040/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.040/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.040/training_log.csv deleted file mode 100644 index 2fb8b08..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,1.6766841411590576 -2,1.5070680379867554 -3,1.4104236364364624 -4,1.2473362684249878 -5,1.242732048034668 -6,1.1897211074829102 -7,1.1880074739456177 -8,1.1867766380310059 -9,1.2279305458068848 -10,1.1631571054458618 -11,1.0843549966812134 -12,1.0484787225723267 -13,0.9931166768074036 -14,0.9471749067306519 -15,0.9123624563217163 -16,0.8219213485717773 -17,0.781735897064209 -18,0.7343801259994507 -19,0.7000044584274292 -20,0.6623193621635437 -21,0.6691563129425049 -22,0.6962240934371948 -23,0.675551176071167 -24,0.6550577878952026 -25,0.6340246200561523 -26,0.6318193078041077 -27,0.6309962868690491 -28,0.6284300684928894 -29,0.6220040321350098 -30,0.6035372614860535 -31,0.6108411550521851 -32,0.6217907667160034 -33,0.620686411857605 -34,0.6104059219360352 -35,0.6069158315658569 -36,0.6018107533454895 -37,0.6075133085250854 -38,0.6056291460990906 -39,0.587668776512146 -40,0.6123623847961426 -41,0.6345495581626892 -42,0.6398538947105408 -43,0.6451601386070251 -44,0.6480259299278259 -45,0.6477656364440918 -46,0.6453169584274292 -47,0.6416741013526917 -48,0.6377272009849548 -49,0.6338893175125122 -50,0.6196855306625366 -51,0.6143150329589844 -52,0.6112369298934937 -53,0.6044145226478577 -54,0.5973841547966003 -55,0.5907110571861267 -56,0.5902817845344543 -57,0.5808218717575073 -58,0.5766597390174866 -59,0.5768337845802307 -60,0.574669599533081 -61,0.5690263509750366 -62,0.5682252049446106 -63,0.5689070224761963 -64,0.5682659149169922 -65,0.5633720755577087 -66,0.5651618838310242 -67,0.5677374601364136 -68,0.5676422715187073 -69,0.5642248392105103 -70,0.564355194568634 -71,0.5569403171539307 
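
The docstring's motivation for the delta shift (an infinite gradient at error = 0 when the exponent is below 1) can be probed numerically. The sketch below differentiates the normalized map with respect to the error itself near zero, with and without the shift; it is an illustration of the stated rationale, not code from the repo:

import math
import torch

exponent, min_val = 0.2, 0.01  # the one_fifth configuration above

def loss_of_error(error, delta):
    numerator = (error + delta) ** exponent - delta ** exponent
    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
    return min_val + (1 - min_val) * (numerator / denominator)

for delta in (1.0, 0.0):
    error = torch.tensor(1e-6, requires_grad=True)  # just off zero error
    loss_of_error(error, delta).backward()
    print(f"delta={delta}: d(loss)/d(error) near zero = {error.grad.item():.3f}")

With delta = 1 the slope at zero error is modest (about 0.4 for exponent 1/5), since the derivative of (error + delta)^p stays finite at error = 0; with delta = 0 it grows without bound as the error shrinks, which is exactly the instability the shift avoids.
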
-72,0.5615599155426025 -73,0.5552824139595032 -74,0.5505906343460083 -75,0.5439455509185791 -76,0.5423746705055237 -77,0.5441914796829224 -78,0.5316057205200195 -79,0.5469052791595459 -80,0.564885675907135 -81,0.5679214000701904 -82,0.568531334400177 -83,0.5677354335784912 -84,0.5684131383895874 -85,0.5673571228981018 -86,0.5654816627502441 -87,0.5656774640083313 -88,0.5671020150184631 -89,0.5673553347587585 -90,0.5668550133705139 -91,0.5658907294273376 -92,0.5645461678504944 -93,0.5628973841667175 -94,0.5611240267753601 -95,0.5591740608215332 -96,0.5570331811904907 -97,0.5545586943626404 -98,0.5515416264533997 -99,0.547348141670227 -100,0.5469821691513062 -101,0.5484087467193604 -102,0.5476536154747009 -103,0.5445821285247803 -104,0.5420945286750793 -105,0.5421151518821716 -106,0.5402837991714478 -107,0.5393050312995911 -108,0.5356111526489258 -109,0.5365321040153503 -110,0.5346508026123047 -111,0.5288248062133789 -112,0.5213366746902466 -113,0.5224271416664124 -114,0.5133718848228455 -115,0.5050725340843201 -116,0.4988340735435486 -117,0.48841628432273865 -118,0.47826358675956726 -119,0.45308664441108704 -120,0.4573333263397217 -121,0.4460793733596802 -122,0.44259050488471985 -123,0.4276066720485687 -124,0.4224284589290619 -125,0.42725449800491333 -126,0.41958335041999817 -127,0.4046100080013275 -128,0.40770837664604187 -129,0.4048246741294861 -130,0.39798304438591003 -131,0.3896526098251343 -132,0.3888660669326782 -133,0.38756701350212097 -134,0.3870817720890045 -135,0.3804449439048767 -136,0.37395939230918884 -137,0.3626475930213928 -138,0.36104920506477356 -139,0.3597657382488251 -140,0.36079832911491394 -141,0.3580579459667206 -142,0.3524234890937805 -143,0.342422217130661 -144,0.33300063014030457 -145,0.3156777024269104 -146,0.30218714475631714 -147,0.29452523589134216 -148,0.2937866151332855 -149,0.2952360212802887 -150,0.29709237813949585 -151,0.2997400164604187 -152,0.29845982789993286 -153,0.2949928343296051 -154,0.2880740761756897 -155,0.28028643131256104 -156,0.2691488265991211 -157,0.25772836804389954 -158,0.24809208512306213 -159,0.24458128213882446 -160,0.2476329654455185 -161,0.2622957229614258 -162,0.251370370388031 -163,0.24062243103981018 -164,0.2567928731441498 -165,0.257872611284256 -166,0.2587830722332001 -167,0.25507035851478577 -168,0.2462383210659027 -169,0.23538880050182343 -170,0.24581538140773773 -171,0.23649664223194122 -172,0.22245365381240845 -173,0.23131826519966125 -174,0.23523859679698944 -175,0.2342355102300644 -176,0.22835151851177216 -177,0.22042520344257355 -178,0.22418585419654846 -179,0.22243209183216095 -180,0.21905067563056946 -181,0.22746542096138 -182,0.23531146347522736 -183,0.238255575299263 -184,0.2366534024477005 -185,0.23641018569469452 -186,0.24163101613521576 -187,0.23500783741474152 -188,0.23257872462272644 -189,0.23427212238311768 -190,0.23263008892536163 -191,0.2285507172346115 -192,0.22249627113342285 -193,0.22039224207401276 -194,0.2254604548215866 -195,0.22210049629211426 -196,0.21290558576583862 -197,0.2101304531097412 -198,0.21404044330120087 -199,0.21395020186901093 -200,0.20091652870178223 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.050/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.050/training_config.txt deleted file mode 100644 index ae14833..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.050/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time 
Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.050/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.050/training_log.csv deleted file mode 100644 index 25acaa6..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,1.6696146726608276 -2,1.5167477130889893 -3,1.361884355545044 -4,1.1819002628326416 -5,1.225856065750122 -6,1.3080248832702637 -7,1.3157707452774048 -8,1.2880113124847412 -9,1.3317409753799438 -10,1.298192024230957 -11,1.2245182991027832 -12,1.1899617910385132 -13,1.140191912651062 -14,1.126400351524353 -15,1.1274698972702026 -16,1.0701690912246704 -17,1.0383753776550293 -18,0.9663735628128052 -19,0.9819089770317078 -20,0.9371464848518372 -21,0.9175171256065369 -22,0.8903799653053284 -23,0.8601094484329224 -24,0.7952120304107666 -25,0.7290639281272888 -26,0.7209663391113281 -27,0.7263458967208862 
-28,0.7364989519119263 -29,0.7201422452926636 -30,0.7035380601882935 -31,0.6838456392288208 -32,0.6690639853477478 -33,0.6520328521728516 -34,0.6275292634963989 -35,0.5977568030357361 -36,0.5587949156761169 -37,0.5200703740119934 -38,0.5046232342720032 -39,0.48480725288391113 -40,0.45829886198043823 -41,0.43394243717193604 -42,0.42887765169143677 -43,0.40746182203292847 -44,0.3902290463447571 -45,0.3703930675983429 -46,0.3698030114173889 -47,0.3570772707462311 -48,0.3295000195503235 -49,0.2862567603588104 -50,0.2660553455352783 -51,0.26966848969459534 -52,0.25226059556007385 -53,0.24342955648899078 -54,0.2557736039161682 -55,0.2494829148054123 -56,0.22743013501167297 -57,0.22273452579975128 -58,0.22683466970920563 -59,0.22583797574043274 -60,0.22063589096069336 -61,0.21139006316661835 -62,0.2006981372833252 -63,0.19937002658843994 -64,0.19511133432388306 -65,0.18657872080802917 -66,0.18295633792877197 -67,0.17828427255153656 -68,0.17042195796966553 -69,0.15730270743370056 -70,0.15526385605335236 -71,0.15368306636810303 -72,0.14844229817390442 -73,0.1511734277009964 -74,0.1512060910463333 -75,0.1448386311531067 -76,0.1424727439880371 -77,0.14469482004642487 -78,0.14317171275615692 -79,0.1427503526210785 -80,0.14137373864650726 -81,0.13847461342811584 -82,0.1391398012638092 -83,0.13409626483917236 -84,0.13382163643836975 -85,0.1314999759197235 -86,0.12735402584075928 -87,0.13732993602752686 -88,0.13227085769176483 -89,0.13550294935703278 -90,0.14020416140556335 -91,0.13675284385681152 -92,0.12717518210411072 -93,0.13206905126571655 -94,0.13443078100681305 -95,0.1257784366607666 -96,0.13086770474910736 -97,0.13273008167743683 -98,0.12971343100070953 -99,0.12106695771217346 -100,0.13153114914894104 -101,0.1314326822757721 -102,0.11995085328817368 -103,0.12747010588645935 -104,0.13095924258232117 -105,0.12743791937828064 -106,0.11896556615829468 -107,0.1294955164194107 -108,0.13214026391506195 -109,0.12039152532815933 -110,0.12320265918970108 -111,0.12900996208190918 -112,0.12847520411014557 -113,0.12291653454303741 -114,0.11573217809200287 -115,0.1282360851764679 -116,0.13039232790470123 -117,0.12213445454835892 -118,0.12078770995140076 -119,0.12667742371559143 -120,0.1269681602716446 -121,0.12448983639478683 -122,0.1187160387635231 -123,0.12010122835636139 -124,0.1263233721256256 -125,0.11870936304330826 -126,0.11789063364267349 -127,0.12384223937988281 -128,0.12400119006633759 -129,0.11893399059772491 -130,0.11325887590646744 -131,0.11763045936822891 -132,0.11327274888753891 -133,0.11657555401325226 -134,0.11524936556816101 -135,0.11389286816120148 -136,0.11487486958503723 -137,0.11146624386310577 -138,0.11469625681638718 -139,0.11640451103448868 -140,0.1134275570511818 -141,0.11506174504756927 -142,0.11268585920333862 -143,0.11409398168325424 -144,0.11340706050395966 -145,0.11436909437179565 -146,0.11643385887145996 -147,0.11527203023433685 -148,0.11337008327245712 -149,0.11087019741535187 -150,0.10871374607086182 -151,0.11020566523075104 -152,0.11217241734266281 -153,0.11138812452554703 -154,0.1137474775314331 -155,0.10893546044826508 -156,0.1148616150021553 -157,0.11857745796442032 -158,0.11686050891876221 -159,0.11016984283924103 -160,0.11752090603113174 -161,0.12028653174638748 -162,0.11365478485822678 -163,0.11548323184251785 -164,0.11951883137226105 -165,0.11940832436084747 -166,0.11654618382453918 -167,0.10939336568117142 -168,0.12094993889331818 -169,0.12392134219408035 -170,0.11636946350336075 -171,0.11083204299211502 -172,0.11618388444185257 -173,0.11541309952735901 
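
Logs like this lr_0.050 run settle into a plateau that oscillates between roughly 0.10 and 0.12 for the last fifty epochs, so comparing runs on the single final row is noisy. One hedged alternative, sketched below on the layout of these files, is to average a tail window of finite losses; the window size is an arbitrary illustrative choice:

import csv
import math

def tail_mean(path, window=20):
    # Mean of the last `window` finite losses in an Epoch,Loss CSV; steadier
    # than the final row alone when a log oscillates around its plateau.
    losses = []
    with open(path, newline="") as f:
        for row in csv.reader(f):
            if row and row[0] != "Epoch":
                loss = float(row[1])
                if not math.isnan(loss):
                    losses.append(loss)
    if not losses:
        return math.nan
    tail = losses[-window:]
    return sum(tail) / len(tail)

print(tail_mean("training/base_loss_learning_rate_sweep/one_fifth/lr_0.050/training_log.csv"))
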
-174,0.110104039311409 -175,0.11032292991876602 -176,0.1105421707034111 -177,0.10805567353963852 -178,0.11088384687900543 -179,0.10890740156173706 -180,0.1069987565279007 -181,0.10626524686813354 -182,0.10912728309631348 -183,0.10985616594552994 -184,0.10667875409126282 -185,0.11018616706132889 -186,0.1089671179652214 -187,0.10730613023042679 -188,0.10972216725349426 -189,0.10728772729635239 -190,0.10647156834602356 -191,0.10454710572957993 -192,0.10831315815448761 -193,0.1096140444278717 -194,0.10637237131595612 -195,0.10938780754804611 -196,0.10810542106628418 -197,0.10636749863624573 -198,0.10805419087409973 -199,0.10515985637903214 -200,0.10612954944372177 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.080/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.080/training_config.txt deleted file mode 100644 index fc5f566..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.080/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.080/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.080/training_log.csv deleted file mode 100644 index 8428afe..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,1.6644786596298218 -2,1.5131367444992065 -3,1.22650945186615 -4,1.313050389289856 -5,1.4972407817840576 -6,1.4857381582260132 -7,1.4293829202651978 -8,1.3757712841033936 -9,1.2901185750961304 -10,1.2634015083312988 -11,1.1692782640457153 -12,1.0735294818878174 -13,1.0352948904037476 -14,1.0014375448226929 -15,0.9525418877601624 -16,0.950638473033905 -17,1.0285969972610474 -18,0.9270251989364624 -19,0.8427431583404541 -20,0.7638986110687256 -21,0.7154088020324707 -22,0.7523590326309204 -23,0.7766008377075195 -24,0.7686439752578735 -25,0.7616793513298035 -26,0.7514174580574036 -27,0.7380810379981995 -28,0.7099398374557495 -29,0.6596367955207825 -30,0.6396325826644897 -31,0.6122204661369324 -32,0.5889737010002136 -33,0.5605206489562988 -34,0.5288934707641602 -35,0.4953545928001404 -36,0.4546687602996826 -37,0.39528051018714905 -38,0.35856956243515015 -39,0.3402106761932373 -40,0.3245995342731476 -41,0.30911561846733093 -42,0.2906879782676697 -43,0.26997193694114685 -44,0.24992915987968445 -45,0.23091726005077362 -46,0.21660153567790985 -47,0.2067953497171402 -48,0.19277918338775635 -49,0.18478140234947205 -50,0.1918211728334427 -51,0.17718929052352905 -52,0.17456169426441193 -53,0.16311399638652802 -54,0.15102316439151764 -55,0.15459781885147095 -56,0.15450626611709595 -57,0.1473630666732788 -58,0.1491481363773346 -59,0.14329558610916138 -60,0.12221614271402359 -61,0.13275028765201569 -62,0.1365504413843155 -63,0.1282302439212799 -64,0.13268159329891205 -65,0.1333027482032776 -66,0.12160295993089676 -67,0.11914831399917603 -68,0.1262785792350769 -69,0.11744782328605652 -70,0.11652140319347382 
-71,0.1221819594502449 -72,0.11376728862524033 -73,0.11394012719392776 -74,0.1144716888666153 -75,0.11673358827829361 -76,0.12143195420503616 -77,0.11192724853754044 -78,0.1107993945479393 -79,0.11114837229251862 -80,0.11226972192525864 -81,0.11666344106197357 -82,0.10982397198677063 -83,0.11592086404561996 -84,0.11430785059928894 -85,0.11001421511173248 -86,0.11405181139707565 -87,0.10902661085128784 -88,0.10899509489536285 -89,0.10896822065114975 -90,0.10846051573753357 -91,0.1064111515879631 -92,0.10631143301725388 -93,0.10666217654943466 -94,0.1059129610657692 -95,0.10392019152641296 -96,0.10490623861551285 -97,0.10830069333314896 -98,0.10664963722229004 -99,0.10731613636016846 -100,0.10569960623979568 -101,0.10747935622930527 -102,0.10578946024179459 -103,0.10575482994318008 -104,0.10501338541507721 -105,0.1045561283826828 -106,0.1040487289428711 -107,0.10671205818653107 -108,0.1059231385588646 -109,0.1061209961771965 -110,0.10325056314468384 -111,0.10357045382261276 -112,0.10276901721954346 -113,0.10446008294820786 -114,0.10364757478237152 -115,0.10774428397417068 -116,0.10510455816984177 -117,0.11026545614004135 -118,0.11023266613483429 -119,0.1049487441778183 -120,0.10794638097286224 -121,0.10357455909252167 -122,0.11036849766969681 -123,0.1053205356001854 -124,0.11543846875429153 -125,0.12000396102666855 -126,0.11079694330692291 -127,0.10434605926275253 -128,0.1133352518081665 -129,0.11333350837230682 -130,0.10945342481136322 -131,0.11416945606470108 -132,0.1112019494175911 -133,0.1058807447552681 -134,0.11690542101860046 -135,0.11055850237607956 -136,0.11504030227661133 -137,0.12050749361515045 -138,0.11154679954051971 -139,0.10574458539485931 -140,0.10957367718219757 -141,0.10437340289354324 -142,0.10520248115062714 -143,0.10515571385622025 -144,0.10517928004264832 -145,0.10742595046758652 -146,0.11025519669055939 -147,0.10297141224145889 -148,0.10731638222932816 -149,0.10864979028701782 -150,0.10480480641126633 -151,0.10489072650671005 -152,0.10504742711782455 -153,0.10324075818061829 -154,0.11213219910860062 -155,0.10412395745515823 -156,0.11363596469163895 -157,0.11846134066581726 -158,0.11319264024496078 -159,0.10552427917718887 -160,0.11294441670179367 -161,0.11719463765621185 -162,0.11348439753055573 -163,0.10697607696056366 -164,0.10802250355482101 -165,0.1118597537279129 -166,0.109479159116745 -167,0.10576596856117249 -168,0.11028440296649933 -169,0.11454199254512787 -170,0.10921917110681534 -171,0.10529948025941849 -172,0.1038082167506218 -173,0.10755833983421326 -174,0.10519345104694366 -175,0.10237400233745575 -176,0.10867983102798462 -177,0.10380120575428009 -178,0.10684938728809357 -179,0.11071232706308365 -180,0.10523207485675812 -181,0.10824736952781677 -182,0.10977943986654282 -183,0.1035478487610817 -184,0.106789231300354 -185,0.10834815353155136 -186,0.11475469172000885 -187,0.10554592311382294 -188,0.1077628955245018 -189,0.1110517606139183 -190,0.11032932251691818 -191,0.10360336303710938 -192,0.10581094771623611 -193,0.11304594576358795 -194,0.10639164596796036 -195,0.10316465049982071 -196,0.1057066097855568 -197,0.10768542438745499 -198,0.10608693957328796 -199,0.10468229651451111 -200,0.10203105211257935 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.100/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.100/training_config.txt deleted file mode 100644 index 414ab99..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.100/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.100/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.100/training_log.csv deleted file mode 100644 index bac91eb..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,1.7564382553100586 -2,1.4102065563201904 -3,1.5133370161056519 -4,1.477989912033081 -5,1.7732182741165161 -6,1.7447196245193481 -7,1.491986870765686 -8,1.3185359239578247 -9,1.2503632307052612 -10,1.1511030197143555 -11,1.1128522157669067 -12,1.0079079866409302 -13,0.9404762983322144 -14,0.8331423997879028 -15,0.8166132569313049 -16,0.8401657342910767 -17,0.8338320255279541 -18,0.824763834476471 -19,0.8215798735618591 -20,0.8121118545532227 -21,0.7981793284416199 -22,0.7826099991798401 -23,0.7649215459823608 
-24,0.7463399171829224 -25,0.7269122004508972 -26,0.685674786567688 -27,0.6719130277633667 -28,0.6566196084022522 -29,0.6461236476898193 -30,0.637119472026825 -31,0.6292327046394348 -32,0.616506040096283 -33,0.6020943522453308 -34,0.5737008452415466 -35,0.5443545579910278 -36,0.4924120306968689 -37,0.4061889350414276 -38,0.3732151985168457 -39,0.3614524006843567 -40,0.3600784242153168 -41,0.3384068012237549 -42,0.31749626994132996 -43,0.2949640154838562 -44,0.3106078505516052 -45,0.2831968069076538 -46,0.26122429966926575 -47,0.2513507306575775 -48,0.22700658440589905 -49,0.24329020082950592 -50,0.21572673320770264 -51,0.18980719149112701 -52,0.21281775832176208 -53,0.2029283344745636 -54,0.17374658584594727 -55,0.14442165195941925 -56,0.16714946925640106 -57,0.1702657788991928 -58,0.15854839980602264 -59,0.15983019769191742 -60,0.15267261862754822 -61,0.14748093485832214 -62,0.1493981033563614 -63,0.14070186018943787 -64,0.13811901211738586 -65,0.1323980838060379 -66,0.12423272430896759 -67,0.1459108293056488 -68,0.144241601228714 -69,0.1294282078742981 -70,0.12390021234750748 -71,0.12279518693685532 -72,0.13643211126327515 -73,0.1321306973695755 -74,0.1308295726776123 -75,0.131374329328537 -76,0.12247581034898758 -77,0.13137763738632202 -78,0.12860305607318878 -79,0.10638529062271118 -80,0.11994668841362 -81,0.12371022999286652 -82,0.11678306758403778 -83,0.11537306755781174 -84,0.11357107758522034 -85,0.10681022703647614 -86,0.1067122220993042 -87,0.11255466192960739 -88,0.11193867772817612 -89,0.11019593477249146 -90,0.10326139628887177 -91,0.1107267290353775 -92,0.11122949421405792 -93,0.11211764067411423 -94,0.10551462322473526 -95,0.11225533485412598 -96,0.11231417208909988 -97,0.10720165073871613 -98,0.10763243585824966 -99,0.10914774984121323 -100,0.10604093968868256 -101,0.11231765151023865 -102,0.11660924553871155 -103,0.10797128081321716 -104,0.11187496781349182 -105,0.11101928353309631 -106,0.10624147951602936 -107,0.11384865641593933 -108,0.10901667177677155 -109,0.10586744546890259 -110,0.10953713953495026 -111,0.10661628097295761 -112,0.10921737551689148 -113,0.11042262613773346 -114,0.10796327888965607 -115,0.1036132350564003 -116,0.10713694989681244 -117,0.11119785159826279 -118,0.10435424745082855 -119,0.11336654424667358 -120,0.1213340312242508 -121,0.11651495844125748 -122,0.10268086194992065 -123,0.10973436385393143 -124,0.10678336769342422 -125,0.10951649397611618 -126,0.10534282773733139 -127,0.11131841689348221 -128,0.11119560152292252 -129,0.10475215315818787 -130,0.11154276877641678 -131,0.10867065191268921 -132,0.10535899549722672 -133,0.11321931332349777 -134,0.10698460042476654 -135,0.10380090028047562 -136,0.1031230241060257 -137,0.11716707050800323 -138,0.11631570756435394 -139,0.10549315065145493 -140,0.10606871545314789 -141,0.10881087183952332 -142,0.10179591923952103 -143,0.1002720296382904 -144,0.10603557527065277 -145,0.10025248676538467 -146,0.09870294481515884 -147,0.10385292768478394 -148,0.10065294057130814 -149,0.10579010844230652 -150,0.10643089562654495 -151,0.10115749388933182 -152,0.11148837953805923 -153,0.10368940979242325 -154,0.10799147933721542 -155,0.11135470122098923 -156,0.10060865432024002 -157,0.09987308830022812 -158,0.09665383398532867 -159,0.10159161686897278 -160,0.10270679742097855 -161,0.10037215054035187 -162,0.09942592680454254 -163,0.09827394783496857 -164,0.10236488282680511 -165,0.10182729363441467 -166,0.09967998415231705 -167,0.0976024717092514 -168,0.10544546693563461 -169,0.10311169177293777 -170,0.10749227553606033 
-171,0.1116853877902031 -172,0.10593410581350327 -173,0.10840152204036713 -174,0.11406365782022476 -175,0.09886971861124039 -176,0.11521700024604797 -177,0.1258605718612671 -178,0.12095467001199722 -179,0.10470299422740936 -180,0.11319851130247116 -181,0.12033773213624954 -182,0.109676793217659 -183,0.10421185940504074 -184,0.10562838613986969 -185,0.10582629591226578 -186,0.10762709379196167 -187,0.10264762490987778 -188,0.10137801617383957 -189,0.1052822694182396 -190,0.09674900025129318 -191,0.09649181365966797 -192,0.10269513726234436 -193,0.10064397752285004 -194,0.09573426097631454 -195,0.0976778045296669 -196,0.09396396577358246 -197,0.09885518997907639 -198,0.1028083860874176 -199,0.09967886656522751 -200,0.09791306406259537 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.125/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.125/training_config.txt deleted file mode 100644 index e9690ed..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.125/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.125/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.125/training_log.csv deleted file mode 100644 index d86933e..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,1.9045621156692505 -2,1.3995968103408813 -3,1.565283179283142 -4,1.5562368631362915 -5,1.5392417907714844 -6,2.171665668487549 -7,2.181612730026245 -8,1.730399489402771 -9,1.5497078895568848 -10,1.374951958656311 -11,1.2087481021881104 -12,1.0964096784591675 -13,0.9894232749938965 -14,0.9538858532905579 -15,0.8928349018096924 -16,0.7817351818084717 -17,0.7178166508674622 -18,0.729648232460022 -19,0.7386195659637451 -20,0.7343392372131348 -21,0.7107438445091248 -22,0.6902040243148804 -23,0.6771382093429565 -24,0.6438115835189819 -25,0.6213304400444031 -26,0.6053860783576965 -27,0.5967727303504944 -28,0.5827388763427734 -29,0.5642722249031067 -30,0.5248772501945496 -31,0.50511234998703 -32,0.4823167026042938 -33,0.44996875524520874 -34,0.3866383731365204 -35,0.37889018654823303 -36,0.3631402552127838 -37,0.38125118613243103 -38,0.39694371819496155 -39,0.41778647899627686 -40,0.4192742705345154 -41,0.41991564631462097 -42,0.4119701683521271 -43,0.40360257029533386 -44,0.3839889466762543 -45,0.3529549837112427 -46,0.3437860906124115 -47,0.33402395248413086 -48,0.3335527777671814 -49,0.327614426612854 -50,0.31660720705986023 -51,0.3020876944065094 -52,0.28999796509742737 -53,0.29583120346069336 -54,0.2777356505393982 -55,0.29477569460868835 -56,0.2912484109401703 -57,0.2814011573791504 -58,0.27202799916267395 -59,0.24954181909561157 -60,0.23458625376224518 -61,0.22061389684677124 -62,0.18977090716362 -63,0.18999916315078735 -64,0.17417685687541962 -65,0.1793673187494278 -66,0.1830199658870697 -67,0.163173645734787 -68,0.14939367771148682 -69,0.14388807117938995 -70,0.13448597490787506 
-71,0.13192127645015717 -72,0.13780680298805237 -73,0.14207156002521515 -74,0.1358393132686615 -75,0.13115622103214264 -76,0.1253879815340042 -77,0.12736263871192932 -78,0.1288186013698578 -79,0.1248847097158432 -80,0.1192508414387703 -81,0.11887671798467636 -82,0.12439849227666855 -83,0.1216423436999321 -84,0.1169462576508522 -85,0.12648221850395203 -86,0.11549890041351318 -87,0.11829575151205063 -88,0.12388748675584793 -89,0.11923786252737045 -90,0.11310471594333649 -91,0.12277067452669144 -92,0.11799514293670654 -93,0.11134215444326401 -94,0.11676780134439468 -95,0.11388380080461502 -96,0.11025341600179672 -97,0.11700249463319778 -98,0.10976795852184296 -99,0.11012638360261917 -100,0.1175704225897789 -101,0.11712589114904404 -102,0.11235492676496506 -103,0.10735036432743073 -104,0.11337699741125107 -105,0.10906622558832169 -106,0.11021123826503754 -107,0.10966537147760391 -108,0.11526913940906525 -109,0.10329121351242065 -110,0.12600845098495483 -111,0.1345047503709793 -112,0.12537086009979248 -113,0.1142050251364708 -114,0.11966116726398468 -115,0.12498317658901215 -116,0.12012677639722824 -117,0.10549049824476242 -118,0.12028446048498154 -119,0.1283523142337799 -120,0.1245347261428833 -121,0.11045289039611816 -122,0.11676888912916183 -123,0.11971796303987503 -124,0.11520741879940033 -125,0.10606487840414047 -126,0.10774333029985428 -127,0.10571879148483276 -128,0.10943331569433212 -129,0.10922707617282867 -130,0.11103913187980652 -131,0.11008667945861816 -132,0.10308774560689926 -133,0.108791783452034 -134,0.1145397201180458 -135,0.10950219631195068 -136,0.10622911155223846 -137,0.11433260887861252 -138,0.11477053910493851 -139,0.10952183604240417 -140,0.10531928390264511 -141,0.11128249019384384 -142,0.10449434816837311 -143,0.10340987890958786 -144,0.10300195217132568 -145,0.10460929572582245 -146,0.103884257376194 -147,0.10470941662788391 -148,0.09926404803991318 -149,0.10209322720766068 -150,0.09956508874893188 -151,0.1081547737121582 -152,0.10709107667207718 -153,0.10027745366096497 -154,0.10131058841943741 -155,0.10310506075620651 -156,0.09851756691932678 -157,0.1085904985666275 -158,0.11118591576814651 -159,0.10452301800251007 -160,0.10667247325181961 -161,0.10731536149978638 -162,0.10020451992750168 -163,0.11228850483894348 -164,0.11753920465707779 -165,0.11006177961826324 -166,0.10107146948575974 -167,0.11246830970048904 -168,0.11279354244470596 -169,0.10260073840618134 -170,0.10863053798675537 -171,0.11611004173755646 -172,0.11401861160993576 -173,0.10480646044015884 -174,0.1076759397983551 -175,0.1131332591176033 -176,0.10827654600143433 -177,0.10011319071054459 -178,0.10789310932159424 -179,0.11073217540979385 -180,0.1014716774225235 -181,0.11118707060813904 -182,0.11450739949941635 -183,0.11026617139577866 -184,0.10653990507125854 -185,0.10796236991882324 -186,0.097308449447155 -187,0.10655497759580612 -188,0.10818929970264435 -189,0.10681653767824173 -190,0.09934134036302567 -191,0.10500726848840714 -192,0.10000135749578476 -193,0.10303457081317902 -194,0.10364111512899399 -195,0.10069464892148972 -196,0.1085035502910614 -197,0.10889021307229996 -198,0.10509385913610458 -199,0.10277070850133896 -200,0.10961063951253891 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.160/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.160/training_config.txt deleted file mode 100644 index 5f56ae3..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.160/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.160/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.160/training_log.csv deleted file mode 100644 index f4e1f91..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,2.1863491535186768 -2,1.22118079662323 -3,1.1258351802825928 -4,0.8156316876411438 -5,0.6499906182289124 -6,0.6223911643028259 -7,0.5542542338371277 -8,0.525512158870697 -9,0.4748975932598114 -10,0.43220752477645874 -11,0.3781580328941345 -12,0.2928144037723541 -13,0.271991491317749 -14,0.2661914527416229 -15,0.26487451791763306 -16,0.2549747824668884 -17,0.23381125926971436 -18,0.2148357778787613 -19,0.20026662945747375 -20,0.1790643036365509 -21,0.1490255743265152 -22,0.16833190619945526 -23,0.1682741492986679 
-24,0.17671318352222443 -25,0.181552916765213 -26,0.18061934411525726 -27,0.17673465609550476 -28,0.16957297921180725 -29,0.15410856902599335 -30,0.13602623343467712 -31,0.12483993172645569 -32,0.13254472613334656 -33,0.1349283754825592 -34,0.13591822981834412 -35,0.13417568802833557 -36,0.12533262372016907 -37,0.12076742202043533 -38,0.12395332753658295 -39,0.11895262449979782 -40,0.1129101812839508 -41,0.11778730154037476 -42,0.11918989568948746 -43,0.11213826388120651 -44,0.11717631667852402 -45,0.11908414214849472 -46,0.11569139361381531 -47,0.10477996617555618 -48,0.11198560893535614 -49,0.11155566573143005 -50,0.10427334904670715 -51,0.11547777056694031 -52,0.11587993800640106 -53,0.11493588238954544 -54,0.10964890569448471 -55,0.1104816123843193 -56,0.11185424029827118 -57,0.1032898873090744 -58,0.10882148891687393 -59,0.11027606576681137 -60,0.10798002779483795 -61,0.11038847267627716 -62,0.11042964458465576 -63,0.10521461814641953 -64,0.10874294489622116 -65,0.11295057088136673 -66,0.11269140988588333 -67,0.10673132538795471 -68,0.11061324924230576 -69,0.10646744817495346 -70,0.11330927163362503 -71,0.11483916640281677 -72,0.10990270227193832 -73,0.10991864651441574 -74,0.10199317336082458 -75,0.10463765263557434 -76,0.10598097741603851 -77,0.10557989776134491 -78,0.10220525413751602 -79,0.10433295369148254 -80,0.1030861958861351 -81,0.10426269471645355 -82,0.10491470247507095 -83,0.10441523790359497 -84,0.10402484983205795 -85,0.10642306506633759 -86,0.10541023313999176 -87,0.10027309507131577 -88,0.10409668833017349 -89,0.10393133014440536 -90,0.1029856950044632 -91,0.10466218739748001 -92,0.10489154607057571 -93,0.10445611923933029 -94,0.10439201444387436 -95,0.1001683920621872 -96,0.10097770392894745 -97,0.10342413932085037 -98,0.10188205540180206 -99,0.1026606634259224 -100,0.1031661257147789 -101,0.10578221827745438 -102,0.1020585298538208 -103,0.1008959487080574 -104,0.10484960675239563 -105,0.10011225938796997 -106,0.10268645733594894 -107,0.10418187081813812 -108,0.09955861419439316 -109,0.1047094538807869 -110,0.10131386667490005 -111,0.102012038230896 -112,0.10033895820379257 -113,0.09950822591781616 -114,0.10384200513362885 -115,0.10179709643125534 -116,0.09846332669258118 -117,0.10058951377868652 -118,0.10438378155231476 -119,0.10857444256544113 -120,0.10015672445297241 -121,0.1138351634144783 -122,0.11542755365371704 -123,0.10272745788097382 -124,0.11261290311813354 -125,0.11687183380126953 -126,0.10798200219869614 -127,0.10656577348709106 -128,0.11012596637010574 -129,0.10164870321750641 -130,0.10638748109340668 -131,0.10515864938497543 -132,0.10966763645410538 -133,0.10868681222200394 -134,0.10489477217197418 -135,0.1117633804678917 -136,0.11589603126049042 -137,0.10877405107021332 -138,0.10325556248426437 -139,0.11108525097370148 -140,0.11535318195819855 -141,0.10834450274705887 -142,0.11093045026063919 -143,0.11057621985673904 -144,0.11687533557415009 -145,0.11324780434370041 -146,0.11914302408695221 -147,0.10865995287895203 -148,0.10758738219738007 -149,0.11705218255519867 -150,0.11256861686706543 -151,0.10941749066114426 -152,0.11188913881778717 -153,0.10918795317411423 -154,0.10674096643924713 -155,0.10644735395908356 -156,0.09974382817745209 -157,0.10635808855295181 -158,0.10824335366487503 -159,0.10642565786838531 -160,0.10253790766000748 -161,0.10340315103530884 -162,0.10813785344362259 -163,0.10619380325078964 -164,0.10205547511577606 -165,0.09926649928092957 -166,0.10060994327068329 -167,0.10422027111053467 -168,0.09759795665740967 
-169,0.10326915234327316 -170,0.0950128361582756 -171,0.10258466005325317 -172,0.10188574343919754 -173,0.0976358950138092 -174,0.10281599313020706 -175,0.10188955068588257 -176,0.10900090634822845 -177,0.10239703953266144 -178,0.10857761651277542 -179,0.1136348769068718 -180,0.10684238374233246 -181,0.10148654133081436 -182,0.1039317175745964 -183,0.09870798885822296 -184,0.10158699005842209 -185,0.09894850850105286 -186,0.0975879579782486 -187,0.0990993082523346 -188,0.10076303035020828 -189,0.09837216883897781 -190,0.09857147932052612 -191,0.09582342952489853 -192,0.10322459042072296 -193,0.10088251531124115 -194,0.09967116266489029 -195,0.10380567610263824 -196,0.10871955752372742 -197,0.10397651791572571 -198,0.09706918895244598 -199,0.10203994810581207 -200,0.09595273435115814 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.200/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.200/training_config.txt deleted file mode 100644 index 05abec9..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.200/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.200/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.200/training_log.csv deleted file mode 100644 index 47e00ea..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,2.5633933544158936 -2,1.2640786170959473 -3,3.28230619430542 -4,3.1241629123687744 -5,2.4711685180664062 -6,1.1843388080596924 -7,1.1406772136688232 -8,1.211784839630127 -9,1.3621406555175781 -10,1.3362369537353516 -11,1.134156584739685 -12,0.9951890707015991 -13,0.9845186471939087 -14,0.9411166310310364 -15,0.8766012787818909 -16,0.7641318440437317 -17,0.7254592180252075 -18,0.71737140417099 -19,0.7055376768112183 -20,0.693801760673523 -21,0.6787286400794983 -22,0.6636238098144531 -23,0.6491953134536743 -24,0.6348307132720947 -25,0.6207294464111328 -26,0.6067044138908386 -27,0.5926824808120728 -28,0.5794340968132019 -29,0.5684370398521423 -30,0.5636420845985413 -31,0.5582643151283264 -32,0.551585853099823 -33,0.5442655086517334 -34,0.5368982553482056 -35,0.5291872620582581 -36,0.5210384130477905 -37,0.5123907327651978 -38,0.5032982230186462 -39,0.4986434578895569 -40,0.49335813522338867 -41,0.48708558082580566 -42,0.4800320565700531 -43,0.47263503074645996 -44,0.4646347165107727 -45,0.45602133870124817 -46,0.44698840379714966 -47,0.4399259686470032 -48,0.4339192509651184 -49,0.42680567502975464 -50,0.41847237944602966 -51,0.40892714262008667 -52,0.39819687604904175 -53,0.3863610625267029 -54,0.37472808361053467 -55,0.36326688528060913 -56,0.3491607904434204 -57,0.3305952250957489 -58,0.31135162711143494 -59,0.2904682755470276 -60,0.29463741183280945 -61,0.3005240559577942 -62,0.2980738878250122 -63,0.29581335186958313 -64,0.29017218947410583 -65,0.2811586558818817 -66,0.26882776618003845 -67,0.25466251373291016 -68,0.23670345544815063 -69,0.21362890303134918 -70,0.18092204630374908 
-71,0.14908893406391144 -72,0.17867214977741241 -73,0.19917058944702148 -74,0.19642405211925507 -75,0.185709610581398 -76,0.17388930916786194 -77,0.14901088178157806 -78,0.14393234252929688 -79,0.16219639778137207 -80,0.16574802994728088 -81,0.1567806452512741 -82,0.1394440084695816 -83,0.11568290740251541 -84,0.13803444802761078 -85,0.15007877349853516 -86,0.15136048197746277 -87,0.136987566947937 -88,0.11756185442209244 -89,0.12509165704250336 -90,0.12692418694496155 -91,0.12310498207807541 -92,0.11445263773202896 -93,0.11555024981498718 -94,0.12038757652044296 -95,0.11233033239841461 -96,0.11209040880203247 -97,0.11736061424016953 -98,0.11571595072746277 -99,0.10762131214141846 -100,0.11585473269224167 -101,0.1171165332198143 -102,0.1076771542429924 -103,0.11778247356414795 -104,0.12548217177391052 -105,0.12194150686264038 -106,0.10663154721260071 -107,0.1169537678360939 -108,0.12618029117584229 -109,0.12081795185804367 -110,0.1020759865641594 -111,0.11835499107837677 -112,0.1292600780725479 -113,0.12618564069271088 -114,0.11009389162063599 -115,0.10894346982240677 -116,0.11817289888858795 -117,0.11349934339523315 -118,0.10446659475564957 -119,0.10829585045576096 -120,0.10649671405553818 -121,0.11260469257831573 -122,0.10652108490467072 -123,0.1123180016875267 -124,0.11318724602460861 -125,0.10254421085119247 -126,0.1095927283167839 -127,0.10457408428192139 -128,0.11109603941440582 -129,0.1153421625494957 -130,0.10673607140779495 -131,0.11064399778842926 -132,0.11232686042785645 -133,0.0999869853258133 -134,0.10259313136339188 -135,0.10069971531629562 -136,0.10687078535556793 -137,0.10339870303869247 -138,0.11345242708921432 -139,0.1124761551618576 -140,0.10160201787948608 -141,0.1171683669090271 -142,0.11846201121807098 -143,0.10827095061540604 -144,0.113421231508255 -145,0.11749856919050217 -146,0.10785795748233795 -147,0.10722152143716812 -148,0.1089281365275383 -149,0.1041518971323967 -150,0.10847196727991104 -151,0.10303500294685364 -152,0.11021856218576431 -153,0.11113637685775757 -154,0.10275840014219284 -155,0.11636938154697418 -156,0.11962950229644775 -157,0.10995414853096008 -158,0.10684834420681 -159,0.12380494177341461 -160,0.12252136319875717 -161,0.10716481506824493 -162,0.10422144830226898 -163,0.10992481559515 -164,0.10973125696182251 -165,0.10926265269517899 -166,0.10814760625362396 -167,0.10835502296686172 -168,0.10249170660972595 -169,0.10634040832519531 -170,0.11002394556999207 -171,0.1102043017745018 -172,0.1215362474322319 -173,0.11686914414167404 -174,0.11068353801965714 -175,0.11445434391498566 -176,0.10651464760303497 -177,0.10427273064851761 -178,0.1058315634727478 -179,0.10905760526657104 -180,0.10944128781557083 -181,0.09929760545492172 -182,0.09836196154356003 -183,0.10449820011854172 -184,0.10379122942686081 -185,0.1006605476140976 -186,0.10644879937171936 -187,0.10060807317495346 -188,0.10849369317293167 -189,0.1077294796705246 -190,0.09889832884073257 -191,0.09866338223218918 -192,0.10702350735664368 -193,0.11042586714029312 -194,0.10247575491666794 -195,0.10648184269666672 -196,0.10572244226932526 -197,0.10172433406114578 -198,0.10392595082521439 -199,0.10319917649030685 -200,0.10039845108985901 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.250/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.250/training_config.txt deleted file mode 100644 index 38c4087..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.250/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.250/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.250/training_log.csv deleted file mode 100644 index 147883c..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,2.958117723464966 -2,0.8228814005851746 -3,0.6566405296325684 -4,1.031280755996704 -5,0.5823011994361877 -6,0.6276980042457581 -7,0.5882408618927002 -8,0.5066269636154175 -9,0.44031643867492676 -10,0.4085299074649811 -11,0.3766898512840271 -12,0.3419921100139618 -13,0.3073914349079132 -14,0.26020413637161255 -15,0.23594187200069427 -16,0.22658629715442657 -17,0.2574898898601532 -18,0.26350516080856323 -19,0.24113915860652924 -20,0.1923733949661255 -21,0.1648111492395401 -22,0.18108509480953217 -23,0.19114291667938232 
-24,0.19443202018737793 -25,0.18677550554275513 -26,0.17228750884532928 -27,0.15721432864665985 -28,0.16598497331142426 -29,0.16462479531764984 -30,0.15291212499141693 -31,0.1330343335866928 -32,0.12918230891227722 -33,0.13387008011341095 -34,0.13660219311714172 -35,0.13481193780899048 -36,0.12777027487754822 -37,0.12087828665971756 -38,0.12358415871858597 -39,0.11901934444904327 -40,0.11565536260604858 -41,0.1132514551281929 -42,0.12094098329544067 -43,0.11970381438732147 -44,0.1197657361626625 -45,0.11453326046466827 -46,0.1223020926117897 -47,0.12204968184232712 -48,0.12534844875335693 -49,0.12201151996850967 -50,0.11165449768304825 -51,0.12713228166103363 -52,0.1341288834810257 -53,0.13802319765090942 -54,0.13142432272434235 -55,0.11395563185214996 -56,0.12665297091007233 -57,0.13654334843158722 -58,0.12596121430397034 -59,0.112862728536129 -60,0.12079519033432007 -61,0.12856170535087585 -62,0.12737680971622467 -63,0.1216268390417099 -64,0.10609150677919388 -65,0.12188772857189178 -66,0.13261830806732178 -67,0.12919379770755768 -68,0.11313541978597641 -69,0.11324146389961243 -70,0.12432772666215897 -71,0.12200035154819489 -72,0.11760302633047104 -73,0.10727259516716003 -74,0.12438531965017319 -75,0.13381561636924744 -76,0.13031430542469025 -77,0.11623011529445648 -78,0.11285322904586792 -79,0.1198066845536232 -80,0.11184599250555038 -81,0.11120530962944031 -82,0.11312280595302582 -83,0.1086774468421936 -84,0.10994118452072144 -85,0.10457770526409149 -86,0.10858944058418274 -87,0.11013426631689072 -88,0.10679198056459427 -89,0.10570477694272995 -90,0.10627342760562897 -91,0.10568898916244507 -92,0.10493652522563934 -93,0.10611726343631744 -94,0.1064395010471344 -95,0.09978067129850388 -96,0.1023767739534378 -97,0.10019358992576599 -98,0.10012149065732956 -99,0.10373133420944214 -100,0.10126931220293045 -101,0.10825379192829132 -102,0.10182718932628632 -103,0.11518043279647827 -104,0.11521799862384796 -105,0.10482148081064224 -106,0.11182905733585358 -107,0.11198963224887848 -108,0.10638224333524704 -109,0.11003334820270538 -110,0.1104782298207283 -111,0.10781363397836685 -112,0.1014292910695076 -113,0.10761678963899612 -114,0.10456285625696182 -115,0.09912816435098648 -116,0.10093879699707031 -117,0.10395320504903793 -118,0.10663782805204391 -119,0.10225342959165573 -120,0.10132215172052383 -121,0.10072150081396103 -122,0.10001859068870544 -123,0.10663445293903351 -124,0.10034241527318954 -125,0.10165569186210632 -126,0.10204381495714188 -127,0.0986558198928833 -128,0.09928424656391144 -129,0.10065317898988724 -130,0.10358072072267532 -131,0.09794145077466965 -132,0.10343914479017258 -133,0.09561653435230255 -134,0.11043912172317505 -135,0.10936813056468964 -136,0.09888260066509247 -137,0.112994484603405 -138,0.11346328258514404 -139,0.10073809325695038 -140,0.1079230085015297 -141,0.108001209795475 -142,0.09803438186645508 -143,0.10959883034229279 -144,0.10650306195020676 -145,0.09873057156801224 -146,0.09799062460660934 -147,0.10257352143526077 -148,0.10314920544624329 -149,0.09776661545038223 -150,0.094619020819664 -151,0.10107620805501938 -152,0.09835455566644669 -153,0.10297175496816635 -154,0.10355254262685776 -155,0.09543588012456894 -156,0.10579553991556168 -157,0.10952318459749222 -158,0.10765306651592255 -159,0.1010316014289856 -160,0.09798994660377502 -161,0.10104088485240936 -162,0.10291346162557602 -163,0.10069282352924347 -164,0.10085078328847885 -165,0.09489597380161285 -166,0.09599227458238602 -167,0.09689439833164215 -168,0.09818142652511597 -169,0.09835472702980042 
-170,0.0994315817952156 -171,0.09734860062599182 -172,0.09929701685905457 -173,0.10186690837144852 -174,0.09542078524827957 -175,0.09928111732006073 -176,0.08958400785923004 -177,0.09597950428724289 -178,0.09212342649698257 -179,0.09213405847549438 -180,0.09906558692455292 -181,0.10204850882291794 -182,0.0928349643945694 -183,0.11069227010011673 -184,0.11675156652927399 -185,0.1046043336391449 -186,0.09649759531021118 -187,0.1045922264456749 -188,0.09551648795604706 -189,0.0948028638958931 -190,0.09866579622030258 -191,0.0909261703491211 -192,0.09237450361251831 -193,0.08975859731435776 -194,0.08940213173627853 -195,0.0953289344906807 -196,0.09117235988378525 -197,0.10493902117013931 -198,0.10540591925382614 -199,0.09080922603607178 -200,0.10852352529764175 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.300/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.300/training_config.txt deleted file mode 100644 index 9c848bb..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.300/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.300/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.300/training_log.csv deleted file mode 100644 index af82af4..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.300/training_log.csv +++ /dev/null @@ -1,57 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,3.0862982273101807 -2,0.8975382447242737 -3,0.7943339943885803 -4,3.4232873916625977 -5,3.592200994491577 -6,3.602565288543701 -7,3.606079578399658 -8,3.6077446937561035 -9,3.6082425117492676 -10,3.608309030532837 -11,3.608285903930664 -12,3.608123540878296 -13,3.6077919006347656 -14,3.6072845458984375 -15,3.6067821979522705 -16,3.6062300205230713 -17,3.6055972576141357 -18,3.604887008666992 -19,3.6041765213012695 -20,3.6035139560699463 -21,3.6029915809631348 -22,3.6025898456573486 -23,3.602213144302368 -24,3.601771354675293 -25,3.6012094020843506 -26,3.6004981994628906 -27,3.5996570587158203 -28,3.5985958576202393 -29,3.5973470211029053 -30,3.5959129333496094 -31,3.5942442417144775 -32,3.5923056602478027 -33,3.5900795459747314 -34,3.5873756408691406 -35,3.583836555480957 -36,3.5787429809570312 -37,3.573512315750122 -38,3.566936731338501 -39,3.559589385986328 -40,3.5529544353485107 -41,3.546844482421875 -42,3.540074586868286 -43,3.5307528972625732 -44,3.512153387069702 -45,3.479426860809326 -46,3.4222183227539062 -47,3.3110432624816895 -48,3.0277936458587646 -49,2.3387563228607178 -50,1.2730333805084229 -51,1.0669808387756348 -52,nan -53,nan -54,nan -55,nan diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.400/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.400/training_config.txt deleted file mode 100644 index 9ef64b2..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.400/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth 
-Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.400/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.400/training_log.csv deleted file mode 100644 index f5ca1fa..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.400/training_log.csv +++ /dev/null @@ -1,53 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,3.3481249809265137 -2,2.0960352420806885 -3,0.6947607398033142 -4,0.5561820864677429 -5,0.4216906428337097 -6,0.34008079767227173 -7,0.33160075545310974 -8,0.2826291620731354 -9,0.3030816316604614 -10,0.2958921790122986 -11,0.2569655776023865 -12,0.22248737514019012 -13,0.2198162078857422 -14,0.20333054661750793 -15,0.16842415928840637 -16,0.12989185750484467 -17,0.1616332232952118 -18,0.17635484039783478 -19,0.1829419583082199 -20,0.17252272367477417 -21,0.16091398894786835 -22,0.14389626681804657 -23,0.13087843358516693 -24,0.13418570160865784 -25,0.12324725836515427 -26,0.11749763786792755 
-27,0.12045533210039139 -28,0.1182689443230629 -29,0.11624954640865326 -30,0.21294298768043518 -31,0.7375748157501221 -32,1.323203444480896 -33,1.9531601667404175 -34,2.689584493637085 -35,3.26188063621521 -36,3.3934812545776367 -37,3.4397432804107666 -38,3.4396698474884033 -39,3.427288770675659 -40,3.4228692054748535 -41,3.4899368286132812 -42,3.540362596511841 -43,3.5381627082824707 -44,3.571392059326172 -45,3.6102092266082764 -46,3.610281229019165 -47,3.610281229019165 -48,3.610281229019165 -49,3.610281229019165 -50,3.610281229019165 -51,3.610281229019165 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.500/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.500/training_config.txt deleted file mode 100644 index dfb168e..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.500/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.500/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.500/training_log.csv deleted file mode 100644 index e395426..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.500/training_log.csv +++ /dev/null @@ -1,22 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,3.5755066871643066 -2,2.9182827472686768 -3,0.843454897403717 -4,0.6245173811912537 -5,3.5973763465881348 -6,3.6026883125305176 -7,3.6037847995758057 -8,3.604029893875122 -9,3.602764844894409 -10,3.4502792358398438 -11,3.2931203842163086 -12,2.9238834381103516 -13,2.7024707794189453 -14,2.3220434188842773 -15,0.616536557674408 -16,0.4325403869152069 -17,nan -18,nan -19,nan -20,nan diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.600/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.600/training_config.txt deleted file mode 100644 index 6a5daec..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.600/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> 
torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.600/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.600/training_log.csv deleted file mode 100644 index c9b9d3f..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.600/training_log.csv +++ /dev/null @@ -1,17 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,3.6139354705810547 -2,3.4909169673919678 -3,2.3145945072174072 -4,0.6437315940856934 -5,0.5556336641311646 -6,0.44724252820014954 -7,0.3653893768787384 -8,0.3098275363445282 -9,0.29804766178131104 -10,0.24801962077617645 -11,0.1908198446035385 -12,nan -13,nan -14,nan -15,nan diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.700/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.700/training_config.txt deleted file mode 100644 index 6e39c87..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.700/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def 
normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.700/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.700/training_log.csv deleted file mode 100644 index d80a97d..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.700/training_log.csv +++ /dev/null @@ -1,12 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,3.621945381164551 -2,3.6227030754089355 -3,3.619267702102661 -4,3.5196328163146973 -5,3.076256036758423 -6,1.6538499593734741 -7,nan -8,nan -9,nan -10,nan diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.800/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.800/training_config.txt deleted file mode 100644 index 235e9be..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.800/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def 
normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.800/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.800/training_log.csv deleted file mode 100644 index a26f43e..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,3.6231026649475098 -2,3.623187780380249 -3,3.623187780380249 -4,3.623187780380249 -5,3.623187780380249 -6,3.623187780380249 -7,3.623187780380249 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.900/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.900/training_config.txt deleted file mode 100644 index 5245ec8..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.900/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: 
torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.900/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_0.900/training_log.csv deleted file mode 100644 index ec8007d..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_0.900/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,3.623187780380249 -2,3.623187780380249 -3,3.623187780380249 -4,3.623187780380249 -5,3.623187780380249 -6,3.623187780380249 diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_1.000/training_config.txt b/training/base_loss_learning_rate_sweep/one_fifth/lr_1.000/training_config.txt deleted file mode 100644 index f757693..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_1.000/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function Name: one_fifth -Loss Function Exponent: 0.2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fifth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/5, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, 
exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fifth/lr_1.000/training_log.csv b/training/base_loss_learning_rate_sweep/one_fifth/lr_1.000/training_log.csv deleted file mode 100644 index ec8007d..0000000 --- a/training/base_loss_learning_rate_sweep/one_fifth/lr_1.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,1.648984670639038 -1,3.623187780380249 -2,3.623187780380249 -3,3.623187780380249 -4,3.623187780380249 -5,3.623187780380249 -6,3.623187780380249 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.003/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.003/training_config.txt deleted file mode 100644 index 0b095ad..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.003/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.0025 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 
0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.003/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.003/training_log.csv deleted file mode 100644 index 7e0614a..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.003/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,1.7095179557800293 -2,1.6527163982391357 -3,1.6859534978866577 -4,1.6860325336456299 -5,1.6936945915222168 -6,1.6791810989379883 -7,1.6705375909805298 -8,1.6773838996887207 -9,1.6884124279022217 -10,1.6887431144714355 -11,1.6803241968154907 -12,1.666319727897644 -13,1.6688265800476074 -14,1.6644991636276245 -15,1.6501127481460571 -16,1.6463627815246582 -17,1.6375945806503296 -18,1.6159824132919312 -19,1.5972034931182861 -20,1.5997228622436523 -21,1.5964065790176392 -22,1.5890376567840576 -23,1.599177598953247 -24,1.5938475131988525 -25,1.5821106433868408 -26,1.5760976076126099 -27,1.570220708847046 -28,1.5581337213516235 -29,1.5860449075698853 -30,1.5889626741409302 -31,1.5785317420959473 -32,1.5574012994766235 -33,1.5449647903442383 -34,1.5374232530593872 -35,1.535811424255371 -36,1.5310090780258179 -37,1.527225136756897 -38,1.524773120880127 -39,1.5177010297775269 -40,1.5065888166427612 -41,1.4895774126052856 -42,1.4578595161437988 -43,1.4379303455352783 -44,1.411708950996399 -45,1.419094204902649 -46,1.408826470375061 -47,1.4121299982070923 -48,1.413943886756897 -49,1.4104183912277222 -50,1.4045926332473755 -51,1.3837628364562988 -52,1.4058603048324585 -53,1.4045401811599731 -54,1.4183677434921265 -55,1.427010416984558 -56,1.4281734228134155 -57,1.4250608682632446 -58,1.4063243865966797 -59,1.4056870937347412 -60,1.404737949371338 
-61,1.409575343132019 -62,1.3924421072006226 -63,1.3869603872299194 -64,1.3752703666687012 -65,1.3630539178848267 -66,1.3510265350341797 -67,1.3328427076339722 -68,1.33834707736969 -69,1.3371027708053589 -70,1.368423581123352 -71,1.3575094938278198 -72,1.3871084451675415 -73,1.3944947719573975 -74,1.4000184535980225 -75,1.4032936096191406 -76,1.403072476387024 -77,1.3791402578353882 -78,1.4031370878219604 -79,1.3980293273925781 -80,1.3877089023590088 -81,1.3804714679718018 -82,1.380277156829834 -83,1.3872231245040894 -84,1.3958441019058228 -85,1.3945283889770508 -86,1.391401767730713 -87,1.3887346982955933 -88,1.3877500295639038 -89,1.3888449668884277 -90,1.38897705078125 -91,1.3872463703155518 -92,1.383994221687317 -93,1.3813902139663696 -94,1.3798190355300903 -95,1.378953218460083 -96,1.3778318166732788 -97,1.37595796585083 -98,1.3733360767364502 -99,1.3705748319625854 -100,1.3684051036834717 -101,1.3666141033172607 -102,1.364837646484375 -103,1.3626271486282349 -104,1.3593729734420776 -105,1.3493516445159912 -106,1.3404514789581299 -107,1.3429628610610962 -108,1.3432424068450928 -109,1.3465745449066162 -110,1.3331499099731445 -111,1.313061237335205 -112,1.3132555484771729 -113,1.311843752861023 -114,1.3177376985549927 -115,1.3101197481155396 -116,1.3248379230499268 -117,1.325218677520752 -118,1.325307846069336 -119,1.3241124153137207 -120,1.3188951015472412 -121,1.3254921436309814 -122,1.330551266670227 -123,1.338739037513733 -124,1.3481199741363525 -125,1.354309320449829 -126,1.3419253826141357 -127,1.3375537395477295 -128,1.3406076431274414 -129,1.3425184488296509 -130,1.3438317775726318 -131,1.3444911241531372 -132,1.3437985181808472 -133,1.3348580598831177 -134,1.3452796936035156 -135,1.3490207195281982 -136,1.35148024559021 -137,1.353372573852539 -138,1.3547389507293701 -139,1.355584740638733 -140,1.3560761213302612 -141,1.3563342094421387 -142,1.3564369678497314 -143,1.3564831018447876 -144,1.3565528392791748 -145,1.3566184043884277 -146,1.3565713167190552 -147,1.3564226627349854 -148,1.3563209772109985 -149,1.3563039302825928 -150,1.3563132286071777 -151,1.3562917709350586 -152,1.3561899662017822 -153,1.3559932708740234 -154,1.3557158708572388 -155,1.3553895950317383 -156,1.3550419807434082 -157,1.3546990156173706 -158,1.354384183883667 -159,1.354097843170166 -160,1.3538362979888916 -161,1.353592872619629 -162,1.3533498048782349 -163,1.3530985116958618 -164,1.3528293371200562 -165,1.3525437116622925 -166,1.3522411584854126 -167,1.351927399635315 -168,1.351613163948059 -169,1.351300597190857 -170,1.3509985208511353 -171,1.3507102727890015 -172,1.3504339456558228 -173,1.3501697778701782 -174,1.3499137163162231 -175,1.3496649265289307 -176,1.3494157791137695 -177,1.3491640090942383 -178,1.348913550376892 -179,1.3486610651016235 -180,1.3484069108963013 -181,1.348151445388794 -182,1.347893476486206 -183,1.3476362228393555 -184,1.3473749160766602 -185,1.3471134901046753 -186,1.3468505144119263 -187,1.346591830253601 -188,1.34633469581604 -189,1.3460774421691895 -190,1.3458138704299927 -191,1.3455389738082886 -192,1.3452417850494385 -193,1.3449082374572754 -194,1.3444985151290894 -195,1.3438857793807983 -196,1.3393784761428833 -197,1.3323872089385986 -198,1.331299901008606 -199,1.3315638303756714 -200,1.3319547176361084 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.005/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.005/training_config.txt deleted file mode 100644 index 2860d04..0000000 --- 
a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.005/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.005 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.005/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.005/training_log.csv deleted file mode 100644 index dd64f11..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.005/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,1.6943824291229248 -2,1.702987551689148 -3,1.688840389251709 -4,1.6477051973342896 -5,1.6366324424743652 -6,1.625512957572937 -7,1.6285818815231323 -8,1.6402634382247925 -9,1.6357043981552124 -10,1.622266173362732 -11,1.6007122993469238 -12,1.5744653940200806 -13,1.5495424270629883 -14,1.5117613077163696 -15,1.4769303798675537 -16,1.4775563478469849 -17,1.4699223041534424 
-18,1.4609488248825073 -19,1.4009455442428589 -20,1.368247151374817 -21,1.3507299423217773 -22,1.3295680284500122 -23,1.3082525730133057 -24,1.2872271537780762 -25,1.280369520187378 -26,1.2530345916748047 -27,1.2312796115875244 -28,1.2339750528335571 -29,1.2092909812927246 -30,1.1972627639770508 -31,1.187126636505127 -32,1.178499460220337 -33,1.1615504026412964 -34,1.162696361541748 -35,1.1615711450576782 -36,1.1738176345825195 -37,1.222111463546753 -38,1.2560876607894897 -39,1.2650569677352905 -40,1.267662763595581 -41,1.2669181823730469 -42,1.2647727727890015 -43,1.2604464292526245 -44,1.2277272939682007 -45,1.2257221937179565 -46,1.2167518138885498 -47,1.2136542797088623 -48,1.2131860256195068 -49,1.2178231477737427 -50,1.2203381061553955 -51,1.218098759651184 -52,1.2040458917617798 -53,1.1971453428268433 -54,1.188370704650879 -55,1.1691594123840332 -56,1.1902141571044922 -57,1.1727447509765625 -58,1.1744486093521118 -59,1.1768863201141357 -60,1.1761547327041626 -61,1.173809289932251 -62,1.1708385944366455 -63,1.1681078672409058 -64,1.174796223640442 -65,1.177417278289795 -66,1.168410301208496 -67,1.177575945854187 -68,1.1903880834579468 -69,1.1650937795639038 -70,1.1606460809707642 -71,1.149785041809082 -72,1.1515541076660156 -73,1.1413061618804932 -74,1.1295790672302246 -75,1.1307076215744019 -76,1.1317600011825562 -77,1.1319457292556763 -78,1.131203055381775 -79,1.1294236183166504 -80,1.1249796152114868 -81,1.1135504245758057 -82,1.096404790878296 -83,1.0739963054656982 -84,1.0756381750106812 -85,1.070771336555481 -86,1.0652254819869995 -87,1.061496615409851 -88,1.0535351037979126 -89,1.034406304359436 -90,1.0449376106262207 -91,1.042389154434204 -92,1.0334268808364868 -93,1.0315791368484497 -94,1.0175327062606812 -95,1.014047384262085 -96,1.01158607006073 -97,1.0161182880401611 -98,1.0177823305130005 -99,1.0174366235733032 -100,1.0129565000534058 -101,1.0129643678665161 -102,1.0139861106872559 -103,1.0142898559570312 -104,1.0142351388931274 -105,1.013837456703186 -106,1.0209336280822754 -107,1.0180597305297852 -108,1.0178555250167847 -109,1.0179544687271118 -110,1.0179386138916016 -111,1.017765760421753 -112,1.017448902130127 -113,1.0170135498046875 -114,1.016478180885315 -115,1.0158612728118896 -116,1.015179991722107 -117,1.0144474506378174 -118,1.0136783123016357 -119,1.012884259223938 -120,1.0120899677276611 -121,1.0113219022750854 -122,1.0106322765350342 -123,1.0100960731506348 -124,1.0097572803497314 -125,1.0092298984527588 -126,1.008407711982727 -127,1.0077201128005981 -128,1.0071810483932495 -129,1.006707787513733 -130,1.0062506198883057 -131,1.0057802200317383 -132,1.0052902698516846 -133,1.0047842264175415 -134,1.0042678117752075 -135,1.003753423690796 -136,1.0032507181167603 -137,1.0027692317962646 -138,1.002320408821106 -139,1.0019044876098633 -140,1.0015106201171875 -141,1.0011177062988281 -142,1.000699758529663 -143,1.0002647638320923 -144,0.9998356699943542 -145,0.9994271397590637 -146,0.9990410804748535 -147,0.998675525188446 -148,0.9983207583427429 -149,0.9979793429374695 -150,0.9976497888565063 -151,0.9973384141921997 -152,0.9970499873161316 -153,0.9967957139015198 -154,0.9965807795524597 -155,0.9963887333869934 -156,0.9961811304092407 -157,0.9959279894828796 -158,0.995635986328125 -159,0.9953477382659912 -160,0.9950860142707825 -161,0.9948452711105347 -162,0.9946129322052002 -163,0.9943796992301941 -164,0.9941418766975403 -165,0.9938961267471313 -166,0.9936437606811523 -167,0.9933875799179077 -168,0.9931350350379944 -169,0.9928900003433228 -170,0.9926567673683167 
-171,0.9924327731132507 -172,0.9922075867652893 -173,0.9919676184654236 -174,0.9917144775390625 -175,0.9914630651473999 -176,0.9912222027778625 -177,0.9909847974777222 -178,0.9907481074333191 -179,0.9905049800872803 -180,0.9902569055557251 -181,0.9900078177452087 -182,0.9897611737251282 -183,0.9895155429840088 -184,0.9892719984054565 -185,0.9890261292457581 -186,0.988775908946991 -187,0.9885253310203552 -188,0.9882734417915344 -189,0.9880231022834778 -190,0.9877715706825256 -191,0.9875186681747437 -192,0.9872635006904602 -193,0.9870073795318604 -194,0.9867498278617859 -195,0.9864915609359741 -196,0.9862328767776489 -197,0.9859719276428223 -198,0.9857109189033508 -199,0.9854478240013123 -200,0.9851830005645752 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.010/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.010/training_config.txt deleted file mode 100644 index 89ef18e..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.010/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.010/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.010/training_log.csv deleted file mode 100644 index 0c1ecf2..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,1.7197374105453491 -2,1.6973302364349365 -3,1.660200834274292 -4,1.6254853010177612 -5,1.6432315111160278 -6,1.6069681644439697 -7,1.5976532697677612 -8,1.5427677631378174 -9,1.5090266466140747 -10,1.5191278457641602 -11,1.505623698234558 -12,1.4532363414764404 -13,1.402607798576355 -14,1.3691133260726929 -15,1.3298923969268799 -16,1.320536494255066 -17,1.2876288890838623 -18,1.2752108573913574 -19,1.2075657844543457 -20,1.198596477508545 -21,1.195318579673767 -22,1.159224510192871 -23,1.156866431236267 -24,1.0961354970932007 -25,1.0699689388275146 -26,1.0761741399765015 -27,1.0637803077697754 -28,1.036228895187378 -29,0.9949517846107483 -30,0.98835289478302 -31,0.9750354290008545 -32,0.9557214379310608 -33,0.9549897909164429 -34,0.9337149262428284 -35,0.9298276901245117 -36,0.9284268617630005 -37,0.9370652437210083 -38,0.9462317228317261 -39,0.9502404928207397 -40,0.9538318514823914 -41,0.9695411920547485 -42,0.9501056671142578 -43,0.9240241646766663 -44,0.9199616312980652 -45,0.9389179944992065 -46,0.9426783323287964 -47,0.9460600018501282 -48,0.9361833930015564 -49,0.9316400289535522 -50,0.9314329624176025 -51,0.9343132972717285 -52,0.9393123984336853 -53,0.9350566267967224 -54,0.932072639465332 -55,0.930029034614563 -56,0.9283632636070251 -57,0.9199985861778259 -58,0.8956100940704346 -59,0.9021239280700684 -60,0.8910646438598633 -61,0.864869236946106 -62,0.8599783182144165 -63,0.8474463820457458 -64,0.8290667533874512 -65,0.8235291242599487 -66,0.8196908831596375 -67,0.8153508305549622 -68,0.8111757040023804 -69,0.8094183206558228 -70,0.8047997355461121 -71,0.8038548231124878 
-72,0.8043690323829651 -73,0.8082161545753479 -74,0.7969765663146973 -75,0.7904513478279114 -76,0.7823737859725952 -77,0.7802255749702454 -78,0.7770637273788452 -79,0.7729499936103821 -80,0.7673047780990601 -81,0.7674883604049683 -82,0.7565836906433105 -83,0.7561835646629333 -84,0.7750470638275146 -85,0.7670753002166748 -86,0.7668288350105286 -87,0.7643964886665344 -88,0.7664310932159424 -89,0.7664807438850403 -90,0.7655986547470093 -91,0.762714684009552 -92,0.7533164024353027 -93,0.7597295045852661 -94,0.7560907602310181 -95,0.7556574940681458 -96,0.7561959624290466 -97,0.7561521530151367 -98,0.755056619644165 -99,0.7535369992256165 -100,0.7474843859672546 -101,0.7463672161102295 -102,0.76022869348526 -103,0.7608368396759033 -104,0.7507451772689819 -105,0.7488868832588196 -106,0.7421762347221375 -107,0.7102901935577393 -108,0.7309862375259399 -109,0.734150767326355 -110,0.7346704602241516 -111,0.7335329651832581 -112,0.729004979133606 -113,0.713294506072998 -114,0.7058188319206238 -115,0.7132059335708618 -116,0.7122992873191833 -117,0.7080503702163696 -118,0.6976969838142395 -119,0.6962044835090637 -120,0.6934767961502075 -121,0.6904560923576355 -122,0.6914603114128113 -123,0.6920540928840637 -124,0.6917093396186829 -125,0.6903112530708313 -126,0.6875757575035095 -127,0.6866225600242615 -128,0.6868538856506348 -129,0.686264157295227 -130,0.6820198893547058 -131,0.6781589388847351 -132,0.6777008771896362 -133,0.6765717267990112 -134,0.6759493350982666 -135,0.6747256517410278 -136,0.671966016292572 -137,0.6728705763816833 -138,0.6725016236305237 -139,0.6711512804031372 -140,0.6689977645874023 -141,0.6659990549087524 -142,0.666405200958252 -143,0.6657832264900208 -144,0.6632097363471985 -145,0.664437472820282 -146,0.6657446026802063 -147,0.6655445694923401 -148,0.6644737124443054 -149,0.662656307220459 -150,0.6596994400024414 -151,0.6531162261962891 -152,0.653213381767273 -153,0.6440640687942505 -154,0.6445842981338501 -155,0.6442936658859253 -156,0.6439341306686401 -157,0.644210696220398 -158,0.6446107625961304 -159,0.6443482041358948 -160,0.6435532569885254 -161,0.6423197984695435 -162,0.6420142650604248 -163,0.6416265964508057 -164,0.6409299969673157 -165,0.6397722363471985 -166,0.6378921270370483 -167,0.6370236277580261 -168,0.6357962489128113 -169,0.6369701027870178 -170,0.6371928453445435 -171,0.6353068947792053 -172,0.6349373459815979 -173,0.6338311433792114 -174,0.63181072473526 -175,0.6322062015533447 -176,0.6303260326385498 -177,0.6293587684631348 -178,0.6277531385421753 -179,0.6254082918167114 -180,0.623823881149292 -181,0.6203795075416565 -182,0.6189556121826172 -183,0.6168811321258545 -184,0.6146014928817749 -185,0.6116983890533447 -186,0.6097431182861328 -187,0.6075665950775146 -188,0.6053000092506409 -189,0.605588972568512 -190,0.6057565212249756 -191,0.6053577065467834 -192,0.6048108339309692 -193,0.6039908528327942 -194,0.6043931245803833 -195,0.6031497120857239 -196,0.6028684973716736 -197,0.6016789674758911 -198,0.6008641123771667 -199,0.6013892292976379 -200,0.5998504757881165 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.020/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.020/training_config.txt deleted file mode 100644 index bfe9845..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.020/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 
-Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.020/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.020/training_log.csv deleted file mode 100644 index 8639d47..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,1.7368121147155762 -2,1.686917781829834 -3,1.5895603895187378 -4,1.4756200313568115 -5,1.3504817485809326 -6,1.2610491514205933 -7,1.2560375928878784 -8,1.1945266723632812 -9,1.1948857307434082 -10,1.181138277053833 -11,1.1898690462112427 -12,1.1369988918304443 -13,1.1125118732452393 -14,1.1104367971420288 -15,1.0962363481521606 -16,1.0850435495376587 -17,1.0504580736160278 -18,1.038218379020691 -19,1.0097545385360718 -20,0.9685238599777222 -21,0.9682077169418335 -22,0.9427229166030884 -23,0.9570072889328003 -24,0.959062933921814 -25,0.9290242195129395 -26,0.9235204458236694 -27,0.9155319333076477 -28,0.9123177528381348 -29,0.8821287155151367 
-30,0.8579422831535339 -31,0.8168681859970093 -32,0.818242609500885 -33,0.7954636812210083 -34,0.7826253771781921 -35,0.7805294990539551 -36,0.7890610098838806 -37,0.7904859781265259 -38,0.7917928695678711 -39,0.7943702936172485 -40,0.7951298952102661 -41,0.7949166297912598 -42,0.7892999649047852 -43,0.8118125200271606 -44,0.813446044921875 -45,0.7944542169570923 -46,0.801439106464386 -47,0.7973641753196716 -48,0.7944254875183105 -49,0.7971693277359009 -50,0.7927452921867371 -51,0.7877364158630371 -52,0.7831223607063293 -53,0.7797466516494751 -54,0.7736339569091797 -55,0.7720046043395996 -56,0.7710026502609253 -57,0.771065354347229 -58,0.7704401612281799 -59,0.768435537815094 -60,0.7672090530395508 -61,0.7668871879577637 -62,0.7661851644515991 -63,0.76518714427948 -64,0.7635550498962402 -65,0.7607097625732422 -66,0.7543734312057495 -67,0.7453753352165222 -68,0.7743762135505676 -69,0.7764243483543396 -70,0.7716860175132751 -71,0.776652991771698 -72,0.7791141867637634 -73,0.7834314703941345 -74,0.7850832939147949 -75,0.7790513038635254 -76,0.7958233952522278 -77,0.8074731826782227 -78,0.8142993450164795 -79,0.8221749067306519 -80,0.8090649843215942 -81,0.803492546081543 -82,0.7975133061408997 -83,0.7931302189826965 -84,0.7926924824714661 -85,0.7839614748954773 -86,0.7836201190948486 -87,0.7791435718536377 -88,0.7789539098739624 -89,0.780070424079895 -90,0.7837070226669312 -91,0.7739121317863464 -92,0.7692332863807678 -93,0.7630419731140137 -94,0.7600550651550293 -95,0.7573501467704773 -96,0.746745765209198 -97,0.7397337555885315 -98,0.7495510578155518 -99,0.7614642977714539 -100,0.7627988457679749 -101,0.7609210014343262 -102,0.7561967968940735 -103,0.7450494766235352 -104,0.7353449463844299 -105,0.7341433167457581 -106,0.7224351763725281 -107,0.7185412645339966 -108,0.7146376967430115 -109,0.7102459669113159 -110,0.6997371912002563 -111,0.6922457814216614 -112,0.6850433349609375 -113,0.6788449287414551 -114,0.6729808449745178 -115,0.6681844592094421 -116,0.6598499417304993 -117,0.643014132976532 -118,0.6389641761779785 -119,0.6332361698150635 -120,0.6323096752166748 -121,0.6142994165420532 -122,0.5989659428596497 -123,0.6017979383468628 -124,0.5973343253135681 -125,0.5854955911636353 -126,0.5771716833114624 -127,0.5792672634124756 -128,0.5734975934028625 -129,0.5655572414398193 -130,0.5643900632858276 -131,0.5623900294303894 -132,0.5575323700904846 -133,0.5592935085296631 -134,0.5555965900421143 -135,0.5561732053756714 -136,0.5527864694595337 -137,0.5468209385871887 -138,0.5409223437309265 -139,0.5381744503974915 -140,0.5373021364212036 -141,0.5511252284049988 -142,0.5474002957344055 -143,0.5432632565498352 -144,0.5464993119239807 -145,0.5467693209648132 -146,0.5453659296035767 -147,0.5436545014381409 -148,0.5415343046188354 -149,0.5387001037597656 -150,0.5355272889137268 -151,0.5340492129325867 -152,0.5340378880500793 -153,0.532354474067688 -154,0.5291454195976257 -155,0.5262129902839661 -156,0.5228156447410583 -157,0.5169269442558289 -158,0.5094068050384521 -159,0.5053744912147522 -160,0.5105534195899963 -161,0.5087038278579712 -162,0.494091659784317 -163,0.5034729242324829 -164,0.496415913105011 -165,0.5055012702941895 -166,0.5086771845817566 -167,0.5090999603271484 -168,0.5097966194152832 -169,0.5101670622825623 -170,0.5086668133735657 -171,0.5062201023101807 -172,0.5036429166793823 -173,0.5000672936439514 -174,0.49509045481681824 -175,0.488479882478714 -176,0.4744621813297272 -177,0.4775654375553131 -178,0.4844098389148712 -179,0.46441859006881714 -180,0.44938069581985474 
-181,0.46719643473625183 -182,0.45368003845214844 -183,0.4479310214519501 -184,0.4309684932231903 -185,0.4159008264541626 -186,0.4012814164161682 -187,0.385877788066864 -188,0.3705442547798157 -189,0.3500499725341797 -190,0.32325685024261475 -191,0.3139200508594513 -192,0.3067973256111145 -193,0.3005402684211731 -194,0.2917519807815552 -195,0.28130292892456055 -196,0.27516791224479675 -197,0.26904240250587463 -198,0.2608562707901001 -199,0.26735323667526245 -200,0.2665662169456482 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.040/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.040/training_config.txt deleted file mode 100644 index a577cfb..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.040/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.040/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.040/training_log.csv deleted file mode 100644 index 03cb6dc..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,1.745347261428833 -2,1.5536308288574219 -3,1.4162877798080444 -4,1.285792589187622 -5,1.3547260761260986 -6,1.3423008918762207 -7,1.3679019212722778 -8,1.3802825212478638 -9,1.3856616020202637 -10,1.363811731338501 -11,1.3810633420944214 -12,1.3788013458251953 -13,1.3518818616867065 -14,1.3230856657028198 -15,1.2163982391357422 -16,1.1663957834243774 -17,1.1394739151000977 -18,1.0643647909164429 -19,1.0130934715270996 -20,0.9356722235679626 -21,0.9167641401290894 -22,0.844606876373291 -23,0.7813268899917603 -24,0.7433270812034607 -25,0.7116581797599792 -26,0.6962543725967407 -27,0.6672810912132263 -28,0.6296089291572571 -29,0.6103111505508423 -30,0.5859113335609436 -31,0.5778296589851379 -32,0.5750982761383057 -33,0.5744791626930237 -34,0.5713178515434265 -35,0.5668491721153259 -36,0.5614168643951416 -37,0.554591715335846 -38,0.5370392799377441 -39,0.5249916315078735 -40,0.5131415724754333 -41,0.512813925743103 -42,0.5089086294174194 -43,0.5032411813735962 -44,0.4962730407714844 -45,0.487618088722229 -46,0.4764236509799957 -47,0.45762544870376587 -48,0.43748539686203003 -49,0.42167457938194275 -50,0.4048764705657959 -51,0.3955472707748413 -52,0.38613826036453247 -53,0.3781187832355499 -54,0.35153311491012573 -55,0.3165009319782257 -56,0.3024257719516754 -57,0.28940996527671814 -58,0.2773687243461609 -59,0.26358428597450256 -60,0.2511427700519562 -61,0.2484617829322815 -62,0.23883011937141418 -63,0.22594185173511505 -64,0.20807795226573944 -65,0.1799604296684265 -66,0.1933739334344864 -67,0.19452597200870514 -68,0.19109854102134705 -69,0.17952843010425568 -70,0.17279484868049622 
-71,0.1741500198841095 -72,0.16363929212093353 -73,0.16230671107769012 -74,0.17388121783733368 -75,0.1700432151556015 -76,0.1530924141407013 -77,0.16531236469745636 -78,0.16859586536884308 -79,0.165288046002388 -80,0.15707281231880188 -81,0.15843814611434937 -82,0.15696682035923004 -83,0.15189078450202942 -84,0.14262887835502625 -85,0.14934031665325165 -86,0.15046335756778717 -87,0.1464373618364334 -88,0.13712193071842194 -89,0.13602374494075775 -90,0.13591988384723663 -91,0.13781097531318665 -92,0.1327839195728302 -93,0.13197633624076843 -94,0.1331455111503601 -95,0.12969017028808594 -96,0.1285659670829773 -97,0.13008999824523926 -98,0.1289069652557373 -99,0.1282588690519333 -100,0.1261896938085556 -101,0.12552519142627716 -102,0.12852632999420166 -103,0.12561000883579254 -104,0.12930192053318024 -105,0.12439857423305511 -106,0.13004370033740997 -107,0.13115063309669495 -108,0.12670400738716125 -109,0.12894049286842346 -110,0.12706609070301056 -111,0.1252181977033615 -112,0.12758362293243408 -113,0.12222443521022797 -114,0.12703295052051544 -115,0.12536749243736267 -116,0.12876220047473907 -117,0.13137118518352509 -118,0.12740598618984222 -119,0.11949368566274643 -120,0.1336873471736908 -121,0.13503217697143555 -122,0.12621425092220306 -123,0.124782495200634 -124,0.12890270352363586 -125,0.1274586319923401 -126,0.11973773688077927 -127,0.12600693106651306 -128,0.12645907700061798 -129,0.1175236701965332 -130,0.1264248490333557 -131,0.13132961094379425 -132,0.1296115219593048 -133,0.122540682554245 -134,0.11901052296161652 -135,0.1295952945947647 -136,0.128477081656456 -137,0.11859394609928131 -138,0.12240825593471527 -139,0.1265052855014801 -140,0.12446768581867218 -141,0.1163499653339386 -142,0.12472663819789886 -143,0.12922772765159607 -144,0.12230733782052994 -145,0.11735539138317108 -146,0.12252099812030792 -147,0.12173589318990707 -148,0.11668094992637634 -149,0.11704686284065247 -150,0.11908938735723495 -151,0.11389089375734329 -152,0.11690325289964676 -153,0.11821333318948746 -154,0.11424384266138077 -155,0.11415844410657883 -156,0.114013671875 -157,0.11286403238773346 -158,0.11504695564508438 -159,0.11224374175071716 -160,0.11250155419111252 -161,0.11088760942220688 -162,0.11405076086521149 -163,0.11531560868024826 -164,0.11252199113368988 -165,0.11213676631450653 -166,0.11264226585626602 -167,0.11047378927469254 -168,0.11212298274040222 -169,0.10927252471446991 -170,0.1107262521982193 -171,0.1087178960442543 -172,0.10871060192584991 -173,0.10737510770559311 -174,0.10897079110145569 -175,0.1083306074142456 -176,0.10686749964952469 -177,0.1075131818652153 -178,0.10673888027667999 -179,0.10839789360761642 -180,0.10786118358373642 -181,0.10599078983068466 -182,0.1122165396809578 -183,0.10939576476812363 -184,0.10962923616170883 -185,0.11206331849098206 -186,0.10921941697597504 -187,0.1092371940612793 -188,0.111348956823349 -189,0.10678576678037643 -190,0.11006688326597214 -191,0.11346349120140076 -192,0.11192162334918976 -193,0.10710311681032181 -194,0.10798697173595428 -195,0.10532095283269882 -196,0.10835225135087967 -197,0.11100004613399506 -198,0.10852491110563278 -199,0.10629092901945114 -200,0.10633423179388046 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.050/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.050/training_config.txt deleted file mode 100644 index 6a1ed02..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.050/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.050/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.050/training_log.csv deleted file mode 100644 index af3eb3f..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,1.7375118732452393 -2,1.5627084970474243 -3,1.3830585479736328 -4,1.2556519508361816 -5,1.2446081638336182 -6,1.1868040561676025 -7,1.1511822938919067 -8,1.2625303268432617 -9,1.474048376083374 -10,1.4820780754089355 -11,1.33821702003479 -12,1.1507985591888428 -13,1.0442827939987183 -14,0.8913726210594177 -15,0.8751717209815979 -16,0.8446232080459595 -17,0.8151570558547974 -18,0.7328643202781677 -19,0.7403907775878906 -20,0.7315385341644287 -21,0.7123292088508606 -22,0.7077196836471558 -23,0.7027509212493896 
-24,0.6766099333763123 -25,0.6685457229614258 -26,0.6741662621498108 -27,0.6673860549926758 -28,0.6570305228233337 -29,0.6436725854873657 -30,0.6300418376922607 -31,0.6177306175231934 -32,0.6015034914016724 -33,0.5906494855880737 -34,0.5767501592636108 -35,0.5652531981468201 -36,0.5524862408638 -37,0.5299646854400635 -38,0.5111064314842224 -39,0.4904092252254486 -40,0.47257208824157715 -41,0.46026811003685 -42,0.44919490814208984 -43,0.4379708766937256 -44,0.42554301023483276 -45,0.4013806879520416 -46,0.3873078525066376 -47,0.3734281659126282 -48,0.359309583902359 -49,0.34525254368782043 -50,0.3361993134021759 -51,0.3251411020755768 -52,0.31173714995384216 -53,0.29934436082839966 -54,0.2862469255924225 -55,0.26990047097206116 -56,0.25709524750709534 -57,0.24496608972549438 -58,0.20444601774215698 -59,0.24172598123550415 -60,0.25263896584510803 -61,0.2501552700996399 -62,0.23876626789569855 -63,0.22187268733978271 -64,0.19698235392570496 -65,0.20258988440036774 -66,0.21180427074432373 -67,0.21250730752944946 -68,0.20908012986183167 -69,0.20238077640533447 -70,0.19481134414672852 -71,0.18780210614204407 -72,0.18127648532390594 -73,0.16720595955848694 -74,0.18053387105464935 -75,0.18670782446861267 -76,0.17933757603168488 -77,0.1654188632965088 -78,0.16148732602596283 -79,0.16746477782726288 -80,0.16778285801410675 -81,0.16313770413398743 -82,0.15261687338352203 -83,0.13830526173114777 -84,0.1392585188150406 -85,0.1455642729997635 -86,0.1398032009601593 -87,0.12939199805259705 -88,0.13659997284412384 -89,0.13919703662395477 -90,0.1350151151418686 -91,0.12921422719955444 -92,0.121852807700634 -93,0.13523101806640625 -94,0.1319938451051712 -95,0.12880811095237732 -96,0.12559841573238373 -97,0.13360589742660522 -98,0.13334183394908905 -99,0.12565000355243683 -100,0.12615691125392914 -101,0.12340376526117325 -102,0.12075391411781311 -103,0.12127900868654251 -104,0.12336426973342896 -105,0.11742524057626724 -106,0.11564993858337402 -107,0.11496298760175705 -108,0.11401866376399994 -109,0.10904646664857864 -110,0.10964904725551605 -111,0.11580487340688705 -112,0.1131034716963768 -113,0.1118987500667572 -114,0.10980261117219925 -115,0.10938644409179688 -116,0.11116863787174225 -117,0.1112205907702446 -118,0.10683256387710571 -119,0.10642138868570328 -120,0.11178047209978104 -121,0.108712337911129 -122,0.1078651025891304 -123,0.1096978411078453 -124,0.10913997143507004 -125,0.10777253657579422 -126,0.10703439265489578 -127,0.10545507073402405 -128,0.10541344434022903 -129,0.10556837171316147 -130,0.1045670360326767 -131,0.10467458516359329 -132,0.10579247772693634 -133,0.10363729298114777 -134,0.10441358387470245 -135,0.10370301455259323 -136,0.10329681634902954 -137,0.10654342174530029 -138,0.10317124426364899 -139,0.1037917509675026 -140,0.10194531083106995 -141,0.10215764492750168 -142,0.10376491397619247 -143,0.10531305521726608 -144,0.10186129808425903 -145,0.1061548963189125 -146,0.10521452128887177 -147,0.10643158853054047 -148,0.10905316472053528 -149,0.10218362510204315 -150,0.10856962203979492 -151,0.10576286166906357 -152,0.10794718563556671 -153,0.11379168182611465 -154,0.10787734389305115 -155,0.10371947288513184 -156,0.1100538969039917 -157,0.10622504353523254 -158,0.1037500873208046 -159,0.10604789853096008 -160,0.10348835587501526 -161,0.10386402159929276 -162,0.10258278250694275 -163,0.10555916279554367 -164,0.10806013643741608 -165,0.10193496942520142 -166,0.108650803565979 -167,0.10892666131258011 -168,0.10402341187000275 -169,0.11196520179510117 -170,0.11310755461454391 
-171,0.11019358038902283 -172,0.1020030751824379 -173,0.1108393445611 -174,0.1170835942029953 -175,0.10800649970769882 -176,0.10640400648117065 -177,0.11659625917673111 -178,0.11605576425790787 -179,0.10504385828971863 -180,0.10255786031484604 -181,0.10840312391519547 -182,0.10253392159938812 -183,0.10440953820943832 -184,0.10681748390197754 -185,0.10303833335638046 -186,0.10203234106302261 -187,0.10463139414787292 -188,0.10438301414251328 -189,0.09891833364963531 -190,0.10157692432403564 -191,0.09991303831338882 -192,0.10162484645843506 -193,0.10377871990203857 -194,0.10138124972581863 -195,0.09980442374944687 -196,0.10257740318775177 -197,0.10004131495952606 -198,0.10325883328914642 -199,0.10266001522541046 -200,0.09969852864742279 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.080/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.080/training_config.txt deleted file mode 100644 index d0557e5..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.080/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.080/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.080/training_log.csv deleted file mode 100644 index 1588e43..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,1.7352348566055298 -2,1.5507261753082275 -3,1.2398545742034912 -4,1.1414846181869507 -5,1.0536106824874878 -6,0.9665489792823792 -7,0.8417056798934937 -8,0.7595813274383545 -9,0.6785212755203247 -10,0.612639844417572 -11,0.6577947735786438 -12,0.6505259275436401 -13,0.6362590789794922 -14,0.6171540021896362 -15,0.5775485634803772 -16,0.5454703569412231 -17,0.5186687707901001 -18,0.49425390362739563 -19,0.4574437737464905 -20,0.43621626496315 -21,0.3927801549434662 -22,0.34744739532470703 -23,0.34990194439888 -24,0.336473286151886 -25,0.3239845931529999 -26,0.30104103684425354 -27,0.29815322160720825 -28,0.3092297315597534 -29,0.3071884512901306 -30,0.29751989245414734 -31,0.2811616361141205 -32,0.25803884863853455 -33,0.23963208496570587 -34,0.23882102966308594 -35,0.23130923509597778 -36,0.21425679326057434 -37,0.20084770023822784 -38,0.20451761782169342 -39,0.20763640105724335 -40,0.1991184800863266 -41,0.18499740958213806 -42,0.1837179809808731 -43,0.17831523716449738 -44,0.17140378057956696 -45,0.16903232038021088 -46,0.1698157787322998 -47,0.15685832500457764 -48,0.13941101729869843 -49,0.14577244222164154 -50,0.15035884082317352 -51,0.14454805850982666 -52,0.13202016055583954 -53,0.13615509867668152 -54,0.13918939232826233 -55,0.13434113562107086 -56,0.1372462660074234 -57,0.1288396716117859 -58,0.128061905503273 -59,0.1316749006509781 -60,0.1281915009021759 -61,0.12727147340774536 -62,0.13364936411380768 -63,0.13048836588859558 -64,0.12308703362941742 -65,0.12567979097366333 -66,0.12386368960142136 -67,0.11649412661790848 -68,0.11753132939338684 -69,0.11376795172691345 
-70,0.11691471934318542 -71,0.11889440566301346 -72,0.11643724143505096 -73,0.11058409512042999 -74,0.11952593177556992 -75,0.12205634266138077 -76,0.11254527419805527 -77,0.11442477256059647 -78,0.12084392458200455 -79,0.11877167224884033 -80,0.11625224351882935 -81,0.11372780799865723 -82,0.11670082807540894 -83,0.10828135162591934 -84,0.1096096858382225 -85,0.10848237574100494 -86,0.1065855398774147 -87,0.10509057343006134 -88,0.10539679229259491 -89,0.10617280751466751 -90,0.10408851504325867 -91,0.10538750886917114 -92,0.10652444511651993 -93,0.10650710016489029 -94,0.10605455189943314 -95,0.10415694117546082 -96,0.10601045191287994 -97,0.10786549001932144 -98,0.10783389210700989 -99,0.10574839264154434 -100,0.10197199881076813 -101,0.10168729722499847 -102,0.10499563813209534 -103,0.10207417607307434 -104,0.10252548009157181 -105,0.10462304949760437 -106,0.10357631742954254 -107,0.10105036199092865 -108,0.10069882124662399 -109,0.10572122782468796 -110,0.10013909637928009 -111,0.10887014120817184 -112,0.10428155958652496 -113,0.11094926297664642 -114,0.1126912534236908 -115,0.10416968911886215 -116,0.11031234264373779 -117,0.11086113005876541 -118,0.10259874165058136 -119,0.11533328890800476 -120,0.11927498877048492 -121,0.11177834123373032 -122,0.10173217952251434 -123,0.10975456982851028 -124,0.10440842807292938 -125,0.10553783178329468 -126,0.1049351692199707 -127,0.09892717748880386 -128,0.10604781657457352 -129,0.10598335415124893 -130,0.10066282004117966 -131,0.10891234129667282 -132,0.10744255036115646 -133,0.10195630043745041 -134,0.11420756578445435 -135,0.11233958601951599 -136,0.10058464854955673 -137,0.10603268444538116 -138,0.10388627648353577 -139,0.10640431940555573 -140,0.10802800953388214 -141,0.10035410523414612 -142,0.10862637311220169 -143,0.10953439027070999 -144,0.09834146499633789 -145,0.11060792952775955 -146,0.11065730452537537 -147,0.09893319755792618 -148,0.11130470782518387 -149,0.11270322650671005 -150,0.1013779416680336 -151,0.10912657529115677 -152,0.11435306817293167 -153,0.10331812500953674 -154,0.10922220349311829 -155,0.11415223032236099 -156,0.11235282570123672 -157,0.10642695426940918 -158,0.10156340897083282 -159,0.10287291556596756 -160,0.09986108541488647 -161,0.0988859012722969 -162,0.09824816137552261 -163,0.10221783071756363 -164,0.09676522761583328 -165,0.09672234207391739 -166,0.09621956199407578 -167,0.09785886108875275 -168,0.0972171202301979 -169,0.0976337194442749 -170,0.09782774746417999 -171,0.09585591405630112 -172,0.10021265596151352 -173,0.10123761743307114 -174,0.09637308865785599 -175,0.09774387627840042 -176,0.09851579368114471 -177,0.09625008702278137 -178,0.10274302214384079 -179,0.10389779508113861 -180,0.09446319937705994 -181,0.10506585240364075 -182,0.10426563769578934 -183,0.09795600175857544 -184,0.10393557697534561 -185,0.10913743078708649 -186,0.10626445710659027 -187,0.10114319622516632 -188,0.09828735142946243 -189,0.10850004106760025 -190,0.10829144716262817 -191,0.09798289835453033 -192,0.10824369639158249 -193,0.11323384195566177 -194,0.10520803183317184 -195,0.10223259776830673 -196,0.10644877701997757 -197,0.10148044675588608 -198,0.10238815099000931 -199,0.1075398251414299 -200,0.09894648939371109 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.100/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.100/training_config.txt deleted file mode 100644 index fa1ceb3..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.100/training_config.txt +++ 
/dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.100/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.100/training_log.csv deleted file mode 100644 index 4701e22..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,1.8394345045089722 -2,1.444337010383606 -3,1.6507548093795776 -4,1.8273214101791382 -5,1.6729693412780762 -6,1.5326405763626099 -7,1.5611541271209717 -8,1.5389859676361084 -9,1.5413236618041992 -10,1.5499751567840576 -11,1.5402107238769531 -12,1.5341891050338745 -13,1.4865669012069702 -14,1.456494927406311 -15,1.4092789888381958 -16,1.3530808687210083 -17,1.3018723726272583 -18,1.2623469829559326 -19,1.201284408569336 -20,1.0817036628723145 -21,1.0319353342056274 
-22,0.9747253060340881 -23,0.9041361808776855 -24,0.8701120615005493 -25,0.780373752117157 -26,0.7742506265640259 -27,0.7419680953025818 -28,0.6924952268600464 -29,0.6824163198471069 -30,0.6801948547363281 -31,0.679767906665802 -32,0.6750238537788391 -33,0.6620345711708069 -34,0.6453966498374939 -35,0.6391070485115051 -36,0.6278873682022095 -37,0.6188264489173889 -38,0.5980880856513977 -39,0.5803741216659546 -40,0.5565985441207886 -41,0.5380753874778748 -42,0.523917555809021 -43,0.5065122246742249 -44,0.49355432391166687 -45,0.46745556592941284 -46,0.4462563991546631 -47,0.4218546748161316 -48,0.4041630029678345 -49,0.38815224170684814 -50,0.37335458397865295 -51,0.3538927435874939 -52,0.3289211094379425 -53,0.3050719201564789 -54,0.2726910412311554 -55,0.2565634846687317 -56,0.24730665981769562 -57,0.23620682954788208 -58,0.22297820448875427 -59,0.23356369137763977 -60,0.22315850853919983 -61,0.19045545160770416 -62,0.18029722571372986 -63,0.17243853211402893 -64,0.17818577587604523 -65,0.1804979294538498 -66,0.17863470315933228 -67,0.17201325297355652 -68,0.1600656360387802 -69,0.1408470720052719 -70,0.14676648378372192 -71,0.1545746922492981 -72,0.15006190538406372 -73,0.13559557497501373 -74,0.11956578493118286 -75,0.12892360985279083 -76,0.13127568364143372 -77,0.1269584447145462 -78,0.11689989268779755 -79,0.12413975596427917 -80,0.1270974725484848 -81,0.12280958890914917 -82,0.12369261682033539 -83,0.12320177257061005 -84,0.11804328858852386 -85,0.1148456335067749 -86,0.12217448651790619 -87,0.1155763640999794 -88,0.11385735869407654 -89,0.11977870017290115 -90,0.11902882903814316 -91,0.11226405948400497 -92,0.11743765324354172 -93,0.1188483014702797 -94,0.11583041399717331 -95,0.1091771349310875 -96,0.11158663779497147 -97,0.10731127113103867 -98,0.10609211027622223 -99,0.10785610973834991 -100,0.11091101914644241 -101,0.10648687183856964 -102,0.10315170884132385 -103,0.10580553859472275 -104,0.10532037913799286 -105,0.1081361174583435 -106,0.10815426707267761 -107,0.105088010430336 -108,0.10656020045280457 -109,0.10362447798252106 -110,0.10769392549991608 -111,0.10278742760419846 -112,0.10290880501270294 -113,0.1057305783033371 -114,0.10081487149000168 -115,0.10960476100444794 -116,0.1038808822631836 -117,0.10141433775424957 -118,0.10059083253145218 -119,0.10375339537858963 -120,0.10216508060693741 -121,0.09906735271215439 -122,0.10005956143140793 -123,0.0977931097149849 -124,0.10271231085062027 -125,0.1011175736784935 -126,0.10483309626579285 -127,0.10067089647054672 -128,0.10527124255895615 -129,0.10316035896539688 -130,0.10304978489875793 -131,0.10331933945417404 -132,0.10182080417871475 -133,0.10517917573451996 -134,0.0982242003083229 -135,0.11086997389793396 -136,0.11237164586782455 -137,0.10148385167121887 -138,0.10627903044223785 -139,0.10806747525930405 -140,0.10086929798126221 -141,0.10590513795614243 -142,0.10479150712490082 -143,0.10224257409572601 -144,0.10284736007452011 -145,0.0977058857679367 -146,0.09757238626480103 -147,0.09776301681995392 -148,0.09835749119520187 -149,0.0987224206328392 -150,0.09787540137767792 -151,0.10243776440620422 -152,0.09816447645425797 -153,0.10343199223279953 -154,0.1010899692773819 -155,0.10182788223028183 -156,0.09949877858161926 -157,0.10532055795192719 -158,0.1070202887058258 -159,0.1007329449057579 -160,0.10241559147834778 -161,0.1007380560040474 -162,0.09970371425151825 -163,0.09729938209056854 -164,0.10530808568000793 -165,0.10266135632991791 -166,0.10121951252222061 -167,0.10382057726383209 -168,0.09824831038713455 
-169,0.10482415556907654 -170,0.10184551775455475 -171,0.10164810717105865 -172,0.1040460616350174 -173,0.10239437222480774 -174,0.09991958737373352 -175,0.09809903055429459 -176,0.09876378625631332 -177,0.09864699840545654 -178,0.10475655645132065 -179,0.10348261892795563 -180,0.09742134064435959 -181,0.10250317305326462 -182,0.1014232411980629 -183,0.10169924050569534 -184,0.10182447731494904 -185,0.10195038467645645 -186,0.10093142837285995 -187,0.09818288683891296 -188,0.10183389484882355 -189,0.10212167352437973 -190,0.10171399265527725 -191,0.10092312842607498 -192,0.10128534585237503 -193,0.09752386063337326 -194,0.1016889438033104 -195,0.10267797857522964 -196,0.10048901289701462 -197,0.09688424319028854 -198,0.09834616631269455 -199,0.09667927771806717 -200,0.10063140094280243 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.125/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.125/training_config.txt deleted file mode 100644 index b53e196..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.125/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.125/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.125/training_log.csv deleted file mode 100644 index 980a70b..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,2.0115010738372803 -2,1.4565157890319824 -3,1.5545438528060913 -4,1.4796231985092163 -5,1.4781749248504639 -6,1.491492509841919 -7,1.577485203742981 -8,1.5571962594985962 -9,1.5257620811462402 -10,1.3887172937393188 -11,1.2051982879638672 -12,1.0075736045837402 -13,0.900478184223175 -14,0.857939600944519 -15,0.8019299507141113 -16,0.7375494837760925 -17,0.6473445296287537 -18,0.6929441094398499 -19,0.6985029578208923 -20,0.6946462392807007 -21,0.6814618110656738 -22,0.6271178722381592 -23,0.6187586784362793 -24,0.6105377674102783 -25,0.5873873233795166 -26,0.5577475428581238 -27,0.5237042903900146 -28,0.48057422041893005 -29,0.44330793619155884 -30,0.3967115581035614 -31,0.3455107510089874 -32,0.3334231972694397 -33,0.31272566318511963 -34,0.29196035861968994 -35,0.2731146216392517 -36,0.2585000991821289 -37,0.2547570765018463 -38,0.2409687489271164 -39,0.2243029624223709 -40,0.225763201713562 -41,0.2065431922674179 -42,0.20060506463050842 -43,0.1850205510854721 -44,0.17732827365398407 -45,0.16831928491592407 -46,0.1650141477584839 -47,0.16354911029338837 -48,0.1629684865474701 -49,0.16035370528697968 -50,0.15883471071720123 -51,0.1505957990884781 -52,0.1425630748271942 -53,0.13227806985378265 -54,0.1280480921268463 -55,0.1273137480020523 -56,0.12466990947723389 -57,0.12664683163166046 -58,0.1269284039735794 -59,0.12726052105426788 -60,0.1220775917172432 -61,0.13516029715538025 -62,0.13684754073619843 -63,0.12452413886785507 -64,0.1203036978840828 -65,0.12095606327056885 -66,0.11888065934181213 -67,0.11847909539937973 -68,0.1261405199766159 -69,0.12350890040397644 -70,0.1135505810379982 
-71,0.11752260476350784 -72,0.11461762338876724 -73,0.1138904020190239 -74,0.11524128913879395 -75,0.1106514036655426 -76,0.10736384987831116 -77,0.10961978137493134 -78,0.11197984963655472 -79,0.1103009283542633 -80,0.10627912729978561 -81,0.10531818121671677 -82,0.1079559326171875 -83,0.10870490223169327 -84,0.10544849932193756 -85,0.11008647084236145 -86,0.10669578611850739 -87,0.11356513202190399 -88,0.11514744162559509 -89,0.10642680525779724 -90,0.11569622159004211 -91,0.11931414157152176 -92,0.108310766518116 -93,0.11276470124721527 -94,0.11946743726730347 -95,0.11544176936149597 -96,0.10469486564397812 -97,0.11959178745746613 -98,0.12334268540143967 -99,0.11351162195205688 -100,0.10805945843458176 -101,0.11969207227230072 -102,0.11955179274082184 -103,0.1080818772315979 -104,0.11081720888614655 -105,0.1156390830874443 -106,0.10924563556909561 -107,0.10780207812786102 -108,0.11316195130348206 -109,0.11072760820388794 -110,0.10625919699668884 -111,0.11072713136672974 -112,0.10507581382989883 -113,0.109664686024189 -114,0.1151462197303772 -115,0.11043130606412888 -116,0.10199879109859467 -117,0.11149866878986359 -118,0.10874390602111816 -119,0.10319390147924423 -120,0.10880494117736816 -121,0.10518069565296173 -122,0.10376337170600891 -123,0.10510720312595367 -124,0.10159648209810257 -125,0.10494786500930786 -126,0.1005527675151825 -127,0.10156760364770889 -128,0.1009763702750206 -129,0.10233153402805328 -130,0.10141700506210327 -131,0.10067334026098251 -132,0.10365420579910278 -133,0.1025552749633789 -134,0.10063602030277252 -135,0.10372884571552277 -136,0.10054902732372284 -137,0.10108260810375214 -138,0.10100524127483368 -139,0.1013055294752121 -140,0.10073766112327576 -141,0.10129912942647934 -142,0.10174349695444107 -143,0.10128365457057953 -144,0.10108031332492828 -145,0.09972360730171204 -146,0.10300709307193756 -147,0.1019330620765686 -148,0.10274340957403183 -149,0.10122649371623993 -150,0.09859327226877213 -151,0.10230568796396255 -152,0.09916453063488007 -153,0.09932965785264969 -154,0.09955588728189468 -155,0.10047801584005356 -156,0.10104618966579437 -157,0.101438008248806 -158,0.09775801002979279 -159,0.10207150876522064 -160,0.10238400846719742 -161,0.09973788261413574 -162,0.10411148518323898 -163,0.10474247485399246 -164,0.10046468675136566 -165,0.10131248831748962 -166,0.10025197267532349 -167,0.10406551510095596 -168,0.09840526431798935 -169,0.09982366114854813 -170,0.09731984883546829 -171,0.09941282123327255 -172,0.09860698878765106 -173,0.09767565131187439 -174,0.10118965059518814 -175,0.09802479296922684 -176,0.09947852790355682 -177,0.09918952733278275 -178,0.09685269743204117 -179,0.09694761782884598 -180,0.09799246490001678 -181,0.09844715148210526 -182,0.09867054224014282 -183,0.0991000309586525 -184,0.09790630638599396 -185,0.09919419139623642 -186,0.09883516281843185 -187,0.09956348687410355 -188,0.10009787976741791 -189,0.09608661383390427 -190,0.09973499923944473 -191,0.09978596866130829 -192,0.09829278290271759 -193,0.09979436546564102 -194,0.0980251133441925 -195,0.09863880276679993 -196,0.09925469756126404 -197,0.09661511331796646 -198,0.09656757116317749 -199,0.0967240184545517 -200,0.09572776407003403 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.160/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.160/training_config.txt deleted file mode 100644 index 0f0ae5a..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.160/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller 
path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.160/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.160/training_log.csv deleted file mode 100644 index 15fcb62..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,2.348148822784424 -2,1.1869157552719116 -3,0.9933316707611084 -4,2.7534618377685547 -5,1.3326640129089355 -6,0.7743490934371948 -7,0.6778977513313293 -8,0.6404923796653748 -9,0.6407592296600342 -10,0.6131616830825806 -11,0.5944095849990845 -12,0.5740767121315002 -13,0.571038007736206 -14,0.5210162401199341 -15,0.49988406896591187 -16,0.5153886079788208 -17,0.5157323479652405 -18,0.5069182515144348 -19,0.478044331073761 -20,0.4622882604598999 -21,0.449305921792984 -22,0.42464542388916016 
-23,0.42319175601005554 -24,0.4106145203113556 -25,0.37488728761672974 -26,0.35626015067100525 -27,0.336966872215271 -28,0.30639827251434326 -29,0.2877732515335083 -30,0.2851060628890991 -31,0.28175950050354004 -32,0.2794468104839325 -33,0.27925950288772583 -34,0.2725583612918854 -35,0.25995510816574097 -36,0.24385981261730194 -37,0.2269989401102066 -38,0.21017102897167206 -39,0.199651837348938 -40,0.19237075746059418 -41,0.18673570454120636 -42,0.1788167953491211 -43,0.16813239455223083 -44,0.16044706106185913 -45,0.15782266855239868 -46,0.14532743394374847 -47,0.15133802592754364 -48,0.1541273295879364 -49,0.1468270868062973 -50,0.1330007016658783 -51,0.14811278879642487 -52,0.1555662900209427 -53,0.14670440554618835 -54,0.12256485223770142 -55,0.13163620233535767 -56,0.14005564153194427 -57,0.1474052369594574 -58,0.14534063637256622 -59,0.13465961813926697 -60,0.12581586837768555 -61,0.12407480925321579 -62,0.1199817880988121 -63,0.12373513728380203 -64,0.11867959797382355 -65,0.1172228753566742 -66,0.12040083110332489 -67,0.1152496412396431 -68,0.11940455436706543 -69,0.12343524396419525 -70,0.11706611514091492 -71,0.10977896302938461 -72,0.10663150250911713 -73,0.11299226433038712 -74,0.11479201167821884 -75,0.10813024640083313 -76,0.11869362741708755 -77,0.11297598481178284 -78,0.10784491151571274 -79,0.11493618041276932 -80,0.10821395367383957 -81,0.11606676131486893 -82,0.11807172000408173 -83,0.10888290405273438 -84,0.11150459200143814 -85,0.11555319279432297 -86,0.10723233222961426 -87,0.1095171794295311 -88,0.10784884542226791 -89,0.10799660533666611 -90,0.11209160834550858 -91,0.10337278246879578 -92,0.11066559702157974 -93,0.11369480192661285 -94,0.10646093636751175 -95,0.10897178947925568 -96,0.10933645814657211 -97,0.10366033017635345 -98,0.10918613523244858 -99,0.10668352991342545 -100,0.1047137975692749 -101,0.10948539525270462 -102,0.10052762925624847 -103,0.11222277581691742 -104,0.11394011974334717 -105,0.10969085991382599 -106,0.10473208129405975 -107,0.1053280159831047 -108,0.10217908024787903 -109,0.10230666399002075 -110,0.09984413534402847 -111,0.10440646857023239 -112,0.10072710365056992 -113,0.10320534557104111 -114,0.10296092927455902 -115,0.10216397047042847 -116,0.09883468598127365 -117,0.10253624618053436 -118,0.09837901592254639 -119,0.10780393332242966 -120,0.1083613857626915 -121,0.1010480746626854 -122,0.109275221824646 -123,0.11017094552516937 -124,0.10703602433204651 -125,0.11480920761823654 -126,0.11457936465740204 -127,0.1082044467329979 -128,0.10997014492750168 -129,0.10475260764360428 -130,0.10406330227851868 -131,0.10984507203102112 -132,0.10009149461984634 -133,0.11072596162557602 -134,0.11543622612953186 -135,0.10305939614772797 -136,0.11148472130298615 -137,0.11645646393299103 -138,0.10963768512010574 -139,0.09912331402301788 -140,0.1037134975194931 -141,0.10238996893167496 -142,0.1013580858707428 -143,0.10094544291496277 -144,0.10834470391273499 -145,0.10946972668170929 -146,0.10000401735305786 -147,0.10885928571224213 -148,0.11148656159639359 -149,0.10439886152744293 -150,0.10500508546829224 -151,0.10362391173839569 -152,0.10426661372184753 -153,0.10911904275417328 -154,0.09954342246055603 -155,0.11153576523065567 -156,0.11704898625612259 -157,0.10434526205062866 -158,0.10339627414941788 -159,0.10821100324392319 -160,0.09711483120918274 -161,0.10670414566993713 -162,0.1075463742017746 -163,0.09476111084222794 -164,0.10221633315086365 -165,0.10050950199365616 -166,0.09395046532154083 -167,0.0982334241271019 -168,0.09591842442750931 
-169,0.09535618126392365 -170,0.0964958593249321 -171,0.09638145565986633 -172,0.09780541807413101 -173,0.09451510012149811 -174,0.10181723535060883 -175,0.09913427382707596 -176,0.10401889681816101 -177,0.09891488403081894 -178,0.10734383761882782 -179,0.11049558222293854 -180,0.10205890238285065 -181,0.10034206509590149 -182,0.09917192161083221 -183,0.10120968520641327 -184,0.10239173471927643 -185,0.09557565301656723 -186,0.09590045362710953 -187,0.09798859059810638 -188,0.09866058081388474 -189,0.09465808421373367 -190,0.09336071461439133 -191,0.09562073647975922 -192,0.09462494403123856 -193,0.09674045443534851 -194,0.0922100692987442 -195,0.1056896299123764 -196,0.10637716203927994 -197,0.10121334344148636 -198,0.10006698220968246 -199,0.10164962708950043 -200,0.10071029514074326 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.200/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.200/training_config.txt deleted file mode 100644 index 6c8b78c..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.200/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.200/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.200/training_log.csv deleted file mode 100644 index 788ef9a..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,2.832991123199463 -2,1.2960435152053833 -3,2.205174446105957 -4,1.8734546899795532 -5,1.765629768371582 -6,1.6013737916946411 -7,1.5569158792495728 -8,1.3407679796218872 -9,1.0420506000518799 -10,0.8924403190612793 -11,0.7691155672073364 -12,0.7336738109588623 -13,0.6953920722007751 -14,0.6289449334144592 -15,0.6032009720802307 -16,0.5800419449806213 -17,0.5671816468238831 -18,0.536942183971405 -19,0.509723961353302 -20,0.46622776985168457 -21,0.41727688908576965 -22,0.3824954628944397 -23,0.3506295382976532 -24,0.31531068682670593 -25,0.28478574752807617 -26,0.26812437176704407 -27,0.25346019864082336 -28,0.23675650358200073 -29,0.22408190369606018 -30,0.22219522297382355 -31,0.2147274911403656 -32,0.20107203722000122 -33,0.17608267068862915 -34,0.18467174470424652 -35,0.18615825474262238 -36,0.17298579216003418 -37,0.1584315001964569 -38,0.14557144045829773 -39,0.15521685779094696 -40,0.16768789291381836 -41,0.1649685651063919 -42,0.15103410184383392 -43,0.1465006321668625 -44,0.15604978799819946 -45,0.15602348744869232 -46,0.14194291830062866 -47,0.11957981437444687 -48,0.13265468180179596 -49,0.1436096727848053 -50,0.14306329190731049 -51,0.13364046812057495 -52,0.1260354071855545 -53,0.12183576822280884 -54,0.1314990371465683 -55,0.1308518648147583 -56,0.12127001583576202 -57,0.11982080340385437 -58,0.11956068128347397 -59,0.11632373929023743 -60,0.11670663952827454 -61,0.11749440431594849 -62,0.1136302575469017 -63,0.10822053998708725 -64,0.11389625072479248 -65,0.11917926371097565 -66,0.11364175379276276 -67,0.10544177144765854 -68,0.11131703853607178 -69,0.10974758863449097 
-70,0.1030239686369896 -71,0.10830788314342499 -72,0.10803429037332535 -73,0.10778787732124329 -74,0.10200697928667068 -75,0.10655058920383453 -76,0.10819855332374573 -77,0.10612618923187256 -78,0.1016167402267456 -79,0.10496976971626282 -80,0.10335154086351395 -81,0.10127530992031097 -82,0.10563050210475922 -83,0.1023721843957901 -84,0.1051606759428978 -85,0.1056988537311554 -86,0.10126782208681107 -87,0.10292568057775497 -88,0.10325456410646439 -89,0.10055910050868988 -90,0.10019141435623169 -91,0.10056494921445847 -92,0.09948666393756866 -93,0.09917055070400238 -94,0.10165788978338242 -95,0.10139574110507965 -96,0.09858512878417969 -97,0.09926290810108185 -98,0.099910669028759 -99,0.09857365489006042 -100,0.10414807498455048 -101,0.10012879222631454 -102,0.10768501460552216 -103,0.1055738627910614 -104,0.10068463534116745 -105,0.10719411075115204 -106,0.10184454917907715 -107,0.10600622743368149 -108,0.10335983335971832 -109,0.10592350363731384 -110,0.10607270896434784 -111,0.10232637077569962 -112,0.10660053044557571 -113,0.09873639792203903 -114,0.11326152831315994 -115,0.11684751510620117 -116,0.10641647130250931 -117,0.10405745357275009 -118,0.10491996258497238 -119,0.10023829340934753 -120,0.09733697026968002 -121,0.0997997373342514 -122,0.09988045692443848 -123,0.10051066428422928 -124,0.09780725091695786 -125,0.09717375785112381 -126,0.09881015866994858 -127,0.10370513051748276 -128,0.09821386635303497 -129,0.10433431714773178 -130,0.10619954019784927 -131,0.10006754100322723 -132,0.1031719222664833 -133,0.10519189387559891 -134,0.1037399098277092 -135,0.09814730286598206 -136,0.10351185500621796 -137,0.09846843779087067 -138,0.10872848331928253 -139,0.11183944344520569 -140,0.1015220582485199 -141,0.11116122454404831 -142,0.1167728379368782 -143,0.10550350695848465 -144,0.10726209729909897 -145,0.11508595943450928 -146,0.10941402614116669 -147,0.0974503755569458 -148,0.10779531300067902 -149,0.10103685408830643 -150,0.10699661821126938 -151,0.11186853796243668 -152,0.10573537647724152 -153,0.10171868652105331 -154,0.11104945838451385 -155,0.1072736456990242 -156,0.10513079166412354 -157,0.11736692488193512 -158,0.11954263597726822 -159,0.1081753596663475 -160,0.10473832488059998 -161,0.11703679710626602 -162,0.11401727795600891 -163,0.10251307487487793 -164,0.1052054688334465 -165,0.10457988828420639 -166,0.09805044531822205 -167,0.10350439697504044 -168,0.09780126810073853 -169,0.10791491717100143 -170,0.10883784294128418 -171,0.09810446202754974 -172,0.11195454746484756 -173,0.11643289774656296 -174,0.10565169900655746 -175,0.10533618927001953 -176,0.11373332142829895 -177,0.10932503640651703 -178,0.09579626470804214 -179,0.10940037667751312 -180,0.10804655402898788 -181,0.09524168074131012 -182,0.101932592689991 -183,0.09842945635318756 -184,0.09680302441120148 -185,0.10189493000507355 -186,0.10307615250349045 -187,0.09457676857709885 -188,0.10249315202236176 -189,0.10279261320829391 -190,0.1015014722943306 -191,0.10038593411445618 -192,0.1004088819026947 -193,0.10539044439792633 -194,0.0960085317492485 -195,0.1011056900024414 -196,0.10065913200378418 -197,0.10566840320825577 -198,0.1032366156578064 -199,0.09853827208280563 -200,0.10612871497869492 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.250/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.250/training_config.txt deleted file mode 100644 index 69c5e9e..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.250/training_config.txt +++ /dev/null @@ -1,63 +0,0 
@@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.250/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.250/training_log.csv deleted file mode 100644 index f3b46da..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,3.293550729751587 -2,0.8147019743919373 -3,0.9006409645080566 -4,0.7494362592697144 -5,0.7347787618637085 -6,0.7302146553993225 -7,0.7831394076347351 -8,1.049973726272583 -9,0.5578994750976562 -10,0.5247738361358643 -11,0.4846658706665039 -12,0.43453529477119446 -13,0.39100342988967896 -14,0.36219656467437744 -15,0.3350771367549896 -16,0.3105609118938446 -17,0.28513023257255554 -18,0.265813946723938 -19,0.2520503103733063 -20,0.22277463972568512 -21,0.19363462924957275 
-22,0.17515960335731506 -23,0.1883932650089264 -24,0.1731896549463272 -25,0.1662874072790146 -26,0.1674523502588272 -27,0.1740422546863556 -28,0.16175752878189087 -29,0.1484455168247223 -30,0.15401801466941833 -31,0.14598892629146576 -32,0.13632473349571228 -33,0.13310851156711578 -34,0.13083773851394653 -35,0.12945841252803802 -36,0.13140109181404114 -37,0.1285054087638855 -38,0.12354420125484467 -39,0.11934669315814972 -40,0.11689849197864532 -41,0.11886812746524811 -42,0.11183226108551025 -43,0.11560972034931183 -44,0.11907388269901276 -45,0.11044749617576599 -46,0.11620831489562988 -47,0.11908591538667679 -48,0.11166846752166748 -49,0.11896694451570511 -50,0.11707576364278793 -51,0.11423791199922562 -52,0.11938149482011795 -53,0.11720946431159973 -54,0.10563711822032928 -55,0.11264974623918533 -56,0.11050006747245789 -57,0.10536898672580719 -58,0.11118382960557938 -59,0.11340852826833725 -60,0.11281341314315796 -61,0.11351378262042999 -62,0.10679890215396881 -63,0.11178837716579437 -64,0.11053705215454102 -65,0.10401912033557892 -66,0.11118678748607635 -67,0.10930760949850082 -68,0.10395500808954239 -69,0.10950668901205063 -70,0.1073668897151947 -71,0.10891428589820862 -72,0.10297846794128418 -73,0.11241929233074188 -74,0.11223848164081573 -75,0.10556533932685852 -76,0.1166052371263504 -77,0.11785920709371567 -78,0.10773641616106033 -79,0.1090693548321724 -80,0.10772816836833954 -81,0.10305244475603104 -82,0.10950818657875061 -83,0.11003539711236954 -84,0.10865901410579681 -85,0.09870271384716034 -86,0.10852458328008652 -87,0.11028937250375748 -88,0.10423383116722107 -89,0.11130457371473312 -90,0.10791126638650894 -91,0.11539939045906067 -92,0.11502311378717422 -93,0.1093577966094017 -94,0.11540094763040543 -95,0.1063448041677475 -96,0.10566396266222 -97,0.11479177325963974 -98,0.1109965369105339 -99,0.10229077935218811 -100,0.1020563542842865 -101,0.11091508716344833 -102,0.11346662044525146 -103,0.1065247654914856 -104,0.10907601565122604 -105,0.1178116425871849 -106,0.11305161565542221 -107,0.1042952761054039 -108,0.11187934875488281 -109,0.11665543913841248 -110,0.11793674528598785 -111,0.10688503086566925 -112,0.10228832811117172 -113,0.10750195384025574 -114,0.09937411546707153 -115,0.10795064270496368 -116,0.11145839840173721 -117,0.10738008469343185 -118,0.10213619470596313 -119,0.10648453235626221 -120,0.10178666561841965 -121,0.10413201153278351 -122,0.10927499085664749 -123,0.1044892817735672 -124,0.09945005178451538 -125,0.11069120466709137 -126,0.1128077358007431 -127,0.10778448730707169 -128,0.09938421845436096 -129,0.10313399881124496 -130,0.10103179514408112 -131,0.10036179423332214 -132,0.10347294807434082 -133,0.10227828472852707 -134,0.10241495817899704 -135,0.10371770709753036 -136,0.0963219553232193 -137,0.10737169533967972 -138,0.1105225533246994 -139,0.1026623547077179 -140,0.10192569345235825 -141,0.10314051061868668 -142,0.09953068196773529 -143,0.10914398729801178 -144,0.10653729736804962 -145,0.09988891333341599 -146,0.10172314196825027 -147,0.10206479579210281 -148,0.09970054775476456 -149,0.10026151686906815 -150,0.09987568855285645 -151,0.10379737615585327 -152,0.1014043316245079 -153,0.10391416400671005 -154,0.09536198526620865 -155,0.10479968041181564 -156,0.10094042867422104 -157,0.10657305270433426 -158,0.10643375664949417 -159,0.10717509686946869 -160,0.10503655672073364 -161,0.10258673876523972 -162,0.1114378347992897 -163,0.11372994631528854 -164,0.11264151334762573 -165,0.10718340426683426 -166,0.10661538690328598 -167,0.11471618711948395 
-168,0.10704857110977173 -169,0.10043128579854965 -170,0.1074705570936203 -171,0.09832245111465454 -172,0.10795468091964722 -173,0.11206326633691788 -174,0.1067449077963829 -175,0.10333873331546783 -176,0.10133843123912811 -177,0.10021723061800003 -178,0.10835225880146027 -179,0.10393564403057098 -180,0.10034473985433578 -181,0.10498666018247604 -182,0.09779099375009537 -183,0.09349223226308823 -184,0.10051602125167847 -185,0.10260467231273651 -186,0.1028551459312439 -187,0.09323897957801819 -188,0.09647121280431747 -189,0.10229671746492386 -190,0.09897160530090332 -191,0.0940350815653801 -192,0.09630832821130753 -193,0.0964658334851265 -194,0.09172269701957703 -195,0.09199914336204529 -196,0.09254147857427597 -197,0.09371351450681686 -198,0.09505066275596619 -199,0.09468865394592285 -200,0.09453749656677246 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.300/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.300/training_config.txt deleted file mode 100644 index d80e617..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.300/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.300/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.300/training_log.csv deleted file mode 100644 index 8412765..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.300/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,3.4521710872650146 -2,0.9116301536560059 -3,2.7847726345062256 -4,2.9355485439300537 -5,3.5012426376342773 -6,3.1735994815826416 -7,3.03721022605896 -8,2.6589183807373047 -9,2.7268946170806885 -10,0.904280424118042 -11,0.7768167853355408 -12,0.8345966935157776 -13,0.7894214987754822 -14,0.6646577715873718 -15,0.5118480324745178 -16,0.44973841309547424 -17,0.3963086009025574 -18,0.27722495794296265 -19,0.2507689893245697 -20,0.21107541024684906 -21,0.21518367528915405 -22,0.2056153267621994 -23,0.21423442661762238 -24,0.1878947913646698 -25,0.15807664394378662 -26,0.16249004006385803 -27,0.1764296442270279 -28,0.1713491827249527 -29,0.1496727615594864 -30,0.1594238579273224 -31,0.17924395203590393 -32,0.16993816196918488 -33,0.13189129531383514 -34,0.1441991925239563 -35,0.16221921145915985 -36,0.16370849311351776 -37,0.15001355111598969 -38,0.14437231421470642 -39,0.14060722291469574 -40,0.14693744480609894 -41,0.14811716973781586 -42,0.12885238230228424 -43,0.12064936012029648 -44,0.1325743943452835 -45,0.12777800858020782 -46,0.11794261634349823 -47,0.1172482892870903 -48,0.11711663007736206 -49,0.11803892999887466 -50,0.11836662143468857 -51,0.11419309675693512 -52,0.10777003318071365 -53,0.11816771328449249 -54,0.1223667562007904 -55,0.11023767292499542 -56,0.11098134517669678 -57,0.11445485800504684 -58,0.10703185200691223 -59,0.11191718280315399 -60,0.11122289299964905 -61,0.10304103791713715 -62,0.10859684646129608 -63,0.10446609556674957 -64,0.11118539422750473 -65,0.10742301493883133 -66,0.1000959575176239 -67,0.11008381843566895 -68,0.10999886691570282 -69,0.10954173654317856 
-70,0.10477831959724426 -71,0.10928963869810104 -72,0.10388773679733276 -73,0.10830555111169815 -74,0.10978288948535919 -75,0.0999133437871933 -76,0.11150417476892471 -77,0.10713177174329758 -78,0.10513312369585037 -79,0.11458912491798401 -80,0.10965283960103989 -81,0.10585194081068039 -82,0.11771650612354279 -83,0.11443967372179031 -84,0.11077452450990677 -85,0.11962564289569855 -86,0.11678584665060043 -87,0.1071959137916565 -88,0.10781822353601456 -89,0.10867753624916077 -90,0.10588526725769043 -91,0.10900595039129257 -92,0.11060018837451935 -93,0.10429010540246964 -94,0.10988958179950714 -95,0.10361309349536896 -96,0.10678429901599884 -97,0.11140845715999603 -98,0.10423382371664047 -99,0.10663741827011108 -100,0.1126902624964714 -101,0.10653335601091385 -102,0.1065358966588974 -103,0.11260442435741425 -104,0.1030743196606636 -105,0.11124106496572495 -106,0.11513441056013107 -107,0.10391645133495331 -108,0.10827872902154922 -109,0.11051002144813538 -110,0.09669332951307297 -111,0.11385391652584076 -112,0.11504845321178436 -113,0.09781152009963989 -114,0.11384612321853638 -115,0.12092848122119904 -116,0.11244353652000427 -117,0.1048501655459404 -118,0.11377597600221634 -119,0.10515092313289642 -120,0.10417313873767853 -121,0.112485870718956 -122,0.1047728955745697 -123,0.10318561643362045 -124,0.10723080486059189 -125,0.09799838066101074 -126,0.10972173511981964 -127,0.1131768450140953 -128,0.10371983796358109 -129,0.10893210768699646 -130,0.1232944205403328 -131,0.11751553416252136 -132,0.10193660855293274 -133,0.1098228171467781 -134,0.11233987659215927 -135,0.11318037658929825 -136,0.10843892395496368 -137,0.10515767335891724 -138,0.10396549105644226 -139,0.10637225955724716 -140,0.10570089519023895 -141,0.09966583549976349 -142,0.10950620472431183 -143,0.11165173351764679 -144,0.09643322974443436 -145,0.11310746520757675 -146,0.12197831273078918 -147,0.11483921110630035 -148,0.09916307777166367 -149,0.10647234320640564 -150,0.1069784164428711 -151,0.10815495252609253 -152,0.11372394114732742 -153,0.10817565768957138 -154,0.10650632530450821 -155,0.10662379860877991 -156,0.10979273915290833 -157,0.09841994196176529 -158,0.10589315742254257 -159,0.1136607751250267 -160,0.1083793044090271 -161,0.10231925547122955 -162,0.0978764221072197 -163,0.09736427664756775 -164,0.09811848402023315 -165,0.10496999323368073 -166,0.09951172024011612 -167,0.09951896220445633 -168,0.1013539582490921 -169,0.09907874464988708 -170,0.09889096021652222 -171,0.09424411505460739 -172,0.09494839608669281 -173,0.09311846643686295 -174,0.09121590852737427 -175,0.09544593095779419 -176,0.10044083744287491 -177,0.10030121356248856 -178,0.09171587228775024 -179,0.0995110496878624 -180,0.09043031930923462 -181,0.09893834590911865 -182,0.10193898528814316 -183,0.09542983025312424 -184,0.09585593640804291 -185,0.10175807774066925 -186,0.09267456084489822 -187,0.10739004611968994 -188,0.11141551285982132 -189,0.0991525650024414 -190,0.09325006604194641 -191,0.09792865812778473 -192,0.09710827469825745 -193,0.08949118107557297 -194,0.09196411073207855 -195,0.09536109864711761 -196,0.09372970461845398 -197,0.08876045793294907 -198,0.09834067523479462 -199,0.09948709607124329 -200,0.0911635085940361 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.400/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.400/training_config.txt deleted file mode 100644 index c69d188..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.400/training_config.txt +++ /dev/null @@ -1,63 
+0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.400/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.400/training_log.csv deleted file mode 100644 index 8334c0c..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.400/training_log.csv +++ /dev/null @@ -1,26 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,3.7730588912963867 -2,1.8660722970962524 -3,0.6987763047218323 -4,2.3066775798797607 -5,0.5175975561141968 -6,0.49580851197242737 -7,0.4760381877422333 -8,0.45379066467285156 -9,0.4260327219963074 -10,0.39047670364379883 -11,0.3364996314048767 -12,0.3045363426208496 -13,0.30772700905799866 -14,0.31082722544670105 -15,0.3045419752597809 -16,0.29031670093536377 -17,0.269631952047348 -18,0.2510835826396942 -19,0.23414510488510132 -20,0.2210993468761444 -21,nan -22,nan -23,nan -24,nan 
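
The deleted lr_0.400 log above is typical of the diverging runs in this sweep: the loss falls for roughly twenty epochs and then goes to nan, after which training never recovers. A minimal, stdlib-only sketch of how such runs could be flagged when post-processing these training_log.csv files follows; the file layout ("Epoch,Loss" header, one row per epoch) is as shown above, but the helper names are illustrative and not from the repository. It also includes a plain-float restatement of the normalized_loss formula from the configs, used only to sanity-check the endpoints the docstring documents.

    import csv
    import math

    def first_nan_epoch(log_path):
        # Scan a training_log.csv ("Epoch,Loss" header) and return the first
        # epoch whose loss parsed as nan, or None if the run never diverged.
        with open(log_path, newline="") as f:
            for row in csv.DictReader(f):
                # The literal "nan" in the CSV parses to float("nan").
                if math.isnan(float(row["Loss"])):
                    return int(row["Epoch"])
        return None

    def normalized_loss_scalar(error, exponent, min_val=0.01, delta=1.0):
        # Scalar version of the normalized_loss formula from the configs above,
        # for checking the documented endpoint behavior without torch.
        numerator = (error + delta) ** exponent - delta ** exponent
        denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
        return min_val + (1 - min_val) * (numerator / denominator)

    # Documented endpoints: loss = min_val at error = 0, loss = 1 at error = 2*pi.
    assert abs(normalized_loss_scalar(0.0, 0.25) - 0.01) < 1e-12
    assert abs(normalized_loss_scalar(2 * math.pi, 0.25) - 1.0) < 1e-12

Under these assumptions, first_nan_epoch on the lr_0.400 log above would return 21, while the lr_0.250 and lr_0.300 runs, which converge, would return None.
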
diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.500/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.500/training_config.txt deleted file mode 100644 index 5844afb..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.500/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.500/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.500/training_log.csv deleted file mode 100644 index 0b129b9..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.500/training_log.csv +++ /dev/null @@ -1,34 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,4.035703659057617 -2,3.3090760707855225 -3,0.8224667906761169 -4,0.5908019542694092 -5,0.6923775672912598 -6,0.3750142455101013 -7,0.31550341844558716 
-8,0.2428933084011078 -9,0.20740775763988495 -10,0.23921488225460052 -11,0.22407959401607513 -12,0.2094106376171112 -13,0.20930860936641693 -14,0.19424383342266083 -15,0.1663331240415573 -16,0.1315726786851883 -17,0.14409054815769196 -18,0.1526607871055603 -19,0.16479842364788055 -20,0.1858406960964203 -21,0.2622055113315582 -22,0.5372576117515564 -23,0.3889973759651184 -24,0.3783969283103943 -25,0.3432566225528717 -26,0.21166566014289856 -27,0.2070247083902359 -28,0.24291419982910156 -29,nan -30,nan -31,nan -32,nan diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.600/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.600/training_config.txt deleted file mode 100644 index ef3d59c..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.600/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.600/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.600/training_log.csv deleted file mode 100644 index c53d832..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.600/training_log.csv +++ /dev/null @@ -1,15 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,4.075389862060547 -2,3.9720983505249023 -3,2.2739076614379883 -4,0.583659827709198 -5,0.5076976418495178 -6,0.4137022793292999 -7,0.4178966283798218 -8,0.31755465269088745 -9,0.2742602527141571 -10,nan -11,nan -12,nan -13,nan diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.700/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.700/training_config.txt deleted file mode 100644 index 786d42c..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.700/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. 
To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.700/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.700/training_log.csv deleted file mode 100644 index f782f41..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.700/training_log.csv +++ /dev/null @@ -1,12 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,4.083685398101807 -2,4.084898948669434 -3,4.084285736083984 -4,4.08209753036499 -5,3.8381686210632324 -6,3.14666485786438 -7,nan -8,nan -9,nan -10,nan diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.800/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.800/training_config.txt deleted file mode 100644 index 6d6c42f..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.800/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. 
To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.800/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.800/training_log.csv deleted file mode 100644 index e817151..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,4.084804058074951 -2,4.084907531738281 -3,4.084907531738281 -4,4.084907531738281 -5,4.084907531738281 -6,4.084907531738281 -7,4.084907531738281 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.900/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.900/training_config.txt deleted file mode 100644 index 260630c..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.900/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. 
To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.900/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_0.900/training_log.csv deleted file mode 100644 index 95db329..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_0.900/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,4.084907531738281 -2,4.084907531738281 -3,4.084907531738281 -4,4.084907531738281 -5,4.084907531738281 -6,4.084907531738281 diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_1.000/training_config.txt b/training/base_loss_learning_rate_sweep/one_fourth/lr_1.000/training_config.txt deleted file mode 100644 index 11b77a7..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_1.000/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function Name: one_fourth -Loss Function Exponent: 0.25 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_fourth_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/4, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. 
To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_fourth/lr_1.000/training_log.csv b/training/base_loss_learning_rate_sweep/one_fourth/lr_1.000/training_log.csv deleted file mode 100644 index 95db329..0000000 --- a/training/base_loss_learning_rate_sweep/one_fourth/lr_1.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,1.7186180353164673 -1,4.084907531738281 -2,4.084907531738281 -3,4.084907531738281 -4,4.084907531738281 -5,4.084907531738281 -6,4.084907531738281 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.003/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.003/training_config.txt deleted file mode 100644 index 0ef8130..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.003/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.0025 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. 
To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.003/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.003/training_log.csv deleted file mode 100644 index e4348fb..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.003/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,2.162527561187744 -2,2.1092329025268555 -3,2.0686657428741455 -4,2.073481798171997 -5,2.0663325786590576 -6,2.075603485107422 -7,2.060519218444824 -8,2.0697553157806396 -9,2.0568783283233643 -10,1.9824451208114624 -11,1.9766473770141602 -12,1.99843430519104 -13,1.9986932277679443 -14,2.0094146728515625 -15,2.0347464084625244 -16,2.0114240646362305 -17,2.04256010055542 -18,2.03515625 -19,2.0073258876800537 -20,1.9963029623031616 -21,1.9779481887817383 -22,1.9369677305221558 -23,1.9273499250411987 -24,1.9239282608032227 -25,1.9094386100769043 -26,1.8949352502822876 -27,1.8834713697433472 -28,1.8726519346237183 -29,1.8549365997314453 -30,1.805792212486267 -31,1.7417219877243042 -32,1.7354912757873535 -33,1.7255655527114868 -34,1.7225379943847656 -35,1.739658236503601 -36,1.7301628589630127 -37,1.7113059759140015 -38,1.6913121938705444 -39,1.671394944190979 -40,1.6665234565734863 -41,1.667553424835205 -42,1.6717934608459473 -43,1.6725454330444336 -44,1.680608868598938 -45,1.6843611001968384 -46,1.6816285848617554 -47,1.6796823740005493 -48,1.6779927015304565 -49,1.6759580373764038 -50,1.6722790002822876 -51,1.6479082107543945 -52,1.6631250381469727 -53,1.6475170850753784 -54,1.6215102672576904 -55,1.6182606220245361 -56,1.599734902381897 -57,1.5883959531784058 -58,1.5870929956436157 -59,1.5870461463928223 -60,1.586819052696228 -61,1.5857393741607666 -62,1.579696774482727 -63,1.5631608963012695 -64,1.563684105873108 -65,1.5623791217803955 -66,1.5599008798599243 -67,1.5566446781158447 -68,1.552986741065979 
-69,1.5490914583206177 -70,1.5451853275299072 -71,1.54072904586792 -72,1.535017728805542 -73,1.52622652053833 -74,1.5123339891433716 -75,1.5120902061462402 -76,1.5214227437973022 -77,1.5299687385559082 -78,1.5341235399246216 -79,1.5349116325378418 -80,1.5341335535049438 -81,1.532774806022644 -82,1.5312711000442505 -83,1.5288549661636353 -84,1.5098114013671875 -85,1.5078229904174805 -86,1.5066086053848267 -87,1.5055674314498901 -88,1.5044949054718018 -89,1.5029993057250977 -90,1.5007152557373047 -91,1.4972052574157715 -92,1.4883530139923096 -93,1.5141797065734863 -94,1.5105246305465698 -95,1.4952012300491333 -96,1.5235859155654907 -97,1.5337592363357544 -98,1.539881944656372 -99,1.543805718421936 -100,1.545546531677246 -101,1.5452030897140503 -102,1.5432133674621582 -103,1.5398824214935303 -104,1.5353785753250122 -105,1.5291386842727661 -106,1.5235552787780762 -107,1.504044532775879 -108,1.564168095588684 -109,1.5761873722076416 -110,1.5890213251113892 -111,1.5838701725006104 -112,1.5864527225494385 -113,1.5991108417510986 -114,1.5794007778167725 -115,1.5667997598648071 -116,1.5664142370224 -117,1.5696094036102295 -118,1.574564814567566 -119,1.5802181959152222 -120,1.5835342407226562 -121,1.583357572555542 -122,1.5749692916870117 -123,1.5719513893127441 -124,1.5691921710968018 -125,1.5658962726593018 -126,1.5617327690124512 -127,1.5509346723556519 -128,1.5008320808410645 -129,1.4755232334136963 -130,1.4818534851074219 -131,1.4877738952636719 -132,1.4629197120666504 -133,1.4423243999481201 -134,1.4364129304885864 -135,1.4491552114486694 -136,1.4411462545394897 -137,1.421816349029541 -138,1.4210047721862793 -139,1.4168368577957153 -140,1.4083043336868286 -141,1.3882571458816528 -142,1.3937962055206299 -143,1.3802738189697266 -144,1.36080002784729 -145,1.3507853746414185 -146,1.3427997827529907 -147,1.3412532806396484 -148,1.3433901071548462 -149,1.326590895652771 -150,1.3362103700637817 -151,1.311815857887268 -152,1.3069714307785034 -153,1.305045485496521 -154,1.2958269119262695 -155,1.2868770360946655 -156,1.274185061454773 -157,1.2588577270507812 -158,1.2570765018463135 -159,1.255861759185791 -160,1.254150390625 -161,1.249708652496338 -162,1.2205488681793213 -163,1.221326231956482 -164,1.2215290069580078 -165,1.221323847770691 -166,1.2207822799682617 -167,1.219899296760559 -168,1.2185256481170654 -169,1.2158515453338623 -170,1.224191665649414 -171,1.2257039546966553 -172,1.2256500720977783 -173,1.2249114513397217 -174,1.2233588695526123 -175,1.2190583944320679 -176,1.2134301662445068 -177,1.2141600847244263 -178,1.2139605283737183 -179,1.213349461555481 -180,1.2124415636062622 -181,1.2111984491348267 -182,1.2092937231063843 -183,1.206516981124878 -184,1.2039995193481445 -185,1.201701045036316 -186,1.1995444297790527 -187,1.197488784790039 -188,1.1955324411392212 -189,1.1938905715942383 -190,1.1925169229507446 -191,1.1913479566574097 -192,1.1903502941131592 -193,1.1894927024841309 -194,1.1887491941452026 -195,1.1880815029144287 -196,1.1874407529830933 -197,1.1867631673812866 -198,1.185991883277893 -199,1.185099482536316 -200,1.1840916872024536 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.005/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.005/training_config.txt deleted file mode 100644 index 79bc7b2..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.005/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 
10, Points: 1000 -Learning Rate: 0.005 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.005/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.005/training_log.csv deleted file mode 100644 index e13ebd7..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.005/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,2.137160539627075 -2,2.129725933074951 -3,2.129074811935425 -4,2.1167705059051514 -5,2.11788272857666 -6,2.1084232330322266 -7,2.093566417694092 -8,2.075098991394043 -9,2.05342435836792 -10,2.021378755569458 -11,2.001770257949829 -12,1.9425947666168213 -13,1.8787808418273926 -14,1.8555057048797607 -15,1.7876214981079102 -16,1.7724279165267944 -17,1.7632673978805542 -18,1.7224229574203491 -19,1.6587011814117432 -20,1.6667537689208984 -21,1.6712275743484497 -22,1.6429239511489868 -23,1.6050344705581665 -24,1.6224048137664795 -25,1.638614296913147 -26,1.623384952545166 -27,1.6272461414337158 -28,1.6226801872253418 
-29,1.6239984035491943 -30,1.6212077140808105 -31,1.6052535772323608 -32,1.5397212505340576 -33,1.5281345844268799 -34,1.503400206565857 -35,1.496975302696228 -36,1.47413170337677 -37,1.4480055570602417 -38,1.4373414516448975 -39,1.394233226776123 -40,1.4444373846054077 -41,1.4406622648239136 -42,1.4003595113754272 -43,1.3862507343292236 -44,1.3936032056808472 -45,1.3955702781677246 -46,1.3968983888626099 -47,1.397375226020813 -48,1.3974299430847168 -49,1.3971848487854004 -50,1.3965784311294556 -51,1.3952864408493042 -52,1.3929917812347412 -53,1.3900254964828491 -54,1.3866857290267944 -55,1.3828593492507935 -56,1.3742399215698242 -57,1.3495453596115112 -58,1.3803380727767944 -59,1.3870842456817627 -60,1.3777918815612793 -61,1.3775510787963867 -62,1.3789161443710327 -63,1.3801428079605103 -64,1.3799982070922852 -65,1.3788374662399292 -66,1.3883367776870728 -67,1.3853240013122559 -68,1.3815391063690186 -69,1.3742117881774902 -70,1.3785618543624878 -71,1.4270323514938354 -72,1.4493464231491089 -73,1.4460203647613525 -74,1.4174139499664307 -75,1.3736544847488403 -76,1.381502389907837 -77,1.3912527561187744 -78,1.3211883306503296 -79,1.3298097848892212 -80,1.2921571731567383 -81,1.2714073657989502 -82,1.2717524766921997 -83,1.2844282388687134 -84,1.2766720056533813 -85,1.2789597511291504 -86,1.2812765836715698 -87,1.2631819248199463 -88,1.2660346031188965 -89,1.2603918313980103 -90,1.259565830230713 -91,1.2606297731399536 -92,1.2600319385528564 -93,1.2584620714187622 -94,1.256937861442566 -95,1.25579833984375 -96,1.2553526163101196 -97,1.2555803060531616 -98,1.2554938793182373 -99,1.2533921003341675 -100,1.2498328685760498 -101,1.2466305494308472 -102,1.244370698928833 -103,1.2428834438323975 -104,1.2418575286865234 -105,1.2408556938171387 -106,1.2395187616348267 -107,1.2378828525543213 -108,1.235969066619873 -109,1.233924150466919 -110,1.2321208715438843 -111,1.2307465076446533 -112,1.2296063899993896 -113,1.228334665298462 -114,1.2267874479293823 -115,1.225052833557129 -116,1.2233047485351562 -117,1.221668004989624 -118,1.2201554775238037 -119,1.2186816930770874 -120,1.2171111106872559 -121,1.2151944637298584 -122,1.2117499113082886 -123,1.22069251537323 -124,1.2188118696212769 -125,1.2140172719955444 -126,1.2117632627487183 -127,1.211154818534851 -128,1.2107810974121094 -129,1.2102019786834717 -130,1.2094213962554932 -131,1.2085944414138794 -132,1.2078217267990112 -133,1.2071183919906616 -134,1.2064532041549683 -135,1.205772876739502 -136,1.2050246000289917 -137,1.2041679620742798 -138,1.2031928300857544 -139,1.2021235227584839 -140,1.201005220413208 -141,1.199918270111084 -142,1.1989129781723022 -143,1.1980092525482178 -144,1.1971986293792725 -145,1.1964560747146606 -146,1.195744276046753 -147,1.1950373649597168 -148,1.1943249702453613 -149,1.1936217546463013 -150,1.192954421043396 -151,1.1923339366912842 -152,1.1917402744293213 -153,1.1911396980285645 -154,1.1905028820037842 -155,1.1898272037506104 -156,1.1891266107559204 -157,1.1884251832962036 -158,1.1877354383468628 -159,1.1870495080947876 -160,1.186354398727417 -161,1.1856378316879272 -162,1.1849063634872437 -163,1.184168815612793 -164,1.1834330558776855 -165,1.1826987266540527 -166,1.1819628477096558 -167,1.1812222003936768 -168,1.180472731590271 -169,1.1797096729278564 -170,1.178932547569275 -171,1.1781386137008667 -172,1.177330732345581 -173,1.1765060424804688 -174,1.1756659746170044 -175,1.1748069524765015 -176,1.173925518989563 -177,1.1730185747146606 -178,1.1720807552337646 -179,1.1711071729660034 -180,1.170084834098816 
-181,1.1689950227737427 -182,1.1678073406219482 -183,1.1664692163467407 -184,1.1650248765945435 -185,1.1637274026870728 -186,1.1626108884811401 -187,1.1615296602249146 -188,1.1603710651397705 -189,1.1591209173202515 -190,1.1578606367111206 -191,1.156672477722168 -192,1.1555849313735962 -193,1.1545790433883667 -194,1.1535996198654175 -195,1.1525930166244507 -196,1.151549220085144 -197,1.1504911184310913 -198,1.1494487524032593 -199,1.1484423875808716 -200,1.1474772691726685 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.010/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.010/training_config.txt deleted file mode 100644 index b251101..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.010/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.010/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.010/training_log.csv deleted file mode 100644 index 608ab07..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,2.1702463626861572 -2,2.1196882724761963 -3,2.0463221073150635 -4,1.9902693033218384 -5,1.970646858215332 -6,1.9307997226715088 -7,1.820638656616211 -8,1.673026442527771 -9,1.558285117149353 -10,1.536666750907898 -11,1.4375275373458862 -12,1.3801395893096924 -13,1.3586421012878418 -14,1.3318732976913452 -15,1.277651309967041 -16,1.2574422359466553 -17,1.2358754873275757 -18,1.2276147603988647 -19,1.2043282985687256 -20,1.208265781402588 -21,1.2051934003829956 -22,1.2051079273223877 -23,1.20060396194458 -24,1.1960066556930542 -25,1.1909085512161255 -26,1.1848626136779785 -27,1.1781227588653564 -28,1.1713988780975342 -29,1.1648848056793213 -30,1.1681525707244873 -31,1.1458836793899536 -32,1.1370500326156616 -33,1.1405469179153442 -34,1.1294639110565186 -35,1.1248077154159546 -36,1.1213477849960327 -37,1.1180591583251953 -38,1.114854097366333 -39,1.110586166381836 -40,1.119533658027649 -41,1.1152547597885132 -42,1.1013131141662598 -43,1.0996471643447876 -44,1.0979692935943604 -45,1.0963451862335205 -46,1.0941224098205566 -47,1.0915334224700928 -48,1.1176210641860962 -49,1.0907318592071533 -50,1.0916826725006104 -51,1.0914692878723145 -52,1.0903065204620361 -53,1.0888643264770508 -54,1.0875800848007202 -55,1.086552619934082 -56,1.0855000019073486 -57,1.0839285850524902 -58,1.0818066596984863 -59,1.0796396732330322 -60,1.0777091979980469 -61,1.075937271118164 -62,1.074021816253662 -63,1.0716825723648071 -64,1.0691429376602173 -65,1.066778540611267 -66,1.0643364191055298 -67,1.057497262954712 -68,1.0560153722763062 -69,1.0463708639144897 -70,1.0739097595214844 -71,1.0773957967758179 
-72,1.095451831817627 -73,1.093056321144104 -74,1.0890024900436401 -75,1.0857044458389282 -76,1.0811971426010132 -77,1.0749369859695435 -78,1.080056071281433 -79,1.0806490182876587 -80,1.0743281841278076 -81,1.0462005138397217 -82,1.0452343225479126 -83,1.048316478729248 -84,1.0480690002441406 -85,1.0424796342849731 -86,1.0383754968643188 -87,1.0565564632415771 -88,1.036283016204834 -89,1.039870262145996 -90,1.041857123374939 -91,1.0367422103881836 -92,1.0309635400772095 -93,1.0292631387710571 -94,1.0506340265274048 -95,1.0325100421905518 -96,1.0307750701904297 -97,1.0103464126586914 -98,1.0103163719177246 -99,0.9961365461349487 -100,0.9677155613899231 -101,0.9454376101493835 -102,0.944040060043335 -103,0.9378814101219177 -104,0.9326220750808716 -105,0.928594172000885 -106,0.9206691980361938 -107,0.9073977470397949 -108,0.9081133008003235 -109,0.9015911817550659 -110,0.9020310640335083 -111,0.9002777934074402 -112,0.8905704021453857 -113,0.8877788782119751 -114,0.8802834749221802 -115,0.8862090706825256 -116,0.8884850740432739 -117,0.8845982551574707 -118,0.8746154308319092 -119,0.8596267700195312 -120,0.8572601079940796 -121,0.8517112135887146 -122,0.8669522404670715 -123,0.8639866709709167 -124,0.8638798594474792 -125,0.8653892278671265 -126,0.8680069446563721 -127,0.8657275438308716 -128,0.8598066568374634 -129,0.8545275926589966 -130,0.8509934544563293 -131,0.8498417139053345 -132,0.8451749682426453 -133,0.838697075843811 -134,0.8316362500190735 -135,0.8272111415863037 -136,0.8197388052940369 -137,0.8212252259254456 -138,0.8215195536613464 -139,0.8200788497924805 -140,0.8148610591888428 -141,0.8138924241065979 -142,0.8122375011444092 -143,0.8051712512969971 -144,0.8013624548912048 -145,0.8021494150161743 -146,0.801565408706665 -147,0.7982281446456909 -148,0.7945678234100342 -149,0.7912273406982422 -150,0.7871538400650024 -151,0.7800770401954651 -152,0.7775476574897766 -153,0.7736361026763916 -154,0.7728707194328308 -155,0.7717796564102173 -156,0.7679808139801025 -157,0.7667056322097778 -158,0.7654604911804199 -159,0.7637823820114136 -160,0.7607815265655518 -161,0.7557823061943054 -162,0.7487210631370544 -163,0.7406527996063232 -164,0.743075966835022 -165,0.7366697788238525 -166,0.7468344569206238 -167,0.7495911121368408 -168,0.7439302802085876 -169,0.7371572852134705 -170,0.7388795018196106 -171,0.7325120568275452 -172,0.7293329238891602 -173,0.7267312407493591 -174,0.7221713662147522 -175,0.7147794365882874 -176,0.712761640548706 -177,0.7170422077178955 -178,0.7255465984344482 -179,0.7288293838500977 -180,0.7280754446983337 -181,0.7279298305511475 -182,0.7235725522041321 -183,0.7124432921409607 -184,0.681918740272522 -185,0.6458532214164734 -186,0.6113165616989136 -187,0.5930007100105286 -188,0.6060739755630493 -189,0.6891444325447083 -190,0.7965916395187378 -191,0.8313663005828857 -192,0.8750811219215393 -193,0.9529334306716919 -194,1.0372289419174194 -195,1.0992761850357056 -196,1.1279892921447754 -197,1.1241943836212158 -198,1.1248223781585693 -199,1.0940697193145752 -200,1.060122013092041 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.020/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.020/training_config.txt deleted file mode 100644 index 4b116fa..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.020/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight 
Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.020/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.020/training_log.csv deleted file mode 100644 index 8993224..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,2.1864874362945557 -2,2.079257011413574 -3,1.8941025733947754 -4,1.6702632904052734 -5,1.5391191244125366 -6,1.4138232469558716 -7,1.450480580329895 -8,1.5139294862747192 -9,1.4799691438674927 -10,1.4717389345169067 -11,1.4748433828353882 -12,1.472725749015808 -13,1.4786887168884277 -14,1.4763191938400269 -15,1.4718589782714844 -16,1.464564323425293 -17,1.4554702043533325 -18,1.4494041204452515 -19,1.4440853595733643 -20,1.4400204420089722 -21,1.4320037364959717 -22,1.4335440397262573 -23,1.4272890090942383 -24,1.4203953742980957 -25,1.4204307794570923 -26,1.4234024286270142 -27,1.4072273969650269 -28,1.4269565343856812 -29,1.4410927295684814 -30,1.4452458620071411 
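Aside: every config in this sweep embeds the same normalized_loss source, and its two endpoint properties are easy to confirm in isolation. A minimal standalone sketch (only torch and math needed; the function body is copied from the configs above):

import math
import torch

def normalized_loss(theta, desired_theta, exponent, min_val=0.01, delta=1):
    # Same body as the normalized_loss source embedded in the deleted configs.
    error = torch.abs(theta - desired_theta)
    numerator = (error + delta) ** exponent - delta ** exponent
    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
    return min_val + (1 - min_val) * (numerator / denominator)

zero = torch.tensor(0.0)
print(normalized_loss(zero, zero, exponent=0.5))                       # tensor(0.0100) == min_val
print(normalized_loss(torch.tensor(2 * math.pi), zero, exponent=0.5))  # tensor(1.0000)

At error = 0 the numerator vanishes, leaving min_val; at error = 2π the numerator equals the denominator, giving exactly 1, as the docstring states.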
-31,1.4328728914260864 -32,1.4337974786758423 -33,1.3932658433914185 -34,1.39686918258667 -35,1.3834508657455444 -36,1.4213942289352417 -37,1.3945616483688354 -38,1.4330343008041382 -39,1.4171687364578247 -40,1.4282629489898682 -41,1.4991275072097778 -42,1.6595112085342407 -43,1.575676441192627 -44,1.5967543125152588 -45,1.4948034286499023 -46,1.3528305292129517 -47,1.3587982654571533 -48,1.3222355842590332 -49,1.2095474004745483 -50,1.150171160697937 -51,1.1297292709350586 -52,1.1238762140274048 -53,1.0951485633850098 -54,1.041959524154663 -55,0.9923514127731323 -56,0.9269406795501709 -57,0.8955050706863403 -58,0.8553591966629028 -59,0.7994856834411621 -60,0.7466424107551575 -61,0.7564269304275513 -62,0.7416343092918396 -63,0.6893301606178284 -64,0.656705915927887 -65,0.6364743709564209 -66,0.6208157539367676 -67,0.6146029829978943 -68,0.6125989556312561 -69,0.6020426750183105 -70,0.5986618995666504 -71,0.5946180820465088 -72,0.5975226759910583 -73,0.5935636758804321 -74,0.6010663509368896 -75,0.6006225943565369 -76,0.6002271175384521 -77,0.5990448594093323 -78,0.6027929782867432 -79,0.6029893159866333 -80,0.6004418730735779 -81,0.5964370369911194 -82,0.5893253684043884 -83,0.5878283381462097 -84,0.585076630115509 -85,0.5811577439308167 -86,0.5748568773269653 -87,0.5634569525718689 -88,0.5500493049621582 -89,0.5445960760116577 -90,0.5364826917648315 -91,0.5262883305549622 -92,0.5225415825843811 -93,0.517623245716095 -94,0.5071588158607483 -95,0.5050635933876038 -96,0.4854481518268585 -97,0.49229058623313904 -98,0.48127254843711853 -99,0.4856733977794647 -100,0.48919448256492615 -101,0.48539435863494873 -102,0.4859865605831146 -103,0.48252934217453003 -104,0.48023927211761475 -105,0.47601261734962463 -106,0.499474436044693 -107,0.5173237919807434 -108,0.5199418663978577 -109,0.5267238020896912 -110,0.5277305841445923 -111,0.5438823699951172 -112,0.5512048006057739 -113,0.5566309094429016 -114,0.5665693283081055 -115,0.58656245470047 -116,0.5931544303894043 -117,0.585620105266571 -118,0.5688803195953369 -119,0.5612881183624268 -120,0.5545324683189392 -121,0.5454316735267639 -122,0.5427390933036804 -123,0.5377922058105469 -124,0.5380872488021851 -125,0.5346838235855103 -126,0.5276938676834106 -127,0.5178701877593994 -128,0.5192517042160034 -129,0.5173124670982361 -130,0.511246383190155 -131,0.5031496286392212 -132,0.49350517988204956 -133,0.4914042055606842 -134,0.4806959927082062 -135,0.4781157970428467 -136,0.46803614497184753 -137,0.4716032147407532 -138,0.4734819829463959 -139,0.47255468368530273 -140,0.4718664288520813 -141,0.4684658348560333 -142,0.46798309683799744 -143,0.4701618552207947 -144,0.4680912494659424 -145,0.4667641222476959 -146,0.46225506067276 -147,0.4642951786518097 -148,0.4660252034664154 -149,0.46567338705062866 -150,0.4655846059322357 -151,0.46244433522224426 -152,0.46574831008911133 -153,0.46638593077659607 -154,0.4668586552143097 -155,0.4670920670032501 -156,0.466675728559494 -157,0.4639771282672882 -158,0.46683770418167114 -159,0.4659000337123871 -160,0.4644864499568939 -161,0.46322688460350037 -162,0.4638603627681732 -163,0.46144092082977295 -164,0.4673479199409485 -165,0.4654633402824402 -166,0.4694529175758362 -167,0.46647176146507263 -168,0.4654717743396759 -169,0.4669090807437897 -170,0.46238765120506287 -171,0.4632357060909271 -172,0.4589671194553375 -173,0.444733589887619 -174,0.44814422726631165 -175,0.44224226474761963 -176,0.4560497999191284 -177,0.4600551426410675 -178,0.46098268032073975 -179,0.45923587679862976 -180,0.4642072021961212 
-181,0.4649030566215515 -182,0.46375900506973267 -183,0.4617590606212616 -184,0.458629846572876 -185,0.45653465390205383 -186,0.45391637086868286 -187,0.451072096824646 -188,0.4481046795845032 -189,0.4448804259300232 -190,0.4370953440666199 -191,0.4455304443836212 -192,0.44649627804756165 -193,0.4463185667991638 -194,0.44549843668937683 -195,0.4442598521709442 -196,0.4426906108856201 -197,0.44061362743377686 -198,0.4358190596103668 -199,0.42813840508461 -200,0.42621666193008423 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.040/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.040/training_config.txt deleted file mode 100644 index b6e7533..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.040/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.040/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.040/training_log.csv deleted file mode 100644 index fe4cd1a..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,2.1853270530700684 -2,1.8424105644226074 -3,1.5842230319976807 -4,1.4510667324066162 -5,1.5075438022613525 -6,1.4865801334381104 -7,1.495492935180664 -8,1.4990603923797607 -9,1.4733929634094238 -10,1.3737980127334595 -11,1.2401483058929443 -12,1.2110083103179932 -13,1.089177131652832 -14,1.0276134014129639 -15,0.9647066593170166 -16,0.887430727481842 -17,0.8381298780441284 -18,0.8104872107505798 -19,0.7629677057266235 -20,0.7678875923156738 -21,0.7069725394248962 -22,0.6476229429244995 -23,0.6155053973197937 -24,0.5910855531692505 -25,0.5794780850410461 -26,0.5514950156211853 -27,0.5498846173286438 -28,0.5206058025360107 -29,0.5187222361564636 -30,0.5158522725105286 -31,0.5050655603408813 -32,0.5030510425567627 -33,0.5005184412002563 -34,0.49615874886512756 -35,0.4895249605178833 -36,0.4806213080883026 -37,0.46587616205215454 -38,0.4538334012031555 -39,0.4574257433414459 -40,0.4497193694114685 -41,0.44332048296928406 -42,0.4314599931240082 -43,0.4118993580341339 -44,0.4011518061161041 -45,0.3906686305999756 -46,0.378737211227417 -47,0.3715396523475647 -48,0.36261919140815735 -49,0.3534567058086395 -50,0.353904664516449 -51,0.3673397898674011 -52,0.3623000681400299 -53,0.35900449752807617 -54,0.3604944944381714 -55,0.3561490476131439 -56,0.343487024307251 -57,0.32087430357933044 -58,0.3122802674770355 -59,0.30080896615982056 -60,0.29615211486816406 -61,0.2929946184158325 -62,0.28123876452445984 -63,0.2964189052581787 -64,0.29122886061668396 -65,0.2881415784358978 -66,0.2977277636528015 -67,0.29534104466438293 -68,0.2893059253692627 -69,0.27975451946258545 -70,0.2649153769016266 
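Aside: the docstring's stated reason for the delta shift can also be verified directly. With exponent < 1 the raw power law has an unbounded derivative at zero error, while delta = 1 keeps the gradient finite. A small autograd sketch, with values chosen only for illustration:

import math
import torch

def normalized_loss(theta, desired_theta, exponent, min_val=0.01, delta=1):
    error = torch.abs(theta - desired_theta)
    numerator = (error + delta) ** exponent - delta ** exponent
    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
    return min_val + (1 - min_val) * (numerator / denominator)

target = torch.tensor(0.0)

theta = torch.tensor(1e-8, requires_grad=True)
normalized_loss(theta, target, exponent=0.5).backward()
print(theta.grad)  # ~0.29: slope near zero error stays finite with delta=1

theta = torch.tensor(1e-8, requires_grad=True)
normalized_loss(theta, target, exponent=0.5, delta=0).backward()
print(theta.grad)  # ~2e3: the 1/(2*sqrt(error)) blow-up that delta is there to avoid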
-71,0.2613416314125061 -72,0.2635561227798462 -73,0.2542930245399475 -74,0.2557988166809082 -75,0.2598586976528168 -76,0.2527490258216858 -77,0.2413514256477356 -78,0.24797159433364868 -79,0.24773752689361572 -80,0.2396092265844345 -81,0.23010802268981934 -82,0.22980399429798126 -83,0.22533884644508362 -84,0.22274939715862274 -85,0.22860637307167053 -86,0.22250398993492126 -87,0.21561221778392792 -88,0.21286413073539734 -89,0.21070989966392517 -90,0.20366732776165009 -91,0.2000613659620285 -92,0.1941729336977005 -93,0.19064517319202423 -94,0.18560640513896942 -95,0.1800004243850708 -96,0.17274829745292664 -97,0.1721523255109787 -98,0.16779643297195435 -99,0.16419310867786407 -100,0.16370804607868195 -101,0.15822352468967438 -102,0.15261095762252808 -103,0.1452869027853012 -104,0.1626061350107193 -105,0.1711450219154358 -106,0.17518861591815948 -107,0.17678320407867432 -108,0.1763310581445694 -109,0.17436066269874573 -110,0.17110426723957062 -111,0.1670386642217636 -112,0.16358332335948944 -113,0.16564016044139862 -114,0.16592678427696228 -115,0.16643878817558289 -116,0.1671016365289688 -117,0.16649411618709564 -118,0.16549494862556458 -119,0.16430500149726868 -120,0.16262193024158478 -121,0.16043820977210999 -122,0.1578841507434845 -123,0.15530654788017273 -124,0.1540873646736145 -125,0.15424031019210815 -126,0.1534300148487091 -127,0.1520424336194992 -128,0.14977803826332092 -129,0.14806953072547913 -130,0.14700119197368622 -131,0.1461489200592041 -132,0.14496369659900665 -133,0.14389848709106445 -134,0.14190055429935455 -135,0.1405821591615677 -136,0.1391584575176239 -137,0.13892284035682678 -138,0.13557757437229156 -139,0.13411538302898407 -140,0.13344891369342804 -141,0.13227854669094086 -142,0.13091813027858734 -143,0.12980501353740692 -144,0.1290101259946823 -145,0.12743617594242096 -146,0.12654411792755127 -147,0.12543994188308716 -148,0.12415467202663422 -149,0.12302384525537491 -150,0.1204417496919632 -151,0.11872820556163788 -152,0.11713668704032898 -153,0.1142444759607315 -154,0.11092942953109741 -155,0.11379111558198929 -156,0.11550188809633255 -157,0.11527690291404724 -158,0.11577911674976349 -159,0.1138700619339943 -160,0.1074858084321022 -161,0.11580486595630646 -162,0.1155380830168724 -163,0.10833647102117538 -164,0.1125308945775032 -165,0.11335642635822296 -166,0.11080244928598404 -167,0.10951468348503113 -168,0.10738123953342438 -169,0.10827423632144928 -170,0.10912823677062988 -171,0.10817993432283401 -172,0.10706055909395218 -173,0.1079767644405365 -174,0.1089257225394249 -175,0.10616890341043472 -176,0.1091480702161789 -177,0.10935042053461075 -178,0.10416389256715775 -179,0.11462277173995972 -180,0.1172519400715828 -181,0.10963605344295502 -182,0.10488811880350113 -183,0.11033324897289276 -184,0.11043141782283783 -185,0.10589109361171722 -186,0.1068224236369133 -187,0.11000405251979828 -188,0.10708543658256531 -189,0.10535577684640884 -190,0.1082683652639389 -191,0.10565268993377686 -192,0.10506991297006607 -193,0.10880043357610703 -194,0.10551976412534714 -195,0.10002276301383972 -196,0.1043529361486435 -197,0.10406578332185745 -198,0.09967181086540222 -199,0.1010308712720871 -200,0.10147761553525925 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.050/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.050/training_config.txt deleted file mode 100644 index 4519258..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.050/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.050/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.050/training_log.csv deleted file mode 100644 index 62f9a73..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,2.169529914855957 -2,1.844244122505188 -3,1.4700028896331787 -4,1.2505474090576172 -5,1.1252309083938599 -6,1.0592488050460815 -7,1.1359546184539795 -8,1.0080175399780273 -9,0.8502873778343201 -10,0.7423998713493347 -11,0.6665404438972473 -12,0.6330356597900391 -13,0.6239616870880127 -14,0.6285354495048523 -15,0.6326475143432617 -16,0.6412897109985352 -17,0.6506186723709106 -18,0.6410632133483887 -19,0.6343741416931152 -20,0.6278436183929443 -21,0.6077826023101807 -22,0.5840175747871399 -23,0.5832247138023376 
-24,0.5511788129806519 -25,0.5505902171134949 -26,0.5383395552635193 -27,0.5382249355316162 -28,0.5334414839744568 -29,0.5247580409049988 -30,0.5176382064819336 -31,0.5194433331489563 -32,0.5097893476486206 -33,0.5046312212944031 -34,0.4884893596172333 -35,0.4981231689453125 -36,0.49932968616485596 -37,0.5085933804512024 -38,0.5173273682594299 -39,0.5106900334358215 -40,0.49659398198127747 -41,0.4935058653354645 -42,0.48793211579322815 -43,0.48055562376976013 -44,0.47312718629837036 -45,0.4642174243927002 -46,0.4485430121421814 -47,0.45894739031791687 -48,0.46369025111198425 -49,0.46806010603904724 -50,0.46646419167518616 -51,0.4616087079048157 -52,0.45512041449546814 -53,0.44626083970069885 -54,0.43164172768592834 -55,0.42767390608787537 -56,0.41933131217956543 -57,0.4114472568035126 -58,0.40334925055503845 -59,0.39440998435020447 -60,0.3866659700870514 -61,0.38044852018356323 -62,0.37477365136146545 -63,0.36847659945487976 -64,0.36062175035476685 -65,0.35039472579956055 -66,0.3365800678730011 -67,0.3147258162498474 -68,0.2852092683315277 -69,0.2619646489620209 -70,0.24688757956027985 -71,0.2399025708436966 -72,0.23451675474643707 -73,0.2294600009918213 -74,0.22415515780448914 -75,0.21877388656139374 -76,0.2131466418504715 -77,0.20724207162857056 -78,0.20083296298980713 -79,0.19379685819149017 -80,0.18592782318592072 -81,0.17715425789356232 -82,0.16939373314380646 -83,0.1620950996875763 -84,0.1566828340291977 -85,0.1584087759256363 -86,0.15480300784111023 -87,0.16465993225574493 -88,0.16173399984836578 -89,0.1490383893251419 -90,0.149128258228302 -91,0.14736780524253845 -92,0.1408865749835968 -93,0.13711537420749664 -94,0.1394956409931183 -95,0.13902078568935394 -96,0.13727547228336334 -97,0.12887264788150787 -98,0.1288532167673111 -99,0.13028384745121002 -100,0.12734895944595337 -101,0.12383589148521423 -102,0.12631823122501373 -103,0.12357136607170105 -104,0.11826994270086288 -105,0.11865851283073425 -106,0.11764947324991226 -107,0.11618238687515259 -108,0.11727971583604813 -109,0.11005958914756775 -110,0.11550556868314743 -111,0.11823321878910065 -112,0.11655285954475403 -113,0.1121453270316124 -114,0.1223524734377861 -115,0.118833027780056 -116,0.10990063846111298 -117,0.11355848610401154 -118,0.11453016102313995 -119,0.10959713160991669 -120,0.10640430450439453 -121,0.10738600790500641 -122,0.10590681433677673 -123,0.10644590109586716 -124,0.10594771802425385 -125,0.10322455316781998 -126,0.10873517394065857 -127,0.1031942218542099 -128,0.1088137999176979 -129,0.1112639382481575 -130,0.10776393860578537 -131,0.10286970436573029 -132,0.11288055032491684 -133,0.11117672175168991 -134,0.10222665220499039 -135,0.10656686127185822 -136,0.10771244764328003 -137,0.10360192507505417 -138,0.1048864871263504 -139,0.10662589967250824 -140,0.10508555173873901 -141,0.09982316195964813 -142,0.10186901688575745 -143,0.10105016827583313 -144,0.09937787055969238 -145,0.1001669242978096 -146,0.0994502529501915 -147,0.09945807605981827 -148,0.09882689267396927 -149,0.09751823544502258 -150,0.09803126007318497 -151,0.09944356232881546 -152,0.09728319942951202 -153,0.1025111973285675 -154,0.10004356503486633 -155,0.10031678527593613 -156,0.10194376111030579 -157,0.09849485009908676 -158,0.10165532678365707 -159,0.10197727382183075 -160,0.09690123051404953 -161,0.09917918592691422 -162,0.09614098072052002 -163,0.0988646000623703 -164,0.0970906987786293 -165,0.10183890908956528 -166,0.10351310670375824 -167,0.1000322699546814 -168,0.09660466760396957 -169,0.09602294117212296 -170,0.09876678138971329 
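Aside: each training_log.csv deleted in this sweep is a plain two-column Epoch,Loss table, so extracting the best epoch of any run needs only the standard library. A sketch; the path is illustrative, taken from the pre-deletion layout in the diff headers above:

import csv

def best_epoch(log_path):
    """Return (epoch, loss) for the minimum-loss row of an Epoch,Loss CSV."""
    with open(log_path, newline="") as f:
        rows = [(int(r["Epoch"]), float(r["Loss"])) for r in csv.DictReader(f)]
    return min(rows, key=lambda row: row[1])

print(best_epoch("training/base_loss_learning_rate_sweep/one_half/lr_0.050/training_log.csv"))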
-171,0.10013819485902786 -172,0.0970768854022026 -173,0.09809113293886185 -174,0.0981677994132042 -175,0.0979207307100296 -176,0.0988585501909256 -177,0.09513265639543533 -178,0.10031801462173462 -179,0.09863299876451492 -180,0.09632236510515213 -181,0.09877127408981323 -182,0.09578467160463333 -183,0.09740961343050003 -184,0.09823527187108994 -185,0.09324457496404648 -186,0.09658043086528778 -187,0.09588976204395294 -188,0.09648849070072174 -189,0.09250644594430923 -190,0.09474215656518936 -191,0.09336742013692856 -192,0.09664322435855865 -193,0.0939905196428299 -194,0.09835164248943329 -195,0.10055584460496902 -196,0.09713885188102722 -197,0.09462706744670868 -198,0.09616422653198242 -199,0.09204868972301483 -200,0.09537271410226822 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.080/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.080/training_config.txt deleted file mode 100644 index 44e7a24..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.080/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.080/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.080/training_log.csv deleted file mode 100644 index 60bc0a7..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,2.1746840476989746 -2,2.001373291015625 -3,1.4903160333633423 -4,1.0770525932312012 -5,0.8961449861526489 -6,0.7791555523872375 -7,0.7490363717079163 -8,0.7300459146499634 -9,0.706596851348877 -10,0.6920714378356934 -11,0.6749076843261719 -12,0.6498388051986694 -13,0.6140002608299255 -14,0.5700377821922302 -15,0.525227963924408 -16,0.4825479984283447 -17,0.45910605788230896 -18,0.44805285334587097 -19,0.4378248453140259 -20,0.42509686946868896 -21,0.41069677472114563 -22,0.39705538749694824 -23,0.38397079706192017 -24,0.3706299364566803 -25,0.35690200328826904 -26,0.3434862494468689 -27,0.33009228110313416 -28,0.3160717487335205 -29,0.30262887477874756 -30,0.29261839389801025 -31,0.284845232963562 -32,0.29177388548851013 -33,0.2909400165081024 -34,0.2824162244796753 -35,0.26774293184280396 -36,0.2641109824180603 -37,0.26187407970428467 -38,0.25954827666282654 -39,0.2582181990146637 -40,0.2552824318408966 -41,0.24952419102191925 -42,0.24270853400230408 -43,0.23314517736434937 -44,0.23077355325222015 -45,0.22942811250686646 -46,0.22010864317417145 -47,0.2156265527009964 -48,0.21930018067359924 -49,0.2202986478805542 -50,0.21712158620357513 -51,0.20917581021785736 -52,0.1976747214794159 -53,0.2055012583732605 -54,0.20594844222068787 -55,0.20242594182491302 -56,0.19346439838409424 -57,0.19949620962142944 -58,0.20129834115505219 -59,0.1976803094148636 -60,0.19108270108699799 -61,0.1825151890516281 -62,0.18249642848968506 -63,0.18198013305664062 -64,0.17776265740394592 -65,0.1763063669204712 -66,0.17141306400299072 -67,0.16916687786579132 -68,0.16908928751945496 -69,0.16091759502887726 
-70,0.15629979968070984 -71,0.15173417329788208 -72,0.15036329627037048 -73,0.14816194772720337 -74,0.14680826663970947 -75,0.1431165635585785 -76,0.15579676628112793 -77,0.15064682066440582 -78,0.15748657286167145 -79,0.1635306179523468 -80,0.16908873617649078 -81,0.1715037077665329 -82,0.1690494567155838 -83,0.1654825061559677 -84,0.16089507937431335 -85,0.15723861753940582 -86,0.15342135727405548 -87,0.15337668359279633 -88,0.14896467328071594 -89,0.1432482749223709 -90,0.1491943597793579 -91,0.1448395699262619 -92,0.14020231366157532 -93,0.14308835566043854 -94,0.140527606010437 -95,0.13738247752189636 -96,0.1420619636774063 -97,0.13858452439308167 -98,0.14069652557373047 -99,0.14141377806663513 -100,0.14062704145908356 -101,0.13715918362140656 -102,0.14049573242664337 -103,0.1363249123096466 -104,0.1361480951309204 -105,0.13702984154224396 -106,0.13318300247192383 -107,0.13374397158622742 -108,0.1351056843996048 -109,0.12876704335212708 -110,0.13009043037891388 -111,0.13013748824596405 -112,0.12746112048625946 -113,0.12860050797462463 -114,0.1285390853881836 -115,0.12960021197795868 -116,0.12648320198059082 -117,0.12633761763572693 -118,0.1251002699136734 -119,0.1228410005569458 -120,0.12279636412858963 -121,0.12261074781417847 -122,0.12146823108196259 -123,0.12021832913160324 -124,0.12033328413963318 -125,0.12005985528230667 -126,0.11871335655450821 -127,0.11916553974151611 -128,0.11804063618183136 -129,0.11706869304180145 -130,0.1159578487277031 -131,0.11646230518817902 -132,0.11658401787281036 -133,0.11614234745502472 -134,0.11475525796413422 -135,0.11348326504230499 -136,0.11184834688901901 -137,0.11285457760095596 -138,0.1028088703751564 -139,0.10171851515769958 -140,0.09982611984014511 -141,0.10135991126298904 -142,0.10117921978235245 -143,0.09805311262607574 -144,0.10288751870393753 -145,0.09983957558870316 -146,0.09867742657661438 -147,0.10155688226222992 -148,0.10184872150421143 -149,0.09797421842813492 -150,0.09987737238407135 -151,0.1043926328420639 -152,0.10239782929420471 -153,0.09720270335674286 -154,0.09780514240264893 -155,0.09769171476364136 -156,0.09530490636825562 -157,0.09596844762563705 -158,0.09614173322916031 -159,0.09595738351345062 -160,0.09683465212583542 -161,0.09728387743234634 -162,0.0947444960474968 -163,0.09718268364667892 -164,0.0963558480143547 -165,0.0992504432797432 -166,0.09627273678779602 -167,0.09484125673770905 -168,0.09522999078035355 -169,0.09639368951320648 -170,0.09603158384561539 -171,0.09421057999134064 -172,0.092661552131176 -173,0.09393461793661118 -174,0.09427772462368011 -175,0.09618615359067917 -176,0.09218859672546387 -177,0.09196411073207855 -178,0.09241463989019394 -179,0.09195614606142044 -180,0.0922527089715004 -181,0.09239008277654648 -182,0.09449075907468796 -183,0.09275585412979126 -184,0.09682537615299225 -185,0.09624013304710388 -186,0.09369787573814392 -187,0.09402007609605789 -188,0.09246449917554855 -189,0.09560021013021469 -190,0.09113160520792007 -191,0.09227626770734787 -192,0.09178797155618668 -193,0.0930463969707489 -194,0.09039857238531113 -195,0.0950954481959343 -196,0.09453223645687103 -197,0.09392591565847397 -198,0.09728937596082687 -199,0.09304837137460709 -200,0.09371616691350937 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.100/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.100/training_config.txt deleted file mode 100644 index 4ed11d0..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.100/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base 
controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.100/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.100/training_log.csv deleted file mode 100644 index 8b8aec9..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,2.358433246612549 -2,1.7380083799362183 -3,1.523009181022644 -4,1.2694531679153442 -5,1.0241341590881348 -6,1.1517606973648071 -7,1.2998449802398682 -8,1.1438120603561401 -9,1.0724124908447266 -10,1.0158313512802124 -11,0.9844156503677368 -12,0.7656126618385315 -13,0.7417177557945251 -14,0.733391523361206 -15,0.7234828472137451 -16,0.7160509824752808 -17,0.7402266263961792 -18,0.7443031668663025 -19,0.7405902147293091 -20,0.7293604016304016 -21,0.727705717086792 -22,0.719597578048706 -23,0.7057821154594421 
-24,0.7014689445495605 -25,0.6896355748176575 -26,0.6770132184028625 -27,0.6606560945510864 -28,0.6398467421531677 -29,0.6199065446853638 -30,0.6067121028900146 -31,0.5952031016349792 -32,0.5831400752067566 -33,0.5696457028388977 -34,0.5571767091751099 -35,0.5470531582832336 -36,0.5371373295783997 -37,0.5278363823890686 -38,0.5202682614326477 -39,0.5154412984848022 -40,0.5113637447357178 -41,0.5070692300796509 -42,0.5019904375076294 -43,0.49614226818084717 -44,0.4895056486129761 -45,0.48203733563423157 -46,0.47370973229408264 -47,0.4647367596626282 -48,0.4566524028778076 -49,0.4414069652557373 -50,0.41740238666534424 -51,0.3997155427932739 -52,0.38489407300949097 -53,0.36962419748306274 -54,0.353161096572876 -55,0.3352747857570648 -56,0.30479490756988525 -57,0.28368276357650757 -58,0.263123095035553 -59,0.24073316156864166 -60,0.21380990743637085 -61,0.2049916535615921 -62,0.20480404794216156 -63,0.19061709940433502 -64,0.22584375739097595 -65,0.23921944200992584 -66,0.22207705676555634 -67,0.19175173342227936 -68,0.2118654102087021 -69,0.2173893004655838 -70,0.21403388679027557 -71,0.2028019279241562 -72,0.1838327795267105 -73,0.16645124554634094 -74,0.16737939417362213 -75,0.16774511337280273 -76,0.16333302855491638 -77,0.18841810524463654 -78,0.1906232237815857 -79,0.1717461198568344 -80,0.1612335741519928 -81,0.16374297440052032 -82,0.16264685988426208 -83,0.16353553533554077 -84,0.1608569622039795 -85,0.1544482260942459 -86,0.1426977962255478 -87,0.1273145228624344 -88,0.14338508248329163 -89,0.14426325261592865 -90,0.13178005814552307 -91,0.12186212837696075 -92,0.1306121051311493 -93,0.13498535752296448 -94,0.13747845590114594 -95,0.13648003339767456 -96,0.13161320984363556 -97,0.12309979647397995 -98,0.12279871851205826 -99,0.1215125322341919 -100,0.12093830108642578 -101,0.11393982917070389 -102,0.11145678907632828 -103,0.11659649014472961 -104,0.11417167633771896 -105,0.11012041568756104 -106,0.10632403939962387 -107,0.10744092613458633 -108,0.10948213189840317 -109,0.1082916185259819 -110,0.1059848889708519 -111,0.10845990478992462 -112,0.10452363640069962 -113,0.10457442700862885 -114,0.10695996880531311 -115,0.10546793043613434 -116,0.10347311198711395 -117,0.10505875200033188 -118,0.10192281752824783 -119,0.10151883959770203 -120,0.10463466495275497 -121,0.10355282574892044 -122,0.10484358668327332 -123,0.10556609183549881 -124,0.10391475260257721 -125,0.10301955044269562 -126,0.10233666002750397 -127,0.10226913541555405 -128,0.10439444333314896 -129,0.10070646554231644 -130,0.10474462807178497 -131,0.10500942170619965 -132,0.09943191707134247 -133,0.10206633806228638 -134,0.09946455806493759 -135,0.10262671113014221 -136,0.10112376511096954 -137,0.10130366683006287 -138,0.1039021909236908 -139,0.1028759554028511 -140,0.10200440883636475 -141,0.1024387776851654 -142,0.09710289537906647 -143,0.09866029024124146 -144,0.09919554740190506 -145,0.0975947231054306 -146,0.09802597016096115 -147,0.09736952930688858 -148,0.09707653522491455 -149,0.09528356790542603 -150,0.09691014885902405 -151,0.09558732807636261 -152,0.09533476084470749 -153,0.09397301077842712 -154,0.09938344359397888 -155,0.09512193500995636 -156,0.09712006151676178 -157,0.0972491130232811 -158,0.09507989138364792 -159,0.09735977649688721 -160,0.0979161411523819 -161,0.09476582705974579 -162,0.09680768102407455 -163,0.09917623549699783 -164,0.09773746132850647 -165,0.09495394676923752 -166,0.10124841332435608 -167,0.10566064715385437 -168,0.1006074920296669 -169,0.09434705972671509 -170,0.0984312891960144 
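Aside: the training-case values repeated in every config are float32 renderings of simple multiples of π (0.5235987901687622 is float32(π/6), 1.0471975803375244 is float32(π/3), 6.2831854820251465 is float32(2π)), so a sweep script could regenerate them rather than copy them. A sketch, assuming the [theta0, omega0, alpha0, desired_theta] layout shown in the configs:

import math
import numpy as np

def f32(x):
    """Round to float32, then report the float64 repr the configs print."""
    return float(np.float32(x))

# A few of the cases above, rebuilt from exact fractions of pi.
cases = [
    [f32(math.pi / 6), 0.0, 0.0, 0.0],
    [f32(-math.pi / 6), 0.0, 0.0, 0.0],
    [0.0, f32(math.pi / 3), 0.0, 0.0],
    [0.0, 0.0, 0.0, f32(2 * math.pi)],
    [f32(math.pi / 4), f32(math.pi), 0.0, 0.0],
]
for case in cases:
    print(case)  # e.g. [0.5235987901687622, 0.0, 0.0, 0.0]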
-171,0.09816785901784897 -172,0.09673287719488144 -173,0.09251240640878677 -174,0.09129419177770615 -175,0.09754009544849396 -176,0.09778999537229538 -177,0.09237460047006607 -178,0.10151756554841995 -179,0.0994313433766365 -180,0.09222684800624847 -181,0.09825102984905243 -182,0.09602739661931992 -183,0.09214505553245544 -184,0.09619522094726562 -185,0.0924985334277153 -186,0.09497334063053131 -187,0.09454101324081421 -188,0.09332100301980972 -189,0.09731867909431458 -190,0.09401796758174896 -191,0.09920597821474075 -192,0.09362262487411499 -193,0.09663565456867218 -194,0.1027231365442276 -195,0.10039439052343369 -196,0.0957486480474472 -197,0.09897614270448685 -198,0.10049466043710709 -199,0.09476757794618607 -200,0.0997181087732315 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.125/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.125/training_config.txt deleted file mode 100644 index 5068b2e..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.125/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.125/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.125/training_log.csv deleted file mode 100644 index 51eae78..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,2.688716173171997 -2,1.5239933729171753 -3,1.5393582582473755 -4,1.0959677696228027 -5,0.9552558660507202 -6,0.910280704498291 -7,0.9073458909988403 -8,0.8633150458335876 -9,0.8723593950271606 -10,0.8195877075195312 -11,0.7988299131393433 -12,0.7940869927406311 -13,0.8207513093948364 -14,0.8072560429573059 -15,0.6807224154472351 -16,0.5787550806999207 -17,0.5046325325965881 -18,0.4567889869213104 -19,0.42221879959106445 -20,0.37219560146331787 -21,0.3826168179512024 -22,0.3839942216873169 -23,0.37994274497032166 -24,0.3568728566169739 -25,0.3287677466869354 -26,0.2884560227394104 -27,0.24385003745555878 -28,0.2433919608592987 -29,0.24001455307006836 -30,0.24399591982364655 -31,0.23236732184886932 -32,0.22867587208747864 -33,0.22478163242340088 -34,0.2177419811487198 -35,0.20899127423763275 -36,0.20302057266235352 -37,0.19158215820789337 -38,0.19357164204120636 -39,0.17338351905345917 -40,0.1665142923593521 -41,0.15499044954776764 -42,0.14577001333236694 -43,0.1425931751728058 -44,0.1462075263261795 -45,0.1445891261100769 -46,0.13826541602611542 -47,0.13428565859794617 -48,0.13204771280288696 -49,0.12894456088542938 -50,0.13394661247730255 -51,0.13309690356254578 -52,0.12547561526298523 -53,0.12456076592206955 -54,0.12486019730567932 -55,0.12320750951766968 -56,0.11842665821313858 -57,0.11110015213489532 -58,0.1130218431353569 -59,0.11023709177970886 -60,0.10276690870523453 -61,0.10834449529647827 -62,0.10961444675922394 -63,0.10430389642715454 -64,0.10305812954902649 -65,0.1070115864276886 -66,0.09952478855848312 -67,0.10651043057441711 -68,0.1115066260099411 -69,0.10992284119129181 
-70,0.1034320816397667 -71,0.10023067891597748 -72,0.10344801843166351 -73,0.09768636524677277 -74,0.09914475679397583 -75,0.0979360044002533 -76,0.09794362634420395 -77,0.09731261432170868 -78,0.09667293727397919 -79,0.09490493685007095 -80,0.09559091925621033 -81,0.09394511580467224 -82,0.09463523328304291 -83,0.09594345837831497 -84,0.0935225710272789 -85,0.09428738802671432 -86,0.09379138052463531 -87,0.09243442118167877 -88,0.09268350154161453 -89,0.09221138060092926 -90,0.09586518257856369 -91,0.09298954904079437 -92,0.09792398661375046 -93,0.09429093450307846 -94,0.09978412091732025 -95,0.10146566480398178 -96,0.09274455159902573 -97,0.10243760794401169 -98,0.10415042191743851 -99,0.09392265230417252 -100,0.1045694574713707 -101,0.11109350621700287 -102,0.10647358000278473 -103,0.0934666097164154 -104,0.10355080664157867 -105,0.10694606602191925 -106,0.09884300827980042 -107,0.0971965342760086 -108,0.10167835652828217 -109,0.09729978442192078 -110,0.09486889839172363 -111,0.09786008298397064 -112,0.09212633222341537 -113,0.09566261619329453 -114,0.09317433834075928 -115,0.09636545926332474 -116,0.09484273940324783 -117,0.09618006646633148 -118,0.09721453487873077 -119,0.09248542040586472 -120,0.09350470453500748 -121,0.09275978803634644 -122,0.09296966344118118 -123,0.09144039452075958 -124,0.09311946481466293 -125,0.09156538546085358 -126,0.09132052212953568 -127,0.09169212728738785 -128,0.0906606987118721 -129,0.09235387295484543 -130,0.0917920395731926 -131,0.09057745337486267 -132,0.08979310840368271 -133,0.09112752974033356 -134,0.09304899722337723 -135,0.09047728031873703 -136,0.09484145045280457 -137,0.0946589782834053 -138,0.09258434921503067 -139,0.09590069949626923 -140,0.09667035192251205 -141,0.09813673049211502 -142,0.09427405893802643 -143,0.09799369424581528 -144,0.09914247691631317 -145,0.0945243388414383 -146,0.09613281488418579 -147,0.09430858492851257 -148,0.09645862877368927 -149,0.10128036141395569 -150,0.09575366973876953 -151,0.09832816570997238 -152,0.10234905779361725 -153,0.09482282400131226 -154,0.0976630300283432 -155,0.10170412063598633 -156,0.09313099086284637 -157,0.100615955889225 -158,0.10659903287887573 -159,0.10188128054141998 -160,0.09075988084077835 -161,0.10289835184812546 -162,0.10442501306533813 -163,0.0944148376584053 -164,0.09876866638660431 -165,0.10509399324655533 -166,0.10097695887088776 -167,0.09069716185331345 -168,0.09851940721273422 -169,0.09480408579111099 -170,0.09492667019367218 -171,0.1007070541381836 -172,0.09794231504201889 -173,0.09388214349746704 -174,0.09804925322532654 -175,0.09682431071996689 -176,0.09179519861936569 -177,0.09749460965394974 -178,0.09373769164085388 -179,0.09433822333812714 -180,0.10046595335006714 -181,0.09583621472120285 -182,0.09499620646238327 -183,0.0997474268078804 -184,0.09540674835443497 -185,0.09007707983255386 -186,0.09736322611570358 -187,0.09671460092067719 -188,0.09302614629268646 -189,0.09527812898159027 -190,0.09574031084775925 -191,0.09420765191316605 -192,0.09419658780097961 -193,0.09025240689516068 -194,0.09619222581386566 -195,0.0937998816370964 -196,0.09092769771814346 -197,0.09238547831773758 -198,0.09100429713726044 -199,0.08905801922082901 -200,0.09293346107006073 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.160/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.160/training_config.txt deleted file mode 100644 index d142761..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.160/training_config.txt +++ /dev/null @@ -1,63 
+0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.160/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.160/training_log.csv deleted file mode 100644 index 67ea4dd..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,3.4399921894073486 -2,1.1671464443206787 -3,1.0263512134552002 -4,0.8585180044174194 -5,0.858004629611969 -6,0.8130729794502258 -7,1.0299588441848755 -8,4.265998363494873 -9,4.475827217102051 -10,4.497794151306152 -11,4.371824741363525 -12,4.225469589233398 -13,4.156452655792236 -14,3.2760391235351562 -15,3.2901248931884766 -16,3.2148611545562744 -17,3.170281171798706 -18,3.32649827003479 -19,3.0342607498168945 -20,2.589634656906128 -21,2.2622783184051514 -22,2.0825860500335693 
-23,1.7496724128723145 -24,1.3891087770462036 -25,0.6599261164665222 -26,0.470650315284729 -27,0.45662757754325867 -28,0.4333275556564331 -29,0.40673527121543884 -30,0.37977486848831177 -31,0.3566829264163971 -32,0.3523373603820801 -33,0.32435086369514465 -34,0.2997196912765503 -35,0.2811371088027954 -36,0.26413077116012573 -37,0.24665682017803192 -38,0.2329929918050766 -39,0.21653582155704498 -40,0.21946950256824493 -41,0.2292984575033188 -42,0.2288999706506729 -43,0.22124671936035156 -44,0.2087724655866623 -45,0.20085881650447845 -46,0.20454658567905426 -47,0.20065398514270782 -48,0.1896040439605713 -49,0.1810798943042755 -50,0.17972803115844727 -51,0.1738511621952057 -52,0.16273033618927002 -53,0.16169437766075134 -54,0.15894703567028046 -55,0.1501828283071518 -56,0.14268401265144348 -57,0.14264142513275146 -58,0.12802013754844666 -59,0.14267133176326752 -60,0.15402548015117645 -61,0.16328895092010498 -62,0.165134459733963 -63,0.15894882380962372 -64,0.1532893031835556 -65,0.1472959667444229 -66,0.1405823826789856 -67,0.13195538520812988 -68,0.11722320318222046 -69,0.11439530551433563 -70,0.1132933646440506 -71,0.11074002087116241 -72,0.107852503657341 -73,0.10613369196653366 -74,0.11229285597801208 -75,0.11336785554885864 -76,0.10721391439437866 -77,0.10663791745901108 -78,0.1087559387087822 -79,0.10477546602487564 -80,0.09995321184396744 -81,0.10320938378572464 -82,0.10328935086727142 -83,0.09941080957651138 -84,0.10524646937847137 -85,0.10455353558063507 -86,0.10092496871948242 -87,0.10828711092472076 -88,0.10845521092414856 -89,0.1056375503540039 -90,0.10186001658439636 -91,0.10253342241048813 -92,0.09957577288150787 -93,0.10207279771566391 -94,0.10273765027523041 -95,0.10025788098573685 -96,0.1036067008972168 -97,0.10207608342170715 -98,0.0975092202425003 -99,0.10160036385059357 -100,0.09835570305585861 -101,0.09909934550523758 -102,0.10112586617469788 -103,0.09706339985132217 -104,0.10310395807027817 -105,0.10349714010953903 -106,0.10157173126935959 -107,0.0986780896782875 -108,0.10051313042640686 -109,0.09574715793132782 -110,0.09594352543354034 -111,0.09515818953514099 -112,0.09894803911447525 -113,0.0975758507847786 -114,0.09762746095657349 -115,0.0964386910200119 -116,0.09856288880109787 -117,0.09988389909267426 -118,0.09550268203020096 -119,0.10069110989570618 -120,0.10325835645198822 -121,0.09882444888353348 -122,0.09695151448249817 -123,0.09977121651172638 -124,0.09261587262153625 -125,0.10331758111715317 -126,0.10551568120718002 -127,0.09813554584980011 -128,0.10054777562618256 -129,0.10475403815507889 -130,0.09953675419092178 -131,0.09809315949678421 -132,0.10057442635297775 -133,0.09531184285879135 -134,0.1000899001955986 -135,0.10155396908521652 -136,0.0925692468881607 -137,0.10213404148817062 -138,0.1058991476893425 -139,0.10063762962818146 -140,0.09685508161783218 -141,0.10150227695703506 -142,0.09608160704374313 -143,0.09384115040302277 -144,0.09346088021993637 -145,0.09809783101081848 -146,0.09634557366371155 -147,0.09335034340620041 -148,0.09934154152870178 -149,0.10018093883991241 -150,0.09518299251794815 -151,0.09638385474681854 -152,0.0974612608551979 -153,0.09493265300989151 -154,0.09451110661029816 -155,0.09380608797073364 -156,0.0974801778793335 -157,0.09417133033275604 -158,0.09367808699607849 -159,0.09191571921110153 -160,0.09144528955221176 -161,0.0933178961277008 -162,0.09132317453622818 -163,0.09271252155303955 -164,0.09401481598615646 -165,0.09272214025259018 -166,0.09506122022867203 -167,0.09156269580125809 -168,0.09228184074163437 
-169,0.09464482218027115 -170,0.09287915378808975 -171,0.09378435462713242 -172,0.096316859126091 -173,0.0909808874130249 -174,0.09790302813053131 -175,0.10082588344812393 -176,0.09676065295934677 -177,0.09242468327283859 -178,0.09779967367649078 -179,0.09902316331863403 -180,0.09227389842271805 -181,0.09913361072540283 -182,0.10163439810276031 -183,0.09455103427171707 -184,0.10032651573419571 -185,0.10166408121585846 -186,0.09392663836479187 -187,0.10238960385322571 -188,0.10710886120796204 -189,0.1008593887090683 -190,0.09425365924835205 -191,0.10299721360206604 -192,0.10280716419219971 -193,0.09922820329666138 -194,0.09475552290678024 -195,0.09905817359685898 -196,0.10272675007581711 -197,0.102718286216259 -198,0.09426280856132507 -199,0.0957556813955307 -200,0.1007818728685379 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.200/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.200/training_config.txt deleted file mode 100644 index 1849f40..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.200/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.200/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.200/training_log.csv deleted file mode 100644 index a520d7a..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,4.633268356323242 -2,1.7218070030212402 -3,2.842303514480591 -4,1.7170990705490112 -5,1.571808099746704 -6,1.3614861965179443 -7,0.9501515626907349 -8,0.7996809482574463 -9,0.6815970540046692 -10,0.6156871318817139 -11,0.5520683526992798 -12,0.49406152963638306 -13,0.4427739381790161 -14,0.3882291913032532 -15,0.34762945771217346 -16,0.31714633107185364 -17,0.3155519366264343 -18,0.2996760308742523 -19,0.2736278474330902 -20,0.24108877778053284 -21,0.20830468833446503 -22,0.19799646735191345 -23,0.18864737451076508 -24,0.17942509055137634 -25,0.1716892570257187 -26,0.165944904088974 -27,0.165434330701828 -28,0.1591731756925583 -29,0.15568257868289948 -30,0.15666581690311432 -31,0.15083299577236176 -32,0.13984543085098267 -33,0.13109952211380005 -34,0.12470874935388565 -35,0.12188691645860672 -36,0.11925455927848816 -37,0.11445286870002747 -38,0.12003426998853683 -39,0.11483048647642136 -40,0.1129693016409874 -41,0.10968577861785889 -42,0.10626054555177689 -43,0.11121870577335358 -44,0.10815789550542831 -45,0.10818073153495789 -46,0.10676021128892899 -47,0.10277268290519714 -48,0.09884966164827347 -49,0.10021133720874786 -50,0.1018216460943222 -51,0.10205628722906113 -52,0.10006970167160034 -53,0.09942276030778885 -54,0.09717234969139099 -55,0.10042369365692139 -56,0.10167164355516434 -57,0.10070860385894775 -58,0.09667547792196274 -59,0.09615707397460938 -60,0.09861835092306137 -61,0.0992332175374031 -62,0.09718066453933716 -63,0.09724212437868118 -64,0.09460485726594925 -65,0.09589019417762756 -66,0.09465502947568893 -67,0.09762448072433472 -68,0.09662442654371262 -69,0.09785118699073792 
-70,0.09463480859994888 -71,0.09485030919313431 -72,0.09397398680448532 -73,0.09545910358428955 -74,0.09457255154848099 -75,0.09798739105463028 -76,0.09696757048368454 -77,0.09391956776380539 -78,0.09526462852954865 -79,0.09295747429132462 -80,0.09617458283901215 -81,0.09708663821220398 -82,0.0936812311410904 -83,0.09410067647695541 -84,0.10023656487464905 -85,0.09952782094478607 -86,0.09606326371431351 -87,0.0989609807729721 -88,0.10181845724582672 -89,0.09669061005115509 -90,0.09627973288297653 -91,0.09617269784212112 -92,0.09479775279760361 -93,0.09367331862449646 -94,0.1000368744134903 -95,0.10034182667732239 -96,0.09373302012681961 -97,0.09904691576957703 -98,0.09366212785243988 -99,0.10494700074195862 -100,0.10802678763866425 -101,0.09727383404970169 -102,0.10112504661083221 -103,0.10732457041740417 -104,0.10160867869853973 -105,0.09275362640619278 -106,0.10444199293851852 -107,0.10713205486536026 -108,0.10710395872592926 -109,0.09825288504362106 -110,0.09550195187330246 -111,0.10407181084156036 -112,0.10473335534334183 -113,0.10459655523300171 -114,0.0967487096786499 -115,0.0945444107055664 -116,0.09440594166517258 -117,0.09579072892665863 -118,0.09250274300575256 -119,0.09651163220405579 -120,0.09440179169178009 -121,0.09553593397140503 -122,0.09407293051481247 -123,0.09263134747743607 -124,0.0952998623251915 -125,0.09164600819349289 -126,0.09317658096551895 -127,0.09644661098718643 -128,0.09455570578575134 -129,0.09145285934209824 -130,0.09746433049440384 -131,0.10153818875551224 -132,0.09792811423540115 -133,0.09385444968938828 -134,0.09007633477449417 -135,0.09983684867620468 -136,0.10126683115959167 -137,0.09350155293941498 -138,0.09365364164113998 -139,0.09348376095294952 -140,0.09630882740020752 -141,0.10299713909626007 -142,0.10010219365358353 -143,0.09449902176856995 -144,0.10232970118522644 -145,0.10131219029426575 -146,0.09211060404777527 -147,0.10320974886417389 -148,0.10397730767726898 -149,0.09350308775901794 -150,0.10247515141963959 -151,0.10824376344680786 -152,0.0994173213839531 -153,0.09479514509439468 -154,0.10160070657730103 -155,0.0955565944314003 -156,0.09459837526082993 -157,0.09674859046936035 -158,0.08884984999895096 -159,0.09353478997945786 -160,0.08899236470460892 -161,0.08919026702642441 -162,0.09190872311592102 -163,0.09164614230394363 -164,0.09039010852575302 -165,0.09217050671577454 -166,0.09167370945215225 -167,0.0943571999669075 -168,0.09401169419288635 -169,0.09534290432929993 -170,0.08900710940361023 -171,0.08900091797113419 -172,0.08855142444372177 -173,0.09072481840848923 -174,0.08744034916162491 -175,0.08967015892267227 -176,0.09013191610574722 -177,0.0876171663403511 -178,0.09074938297271729 -179,0.08620432019233704 -180,0.087984599173069 -181,0.08656038343906403 -182,0.0923156887292862 -183,0.09198661148548126 -184,0.08697302639484406 -185,0.08879384398460388 -186,0.09146363288164139 -187,0.08879827708005905 -188,0.08672105520963669 -189,0.09157583862543106 -190,0.08919161558151245 -191,0.09410347044467926 -192,0.0889691486954689 -193,0.09599597007036209 -194,0.09683703631162643 -195,0.08827219158411026 -196,0.09154621511697769 -197,0.08874097466468811 -198,0.09493687003850937 -199,0.08951424807310104 -200,0.09616569429636002 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.250/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.250/training_config.txt deleted file mode 100644 index acb1840..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.250/training_config.txt +++ /dev/null @@ -1,63 
+0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.250/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.250/training_log.csv deleted file mode 100644 index 4b05012..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,5.934715747833252 -2,0.9830235242843628 -3,0.7109450697898865 -4,0.7049779295921326 -5,0.6398754715919495 -6,0.5143073201179504 -7,0.45022109150886536 -8,0.40576642751693726 -9,0.3764641284942627 -10,0.3429192006587982 -11,0.30029812455177307 -12,0.26843759417533875 -13,0.28146710991859436 -14,0.25107651948928833 -15,0.20668567717075348 -16,0.19300904870033264 -17,0.17957472801208496 -18,0.15961068868637085 -19,0.14306850731372833 -20,0.1779780238866806 -21,0.18291297554969788 
-22,0.17253084480762482 -23,0.1673128455877304 -24,0.1519843190908432 -25,0.12272793054580688 -26,0.12307410687208176 -27,0.1362430304288864 -28,0.14364118874073029 -29,0.1382160186767578 -30,0.1368439793586731 -31,0.13679541647434235 -32,0.12822706997394562 -33,0.10984697192907333 -34,0.1097346618771553 -35,0.11937372386455536 -36,0.12114051729440689 -37,0.12114936858415604 -38,0.11230652034282684 -39,0.09948696196079254 -40,0.10502037405967712 -41,0.11057890206575394 -42,0.11554276943206787 -43,0.11289231479167938 -44,0.11350201815366745 -45,0.11127810180187225 -46,0.1049499437212944 -47,0.10158628225326538 -48,0.11100660264492035 -49,0.10946602374315262 -50,0.11284323036670685 -51,0.10839830338954926 -52,0.10663499683141708 -53,0.10397621989250183 -54,0.11540043354034424 -55,0.11579523235559464 -56,0.10791734606027603 -57,0.09818071126937866 -58,0.11365891247987747 -59,0.11828875541687012 -60,0.12032882124185562 -61,0.11577403545379639 -62,0.10595745593309402 -63,0.09993445873260498 -64,0.1066267117857933 -65,0.10450604557991028 -66,0.09597554057836533 -67,0.09912297129631042 -68,0.09810415655374527 -69,0.10143521428108215 -70,0.10162507742643356 -71,0.09717144072055817 -72,0.09703433513641357 -73,0.09666674584150314 -74,0.09702961891889572 -75,0.09748556464910507 -76,0.09454484283924103 -77,0.09605659544467926 -78,0.09421345591545105 -79,0.09469817578792572 -80,0.09607448428869247 -81,0.09592710435390472 -82,0.09605133533477783 -83,0.09689027816057205 -84,0.09320113062858582 -85,0.0936802551150322 -86,0.09856259077787399 -87,0.0965288057923317 -88,0.09810687601566315 -89,0.09623110294342041 -90,0.09564904868602753 -91,0.09403635561466217 -92,0.09257730096578598 -93,0.09393861144781113 -94,0.09404195845127106 -95,0.09974870085716248 -96,0.09575823694467545 -97,0.09971970319747925 -98,0.10259635001420975 -99,0.09506691992282867 -100,0.10334259271621704 -101,0.10486091673374176 -102,0.09540393203496933 -103,0.10547707229852676 -104,0.11048950254917145 -105,0.10512521862983704 -106,0.09431301802396774 -107,0.10206107795238495 -108,0.09778749942779541 -109,0.10344486683607101 -110,0.10393738746643066 -111,0.10159136354923248 -112,0.0952286347746849 -113,0.09819842129945755 -114,0.09145212918519974 -115,0.10250614583492279 -116,0.1062246561050415 -117,0.0997670590877533 -118,0.0953030213713646 -119,0.09697139263153076 -120,0.0929202139377594 -121,0.0941205844283104 -122,0.09414532035589218 -123,0.09333395212888718 -124,0.09588421136140823 -125,0.09709673374891281 -126,0.0929609015583992 -127,0.09925078600645065 -128,0.09832251816987991 -129,0.09138797223567963 -130,0.1007944643497467 -131,0.10416370630264282 -132,0.09984798729419708 -133,0.09172075986862183 -134,0.1001858338713646 -135,0.10411839932203293 -136,0.09976597130298615 -137,0.09381230920553207 -138,0.10164790600538254 -139,0.10466942191123962 -140,0.10173159837722778 -141,0.09433694928884506 -142,0.10068510472774506 -143,0.10470490902662277 -144,0.09979762136936188 -145,0.09063556790351868 -146,0.09533154219388962 -147,0.09630080312490463 -148,0.09055892378091812 -149,0.09067640453577042 -150,0.0902600958943367 -151,0.08900193125009537 -152,0.09503751993179321 -153,0.09208936989307404 -154,0.08810841292142868 -155,0.09429841488599777 -156,0.09783543646335602 -157,0.09497247636318207 -158,0.09152542054653168 -159,0.09457608312368393 -160,0.09687190502882004 -161,0.09167822450399399 -162,0.0920402929186821 -163,0.09282510727643967 -164,0.09654655307531357 -165,0.09289788454771042 -166,0.0927291214466095 -167,0.09386250376701355 
-168,0.08856771886348724 -169,0.08911248296499252 -170,0.08795499801635742 -171,0.08774285018444061 -172,0.0891939103603363 -173,0.08744078874588013 -174,0.09292645007371902 -175,0.09054861962795258 -176,0.09208942204713821 -177,0.09284055233001709 -178,0.09477032721042633 -179,0.08941418677568436 -180,0.09361937642097473 -181,0.09304226189851761 -182,0.0944054052233696 -183,0.09240671247243881 -184,0.09110556542873383 -185,0.09256403893232346 -186,0.08852539956569672 -187,0.08940569311380386 -188,0.08665762841701508 -189,0.09031728655099869 -190,0.09205824136734009 -191,0.09040126949548721 -192,0.08889517188072205 -193,0.08939017355442047 -194,0.09162093698978424 -195,0.0882963016629219 -196,0.09087496250867844 -197,0.0901964083313942 -198,0.09050974249839783 -199,0.08957212418317795 -200,0.09111212939023972 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.300/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.300/training_config.txt deleted file mode 100644 index b9d383c..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.300/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.300/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.300/training_log.csv deleted file mode 100644 index 8c0db99..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.300/training_log.csv +++ /dev/null @@ -1,55 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,6.532146453857422 -2,0.9733349680900574 -3,0.7573456168174744 -4,2.0879273414611816 -5,6.9928364753723145 -6,7.645502090454102 -7,7.718578338623047 -8,7.7584309577941895 -9,7.780616283416748 -10,7.88822078704834 -11,7.983259201049805 -12,7.957936763763428 -13,8.161380767822266 -14,8.170310020446777 -15,8.170429229736328 -16,8.160181045532227 -17,8.027851104736328 -18,8.028975486755371 -19,8.02234935760498 -20,8.007378578186035 -21,7.975668430328369 -22,7.953291893005371 -23,7.726495265960693 -24,7.670090675354004 -25,7.888734340667725 -26,7.932565212249756 -27,7.894569396972656 -28,7.763021469116211 -29,7.280590057373047 -30,6.906335830688477 -31,6.624904155731201 -32,6.266253471374512 -33,5.252723217010498 -34,4.891237258911133 -35,4.475665092468262 -36,4.33233118057251 -37,3.6556754112243652 -38,2.9560773372650146 -39,0.8645632863044739 -40,0.6540758013725281 -41,0.6125065088272095 -42,0.5764771103858948 -43,0.5453358292579651 -44,0.5170462727546692 -45,0.4905022978782654 -46,0.4653904438018799 -47,0.4414188861846924 -48,0.41859930753707886 -49,0.4252408444881439 -50,nan -51,nan -52,nan -53,nan diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.400/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.400/training_config.txt deleted file mode 100644 index f1cf21a..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.400/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 
0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.400/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.400/training_log.csv deleted file mode 100644 index 41d4fb4..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.400/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,7.470058441162109 -2,0.8523464798927307 -3,3.5259411334991455 -4,0.5408902168273926 -5,0.4667302072048187 -6,0.4175697863101959 -7,0.37301501631736755 -8,0.3177974820137024 -9,0.334554523229599 -10,0.3660605847835541 -11,0.36569488048553467 -12,0.3453773558139801 -13,0.3355959355831146 -14,0.328816682100296 -15,0.31451570987701416 -16,0.30198565125465393 -17,0.29089516401290894 -18,0.27825239300727844 -19,0.26490291953086853 -20,0.2508532404899597 -21,0.23476451635360718 -22,0.21637624502182007 -23,0.19541379809379578 -24,0.17467010021209717 -25,0.16543927788734436 -26,0.17394188046455383 -27,0.17379124462604523 -28,0.17290188372135162 -29,0.16836632788181305 
-30,0.1622752994298935 -31,0.1493954211473465 -32,0.13881197571754456 -33,0.14022554457187653 -34,0.14253556728363037 -35,0.14291639626026154 -36,0.1413918435573578 -37,0.1377445012331009 -38,0.13122913241386414 -39,0.12487799674272537 -40,0.11760665476322174 -41,0.12400957942008972 -42,0.12402268499135971 -43,0.12074477225542068 -44,0.11616513133049011 -45,0.11464016884565353 -46,0.11702074110507965 -47,0.11788465827703476 -48,0.11078394949436188 -49,0.10707808285951614 -50,0.11098644882440567 -51,0.10813949257135391 -52,0.10543939471244812 -53,0.10692302882671356 -54,0.10755149275064468 -55,0.10787274688482285 -56,0.10140225291252136 -57,0.10678059607744217 -58,0.10781962424516678 -59,0.10133162140846252 -60,0.10538869351148605 -61,0.10890121012926102 -62,0.10166117548942566 -63,0.10232502967119217 -64,0.10398226976394653 -65,0.10030968487262726 -66,0.1010059267282486 -67,0.09913836419582367 -68,0.10101766884326935 -69,0.10024537146091461 -70,0.10026691108942032 -71,0.10053741931915283 -72,0.10096677392721176 -73,0.09703604131937027 -74,0.09756311029195786 -75,0.0969357043504715 -76,0.09572598338127136 -77,0.09948857128620148 -78,0.09661409258842468 -79,0.10078523308038712 -80,0.10136177390813828 -81,0.09542567282915115 -82,0.1014685407280922 -83,0.10344243794679642 -84,0.09544209390878677 -85,0.1079011932015419 -86,0.11128448694944382 -87,0.11056745052337646 -88,0.10242763161659241 -89,0.10157708078622818 -90,0.10630656033754349 -91,0.10468962788581848 -92,0.09458398073911667 -93,0.1066116914153099 -94,0.10962557792663574 -95,0.10875499248504639 -96,0.10080647468566895 -97,0.10092714428901672 -98,0.10443942248821259 -99,0.10736251622438431 -100,0.10277612507343292 -101,0.09983404725790024 -102,0.10457166284322739 -103,0.1057884618639946 -104,0.1055644080042839 -105,0.09662099182605743 -106,0.1054852083325386 -107,0.11237051337957382 -108,0.1102972999215126 -109,0.10653342306613922 -110,0.10019364953041077 -111,0.10003834217786789 -112,0.10691182315349579 -113,0.10542195290327072 -114,0.09873246401548386 -115,0.09635553508996964 -116,0.10008405894041061 -117,0.0995258316397667 -118,0.09423353523015976 -119,0.09906941652297974 -120,0.10028708726167679 -121,0.09765278548002243 -122,0.0947585478425026 -123,0.09642501920461655 -124,0.09478721022605896 -125,0.09448689222335815 -126,0.0955180674791336 -127,0.09447992593050003 -128,0.09838646650314331 -129,0.09879612922668457 -130,0.09200070798397064 -131,0.09994829446077347 -132,0.10193122923374176 -133,0.09743598848581314 -134,0.09536248445510864 -135,0.09790855646133423 -136,0.09230569005012512 -137,0.09957162290811539 -138,0.10353168100118637 -139,0.09818137437105179 -140,0.09270978718996048 -141,0.09590548276901245 -142,0.09168503433465958 -143,0.09188443422317505 -144,0.09132067859172821 -145,0.09412675350904465 -146,0.0908600315451622 -147,0.10092086344957352 -148,0.09911283850669861 -149,0.09495852142572403 -150,0.10541268438100815 -151,0.10626538842916489 -152,0.09807479381561279 -153,0.09548204392194748 -154,0.09553559124469757 -155,0.09434044361114502 -156,0.09522350132465363 -157,0.09050958603620529 -158,0.09553214907646179 -159,0.09969018399715424 -160,0.09485550224781036 -161,0.09319932758808136 -162,0.10176315903663635 -163,0.09918981790542603 -164,0.09910688549280167 -165,0.10093333572149277 -166,0.10251167416572571 -167,0.09790468215942383 -168,0.09274405241012573 -169,0.09562180191278458 -170,0.098443903028965 -171,0.09441555291414261 -172,0.0940561294555664 -173,0.09402935206890106 -174,0.09500926733016968 
-175,0.09392016381025314 -176,0.08923441171646118 -177,0.09798004478216171 -178,0.09748242050409317 -179,0.09061242640018463 -180,0.0984516590833664 -181,0.09941217303276062 -182,0.09379463642835617 -183,0.09503092616796494 -184,0.10252890735864639 -185,0.09915277361869812 -186,0.09493089467287064 -187,0.09614145755767822 -188,0.09535933285951614 -189,0.09294629842042923 -190,0.09016509354114532 -191,0.09278380125761032 -192,0.09039317816495895 -193,0.09347531944513321 -194,0.0961071029305458 -195,0.08966007828712463 -196,0.09846541285514832 -197,0.0991307720541954 -198,0.0982275977730751 -199,0.09712965786457062 -200,0.09589947015047073 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.500/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.500/training_config.txt deleted file mode 100644 index 3f17ec1..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.500/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.500/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.500/training_log.csv deleted file mode 100644 index d818f27..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.500/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,8.101946830749512 -2,4.337261199951172 -3,0.7130698561668396 -4,1.4732517004013062 -5,0.5262705683708191 -6,0.41694414615631104 -7,0.3865049183368683 -8,0.3547259271144867 -9,0.3190578818321228 -10,0.2775569558143616 -11,0.2526097893714905 -12,0.2502021789550781 -13,0.23589743673801422 -14,0.23037193715572357 -15,0.2075444757938385 -16,0.32258301973342896 -17,0.5651567578315735 -18,0.5670267343521118 -19,0.5532003045082092 -20,0.7345653176307678 -21,0.809249758720398 -22,0.7117406129837036 -23,0.5784265398979187 -24,0.44279974699020386 -25,0.3697846531867981 -26,0.3386504352092743 -27,0.2762228548526764 -28,0.27352020144462585 -29,0.26952797174453735 -30,0.2743604779243469 -31,0.27297255396842957 -32,0.26141542196273804 -33,0.23972003161907196 -34,0.23002126812934875 -35,0.22926867008209229 -36,0.21451616287231445 -37,0.20335538685321808 -38,0.19916610419750214 -39,0.2036384493112564 -40,0.20045864582061768 -41,0.18495184183120728 -42,0.1773393750190735 -43,0.1780303716659546 -44,0.1782016158103943 -45,0.16123947501182556 -46,0.14833858609199524 -47,0.13757233321666718 -48,0.1314454972743988 -49,0.13386663794517517 -50,0.13800838589668274 -51,0.12991909682750702 -52,0.12826676666736603 -53,0.13128851354122162 -54,0.13286902010440826 -55,0.1266874372959137 -56,0.12074166536331177 -57,0.11515756696462631 -58,0.1099315658211708 -59,0.10841619223356247 -60,0.10758862644433975 -61,0.10629312694072723 -62,0.10822793841362 -63,0.10686087608337402 -64,0.10380832850933075 -65,0.10462633520364761 -66,0.10148649662733078 -67,0.10171914845705032 -68,0.10130707174539566 -69,0.09850175678730011 
-70,0.10363931208848953 -71,0.10767163336277008 -72,0.10396894812583923 -73,0.10071699321269989 -74,0.1050809845328331 -75,0.10834121704101562 -76,0.10054648667573929 -77,0.10611206293106079 -78,0.10647580027580261 -79,0.1040555089712143 -80,0.10166727751493454 -81,0.10251495987176895 -82,0.09851112961769104 -83,0.10323615372180939 -84,0.09758497029542923 -85,0.105072021484375 -86,0.10885719954967499 -87,0.09812909364700317 -88,0.10391996800899506 -89,0.10492263734340668 -90,0.10071199387311935 -91,0.09861864894628525 -92,0.10306163132190704 -93,0.0997968390583992 -94,0.09890962392091751 -95,0.09784042090177536 -96,0.09636902064085007 -97,0.09911751002073288 -98,0.09562746435403824 -99,0.09643658995628357 -100,0.09371112287044525 -101,0.09229438751935959 -102,0.09613960236310959 -103,0.0970318391919136 -104,0.09422831237316132 -105,0.0971950814127922 -106,0.09599816799163818 -107,0.0995115414261818 -108,0.1059972494840622 -109,0.10338836908340454 -110,0.10113319009542465 -111,0.10500150918960571 -112,0.1011686772108078 -113,0.09715085476636887 -114,0.10226444900035858 -115,0.09848009049892426 -116,0.09402953833341599 -117,0.09843037277460098 -118,0.0976371318101883 -119,0.093772292137146 -120,0.09393176436424255 -121,0.0912749171257019 -122,0.09229268878698349 -123,0.09656090289354324 -124,0.09437941759824753 -125,0.0956038236618042 -126,0.09229275584220886 -127,0.09736207127571106 -128,0.10015738010406494 -129,0.09717278182506561 -130,0.09754063934087753 -131,0.09882020950317383 -132,0.09449062496423721 -133,0.09324546158313751 -134,0.09335633367300034 -135,0.09272391349077225 -136,0.09210533648729324 -137,0.0929284319281578 -138,0.0958893820643425 -139,0.09447959065437317 -140,0.09250865131616592 -141,0.094594307243824 -142,0.09205985069274902 -143,0.0964517742395401 -144,0.09448518604040146 -145,0.09744911640882492 -146,0.0954861268401146 -147,0.092599056661129 -148,0.09913412481546402 -149,0.09924563765525818 -150,0.09139934182167053 -151,0.09796115010976791 -152,0.0972367376089096 -153,0.09780339151620865 -154,0.09352511167526245 -155,0.09876835346221924 -156,0.09619922935962677 -157,0.09414488822221756 -158,0.09742644429206848 -159,0.09524794667959213 -160,0.09269178658723831 -161,0.0931859239935875 -162,0.09225688129663467 -163,0.0940014198422432 -164,0.09020508080720901 -165,0.09698014706373215 -166,0.09256376326084137 -167,0.10139664262533188 -168,0.10303696244955063 -169,0.09263106435537338 -170,0.10498157888650894 -171,0.10971024632453918 -172,0.09919553250074387 -173,0.09926420450210571 -174,0.10613478720188141 -175,0.09867149591445923 -176,0.0940600261092186 -177,0.09719426184892654 -178,0.09459145367145538 -179,0.12691275775432587 -180,0.13981452584266663 -181,0.1463158279657364 -182,0.15437225997447968 -183,0.15129417181015015 -184,0.14828649163246155 -185,0.13871990144252777 -186,0.12462565302848816 -187,0.1198619157075882 -188,0.12866869568824768 -189,0.1340850442647934 -190,0.13495540618896484 -191,0.13499480485916138 -192,0.1353243738412857 -193,0.1319616436958313 -194,0.13471443951129913 -195,0.1291528344154358 -196,0.12990982830524445 -197,0.12799009680747986 -198,0.11737561970949173 -199,0.11697296798229218 -200,0.11978898197412491 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.600/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.600/training_config.txt deleted file mode 100644 index bd91bb9..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.600/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base 
controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.600/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.600/training_log.csv deleted file mode 100644 index e6e0be4..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.600/training_log.csv +++ /dev/null @@ -1,18 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,8.220053672790527 -2,7.716942310333252 -3,2.109666109085083 -4,0.5575574636459351 -5,0.5265039801597595 -6,0.4362044930458069 -7,0.4195132255554199 -8,0.4002525210380554 -9,0.3778596520423889 -10,0.3291352689266205 -11,0.29065170884132385 -12,0.8186007738113403 -13,nan -14,nan -15,nan -16,nan diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.700/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.700/training_config.txt deleted file mode 100644 index b3bc86b..0000000 
--- a/training/base_loss_learning_rate_sweep/one_half/lr_0.700/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.700/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.700/training_log.csv deleted file mode 100644 index 939a995..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.700/training_log.csv +++ /dev/null @@ -1,12 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,8.247651100158691 -2,8.244234085083008 -3,8.1273193359375 -4,4.752761363983154 -5,0.5147081613540649 -6,0.623521089553833 -7,nan -8,nan -9,nan -10,nan diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.800/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.800/training_config.txt deleted file mode 100644 index 5eeab98..0000000 --- 
a/training/base_loss_learning_rate_sweep/one_half/lr_0.800/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.800/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.800/training_log.csv deleted file mode 100644 index 970d975..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,8.251254081726074 -2,8.251808166503906 -3,8.251808166503906 -4,8.251808166503906 -5,8.251808166503906 -6,8.251808166503906 -7,8.251808166503906 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.900/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_0.900/training_config.txt deleted file mode 100644 index 75aea11..0000000 --- 
a/training/base_loss_learning_rate_sweep/one_half/lr_0.900/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_0.900/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_0.900/training_log.csv deleted file mode 100644 index b15a568..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_0.900/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,8.251808166503906 -2,8.251808166503906 -3,8.251808166503906 -4,8.251808166503906 -5,8.251808166503906 -6,8.251808166503906 diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_1.000/training_config.txt b/training/base_loss_learning_rate_sweep/one_half/lr_1.000/training_config.txt deleted file mode 100644 index ae624a1..0000000 --- 
a/training/base_loss_learning_rate_sweep/one_half/lr_1.000/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function Name: one_half -Loss Function Exponent: 0.5 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_half_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_half/lr_1.000/training_log.csv b/training/base_loss_learning_rate_sweep/one_half/lr_1.000/training_log.csv deleted file mode 100644 index b15a568..0000000 --- a/training/base_loss_learning_rate_sweep/one_half/lr_1.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,2.1826066970825195 -1,8.251808166503906 -2,8.251808166503906 -3,8.251808166503906 -4,8.251808166503906 -5,8.251808166503906 -6,8.251808166503906 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.003/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.003/training_config.txt deleted file mode 100644 index 685a292..0000000 --- 
a/training/base_loss_learning_rate_sweep/one_third/lr_0.003/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.0025 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.003/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.003/training_log.csv deleted file mode 100644 index 2636ed2..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.003/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,1.8385334014892578 -2,1.7923020124435425 -3,1.7621530294418335 -4,1.7571710348129272 -5,1.792307734489441 -6,1.8093068599700928 -7,1.8144179582595825 -8,1.8160213232040405 -9,1.8155635595321655 -10,1.8135337829589844 -11,1.8099631071090698 -12,1.804831624031067 -13,1.7984275817871094 -14,1.7905795574188232 -15,1.78093683719635 -16,1.768936038017273 
-17,1.7513043880462646 -18,1.724238634109497 -19,1.7275587320327759 -20,1.712952733039856 -21,1.7020102739334106 -22,1.7336241006851196 -23,1.7401542663574219 -24,1.741686224937439 -25,1.7410390377044678 -26,1.7394239902496338 -27,1.7368766069412231 -28,1.7332870960235596 -29,1.7284408807754517 -30,1.7207432985305786 -31,1.712604284286499 -32,1.7048169374465942 -33,1.6979976892471313 -34,1.6921418905258179 -35,1.6863772869110107 -36,1.6791255474090576 -37,1.6658451557159424 -38,1.6505357027053833 -39,1.6031569242477417 -40,1.5795953273773193 -41,1.5823286771774292 -42,1.5668944120407104 -43,1.5423741340637207 -44,1.5086733102798462 -45,1.4929841756820679 -46,1.4878168106079102 -47,1.4876209497451782 -48,1.4826359748840332 -49,1.5097359418869019 -50,1.5138826370239258 -51,1.4950228929519653 -52,1.4818669557571411 -53,1.4537606239318848 -54,1.4803462028503418 -55,1.4714491367340088 -56,1.4725161790847778 -57,1.468363642692566 -58,1.4657032489776611 -59,1.4666507244110107 -60,1.4551520347595215 -61,1.447176456451416 -62,1.4448844194412231 -63,1.4167330265045166 -64,1.3965734243392944 -65,1.3855431079864502 -66,1.3567302227020264 -67,1.349766492843628 -68,1.3482393026351929 -69,1.3483458757400513 -70,1.3486340045928955 -71,1.3305195569992065 -72,1.331059455871582 -73,1.3354456424713135 -74,1.3261691331863403 -75,1.3178646564483643 -76,1.326471209526062 -77,1.3284204006195068 -78,1.2838129997253418 -79,1.2745957374572754 -80,1.243709921836853 -81,1.2468799352645874 -82,1.26051664352417 -83,1.2618834972381592 -84,1.261278748512268 -85,1.2377700805664062 -86,1.2252956628799438 -87,1.2359015941619873 -88,1.234358549118042 -89,1.2133071422576904 -90,1.215346336364746 -91,1.2150096893310547 -92,1.211989402770996 -93,1.207862377166748 -94,1.2019308805465698 -95,1.1842535734176636 -96,1.2113301753997803 -97,1.2146492004394531 -98,1.2347551584243774 -99,1.2333506345748901 -100,1.227697730064392 -101,1.2182679176330566 -102,1.2135447263717651 -103,1.2056543827056885 -104,1.2217979431152344 -105,1.2057437896728516 -106,1.2098498344421387 -107,1.2034655809402466 -108,1.2014579772949219 -109,1.1823408603668213 -110,1.1655926704406738 -111,1.1504607200622559 -112,1.1425198316574097 -113,1.1300818920135498 -114,1.1252471208572388 -115,1.1175296306610107 -116,1.1185201406478882 -117,1.1132136583328247 -118,1.1159603595733643 -119,1.11582612991333 -120,1.1127398014068604 -121,1.1181769371032715 -122,1.1177517175674438 -123,1.1156727075576782 -124,1.1099815368652344 -125,1.12002694606781 -126,1.1203253269195557 -127,1.1210383176803589 -128,1.1216825246810913 -129,1.1222081184387207 -130,1.1225513219833374 -131,1.1226565837860107 -132,1.1225578784942627 -133,1.1222879886627197 -134,1.1218981742858887 -135,1.121495246887207 -136,1.1215063333511353 -137,1.1221468448638916 -138,1.1240488290786743 -139,1.1266921758651733 -140,1.1288772821426392 -141,1.1319419145584106 -142,1.1368571519851685 -143,1.1165813207626343 -144,1.109130859375 -145,1.1055068969726562 -146,1.1076432466506958 -147,1.1085864305496216 -148,1.1083961725234985 -149,1.1026779413223267 -150,1.1038566827774048 -151,1.103567361831665 -152,1.102920413017273 -153,1.103776454925537 -154,1.1135276556015015 -155,1.1040607690811157 -156,1.1014776229858398 -157,1.0963553190231323 -158,1.0889880657196045 -159,1.0888725519180298 -160,1.0890547037124634 -161,1.0885337591171265 -162,1.0877249240875244 -163,1.0867918729782104 -164,1.085806965827942 -165,1.0847975015640259 -166,1.0837795734405518 -167,1.0827648639678955 -168,1.0817724466323853 
-169,1.0808144807815552 -170,1.0799176692962646 -171,1.0791370868682861 -172,1.0784579515457153 -173,1.0778474807739258 -174,1.0772637128829956 -175,1.07668137550354 -176,1.0760939121246338 -177,1.0754998922348022 -178,1.074894666671753 -179,1.0742733478546143 -180,1.0736300945281982 -181,1.0729600191116333 -182,1.0722540616989136 -183,1.0715054273605347 -184,1.0707035064697266 -185,1.069838285446167 -186,1.0688915252685547 -187,1.0678420066833496 -188,1.0666584968566895 -189,1.0652875900268555 -190,1.0636491775512695 -191,1.0615962743759155 -192,1.0589455366134644 -193,1.0557111501693726 -194,1.0520026683807373 -195,1.0477677583694458 -196,1.0432111024856567 -197,1.032072901725769 -198,1.0238651037216187 -199,1.0216935873031616 -200,1.0182780027389526 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.005/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.005/training_config.txt deleted file mode 100644 index 5e2ff9c..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.005/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.005 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.005/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.005/training_log.csv deleted file mode 100644 index 5bb39ac..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.005/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,1.8201370239257812 -2,1.8206686973571777 -3,1.7991713285446167 -4,1.762706995010376 -5,1.7602343559265137 -6,1.7588448524475098 -7,1.751203179359436 -8,1.7403348684310913 -9,1.727457046508789 -10,1.711920142173767 -11,1.6948572397232056 -12,1.6724529266357422 -13,1.6497013568878174 -14,1.6250910758972168 -15,1.5618234872817993 -16,1.5414204597473145 -17,1.5033363103866577 -18,1.519364356994629 -19,1.5221819877624512 -20,1.514203429222107 -21,1.4304001331329346 -22,1.4130902290344238 -23,1.3903766870498657 -24,1.3984711170196533 -25,1.3695106506347656 -26,1.3869848251342773 -27,1.3369959592819214 -28,1.3354758024215698 -29,1.2913695573806763 -30,1.2517328262329102 -31,1.244531512260437 -32,1.2400000095367432 -33,1.2311807870864868 -34,1.2401262521743774 -35,1.2456073760986328 -36,1.2335104942321777 -37,1.2300578355789185 -38,1.2291676998138428 -39,1.2290449142456055 -40,1.2281451225280762 -41,1.227543830871582 -42,1.2427785396575928 -43,1.2404918670654297 -44,1.2279107570648193 -45,1.2284601926803589 -46,1.2273755073547363 -47,1.2232991456985474 -48,1.217631459236145 -49,1.2054508924484253 -50,1.2100111246109009 -51,1.2163605690002441 -52,1.2013005018234253 -53,1.1931787729263306 -54,1.1841418743133545 -55,1.1643693447113037 -56,1.165989875793457 -57,1.1659588813781738 -58,1.163834571838379 -59,1.1587886810302734 -60,1.182216763496399 -61,1.1788299083709717 -62,1.173915147781372 -63,1.1686699390411377 -64,1.1738429069519043 -65,1.1631578207015991 -66,1.149125576019287 -67,1.1493425369262695 -68,1.1689057350158691 -69,1.1659927368164062 -70,1.1698410511016846 -71,1.1635698080062866 
-72,1.1605184078216553 -73,1.160229206085205 -74,1.1595889329910278 -75,1.1573460102081299 -76,1.154805064201355 -77,1.1534687280654907 -78,1.1535611152648926 -79,1.161170482635498 -80,1.1528146266937256 -81,1.1693165302276611 -82,1.1806747913360596 -83,1.159953236579895 -84,1.1409811973571777 -85,1.1310439109802246 -86,1.1156995296478271 -87,1.087786316871643 -88,1.0835951566696167 -89,1.0832021236419678 -90,1.0675818920135498 -91,1.0602059364318848 -92,1.028119444847107 -93,1.0142277479171753 -94,1.0044437646865845 -95,0.9964226484298706 -96,0.9894255995750427 -97,0.9831018447875977 -98,0.9776513576507568 -99,0.9730268716812134 -100,0.9669128060340881 -101,0.9606471657752991 -102,0.9575116038322449 -103,0.952974259853363 -104,0.942331075668335 -105,0.9423538446426392 -106,0.9423568844795227 -107,0.9392810463905334 -108,0.9391604065895081 -109,0.9343206882476807 -110,0.9258409738540649 -111,0.921166718006134 -112,0.9135589599609375 -113,0.9161087274551392 -114,0.9164585471153259 -115,0.9155129790306091 -116,0.9138450026512146 -117,0.9119644165039062 -118,0.9103713631629944 -119,0.9100748300552368 -120,0.9116419553756714 -121,0.9210381507873535 -122,0.9156776666641235 -123,0.9132989048957825 -124,0.9118529558181763 -125,0.9108927845954895 -126,0.9102438688278198 -127,0.9097983837127686 -128,0.9094816446304321 -129,0.9092020392417908 -130,0.9088860750198364 -131,0.908478856086731 -132,0.9079270362854004 -133,0.9071828722953796 -134,0.9062591195106506 -135,0.9050728678703308 -136,0.9034750461578369 -137,0.9010005593299866 -138,0.887041449546814 -139,0.9013856649398804 -140,0.9168360233306885 -141,0.9427462816238403 -142,0.9478737711906433 -143,0.9507958292961121 -144,0.942295253276825 -145,0.9219983220100403 -146,0.9136631488800049 -147,0.9094576239585876 -148,0.900894284248352 -149,0.899848997592926 -150,0.9003382325172424 -151,0.9004459381103516 -152,0.8999486565589905 -153,0.898679256439209 -154,0.8967409729957581 -155,0.8940804600715637 -156,0.8907378315925598 -157,0.886354923248291 -158,0.8880074620246887 -159,0.8880243301391602 -160,0.8859974145889282 -161,0.8882144689559937 -162,0.8854153752326965 -163,0.885582447052002 -164,0.8882405161857605 -165,0.8896467685699463 -166,0.890792727470398 -167,0.8917164206504822 -168,0.892328143119812 -169,0.8926825523376465 -170,0.8928200602531433 -171,0.8927658200263977 -172,0.8925427794456482 -173,0.892168402671814 -174,0.8916534185409546 -175,0.890998125076294 -176,0.8901914358139038 -177,0.8891977071762085 -178,0.8879451155662537 -179,0.8862634897232056 -180,0.8843821287155151 -181,0.8907813429832458 -182,0.8926663994789124 -183,0.8933366537094116 -184,0.8934126496315002 -185,0.8930718898773193 -186,0.892387866973877 -187,0.8913995027542114 -188,0.890184223651886 -189,0.888691246509552 -190,0.8867635726928711 -191,0.8839964270591736 -192,0.8782017230987549 -193,0.8893021941184998 -194,0.8902336359024048 -195,0.886479377746582 -196,0.8850558400154114 -197,0.884833812713623 -198,0.8846578598022461 -199,0.8843613266944885 -200,0.8838837146759033 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.010/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.010/training_config.txt deleted file mode 100644 index 270caf3..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.010/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 
- -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.010/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.010/training_log.csv deleted file mode 100644 index 5e8f8e6..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,1.8480628728866577 -2,1.8185044527053833 -3,1.7725857496261597 -4,1.735030174255371 -5,1.7053040266036987 -6,1.6269400119781494 -7,1.6178289651870728 -8,1.5322095155715942 -9,1.5370680093765259 -10,1.4201815128326416 -11,1.315647006034851 -12,1.3183456659317017 -13,1.2813183069229126 -14,1.3079766035079956 -15,1.2956151962280273 -16,1.2746869325637817 -17,1.2602437734603882 -18,1.2647876739501953 -19,1.238991141319275 -20,1.1998498439788818 -21,1.2137700319290161 -22,1.196675181388855 -23,1.195374608039856 -24,1.2005276679992676 -25,1.1892303228378296 -26,1.182427167892456 -27,1.17698335647583 -28,1.1799912452697754 -29,1.1444636583328247 
-30,1.137837529182434 -31,1.1397284269332886 -32,1.14256751537323 -33,1.137217402458191 -34,1.127004861831665 -35,1.1054250001907349 -36,1.0978484153747559 -37,1.1133322715759277 -38,1.1156095266342163 -39,1.113214373588562 -40,1.1084290742874146 -41,1.1069104671478271 -42,1.1051576137542725 -43,1.106768012046814 -44,1.1068885326385498 -45,1.107832431793213 -46,1.1095679998397827 -47,1.1084870100021362 -48,1.09788179397583 -49,1.0990406274795532 -50,1.0984830856323242 -51,1.0984010696411133 -52,1.0988922119140625 -53,1.0990231037139893 -54,1.0973769426345825 -55,1.0942778587341309 -56,1.0905643701553345 -57,1.0867974758148193 -58,1.0828523635864258 -59,1.083754062652588 -60,1.0820213556289673 -61,1.0784454345703125 -62,1.0791774988174438 -63,1.0795692205429077 -64,1.0786442756652832 -65,1.0763179063796997 -66,1.0724692344665527 -67,1.0703165531158447 -68,1.0699915885925293 -69,1.068562626838684 -70,1.0664578676223755 -71,1.0621308088302612 -72,1.0648081302642822 -73,1.0635632276535034 -74,1.0631946325302124 -75,1.0620787143707275 -76,1.0592564344406128 -77,1.0554202795028687 -78,1.0483583211898804 -79,1.056619644165039 -80,1.0594831705093384 -81,1.058300256729126 -82,1.0524516105651855 -83,1.0505101680755615 -84,1.0499863624572754 -85,1.0497217178344727 -86,1.0485725402832031 -87,1.0467795133590698 -88,1.0448229312896729 -89,1.0425599813461304 -90,1.0406429767608643 -91,1.0416743755340576 -92,1.0389667749404907 -93,1.035481333732605 -94,1.0337969064712524 -95,1.0328482389450073 -96,1.0313291549682617 -97,1.0306916236877441 -98,1.0282880067825317 -99,1.0259089469909668 -100,1.0269004106521606 -101,1.0250335931777954 -102,1.022233486175537 -103,1.0170472860336304 -104,1.0168288946151733 -105,1.012195110321045 -106,1.0120176076889038 -107,1.0089884996414185 -108,1.008192777633667 -109,1.002773642539978 -110,1.018885850906372 -111,1.0034902095794678 -112,1.0290842056274414 -113,1.0231386423110962 -114,1.0308831930160522 -115,1.0368003845214844 -116,1.0135849714279175 -117,1.0281944274902344 -118,1.036202311515808 -119,1.0483651161193848 -120,1.0524685382843018 -121,1.0386406183242798 -122,1.0423277616500854 -123,1.0499117374420166 -124,1.0368125438690186 -125,1.030737042427063 -126,1.02531898021698 -127,1.0233097076416016 -128,1.0207014083862305 -129,1.0110702514648438 -130,1.0036646127700806 -131,0.9987952709197998 -132,1.000577449798584 -133,0.9902662634849548 -134,0.988551139831543 -135,0.9819716811180115 -136,0.9776078462600708 -137,0.9784602522850037 -138,0.9741770029067993 -139,0.9643430113792419 -140,0.9682835340499878 -141,0.963522732257843 -142,0.9720661640167236 -143,0.9634385704994202 -144,0.9647436738014221 -145,0.9595259428024292 -146,0.9521517157554626 -147,0.9527463316917419 -148,0.951144814491272 -149,0.9450331926345825 -150,0.9161795377731323 -151,0.8821196556091309 -152,0.8629743456840515 -153,0.8642869591712952 -154,0.9235614538192749 -155,0.9127792716026306 -156,0.8552144765853882 -157,0.8608366250991821 -158,0.9389497637748718 -159,0.8739004731178284 -160,0.9647873044013977 -161,1.0556628704071045 -162,1.1865242719650269 -163,1.237725853919983 -164,1.3555099964141846 -165,1.3683624267578125 -166,1.3146251440048218 -167,1.2563284635543823 -168,1.208544373512268 -169,1.218557596206665 -170,1.167697787284851 -171,1.1542084217071533 -172,1.1617659330368042 -173,1.1568642854690552 -174,1.151565670967102 -175,1.1439534425735474 -176,1.1663696765899658 -177,1.171202540397644 -178,1.1590908765792847 -179,1.145645022392273 -180,1.1350568532943726 -181,1.0928446054458618 
-182,1.0478639602661133 -183,1.0405311584472656 -184,1.0586942434310913 -185,1.072749376296997 -186,1.054288387298584 -187,1.0462446212768555 -188,1.0469465255737305 -189,1.063978910446167 -190,1.0527852773666382 -191,1.0448230504989624 -192,1.0417927503585815 -193,1.0135117769241333 -194,1.0323446989059448 -195,1.0423322916030884 -196,1.0271028280258179 -197,1.0323522090911865 -198,1.0523772239685059 -199,1.0554817914962769 -200,1.0523655414581299 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.020/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.020/training_config.txt deleted file mode 100644 index c2abdcc..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.020/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.020/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.020/training_log.csv deleted file mode 100644 index 9e1a9c2..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,1.8651926517486572 -2,1.8003525733947754 -3,1.6788967847824097 -4,1.5421165227890015 -5,1.4197158813476562 -6,1.3351329565048218 -7,1.3128677606582642 -8,1.285691261291504 -9,1.2854944467544556 -10,1.2713528871536255 -11,1.2668802738189697 -12,1.2617377042770386 -13,1.2545208930969238 -14,1.263350486755371 -15,1.2467063665390015 -16,1.2158869504928589 -17,1.1874785423278809 -18,1.1419878005981445 -19,1.1347390413284302 -20,1.0473723411560059 -21,0.9928276538848877 -22,0.9783355593681335 -23,0.9249412417411804 -24,0.8917446732521057 -25,0.8609322905540466 -26,0.8583627343177795 -27,0.8538973927497864 -28,0.8563206791877747 -29,0.8220697641372681 -30,0.7355301380157471 -31,0.7146391868591309 -32,0.7022900581359863 -33,0.6756631731987 -34,0.6435810327529907 -35,0.6325421929359436 -36,0.616088330745697 -37,0.5960723757743835 -38,0.575426459312439 -39,0.5549689531326294 -40,0.5498839616775513 -41,0.5458338856697083 -42,0.5436569452285767 -43,0.5297523736953735 -44,0.5430587530136108 -45,0.5439453125 -46,0.5492159128189087 -47,0.5542446970939636 -48,0.5558467507362366 -49,0.5558803081512451 -50,0.5530300140380859 -51,0.558372974395752 -52,0.5576729774475098 -53,0.5601630806922913 -54,0.5596457123756409 -55,0.5569971203804016 -56,0.5516653656959534 -57,0.5496507287025452 -58,0.5449784994125366 -59,0.5395414233207703 -60,0.5287690162658691 -61,0.5426141023635864 -62,0.5569456219673157 -63,0.5510543584823608 -64,0.5510596632957458 -65,0.5480853915214539 -66,0.5435360670089722 -67,0.5382755398750305 -68,0.5309158563613892 -69,0.5325626134872437 -70,0.5298891663551331 -71,0.5199649930000305 
-72,0.5037056803703308 -73,0.49224987626075745 -74,0.4889849126338959 -75,0.5031132102012634 -76,0.5063619613647461 -77,0.5072569847106934 -78,0.5069547891616821 -79,0.5059442520141602 -80,0.5041998624801636 -81,0.5013865232467651 -82,0.49426424503326416 -83,0.4933996796607971 -84,0.48739320039749146 -85,0.5037341713905334 -86,0.5060588121414185 -87,0.5017239451408386 -88,0.5072561502456665 -89,0.5150969624519348 -90,0.5192365050315857 -91,0.5218858122825623 -92,0.5312889814376831 -93,0.5311004519462585 -94,0.5269703269004822 -95,0.5185866355895996 -96,0.5010254383087158 -97,0.49926042556762695 -98,0.4977790415287018 -99,0.4973812699317932 -100,0.49707528948783875 -101,0.49471163749694824 -102,0.4909311532974243 -103,0.4854736030101776 -104,0.4796683192253113 -105,0.4762103259563446 -106,0.4707246422767639 -107,0.46305882930755615 -108,0.4488268792629242 -109,0.4401516318321228 -110,0.43082737922668457 -111,0.42328760027885437 -112,0.4190102517604828 -113,0.40992799401283264 -114,0.4132516086101532 -115,0.4077712297439575 -116,0.4019029140472412 -117,0.4017505645751953 -118,0.40964627265930176 -119,0.4105607569217682 -120,0.40860235691070557 -121,0.40349146723747253 -122,0.4053412675857544 -123,0.40539026260375977 -124,0.4028768539428711 -125,0.40028417110443115 -126,0.3941408097743988 -127,0.38074618577957153 -128,0.3894459009170532 -129,0.386313259601593 -130,0.38527464866638184 -131,0.38456961512565613 -132,0.38352876901626587 -133,0.3820718228816986 -134,0.3802006244659424 -135,0.37794217467308044 -136,0.3753252327442169 -137,0.3723653554916382 -138,0.36906492710113525 -139,0.3654167354106903 -140,0.3613947331905365 -141,0.3569050431251526 -142,0.3512086570262909 -143,0.35030055046081543 -144,0.34750694036483765 -145,0.34415173530578613 -146,0.34053608775138855 -147,0.3367072641849518 -148,0.3326511085033417 -149,0.3283970057964325 -150,0.3239903151988983 -151,0.3194323480129242 -152,0.31473562121391296 -153,0.3099016547203064 -154,0.30487969517707825 -155,0.2995544970035553 -156,0.2933278977870941 -157,0.2846088409423828 -158,0.2808651328086853 -159,0.27683356404304504 -160,0.27191653847694397 -161,0.2655576169490814 -162,0.2552741467952728 -163,0.24664629995822906 -164,0.24367010593414307 -165,0.24110029637813568 -166,0.2379550188779831 -167,0.23401890695095062 -168,0.22924283146858215 -169,0.22372141480445862 -170,0.21765899658203125 -171,0.2109994888305664 -172,0.20406776666641235 -173,0.19795534014701843 -174,0.19563618302345276 -175,0.19350016117095947 -176,0.1867869794368744 -177,0.18639592826366425 -178,0.189001202583313 -179,0.18816827237606049 -180,0.18363825976848602 -181,0.1812162548303604 -182,0.18029694259166718 -183,0.17506644129753113 -184,0.17325718700885773 -185,0.17227210104465485 -186,0.16992023587226868 -187,0.1676417589187622 -188,0.16978563368320465 -189,0.16709531843662262 -190,0.16464148461818695 -191,0.1647021323442459 -192,0.1624182015657425 -193,0.1579800248146057 -194,0.1579122245311737 -195,0.15783549845218658 -196,0.15564192831516266 -197,0.157798632979393 -198,0.1558426171541214 -199,0.1534789502620697 -200,0.15428173542022705 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.040/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.040/training_config.txt deleted file mode 100644 index 9b205ca..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.040/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.040/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.040/training_log.csv deleted file mode 100644 index 3fd4ec2..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,1.8705934286117554 -2,1.6390407085418701 -3,1.4585373401641846 -4,1.3435144424438477 -5,1.2390574216842651 -6,1.2589421272277832 -7,1.3696359395980835 -8,1.4065340757369995 -9,1.4431397914886475 -10,1.439719557762146 -11,1.4957139492034912 -12,1.5774035453796387 -13,1.6898908615112305 -14,1.6827139854431152 -15,1.6487287282943726 -16,1.5352592468261719 -17,1.4561623334884644 -18,1.3731167316436768 -19,1.2146157026290894 -20,1.1928009986877441 -21,1.132797122001648 -22,1.1307064294815063 
-23,1.0993565320968628 -24,1.0458788871765137 -25,1.003597378730774 -26,0.9697384834289551 -27,0.9337178468704224 -28,0.8800227046012878 -29,0.8466416001319885 -30,0.8241704702377319 -31,0.7714559435844421 -32,0.7242811322212219 -33,0.6960938572883606 -34,0.6506131291389465 -35,0.5980530381202698 -36,0.5935397744178772 -37,0.6015126705169678 -38,0.6264485716819763 -39,0.6131971478462219 -40,0.6050447225570679 -41,0.5850446820259094 -42,0.5780426263809204 -43,0.5701805353164673 -44,0.5513374209403992 -45,0.5701315402984619 -46,0.5677663087844849 -47,0.579094648361206 -48,0.5818157196044922 -49,0.5787236094474792 -50,0.5789393186569214 -51,0.574056088924408 -52,0.5603704452514648 -53,0.5441048741340637 -54,0.5289670825004578 -55,0.520210325717926 -56,0.5098549127578735 -57,0.49766504764556885 -58,0.4872570037841797 -59,0.4789099395275116 -60,0.47023335099220276 -61,0.4607013463973999 -62,0.45019397139549255 -63,0.43821507692337036 -64,0.4236103594303131 -65,0.4056887924671173 -66,0.3866305947303772 -67,0.3700045645236969 -68,0.3581759035587311 -69,0.3516818881034851 -70,0.3467928469181061 -71,0.3424711525440216 -72,0.3383679986000061 -73,0.3338594138622284 -74,0.3285166323184967 -75,0.3224027752876282 -76,0.3157321512699127 -77,0.3087090849876404 -78,0.30142855644226074 -79,0.2940685451030731 -80,0.28638115525245667 -81,0.27878326177597046 -82,0.272779643535614 -83,0.26758572459220886 -84,0.262003093957901 -85,0.2551746368408203 -86,0.24639707803726196 -87,0.2349006086587906 -88,0.21964524686336517 -89,0.20431599020957947 -90,0.19539035856723785 -91,0.21145470440387726 -92,0.21438530087471008 -93,0.207488551735878 -94,0.1902855485677719 -95,0.18834534287452698 -96,0.19579488039016724 -97,0.1944667100906372 -98,0.1879204958677292 -99,0.1817481517791748 -100,0.17245127260684967 -101,0.18443258106708527 -102,0.19054485857486725 -103,0.1855224221944809 -104,0.1706194281578064 -105,0.1700890064239502 -106,0.1744835525751114 -107,0.1769690215587616 -108,0.1747264564037323 -109,0.16836856305599213 -110,0.15873193740844727 -111,0.16552619636058807 -112,0.16991093754768372 -113,0.16736404597759247 -114,0.15826624631881714 -115,0.15545935928821564 -116,0.1601111739873886 -117,0.15998290479183197 -118,0.1558336317539215 -119,0.1484426110982895 -120,0.15539324283599854 -121,0.1560482680797577 -122,0.14797545969486237 -123,0.1509070247411728 -124,0.1550908237695694 -125,0.15577849745750427 -126,0.15342527627944946 -127,0.14946764707565308 -128,0.14412899315357208 -129,0.14333808422088623 -130,0.14264962077140808 -131,0.13327644765377045 -132,0.1393127292394638 -133,0.14497502148151398 -134,0.14195933938026428 -135,0.1305050104856491 -136,0.13132812082767487 -137,0.13502570986747742 -138,0.13762998580932617 -139,0.13868890702724457 -140,0.1363411247730255 -141,0.13172754645347595 -142,0.13063736259937286 -143,0.12675058841705322 -144,0.12328766286373138 -145,0.12728755176067352 -146,0.12612400949001312 -147,0.1233312338590622 -148,0.12191344052553177 -149,0.1198710948228836 -150,0.12074577063322067 -151,0.12246661633253098 -152,0.125389963388443 -153,0.12358179688453674 -154,0.11622975766658783 -155,0.12377426028251648 -156,0.12494248151779175 -157,0.12119058519601822 -158,0.11827944964170456 -159,0.11812242865562439 -160,0.11570281535387039 -161,0.1194693073630333 -162,0.11811305582523346 -163,0.11773455142974854 -164,0.11780719459056854 -165,0.1150461882352829 -166,0.11732670664787292 -167,0.12028084695339203 -168,0.11738742142915726 -169,0.11810377240180969 -170,0.11660023033618927 
-171,0.12073767930269241 -172,0.11378799378871918 -173,0.11880719661712646 -174,0.12235258519649506 -175,0.11981131136417389 -176,0.11383027583360672 -177,0.11390209197998047 -178,0.11592932045459747 -179,0.10979122668504715 -180,0.11405137181282043 -181,0.11232911050319672 -182,0.11357150226831436 -183,0.11238120496273041 -184,0.11174316704273224 -185,0.11288361996412277 -186,0.1151224821805954 -187,0.11336541175842285 -188,0.11246705055236816 -189,0.10892560333013535 -190,0.11359561234712601 -191,0.11509538441896439 -192,0.11207632720470428 -193,0.10800876468420029 -194,0.11225061863660812 -195,0.11382081359624863 -196,0.11452718824148178 -197,0.11032157391309738 -198,0.10486261546611786 -199,0.10848502814769745 -200,0.10699199885129929 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.050/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.050/training_config.txt deleted file mode 100644 index d82270d..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.050/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.050/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.050/training_log.csv deleted file mode 100644 index 30be276..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,1.8596184253692627 -2,1.6467440128326416 -3,1.4145634174346924 -4,1.2399075031280518 -5,1.1170485019683838 -6,1.1041563749313354 -7,0.9985504150390625 -8,0.9077017307281494 -9,0.813700258731842 -10,0.7232066988945007 -11,0.6589105129241943 -12,0.6433457136154175 -13,0.577139675617218 -14,0.6246100068092346 -15,0.6484180092811584 -16,0.6446887850761414 -17,0.628659188747406 -18,0.6133161783218384 -19,0.5903587937355042 -20,0.5612714886665344 -21,0.5430153012275696 -22,0.5250557661056519 -23,0.5040762424468994 -24,0.48236319422721863 -25,0.4617631435394287 -26,0.4358871579170227 -27,0.4056369364261627 -28,0.37493714690208435 -29,0.3474310040473938 -30,0.33475956320762634 -31,0.32567068934440613 -32,0.31809207797050476 -33,0.30856558680534363 -34,0.2969925105571747 -35,0.28592124581336975 -36,0.27435997128486633 -37,0.2601704001426697 -38,0.2443944215774536 -39,0.25203976035118103 -40,0.2536557912826538 -41,0.24968275427818298 -42,0.24575825035572052 -43,0.24031290411949158 -44,0.23305177688598633 -45,0.22325648367404938 -46,0.20870226621627808 -47,0.2042279988527298 -48,0.20488649606704712 -49,0.20231181383132935 -50,0.19771011173725128 -51,0.19175133109092712 -52,0.1887359768152237 -53,0.18290835618972778 -54,0.17477066814899445 -55,0.1698659062385559 -56,0.15878570079803467 -57,0.15050622820854187 -58,0.14143812656402588 -59,0.13228069245815277 -60,0.1387520581483841 -61,0.13660618662834167 -62,0.11997316777706146 -63,0.12417486310005188 -64,0.13135558366775513 -65,0.13363970816135406 -66,0.12994664907455444 -67,0.12434573471546173 -68,0.11489182710647583 -69,0.12139252573251724 
-70,0.12565748393535614 -71,0.11989923566579819 -72,0.11564741283655167 -73,0.1213817223906517 -74,0.12523433566093445 -75,0.12409070134162903 -76,0.11864706128835678 -77,0.11123371124267578 -78,0.11495896428823471 -79,0.11654839664697647 -80,0.11253546923398972 -81,0.11119299381971359 -82,0.11070513725280762 -83,0.11231711506843567 -84,0.10825176537036896 -85,0.10896671563386917 -86,0.11020135879516602 -87,0.10878696292638779 -88,0.10540381819009781 -89,0.10811722278594971 -90,0.10775401443243027 -91,0.10520685464143753 -92,0.10809510201215744 -93,0.10761498659849167 -94,0.10888301581144333 -95,0.11114568263292313 -96,0.10454007983207703 -97,0.11457058042287827 -98,0.11749535799026489 -99,0.11290185898542404 -100,0.10525845736265182 -101,0.11371724307537079 -102,0.1140233501791954 -103,0.10508836805820465 -104,0.11275450885295868 -105,0.11689513176679611 -106,0.1147480383515358 -107,0.10722128301858902 -108,0.10677102953195572 -109,0.10663563013076782 -110,0.10184840112924576 -111,0.10934065282344818 -112,0.11167016625404358 -113,0.10720178484916687 -114,0.10324915498495102 -115,0.10623130947351456 -116,0.10121551156044006 -117,0.10889812558889389 -118,0.11071881651878357 -119,0.1068255752325058 -120,0.10119032114744186 -121,0.10746882855892181 -122,0.10286656767129898 -123,0.10525774955749512 -124,0.10906063765287399 -125,0.10585078597068787 -126,0.10031527280807495 -127,0.1105409637093544 -128,0.10998909920454025 -129,0.10103318095207214 -130,0.11013401299715042 -131,0.11558306217193604 -132,0.11215720325708389 -133,0.10330914705991745 -134,0.10667447000741959 -135,0.11204326897859573 -136,0.10611442476511002 -137,0.10406582057476044 -138,0.10982528328895569 -139,0.11078028380870819 -140,0.10537870973348618 -141,0.10097931325435638 -142,0.1108366996049881 -143,0.11105689406394958 -144,0.10112817585468292 -145,0.10687650740146637 -146,0.11290159076452255 -147,0.11217980086803436 -148,0.10557916015386581 -149,0.1009758710861206 -150,0.1070541962981224 -151,0.10316796600818634 -152,0.10136676579713821 -153,0.10388010740280151 -154,0.10084794461727142 -155,0.10267218202352524 -156,0.1030174195766449 -157,0.09989195317029953 -158,0.1027272492647171 -159,0.10061115026473999 -160,0.10084287077188492 -161,0.10295846313238144 -162,0.0973602831363678 -163,0.10553844273090363 -164,0.10711964964866638 -165,0.10176438093185425 -166,0.10129737108945847 -167,0.10413678735494614 -168,0.09779787063598633 -169,0.10440012067556381 -170,0.10669764131307602 -171,0.10186230391263962 -172,0.09961390495300293 -173,0.10179643332958221 -174,0.09796247631311417 -175,0.10010028630495071 -176,0.09774670749902725 -177,0.09825404733419418 -178,0.0977540835738182 -179,0.10042988508939743 -180,0.0986432209610939 -181,0.10153574496507645 -182,0.09792302548885345 -183,0.10084318369626999 -184,0.10023228824138641 -185,0.09725795686244965 -186,0.0988616868853569 -187,0.09951948374509811 -188,0.10080526769161224 -189,0.09824127703905106 -190,0.09909270703792572 -191,0.0964895561337471 -192,0.09989884495735168 -193,0.09937859326601028 -194,0.09812002629041672 -195,0.09767968952655792 -196,0.09709044545888901 -197,0.09580190479755402 -198,0.0971299484372139 -199,0.0955745279788971 -200,0.09588037431240082 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.080/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.080/training_config.txt deleted file mode 100644 index f0af771..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.080/training_config.txt +++ /dev/null @@ -1,63 
+0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.080/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.080/training_log.csv deleted file mode 100644 index b853e74..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,1.8508031368255615 -2,1.6170424222946167 -3,1.5052655935287476 -4,1.4385077953338623 -5,1.4311357736587524 -6,1.3610601425170898 -7,1.3996180295944214 -8,1.5684289932250977 -9,1.3778791427612305 -10,1.2310574054718018 -11,1.1894855499267578 -12,1.1746118068695068 -13,1.2037361860275269 -14,1.1799893379211426 -15,1.1367074251174927 -16,1.1082683801651 -17,1.0493431091308594 -18,1.0343012809753418 -19,0.9345860481262207 -20,0.8299703598022461 -21,0.788470983505249 
-22,0.7621355652809143 -23,0.7388635277748108 -24,0.740685760974884 -25,0.7360385060310364 -26,0.7257197499275208 -27,0.7137860655784607 -28,0.7012507319450378 -29,0.6884874701499939 -30,0.673256516456604 -31,0.6455525159835815 -32,0.6135529279708862 -33,0.5862502455711365 -34,0.5709480047225952 -35,0.5567715764045715 -36,0.5429749488830566 -37,0.5285758972167969 -38,0.5106999278068542 -39,0.48197221755981445 -40,0.4701685905456543 -41,0.4594450891017914 -42,0.4493996798992157 -43,0.43933141231536865 -44,0.42897894978523254 -45,0.41786542534828186 -46,0.40587669610977173 -47,0.3932194709777832 -48,0.3800389766693115 -49,0.36642783880233765 -50,0.35232603549957275 -51,0.3377308249473572 -52,0.32251808047294617 -53,0.3070387542247772 -54,0.291841596364975 -55,0.27810266613960266 -56,0.26513150334358215 -57,0.25269031524658203 -58,0.2387533038854599 -59,0.2214806079864502 -60,0.2350601851940155 -61,0.245763897895813 -62,0.24832983314990997 -63,0.24445776641368866 -64,0.23456254601478577 -65,0.2180497795343399 -66,0.19305419921875 -67,0.19627973437309265 -68,0.20111292600631714 -69,0.2049034684896469 -70,0.2054097056388855 -71,0.20375679433345795 -72,0.20117059350013733 -73,0.19641166925430298 -74,0.19034716486930847 -75,0.18399624526500702 -76,0.17601585388183594 -77,0.1678687334060669 -78,0.15724414587020874 -79,0.16095520555973053 -80,0.16395409405231476 -81,0.16026344895362854 -82,0.15377798676490784 -83,0.14479345083236694 -84,0.14772677421569824 -85,0.14301460981369019 -86,0.1382867991924286 -87,0.13338999450206757 -88,0.13546890020370483 -89,0.13206103444099426 -90,0.12032310664653778 -91,0.11717873066663742 -92,0.11797944456338882 -93,0.11664453148841858 -94,0.1158246323466301 -95,0.11588329821825027 -96,0.1124568060040474 -97,0.11198785156011581 -98,0.11078092455863953 -99,0.10972914844751358 -100,0.11233580112457275 -101,0.11288077384233475 -102,0.11254851520061493 -103,0.11214874684810638 -104,0.10948334634304047 -105,0.11001824587583542 -106,0.11075184494256973 -107,0.10941869765520096 -108,0.11492319405078888 -109,0.10606729239225388 -110,0.1079387292265892 -111,0.10687524080276489 -112,0.10624658316373825 -113,0.10850951820611954 -114,0.10313007980585098 -115,0.10810500383377075 -116,0.10780362039804459 -117,0.10537567734718323 -118,0.10648850351572037 -119,0.10474301129579544 -120,0.10188230127096176 -121,0.10236422717571259 -122,0.10574930161237717 -123,0.10321599990129471 -124,0.10432039946317673 -125,0.10277511179447174 -126,0.10673799365758896 -127,0.1033582016825676 -128,0.10160773247480392 -129,0.10189880430698395 -130,0.1064537987112999 -131,0.10578428953886032 -132,0.09837435930967331 -133,0.09982939064502716 -134,0.10218734294176102 -135,0.10069563239812851 -136,0.09994645416736603 -137,0.09768103808164597 -138,0.10232040286064148 -139,0.09886439144611359 -140,0.10283936560153961 -141,0.1020825207233429 -142,0.09961959719657898 -143,0.0994151160120964 -144,0.09966465085744858 -145,0.10180656611919403 -146,0.09698513150215149 -147,0.10148254036903381 -148,0.09843011945486069 -149,0.09767884761095047 -150,0.09925665706396103 -151,0.09655877947807312 -152,0.10059181600809097 -153,0.09948968142271042 -154,0.09871815145015717 -155,0.09633298218250275 -156,0.09769836068153381 -157,0.10073215514421463 -158,0.0971699208021164 -159,0.09996403008699417 -160,0.0983169823884964 -161,0.09870674461126328 -162,0.09651603549718857 -163,0.09733490645885468 -164,0.10059484094381332 -165,0.10408269613981247 -166,0.09891610592603683 -167,0.10129617899656296 -168,0.10138839483261108 
-169,0.1008201315999031 -170,0.10606298595666885 -171,0.10273776948451996 -172,0.09681348502635956 -173,0.0974218100309372 -174,0.09605611115694046 -175,0.09904111921787262 -176,0.0955996960401535 -177,0.09742483496665955 -178,0.09892896562814713 -179,0.09971421957015991 -180,0.09701409190893173 -181,0.09597620368003845 -182,0.0973844900727272 -183,0.09622281044721603 -184,0.09662728756666183 -185,0.09576655924320221 -186,0.09854107350111008 -187,0.09683003276586533 -188,0.09607810527086258 -189,0.09510739147663116 -190,0.09413450956344604 -191,0.09678595513105392 -192,0.09452401101589203 -193,0.09378516674041748 -194,0.0955805853009224 -195,0.09574058651924133 -196,0.09549655765295029 -197,0.0947699099779129 -198,0.09692013263702393 -199,0.09260193258523941 -200,0.09669338911771774 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.100/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.100/training_config.txt deleted file mode 100644 index d2e24c2..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.100/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.100/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.100/training_log.csv deleted file mode 100644 index f776273..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,1.971460223197937 -2,1.5707331895828247 -3,1.275785207748413 -4,1.2048256397247314 -5,1.1472859382629395 -6,1.158279538154602 -7,1.6053547859191895 -8,1.2172683477401733 -9,0.996049165725708 -10,0.7450193762779236 -11,0.6586880683898926 -12,0.635782778263092 -13,0.5623727440834045 -14,0.5317357778549194 -15,0.4825260043144226 -16,0.4354988932609558 -17,0.40916708111763 -18,0.3848198652267456 -19,0.3655678331851959 -20,0.3272549510002136 -21,0.302945613861084 -22,0.2926166355609894 -23,0.28136903047561646 -24,0.2677591145038605 -25,0.25251588225364685 -26,0.23555251955986023 -27,0.21742302179336548 -28,0.20154991745948792 -29,0.18863169848918915 -30,0.1717826873064041 -31,0.18544216454029083 -32,0.19426320493221283 -33,0.19289137423038483 -34,0.18411186337471008 -35,0.16749975085258484 -36,0.1520385444164276 -37,0.1569991111755371 -38,0.15665030479431152 -39,0.15069140493869781 -40,0.1412702351808548 -41,0.13036158680915833 -42,0.12568682432174683 -43,0.11487093567848206 -44,0.11725998669862747 -45,0.12110626697540283 -46,0.12286796420812607 -47,0.11249810457229614 -48,0.11407017707824707 -49,0.1181727722287178 -50,0.1152905747294426 -51,0.11110531538724899 -52,0.11019396781921387 -53,0.10732453316450119 -54,0.10763263702392578 -55,0.111946702003479 -56,0.1082882508635521 -57,0.10624286532402039 -58,0.10183171927928925 -59,0.10696036368608475 -60,0.10369617491960526 -61,0.10759176313877106 -62,0.10779772698879242 -63,0.10666004568338394 -64,0.10170859843492508 -65,0.10368605703115463 -66,0.10157693922519684 -67,0.1034676730632782 -68,0.10317620635032654 -69,0.10396774858236313 
-70,0.10245947539806366 -71,0.10360579192638397 -72,0.10248862206935883 -73,0.10057877749204636 -74,0.10591591894626617 -75,0.10091625899076462 -76,0.1041598916053772 -77,0.09946724027395248 -78,0.10192893445491791 -79,0.09929917007684708 -80,0.09859347343444824 -81,0.09819959849119186 -82,0.0994873121380806 -83,0.09862888604402542 -84,0.09883268922567368 -85,0.09923933446407318 -86,0.0967024564743042 -87,0.10295601189136505 -88,0.09800073504447937 -89,0.10022491961717606 -90,0.09751978516578674 -91,0.10374382138252258 -92,0.1020435020327568 -93,0.10199536383152008 -94,0.10803662240505219 -95,0.10395278036594391 -96,0.10016409307718277 -97,0.10094888508319855 -98,0.09999467432498932 -99,0.10309597849845886 -100,0.09936009347438812 -101,0.10006589442491531 -102,0.10298898816108704 -103,0.09749306738376617 -104,0.1034945622086525 -105,0.09997953474521637 -106,0.09996910393238068 -107,0.09954693913459778 -108,0.1025562658905983 -109,0.10280762612819672 -110,0.09568265825510025 -111,0.10298668593168259 -112,0.09885192662477493 -113,0.10084254294633865 -114,0.10164019465446472 -115,0.09756939113140106 -116,0.09572528302669525 -117,0.09869164973497391 -118,0.0955725908279419 -119,0.10031907260417938 -120,0.10242868959903717 -121,0.1003948226571083 -122,0.09455016255378723 -123,0.10098681598901749 -124,0.0994764193892479 -125,0.10098171979188919 -126,0.09799042344093323 -127,0.10663551837205887 -128,0.10355408489704132 -129,0.09614644944667816 -130,0.10448084771633148 -131,0.1005932167172432 -132,0.10095144808292389 -133,0.10037609934806824 -134,0.09780906140804291 -135,0.09737768769264221 -136,0.09740575402975082 -137,0.09700320661067963 -138,0.09501434862613678 -139,0.0972013920545578 -140,0.09712409228086472 -141,0.09319622069597244 -142,0.09314166754484177 -143,0.09770579636096954 -144,0.09497182071208954 -145,0.09656210243701935 -146,0.097537100315094 -147,0.09811373800039291 -148,0.09396566450595856 -149,0.09468196332454681 -150,0.09828142821788788 -151,0.09359858930110931 -152,0.10421356558799744 -153,0.10621830821037292 -154,0.09679389744997025 -155,0.10652557760477066 -156,0.11105475574731827 -157,0.10405147820711136 -158,0.10133150964975357 -159,0.1115182414650917 -160,0.11058347672224045 -161,0.10059285908937454 -162,0.10589500516653061 -163,0.11086428165435791 -164,0.1025807335972786 -165,0.1010584607720375 -166,0.10723169147968292 -167,0.10068590193986893 -168,0.10390149056911469 -169,0.10307339578866959 -170,0.10358438640832901 -171,0.09684570133686066 -172,0.09953024238348007 -173,0.10200686752796173 -174,0.09683443605899811 -175,0.10423499345779419 -176,0.10104907304048538 -177,0.09547659754753113 -178,0.10133860260248184 -179,0.09655236452817917 -180,0.09929327666759491 -181,0.10577850788831711 -182,0.09776747971773148 -183,0.10267031192779541 -184,0.10934282839298248 -185,0.1029939204454422 -186,0.09652956575155258 -187,0.11073652654886246 -188,0.11181341111660004 -189,0.09876082092523575 -190,0.104742631316185 -191,0.11398954689502716 -192,0.1100102961063385 -193,0.09676419198513031 -194,0.10287981480360031 -195,0.10697205364704132 -196,0.09864886850118637 -197,0.09878673404455185 -198,0.10379110276699066 -199,0.09827166050672531 -200,0.09715278446674347 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.125/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.125/training_config.txt deleted file mode 100644 index 1088719..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.125/training_config.txt +++ /dev/null @@ -1,63 
+0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.125/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.125/training_log.csv deleted file mode 100644 index d922143..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,2.16554594039917 -2,1.4535876512527466 -3,1.4060248136520386 -4,1.061143159866333 -5,0.8785852193832397 -6,0.7999621629714966 -7,0.6975253820419312 -8,0.6119063496589661 -9,0.5858175754547119 -10,0.5661748051643372 -11,0.5983917117118835 -12,0.5490202903747559 -13,0.5597102046012878 -14,0.4963439404964447 -15,0.44360414147377014 -16,0.4373575448989868 -17,0.4330054521560669 -18,0.4251129627227783 -19,0.41379478573799133 -20,0.40136009454727173 -21,0.39138343930244446 
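The training-case values above are float32 renderings of simple fractions of π (for example, 0.5235987901687622 is float32(π/6) and 6.2831854820251465 is float32(2π)). A short sketch, assuming nothing beyond math and torch, confirms the correspondence:

import math
import torch

# Each printed value matches a constant in the Training Cases lists verbatim.
for name, value in [
    ("pi/6", math.pi / 6),        # 0.5235987901687622
    ("pi/4", math.pi / 4),        # 0.7853981852531433
    ("pi/3", math.pi / 3),        # 1.0471975803375244
    ("pi/2", math.pi / 2),        # 1.5707963705062866
    ("2*pi/3", 2 * math.pi / 3),  # 2.094395160675049
    ("pi", math.pi),              # 3.1415927410125732
    ("2*pi", 2 * math.pi),        # 6.2831854820251465
    ("4*pi", 4 * math.pi),        # 12.566370964050293
]:
    print(name, torch.tensor(value, dtype=torch.float32).item())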
-22,0.3815820813179016 -23,0.3714381754398346 -24,0.3602408468723297 -25,0.34806546568870544 -26,0.3390403389930725 -27,0.3283770978450775 -28,0.31482037901878357 -29,0.2993558347225189 -30,0.27942541241645813 -31,0.26080796122550964 -32,0.2530541718006134 -33,0.23599688708782196 -34,0.2019229233264923 -35,0.18096886575222015 -36,0.15496385097503662 -37,0.16713593900203705 -38,0.16194570064544678 -39,0.1832474172115326 -40,0.18654754757881165 -41,0.1775195598602295 -42,0.1548694670200348 -43,0.15928077697753906 -44,0.15630114078521729 -45,0.14889532327651978 -46,0.13373778760433197 -47,0.12231014668941498 -48,0.11744161695241928 -49,0.1250714212656021 -50,0.11994614452123642 -51,0.12592342495918274 -52,0.12986019253730774 -53,0.12588196992874146 -54,0.11587710678577423 -55,0.12107348442077637 -56,0.11751866340637207 -57,0.1080549955368042 -58,0.11278088390827179 -59,0.11099045723676682 -60,0.10923025757074356 -61,0.11158283799886703 -62,0.11464311182498932 -63,0.10720949620008469 -64,0.10350152850151062 -65,0.10474246740341187 -66,0.10962922871112823 -67,0.10710713267326355 -68,0.10147511214017868 -69,0.11204396933317184 -70,0.11667881160974503 -71,0.11276715993881226 -72,0.09972326457500458 -73,0.11076869070529938 -74,0.1177838146686554 -75,0.11564413458108902 -76,0.10201199352741241 -77,0.109420545399189 -78,0.11792167276144028 -79,0.12206849455833435 -80,0.11779851466417313 -81,0.1058272123336792 -82,0.10670103132724762 -83,0.11046163737773895 -84,0.10834237188100815 -85,0.11249902099370956 -86,0.10973551869392395 -87,0.09972872585058212 -88,0.11062312126159668 -89,0.11337348818778992 -90,0.10081633180379868 -91,0.10922379791736603 -92,0.1181265190243721 -93,0.11568322777748108 -94,0.10229312628507614 -95,0.10790979862213135 -96,0.11673188954591751 -97,0.10986588150262833 -98,0.09587622433900833 -99,0.1056496798992157 -100,0.10412929207086563 -101,0.09857441484928131 -102,0.10210390388965607 -103,0.09749582409858704 -104,0.09925328195095062 -105,0.09428895264863968 -106,0.10355199128389359 -107,0.10571538656949997 -108,0.0988263413310051 -109,0.10272328555583954 -110,0.10415948927402496 -111,0.10168983042240143 -112,0.1008463054895401 -113,0.09789279848337173 -114,0.10369802266359329 -115,0.10437346994876862 -116,0.09397940337657928 -117,0.09773766249418259 -118,0.10353966057300568 -119,0.10187394917011261 -120,0.09395699948072433 -121,0.09468284249305725 -122,0.09554002434015274 -123,0.0950496718287468 -124,0.09827736765146255 -125,0.09615998715162277 -126,0.09719542413949966 -127,0.0955263301730156 -128,0.09851492941379547 -129,0.0929332748055458 -130,0.09278400987386703 -131,0.10034725815057755 -132,0.09630651026964188 -133,0.10047667473554611 -134,0.10302861034870148 -135,0.0948958620429039 -136,0.10598596930503845 -137,0.1071249321103096 -138,0.09364013373851776 -139,0.1083187386393547 -140,0.11365540325641632 -141,0.1078903004527092 -142,0.09337149560451508 -143,0.10347460955381393 -144,0.1014544814825058 -145,0.10398516058921814 -146,0.10527759045362473 -147,0.10471650958061218 -148,0.09751418232917786 -149,0.1026236042380333 -150,0.09423916786909103 -151,0.10156986862421036 -152,0.10510269552469254 -153,0.09814497828483582 -154,0.09984026849269867 -155,0.09884907305240631 -156,0.09582902491092682 -157,0.10284924507141113 -158,0.10212580114603043 -159,0.10177437961101532 -160,0.1052335575222969 -161,0.10456234216690063 -162,0.09762795269489288 -163,0.10133380442857742 -164,0.1002858430147171 -165,0.09098638594150543 -166,0.09494469314813614 -167,0.09288464486598969 
-168,0.09210673719644547 -169,0.0899854525923729 -170,0.09030532836914062 -171,0.0905441865324974 -172,0.0899382084608078 -173,0.09377647936344147 -174,0.09074326604604721 -175,0.0897378921508789 -176,0.09186830371618271 -177,0.09535672515630722 -178,0.0932765081524849 -179,0.09793616831302643 -180,0.0975261703133583 -181,0.09163177013397217 -182,0.09774909913539886 -183,0.09771828353404999 -184,0.0965346246957779 -185,0.10053494572639465 -186,0.09073419868946075 -187,0.10697105526924133 -188,0.11362023651599884 -189,0.1061999723315239 -190,0.1013917550444603 -191,0.10335566848516464 -192,0.10019730031490326 -193,0.09304024279117584 -194,0.09548790752887726 -195,0.09208797663450241 -196,0.09163075685501099 -197,0.09496580064296722 -198,0.09362119436264038 -199,0.08913746476173401 -200,0.09202319383621216 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.160/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.160/training_config.txt deleted file mode 100644 index 2ed71fe..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.160/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.160/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.160/training_log.csv deleted file mode 100644 index 59cc153..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,2.6028337478637695 -2,1.2584651708602905 -3,1.4440919160842896 -4,0.8752118349075317 -5,0.5926865935325623 -6,0.5725212693214417 -7,0.5454012155532837 -8,0.4375882148742676 -9,0.3516681492328644 -10,0.33308619260787964 -11,0.30197465419769287 -12,0.27206307649612427 -13,0.24843767285346985 -14,0.23496638238430023 -15,0.21845470368862152 -16,0.19701214134693146 -17,0.18893326818943024 -18,0.17494860291481018 -19,0.15294091403484344 -20,0.17562910914421082 -21,0.18175671994686127 -22,0.17526665329933167 -23,0.16015030443668365 -24,0.13377581536769867 -25,0.13435383141040802 -26,0.13525930047035217 -27,0.13480126857757568 -28,0.13273262977600098 -29,0.12506909668445587 -30,0.12853288650512695 -31,0.12443304061889648 -32,0.11453398317098618 -33,0.11238574236631393 -34,0.1207767128944397 -35,0.12193763256072998 -36,0.11423347890377045 -37,0.10755793005228043 -38,0.11119696497917175 -39,0.11337140202522278 -40,0.11866354197263718 -41,0.11921319365501404 -42,0.11172185838222504 -43,0.10249006748199463 -44,0.11658196151256561 -45,0.11751332879066467 -46,0.10582692921161652 -47,0.10747423022985458 -48,0.11690487712621689 -49,0.11820274591445923 -50,0.11120462417602539 -51,0.09778457134962082 -52,0.11349283903837204 -53,0.11916082352399826 -54,0.11198703944683075 -55,0.10286494344472885 -56,0.10840587317943573 -57,0.10927280783653259 -58,0.10584606975317001 -59,0.10409065335988998 -60,0.09750548005104065 -61,0.10153622180223465 -62,0.10526145249605179 -63,0.0995219275355339 -64,0.0978635624051094 -65,0.10082586854696274 -66,0.09826775640249252 -67,0.10107215493917465 -68,0.10220294445753098 
-69,0.09905082732439041 -70,0.09769191592931747 -71,0.09565304219722748 -72,0.0974608063697815 -73,0.10076603293418884 -74,0.098905049264431 -75,0.0970926284790039 -76,0.09738045930862427 -77,0.09843239188194275 -78,0.09899358451366425 -79,0.09437727183103561 -80,0.09488341212272644 -81,0.10062999278306961 -82,0.09843011200428009 -83,0.09759034216403961 -84,0.0953928679227829 -85,0.10134994983673096 -86,0.09570230543613434 -87,0.10504624992609024 -88,0.10653575509786606 -89,0.09728090465068817 -90,0.10675688832998276 -91,0.11190161108970642 -92,0.10375373065471649 -93,0.10372915118932724 -94,0.10840580612421036 -95,0.1045583039522171 -96,0.10682442039251328 -97,0.10732732713222504 -98,0.10070021450519562 -99,0.10354545712471008 -100,0.11254942417144775 -101,0.10891129076480865 -102,0.09395867586135864 -103,0.11178109794855118 -104,0.11828777194023132 -105,0.110916368663311 -106,0.10011493414640427 -107,0.10163335502147675 -108,0.10050652176141739 -109,0.10428796708583832 -110,0.10598430782556534 -111,0.09564121067523956 -112,0.09544141590595245 -113,0.10168169438838959 -114,0.09757976979017258 -115,0.09761589765548706 -116,0.09983664751052856 -117,0.09244954586029053 -118,0.10020270198583603 -119,0.1002272367477417 -120,0.09749515354633331 -121,0.09636623412370682 -122,0.09438860416412354 -123,0.09786543250083923 -124,0.09470492601394653 -125,0.09455931186676025 -126,0.09343792498111725 -127,0.09682277590036392 -128,0.09706609696149826 -129,0.09823884814977646 -130,0.0935121402144432 -131,0.09616120904684067 -132,0.09516452997922897 -133,0.09375642240047455 -134,0.0945364460349083 -135,0.09572400897741318 -136,0.09663315862417221 -137,0.09816137701272964 -138,0.0946086198091507 -139,0.09651614725589752 -140,0.10290069878101349 -141,0.09912382066249847 -142,0.09807882457971573 -143,0.0973707064986229 -144,0.10356242209672928 -145,0.09965068101882935 -146,0.09760972112417221 -147,0.10412725061178207 -148,0.10358794778585434 -149,0.09712661802768707 -150,0.09974928945302963 -151,0.10122578591108322 -152,0.09191842377185822 -153,0.10435247421264648 -154,0.10649453103542328 -155,0.09926817566156387 -156,0.10034176707267761 -157,0.1067875549197197 -158,0.1071954220533371 -159,0.09595800936222076 -160,0.10597215592861176 -161,0.11199209839105606 -162,0.10991697013378143 -163,0.09939344227313995 -164,0.10047241300344467 -165,0.09638845175504684 -166,0.1016681119799614 -167,0.10017506033182144 -168,0.09373372048139572 -169,0.09463240951299667 -170,0.09678251296281815 -171,0.09432921558618546 -172,0.09438535571098328 -173,0.0991816520690918 -174,0.09729290008544922 -175,0.09678637981414795 -176,0.09729845076799393 -177,0.09602730721235275 -178,0.0997198298573494 -179,0.0979858860373497 -180,0.09476854652166367 -181,0.10424729436635971 -182,0.09836473315954208 -183,0.09946447610855103 -184,0.10488688200712204 -185,0.09782969206571579 -186,0.0959291085600853 -187,0.09490073472261429 -188,0.09440254420042038 -189,0.0935053899884224 -190,0.09778135269880295 -191,0.10193949937820435 -192,0.09307732433080673 -193,0.1062115803360939 -194,0.11212459951639175 -195,0.10417107492685318 -196,0.09526075422763824 -197,0.11146137863397598 -198,0.11387103796005249 -199,0.10010989755392075 -200,0.10012011975049973 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.200/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.200/training_config.txt deleted file mode 100644 index 468f8c1..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.200/training_config.txt +++ 
/dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.200/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.200/training_log.csv deleted file mode 100644 index bbe2c08..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,3.2541069984436035 -2,1.3552881479263306 -3,1.9490134716033936 -4,1.92193603515625 -5,0.9746360778808594 -6,0.8448134064674377 -7,0.7856918573379517 -8,0.7900827527046204 -9,0.7794777154922485 -10,0.7744824886322021 -11,0.7545056939125061 -12,0.7206372022628784 -13,0.6949772834777832 -14,0.5892934799194336 -15,0.5445763468742371 -16,0.5251467227935791 -17,0.4988878667354584 -18,0.464356005191803 -19,0.43106335401535034 -20,0.37951868772506714 
-21,0.3480062484741211 -22,0.3323124945163727 -23,0.31524184346199036 -24,0.29971033334732056 -25,0.28195178508758545 -26,0.264346718788147 -27,0.2444770336151123 -28,0.22728495299816132 -29,0.21817094087600708 -30,0.20642049610614777 -31,0.1937236338853836 -32,0.18119162321090698 -33,0.16936415433883667 -34,0.16039197146892548 -35,0.15225204825401306 -36,0.14234980940818787 -37,0.13079175353050232 -38,0.13265009224414825 -39,0.12555299699306488 -40,0.11062345653772354 -41,0.11474234610795975 -42,0.11778780072927475 -43,0.11704905331134796 -44,0.11671354621648788 -45,0.11557044833898544 -46,0.11223077774047852 -47,0.10641630738973618 -48,0.11382496356964111 -49,0.1134658232331276 -50,0.11983146518468857 -51,0.11646918207406998 -52,0.11083061248064041 -53,0.10707595944404602 -54,0.10889419913291931 -55,0.10723599791526794 -56,0.10521277040243149 -57,0.10183388739824295 -58,0.10360142588615417 -59,0.10472101718187332 -60,0.10570669919252396 -61,0.10150454193353653 -62,0.10062894970178604 -63,0.10281836986541748 -64,0.10067571699619293 -65,0.10319040715694427 -66,0.1013190895318985 -67,0.10354326665401459 -68,0.10182122141122818 -69,0.1013173833489418 -70,0.10676874220371246 -71,0.10882505029439926 -72,0.10908666998147964 -73,0.1040494292974472 -74,0.09826351702213287 -75,0.10471084713935852 -76,0.10643206536769867 -77,0.10923157632350922 -78,0.10269986093044281 -79,0.10369569063186646 -80,0.10594715178012848 -81,0.10700450837612152 -82,0.11258158832788467 -83,0.10602851212024689 -84,0.10643970966339111 -85,0.10149654746055603 -86,0.10008581727743149 -87,0.10019279271364212 -88,0.10324496030807495 -89,0.10410816222429276 -90,0.09911321103572845 -91,0.10082022100687027 -92,0.10339260846376419 -93,0.10130443423986435 -94,0.10186894237995148 -95,0.10248716920614243 -96,0.10003169625997543 -97,0.10013485699892044 -98,0.1045713797211647 -99,0.11061345785856247 -100,0.10802452266216278 -101,0.10722552239894867 -102,0.10487789660692215 -103,0.10154304653406143 -104,0.10245975852012634 -105,0.1085372120141983 -106,0.10340040922164917 -107,0.10256744921207428 -108,0.10215215384960175 -109,0.0996871367096901 -110,0.10037384927272797 -111,0.09660571068525314 -112,0.09647268801927567 -113,0.09385503828525543 -114,0.09980318695306778 -115,0.10319269448518753 -116,0.10242065787315369 -117,0.09489742666482925 -118,0.09535764157772064 -119,0.09920305758714676 -120,0.09691732376813889 -121,0.09596848487854004 -122,0.09683885425329208 -123,0.09532777220010757 -124,0.0950867161154747 -125,0.09688068181276321 -126,0.09402070939540863 -127,0.09445978701114655 -128,0.09640147536993027 -129,0.09346933662891388 -130,0.09433306008577347 -131,0.10160060971975327 -132,0.09872404485940933 -133,0.09540398418903351 -134,0.09432242065668106 -135,0.09933347254991531 -136,0.09540429711341858 -137,0.0927199125289917 -138,0.09744085371494293 -139,0.09440527111291885 -140,0.09738367795944214 -141,0.09646172821521759 -142,0.10078586637973785 -143,0.0987769216299057 -144,0.10495167970657349 -145,0.10062381625175476 -146,0.10133534669876099 -147,0.10188481956720352 -148,0.09508290886878967 -149,0.09216561913490295 -150,0.10390609502792358 -151,0.10370273888111115 -152,0.09167375415563583 -153,0.09736558794975281 -154,0.09692282974720001 -155,0.10374977439641953 -156,0.09947685152292252 -157,0.10629148036241531 -158,0.1048392802476883 -159,0.0955318808555603 -160,0.11443132907152176 -161,0.11743517965078354 -162,0.10566315054893494 -163,0.10092402994632721 -164,0.10615076869726181 -165,0.09790243208408356 -166,0.09966712445020676 
-167,0.09279190003871918 -168,0.10549132525920868 -169,0.10398318618535995 -170,0.10017354786396027 -171,0.10096964985132217 -172,0.09344098716974258 -173,0.09671387076377869 -174,0.09755025058984756 -175,0.0981118306517601 -176,0.09189312160015106 -177,0.09998297691345215 -178,0.0978756844997406 -179,0.09385056048631668 -180,0.09702567756175995 -181,0.09528551995754242 -182,0.10224708169698715 -183,0.10208096355199814 -184,0.09779128432273865 -185,0.10339121520519257 -186,0.10595231503248215 -187,0.09518357366323471 -188,0.09920845180749893 -189,0.09878981858491898 -190,0.09526604413986206 -191,0.09159325063228607 -192,0.09452378004789352 -193,0.09422746300697327 -194,0.09635140746831894 -195,0.09856609255075455 -196,0.0917048454284668 -197,0.10061654448509216 -198,0.09494754672050476 -199,0.10384641587734222 -200,0.10516966879367828 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.250/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.250/training_config.txt deleted file mode 100644 index 15aeb6b..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.250/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.250/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.250/training_log.csv deleted file mode 100644 index becedf8..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,3.9134297370910645 -2,0.8450782895088196 -3,0.6536256074905396 -4,0.5870351791381836 -5,3.449690580368042 -6,0.48182061314582825 -7,0.38593384623527527 -8,0.31183135509490967 -9,0.27523764967918396 -10,0.2509806454181671 -11,0.24361905455589294 -12,0.23253728449344635 -13,0.22091542184352875 -14,0.2014215886592865 -15,0.1908843219280243 -16,0.18463589251041412 -17,0.154606893658638 -18,0.15316274762153625 -19,0.15000107884407043 -20,0.13675165176391602 -21,0.1462252140045166 -22,0.14681290090084076 -23,0.1350386142730713 -24,0.13823597133159637 -25,0.13233402371406555 -26,0.1292247474193573 -27,0.1280447393655777 -28,0.11809337139129639 -29,0.12208396196365356 -30,0.1196112260222435 -31,0.12505879998207092 -32,0.11863168329000473 -33,0.12418683618307114 -34,0.12347286939620972 -35,0.11561224609613419 -36,0.12311239540576935 -37,0.12808021903038025 -38,0.12804079055786133 -39,0.1199151948094368 -40,0.12863124907016754 -41,0.12865760922431946 -42,0.11723971366882324 -43,0.11661583185195923 -44,0.11690245568752289 -45,0.10954372584819794 -46,0.11295301467180252 -47,0.11174008995294571 -48,0.11090720444917679 -49,0.11085251718759537 -50,0.10533107817173004 -51,0.1133509948849678 -52,0.11030896753072739 -53,0.10791604965925217 -54,0.10271089524030685 -55,0.10286767780780792 -56,0.10014671832323074 -57,0.10086055099964142 -58,0.1005275547504425 -59,0.09850568324327469 -60,0.1000528484582901 -61,0.10252909362316132 -62,0.09910368174314499 -63,0.10342099517583847 -64,0.10463110357522964 -65,0.10396233946084976 -66,0.09811489284038544 -67,0.1114761084318161 -68,0.11339377611875534 
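The loss_fn wrapper deleted in each config reduces a whole batch of simulated trajectories to one scalar: it slices theta (column 0) and desired_theta (column 3) out of the state tensor and averages the base loss over every case and time point. A minimal sketch, using a zero trajectory and a stand-in absolute-error base loss rather than the actual pendulum simulation:

import torch

def loss_fn(state_traj, base_loss_fn):
    theta = state_traj[:, :, 0]          # [batch_size, t_points]
    desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
    return torch.mean(base_loss_fn(theta, desired_theta))

# Stand-in batch: 22 training cases x 1000 time points x 4 state columns
# ([theta, omega, alpha, desired_theta]), matching "Time Span: 0 to 10, Points: 1000".
traj = torch.zeros(22, 1000, 4)
print(loss_fn(traj, lambda t, d: torch.abs(t - d)))  # tensor(0.) for a perfect trajectory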
-69,0.10262172669172287 -70,0.11134752631187439 -71,0.11563824862241745 -72,0.10642771422863007 -73,0.10065687447786331 -74,0.10418086498975754 -75,0.09846632927656174 -76,0.10601095110177994 -77,0.10088388621807098 -78,0.10350693762302399 -79,0.10588043928146362 -80,0.10010340809822083 -81,0.0991295725107193 -82,0.09995726495981216 -83,0.10030070692300797 -84,0.09811645746231079 -85,0.09782852232456207 -86,0.10148286819458008 -87,0.09632334113121033 -88,0.10751179605722427 -89,0.10393799096345901 -90,0.1015743762254715 -91,0.11626176536083221 -92,0.11867474019527435 -93,0.10823825001716614 -94,0.10001374781131744 -95,0.10586606711149216 -96,0.09671430289745331 -97,0.10215543210506439 -98,0.09959636628627777 -99,0.10457201302051544 -100,0.10601096600294113 -101,0.10532834380865097 -102,0.0966339036822319 -103,0.09879279881715775 -104,0.09808791428804398 -105,0.10152474790811539 -106,0.09785660356283188 -107,0.10504039376974106 -108,0.10268913954496384 -109,0.09952773153781891 -110,0.09997034817934036 -111,0.09718932956457138 -112,0.09673593193292618 -113,0.10082380473613739 -114,0.09694697707891464 -115,0.10219412297010422 -116,0.10309947282075882 -117,0.0984499529004097 -118,0.09948255866765976 -119,0.09256471693515778 -120,0.10410230606794357 -121,0.10445129126310349 -122,0.10008300840854645 -123,0.10261118412017822 -124,0.0996381863951683 -125,0.09842357039451599 -126,0.10059868544340134 -127,0.09822680801153183 -128,0.09733613580465317 -129,0.09678957611322403 -130,0.09932135790586472 -131,0.10610977560281754 -132,0.10221178829669952 -133,0.09343461692333221 -134,0.0964534655213356 -135,0.09921489655971527 -136,0.0985889807343483 -137,0.09847985953092575 -138,0.10070225596427917 -139,0.09384508430957794 -140,0.10153892636299133 -141,0.10676875710487366 -142,0.10448198020458221 -143,0.09624264389276505 -144,0.09929586201906204 -145,0.10219074785709381 -146,0.09767715632915497 -147,0.0976516529917717 -148,0.09858334064483643 -149,0.09359670430421829 -150,0.09078704565763474 -151,0.09431919455528259 -152,0.0929071232676506 -153,0.09034241735935211 -154,0.09192484617233276 -155,0.09624770283699036 -156,0.09329461306333542 -157,0.10036657750606537 -158,0.0978233739733696 -159,0.09499877691268921 -160,0.09676036238670349 -161,0.09100078791379929 -162,0.09969815611839294 -163,0.09549427032470703 -164,0.09179084002971649 -165,0.09071896225214005 -166,0.09704161435365677 -167,0.09435638785362244 -168,0.09621702134609222 -169,0.0956043154001236 -170,0.09058965742588043 -171,0.10405415296554565 -172,0.10096089541912079 -173,0.09309534728527069 -174,0.10855048149824142 -175,0.11365014314651489 -176,0.10628426820039749 -177,0.09019317477941513 -178,0.09374497085809708 -179,0.09102635085582733 -180,0.08920332789421082 -181,0.08811776340007782 -182,0.08932483196258545 -183,0.08748263120651245 -184,0.08733230829238892 -185,0.0882270485162735 -186,0.0920003354549408 -187,0.08786150813102722 -188,0.0854221060872078 -189,0.08826985955238342 -190,0.09303843230009079 -191,0.08883175253868103 -192,0.09127519279718399 -193,0.0947996973991394 -194,0.09160743653774261 -195,0.09220576286315918 -196,0.08567097783088684 -197,0.0948219895362854 -198,0.09186982363462448 -199,0.08831455558538437 -200,0.09480004757642746 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.300/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.300/training_config.txt deleted file mode 100644 index f68a3b9..0000000 --- 
a/training/base_loss_learning_rate_sweep/one_third/lr_0.300/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.300/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.300/training_log.csv deleted file mode 100644 index c5845e1..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.300/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,4.178016185760498 -2,0.8789786696434021 -3,0.6365265250205994 -4,0.438367635011673 -5,0.32522350549697876 -6,0.2491561621427536 -7,0.21662987768650055 -8,0.22175873816013336 -9,0.2157270461320877 -10,0.19481737911701202 -11,0.1633155196905136 -12,0.1347752958536148 -13,0.13666178286075592 -14,0.13696123659610748 -15,0.12566715478897095 -16,0.11765944957733154 
-17,0.1148461103439331 -18,0.11978412419557571 -19,0.11696705967187881 -20,0.11129703372716904 -21,0.11038848012685776 -22,0.10693936794996262 -23,0.10428313910961151 -24,0.10773470997810364 -25,0.10605864971876144 -26,0.10585833340883255 -27,0.10459895431995392 -28,0.10538353025913239 -29,0.10461226850748062 -30,0.10548081248998642 -31,0.10716811567544937 -32,0.10058221220970154 -33,0.10565704107284546 -34,0.10630404204130173 -35,0.10113389045000076 -36,0.1054990217089653 -37,0.10901812463998795 -38,0.10193167626857758 -39,0.10499173402786255 -40,0.10304944962263107 -41,0.10130932927131653 -42,0.10168450325727463 -43,0.10324443876743317 -44,0.1085558757185936 -45,0.10013894736766815 -46,0.1058366596698761 -47,0.10985212028026581 -48,0.10363252460956573 -49,0.10911524295806885 -50,0.11080092191696167 -51,0.101373590528965 -52,0.10343115031719208 -53,0.10248314589262009 -54,0.10092777758836746 -55,0.10450608283281326 -56,0.1010473221540451 -57,0.10073487460613251 -58,0.09969169646501541 -59,0.09465596079826355 -60,0.09943626821041107 -61,0.10116804391145706 -62,0.09571944922208786 -63,0.10270506888628006 -64,0.10565834492444992 -65,0.09770994633436203 -66,0.10716649144887924 -67,0.1097840815782547 -68,0.10760132968425751 -69,0.09959184378385544 -70,0.10318014025688171 -71,0.09896846115589142 -72,0.10483387112617493 -73,0.10080667585134506 -74,0.1033550351858139 -75,0.10448921471834183 -76,0.09478913992643356 -77,0.10239669680595398 -78,0.1072569489479065 -79,0.10027901083230972 -80,0.09971082955598831 -81,0.10510140657424927 -82,0.102565698325634 -83,0.0965764969587326 -84,0.10514698177576065 -85,0.10441753268241882 -86,0.09434524923563004 -87,0.09875912219285965 -88,0.09830083698034286 -89,0.0964641198515892 -90,0.09348327666521072 -91,0.09986621141433716 -92,0.09615255147218704 -93,0.10166729241609573 -94,0.09692557156085968 -95,0.10062672942876816 -96,0.0996231883764267 -97,0.09675323963165283 -98,0.09745920449495316 -99,0.09792202711105347 -100,0.09507950395345688 -101,0.09272212535142899 -102,0.09553908556699753 -103,0.09924421459436417 -104,0.09760372340679169 -105,0.09349873661994934 -106,0.09183681756258011 -107,0.09794018417596817 -108,0.09522400051355362 -109,0.09713931381702423 -110,0.09106742590665817 -111,0.09322084486484528 -112,0.09438377618789673 -113,0.09434007853269577 -114,0.09550006687641144 -115,0.0977480486035347 -116,0.09516777843236923 -117,0.09812891483306885 -118,0.0977107360959053 -119,0.0972486138343811 -120,0.0965273529291153 -121,0.09149205684661865 -122,0.09403827786445618 -123,0.09593590348958969 -124,0.09494443237781525 -125,0.09288410097360611 -126,0.0995023101568222 -127,0.0964822992682457 -128,0.09875856339931488 -129,0.10042233020067215 -130,0.08999130874872208 -131,0.10039541125297546 -132,0.1009959727525711 -133,0.09409710019826889 -134,0.09936787188053131 -135,0.10282648354768753 -136,0.09791600704193115 -137,0.09168770909309387 -138,0.09713076800107956 -139,0.0903262197971344 -140,0.10024760663509369 -141,0.0984795019030571 -142,0.09390735626220703 -143,0.10308448225259781 -144,0.09706952422857285 -145,0.10187044739723206 -146,0.10424842685461044 -147,0.09490061551332474 -148,0.1041354313492775 -149,0.1060112938284874 -150,0.09614337980747223 -151,0.09300319850444794 -152,0.09263159334659576 -153,0.08899598568677902 -154,0.09685862809419632 -155,0.09190846979618073 -156,0.09470514208078384 -157,0.10222326964139938 -158,0.10154667496681213 -159,0.09307754039764404 -160,0.10178136080503464 -161,0.10947969555854797 -162,0.10405220091342926 
-163,0.09422551095485687 -164,0.10742849856615067 -165,0.10665803402662277 -166,0.09355279058218002 -167,0.10602375119924545 -168,0.10947925597429276 -169,0.09934698045253754 -170,0.0993715152144432 -171,0.10317707061767578 -172,0.09618435055017471 -173,0.1005251482129097 -174,0.09687920659780502 -175,0.09984610229730606 -176,0.10902302712202072 -177,0.10669258236885071 -178,0.09224008768796921 -179,0.10681009292602539 -180,0.11129014194011688 -181,0.102207250893116 -182,0.09152745455503464 -183,0.09948021173477173 -184,0.09216038137674332 -185,0.09882944822311401 -186,0.10022272169589996 -187,0.08891452848911285 -188,0.1066652163863182 -189,0.11156947165727615 -190,0.09977429360151291 -191,0.09745156019926071 -192,0.10506093502044678 -193,0.09696757048368454 -194,0.09550710022449493 -195,0.10025417059659958 -196,0.0927700400352478 -197,0.09147705137729645 -198,0.09030349552631378 -199,0.09421152621507645 -200,0.09564574062824249 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.400/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.400/training_config.txt deleted file mode 100644 index c2a27d1..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.400/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.400/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.400/training_log.csv deleted file mode 100644 index 978fb11..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.400/training_log.csv +++ /dev/null @@ -1,141 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,4.6483283042907715 -2,1.1269198656082153 -3,0.5819365382194519 -4,0.48168689012527466 -5,0.35402533411979675 -6,0.3504738211631775 -7,0.2699557840824127 -8,0.25700727105140686 -9,0.25336363911628723 -10,0.2479134351015091 -11,0.2278718501329422 -12,0.22678817808628082 -13,0.21264994144439697 -14,0.19402414560317993 -15,0.18736830353736877 -16,0.16268423199653625 -17,0.14878220856189728 -18,0.13118939101696014 -19,0.1225244551897049 -20,0.11991097778081894 -21,0.12889881432056427 -22,0.12865015864372253 -23,0.12323620915412903 -24,0.11201728135347366 -25,0.11726406961679459 -26,0.11332643032073975 -27,0.1283477395772934 -28,0.131832554936409 -29,0.12546968460083008 -30,0.11576668173074722 -31,0.11424200236797333 -32,0.10376783460378647 -33,0.11948661506175995 -34,0.1161920353770256 -35,0.11793404072523117 -36,0.11605028063058853 -37,0.11013708263635635 -38,0.10276077687740326 -39,0.10648642480373383 -40,0.10630063712596893 -41,0.10374182462692261 -42,0.10182969272136688 -43,0.10264753550291061 -44,0.10522059351205826 -45,0.10677195340394974 -46,0.10092084109783173 -47,0.10747268050909042 -48,0.10376329720020294 -49,0.1163349375128746 -50,0.11705780774354935 -51,0.10507187992334366 -52,0.10565996915102005 -53,0.09884271770715714 -54,0.09982894361019135 -55,0.10087364912033081 -56,0.10248152166604996 -57,0.10291056334972382 -58,0.09875848144292831 -59,0.10852525383234024 -60,0.10145509243011475 -61,0.1077897697687149 -62,0.10974660515785217 -63,0.09874804317951202 -64,0.10682226717472076 -65,0.10424041002988815 -66,0.10153527557849884 -67,0.09939819574356079 -68,0.1019977554678917 
-69,0.10493407398462296 -70,0.10240796208381653 -71,0.1037406474351883 -72,0.1031215712428093 -73,0.09866444766521454 -74,0.1029185801744461 -75,0.09986522048711777 -76,0.1064007356762886 -77,0.1034296452999115 -78,0.10165185481309891 -79,0.09997744858264923 -80,0.1054043397307396 -81,0.10708656162023544 -82,0.10548502206802368 -83,0.10375874489545822 -84,0.10092362016439438 -85,0.10119402408599854 -86,0.1041315495967865 -87,0.10199040174484253 -88,0.10224969685077667 -89,0.09902117401361465 -90,0.10526007413864136 -91,0.10337108373641968 -92,0.0994352251291275 -93,0.09580700844526291 -94,0.10630907118320465 -95,0.10657791048288345 -96,0.09980570524930954 -97,0.1024014800786972 -98,0.0985577404499054 -99,0.09853903949260712 -100,0.09783310443162918 -101,0.09615540504455566 -102,0.09932804852724075 -103,0.09991557151079178 -104,0.0965900868177414 -105,0.09790921211242676 -106,0.09992071241140366 -107,0.0985126718878746 -108,0.09792061150074005 -109,0.09424657374620438 -110,0.09596216678619385 -111,0.09939030557870865 -112,0.09612778574228287 -113,0.09569163620471954 -114,0.09990882128477097 -115,0.09804987162351608 -116,0.10599932074546814 -117,0.10796266049146652 -118,0.10356137901544571 -119,0.10386920720338821 -120,0.1050972044467926 -121,0.10054168105125427 -122,0.11399451643228531 -123,0.11511507630348206 -124,0.09805388003587723 -125,0.11687231808900833 -126,0.2540358304977417 -127,0.9708822965621948 -128,1.336992621421814 -129,0.8928899765014648 -130,0.678084671497345 -131,0.49207261204719543 -132,1.2614070177078247 -133,5.050410270690918 -134,5.0506110191345215 -135,5.0506110191345215 -136,5.0506110191345215 -137,5.0506110191345215 -138,5.0506110191345215 -139,5.0506110191345215 diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.500/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.500/training_config.txt deleted file mode 100644 index fa01ec5..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.500/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.500/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.500/training_log.csv deleted file mode 100644 index 01376f0..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.500/training_log.csv +++ /dev/null @@ -1,18 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,4.99691915512085 -2,3.7129878997802734 -3,0.7247946262359619 -4,2.2251815795898438 -5,0.5506382584571838 -6,0.4661947786808014 -7,0.37438029050827026 -8,0.2815653681755066 -9,0.2682429552078247 -10,0.2530883252620697 -11,0.21322181820869446 -12,0.2850916385650635 -13,nan -14,nan -15,nan -16,nan diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.600/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.600/training_config.txt deleted file mode 100644 index 3a52de2..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.600/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - 
desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.600/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.600/training_log.csv deleted file mode 100644 index 15c8dc2..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.600/training_log.csv +++ /dev/null @@ -1,14 +0,0 @@ -Epoch,Loss -0,1.8500734567642212 -1,5.054003715515137 -2,4.8853559494018555 -3,2.584973096847534 -4,0.5946675539016724 -5,0.5157726407051086 -6,0.4067072570323944 -7,0.6043218374252319 -8,0.365432471036911 -9,nan -10,nan -11,nan -12,nan diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.700/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.700/training_config.txt deleted file mode 100644 index 0676470..0000000 --- a/training/base_loss_learning_rate_sweep/one_third/lr_0.700/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function Name: one_third -Loss Function Exponent: 0.3333333333333333 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes 
diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.700/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.700/training_config.txt
deleted file mode 100644
index 0676470..0000000
--- a/training/base_loss_learning_rate_sweep/one_third/lr_0.700/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.7
-Weight Decay: 0
-
-Loss Function Name: one_third
-Loss Function Exponent: 0.3333333333333333
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-        loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                           / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.700/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.700/training_log.csv
deleted file mode 100644
index 2016d99..0000000
--- a/training/base_loss_learning_rate_sweep/one_third/lr_0.700/training_log.csv
+++ /dev/null
@@ -1,12 +0,0 @@
-Epoch,Loss
-0,1.8500734567642212
-1,5.0666184425354
-2,5.068101406097412
-3,5.062430381774902
-4,4.813815116882324
-5,3.579498529434204
-6,0.7584800124168396
-7,nan
-8,nan
-9,nan
-10,nan
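Both the lr_0.600 and lr_0.700 runs above diverge: the loss climbs to roughly 5.07 and then degenerates to nan within about ten epochs. The sweep's training loop itself is not part of this diff; purely as an illustrative sketch (train_epoch is a hypothetical callable performing one optimization pass and returning the scalar loss), a guard like the following would stop a diverged run instead of logging nan rows until the epoch budget runs out:

import math

def run_with_divergence_guard(train_epoch, num_epochs):
    history = []
    for epoch in range(num_epochs + 1):
        loss = train_epoch()
        if not math.isfinite(loss):
            # loss reached nan/inf, as in the lr_0.600 and lr_0.700 logs above
            print(f"stopping at epoch {epoch}: loss is no longer finite")
            break
        history.append((epoch, loss))
    return history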
diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.800/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.800/training_config.txt
deleted file mode 100644
index f0245cb..0000000
--- a/training/base_loss_learning_rate_sweep/one_third/lr_0.800/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.8
-Weight Decay: 0
-
-Loss Function Name: one_third
-Loss Function Exponent: 0.3333333333333333
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-        loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                           / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.800/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.800/training_log.csv
deleted file mode 100644
index 8d07b64..0000000
--- a/training/base_loss_learning_rate_sweep/one_third/lr_0.800/training_log.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Epoch,Loss
-0,1.8500734567642212
-1,5.06820011138916
-2,5.0684003829956055
-3,5.0684003829956055
-4,5.0684003829956055
-5,5.0684003829956055
-6,5.0684003829956055
-7,5.0684003829956055
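One detail matters when reading these logs: normalized_loss maps errors in [0, 2π] onto [min_val, 1], but nothing bounds the error itself, and several training cases request desired_theta as large as ±4π, so the loss can start (and stay) above 1. With the one_third settings (exponent 1/3, min_val 0.01, delta 1), an error of 4π already gives:

import math

error, exponent, min_val, delta = 4 * math.pi, 1 / 3, 0.01, 1
ratio = (((error + delta) ** exponent - delta ** exponent)
         / ((2 * math.pi + delta) ** exponent - delta ** exponent))
print(min_val + (1 - min_val) * ratio)  # ~1.47, above the nominal ceiling of 1

A loss frozen at 5.0684003829956055 from epoch 2 onward, as in the lr_0.800 run above, is therefore not the loss saturating at a built-in maximum; it more plausibly means the first large steps left the controller in a state the optimizer never recovered from, with the simulated trajectories pinned far from their targets.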
diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.900/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_0.900/training_config.txt
deleted file mode 100644
index deb5430..0000000
--- a/training/base_loss_learning_rate_sweep/one_third/lr_0.900/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.9
-Weight Decay: 0
-
-Loss Function Name: one_third
-Loss Function Exponent: 0.3333333333333333
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-        loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                           / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_0.900/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_0.900/training_log.csv
deleted file mode 100644
index 35532de..0000000
--- a/training/base_loss_learning_rate_sweep/one_third/lr_0.900/training_log.csv
+++ /dev/null
@@ -1,8 +0,0 @@
-Epoch,Loss
-0,1.8500734567642212
-1,5.0684003829956055
-2,5.0684003829956055
-3,5.0684003829956055
-4,5.0684003829956055
-5,5.0684003829956055
-6,5.0684003829956055
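The wrapper loss_fn shared by these configs reduces a [batch_size, t_points, 4] state trajectory to a scalar: channel 0 carries theta, channel 3 carries desired_theta, and the base loss is averaged over both batch and time. A shape-level sketch, with a random tensor standing in for the ODE-solver output that the real training code would supply:

import math
import torch

def normalized_loss(theta, desired_theta, exponent, min_val=0.01, delta=1):
    error = torch.abs(theta - desired_theta)
    numerator = (error + delta) ** exponent - delta ** exponent
    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
    return min_val + (1 - min_val) * (numerator / denominator)

def base_loss_fn(theta, desired_theta):  # the one_third variant
    return normalized_loss(theta, desired_theta, exponent=1 / 3)

def loss_fn(state_traj):
    theta = state_traj[:, :, 0]          # [batch_size, t_points]
    desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
    return torch.mean(base_loss_fn(theta, desired_theta))

state_traj = torch.randn(22, 1000, 4)  # 22 training cases, 1000 time points
print(loss_fn(state_traj))             # a single 0-dim tensor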
diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_1.000/training_config.txt b/training/base_loss_learning_rate_sweep/one_third/lr_1.000/training_config.txt
deleted file mode 100644
index c232047..0000000
--- a/training/base_loss_learning_rate_sweep/one_third/lr_1.000/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 1
-Weight Decay: 0
-
-Loss Function Name: one_third
-Loss Function Exponent: 0.3333333333333333
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def one_third_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=1/3, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-        loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                           / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/one_third/lr_1.000/training_log.csv b/training/base_loss_learning_rate_sweep/one_third/lr_1.000/training_log.csv
deleted file mode 100644
index 35532de..0000000
--- a/training/base_loss_learning_rate_sweep/one_third/lr_1.000/training_log.csv
+++ /dev/null
@@ -1,8 +0,0 @@
-Epoch,Loss
-0,1.8500734567642212
-1,5.0684003829956055
-2,5.0684003829956055
-3,5.0684003829956055
-4,5.0684003829956055
-5,5.0684003829956055
-6,5.0684003829956055
diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.003/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.003/training_config.txt
deleted file mode 100644
index 33a9d51..0000000
--- a/training/base_loss_learning_rate_sweep/three/lr_0.003/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.0025
-Weight Decay: 0
-
-Loss Function Name: three
-Loss Function Exponent: 3
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the
range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.003/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.003/training_log.csv deleted file mode 100644 index 2da44f5..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.003/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,147.53147888183594 -1,135.874755859375 -2,125.90669250488281 -3,116.13702392578125 -4,107.89566802978516 -5,102.23477172851562 -6,96.45507049560547 -7,90.84486389160156 -8,85.3683090209961 -9,80.17546844482422 -10,74.72090911865234 -11,69.99527740478516 -12,65.65023803710938 -13,59.599464416503906 -14,55.33636474609375 -15,52.471805572509766 -16,49.25043869018555 -17,40.907283782958984 -18,38.2786865234375 -19,37.128414154052734 -20,37.35280227661133 -21,35.4979248046875 -22,31.87309455871582 -23,28.234207153320312 -24,26.84414291381836 -25,23.382259368896484 -26,21.053321838378906 -27,18.320444107055664 -28,18.977874755859375 -29,16.967937469482422 -30,16.38793182373047 -31,14.543279647827148 -32,14.339263916015625 -33,13.680957794189453 -34,13.274813652038574 -35,13.370959281921387 -36,13.555278778076172 -37,13.69521427154541 -38,13.382287979125977 -39,13.55402660369873 -40,13.786392211914062 -41,13.868768692016602 -42,13.829219818115234 -43,13.591170310974121 -44,13.253226280212402 -45,12.964497566223145 -46,12.64905071258545 -47,12.610809326171875 -48,12.430038452148438 -49,12.471795082092285 -50,12.443628311157227 -51,12.391176223754883 -52,12.327741622924805 -53,12.255420684814453 -54,12.182821273803711 -55,12.110440254211426 -56,12.010481834411621 -57,11.92467212677002 -58,11.548406600952148 -59,11.53352165222168 -60,11.60100269317627 -61,11.700538635253906 -62,11.200035095214844 -63,11.20883846282959 -64,11.211002349853516 -65,11.198058128356934 -66,11.167494773864746 -67,11.112109184265137 
-68,11.3766508102417 -69,11.097163200378418 -70,11.105742454528809 -71,11.202339172363281 -72,11.054827690124512 -73,11.198837280273438 -74,11.078231811523438 -75,11.05031967163086 -76,11.541680335998535 -77,11.487818717956543 -78,11.43238639831543 -79,11.372194290161133 -80,11.318127632141113 -81,11.27869987487793 -82,11.254172325134277 -83,11.240710258483887 -84,11.233515739440918 -85,11.224154472351074 -86,11.145919799804688 -87,12.02368450164795 -88,11.817368507385254 -89,11.770654678344727 -90,11.758471488952637 -91,11.778042793273926 -92,11.820446968078613 -93,11.86233139038086 -94,11.389412879943848 -95,11.344338417053223 -96,11.340365409851074 -97,11.342437744140625 -98,11.345563888549805 -99,11.348031997680664 -100,11.34900951385498 -101,11.348021507263184 -102,11.344889640808105 -103,11.339485168457031 -104,11.331954956054688 -105,11.322463035583496 -106,11.311345100402832 -107,11.298992156982422 -108,11.285781860351562 -109,11.27216625213623 -110,11.258506774902344 -111,11.245200157165527 -112,11.23255729675293 -113,11.220892906188965 -114,11.210468292236328 -115,11.201438903808594 -116,11.193922996520996 -117,11.187816619873047 -118,11.182880401611328 -119,11.1785249710083 -120,11.173818588256836 -121,11.167586326599121 -122,11.159406661987305 -123,11.150117874145508 -124,11.141092300415039 -125,11.133037567138672 -126,11.126220703125 -127,11.120523452758789 -128,11.115718841552734 -129,11.111517906188965 -130,11.107647895812988 -131,11.103903770446777 -132,11.100119590759277 -133,11.096195220947266 -134,11.092118263244629 -135,11.08786392211914 -136,11.08354377746582 -137,11.079227447509766 -138,11.075017929077148 -139,11.070939064025879 -140,11.067098617553711 -141,11.063491821289062 -142,11.060075759887695 -143,11.056779861450195 -144,11.053458213806152 -145,11.050047874450684 -146,11.046493530273438 -147,11.042795181274414 -148,11.03903865814209 -149,11.035329818725586 -150,11.03171443939209 -151,11.028162956237793 -152,11.02469253540039 -153,11.021282196044922 -154,11.017882347106934 -155,11.01445484161377 -156,11.01102352142334 -157,11.007546424865723 -158,11.004007339477539 -159,11.000473976135254 -160,10.99692440032959 -161,10.993404388427734 -162,10.989904403686523 -163,10.986432075500488 -164,10.982975959777832 -165,10.979561805725098 -166,10.9761381149292 -167,10.972722053527832 -168,10.969310760498047 -169,10.965900421142578 -170,10.962467193603516 -171,10.959003448486328 -172,10.955574989318848 -173,10.952136039733887 -174,10.948697090148926 -175,10.945280075073242 -176,10.941856384277344 -177,10.938467025756836 -178,10.935068130493164 -179,10.93171501159668 -180,10.928348541259766 -181,10.9249906539917 -182,10.921642303466797 -183,10.918305397033691 -184,10.914999961853027 -185,10.911707878112793 -186,10.908448219299316 -187,10.905191421508789 -188,10.901957511901855 -189,10.898750305175781 -190,10.895569801330566 -191,10.892410278320312 -192,10.889286041259766 -193,10.886202812194824 -194,10.883171081542969 -195,10.880170822143555 -196,10.877195358276367 -197,10.874247550964355 -198,10.871369361877441 -199,10.868518829345703 -200,10.86571979522705 diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.005/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.005/training_config.txt deleted file mode 100644 index e6e99f8..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.005/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.005 -Weight Decay: 0 - -Loss Function Name: three -Loss Function Exponent: 3 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.005/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.005/training_log.csv deleted file mode 100644 index eebaa01..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.005/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,147.53147888183594 -1,124.23001098632812 -2,114.03627014160156 -3,102.58149719238281 -4,90.77410125732422 -5,79.61434173583984 -6,68.37020111083984 -7,57.50529098510742 -8,50.86507797241211 -9,41.8616943359375 -10,33.627410888671875 -11,28.872787475585938 -12,25.60563087463379 -13,24.909931182861328 -14,19.57880973815918 -15,17.10990333557129 -16,14.330360412597656 -17,13.819918632507324 -18,13.090404510498047 -19,12.541306495666504 -20,13.601611137390137 -21,11.004902839660645 -22,10.581703186035156 -23,10.83653736114502 -24,9.561168670654297 
-25,9.233443260192871 -26,9.381519317626953 -27,9.525856971740723 -28,9.770283699035645 -29,9.661660194396973 -30,9.578134536743164 -31,9.536802291870117 -32,9.42701530456543 -33,9.325456619262695 -34,9.383854866027832 -35,9.636186599731445 -36,9.689652442932129 -37,10.343518257141113 -38,8.384734153747559 -39,8.356614112854004 -40,8.103487968444824 -41,8.252459526062012 -42,8.378509521484375 -43,8.29431438446045 -44,8.223125457763672 -45,8.21034049987793 -46,8.156908988952637 -47,8.091829299926758 -48,8.035887718200684 -49,7.837449550628662 -50,7.845506191253662 -51,7.765670299530029 -52,7.374792098999023 -53,7.227788925170898 -54,7.014102458953857 -55,6.708464622497559 -56,6.561244964599609 -57,6.331165313720703 -58,6.193565368652344 -59,6.098486423492432 -60,6.033126354217529 -61,6.070139408111572 -62,6.009886264801025 -63,5.9602203369140625 -64,5.769186973571777 -65,5.75280237197876 -66,5.744164943695068 -67,5.734443187713623 -68,5.722902297973633 -69,5.708559036254883 -70,5.690740585327148 -71,5.668700695037842 -72,5.641605854034424 -73,5.6107988357543945 -74,5.577850818634033 -75,5.5437541007995605 -76,5.509584903717041 -77,5.477203845977783 -78,5.463444709777832 -79,5.428569316864014 -80,5.406351566314697 -81,5.3856706619262695 -82,5.364723205566406 -83,5.342974662780762 -84,5.320249557495117 -85,5.296544551849365 -86,5.272139549255371 -87,5.247554302215576 -88,5.223708152770996 -89,5.201833248138428 -90,5.181774616241455 -91,5.160848617553711 -92,5.142269134521484 -93,5.126183986663818 -94,5.112127780914307 -95,5.1009745597839355 -96,5.094961643218994 -97,5.076032638549805 -98,5.0577263832092285 -99,5.044837474822998 -100,5.032914638519287 -101,5.021239280700684 -102,5.010924339294434 -103,5.001199722290039 -104,4.991443634033203 -105,4.981372833251953 -106,4.9709248542785645 -107,4.960170745849609 -108,4.94928503036499 -109,4.938545227050781 -110,4.928308963775635 -111,4.9190497398376465 -112,4.911226272583008 -113,4.904443264007568 -114,4.896310329437256 -115,4.8867950439453125 -116,4.878091812133789 -117,4.87044620513916 -118,4.863379001617432 -119,4.8564534187316895 -120,4.84940767288208 -121,4.842167854309082 -122,4.834812641143799 -123,4.827580451965332 -124,4.820765495300293 -125,4.814465522766113 -126,4.808195114135742 -127,4.80145263671875 -128,4.79477071762085 -129,4.788590908050537 -130,4.782685279846191 -131,4.776691913604736 -132,4.770471096038818 -133,4.764114856719971 -134,4.757904052734375 -135,4.75194787979126 -136,4.745873928070068 -137,4.739448547363281 -138,4.733028411865234 -139,4.726716041564941 -140,4.720245838165283 -141,4.713444709777832 -142,4.7064056396484375 -143,4.699312686920166 -144,4.692021369934082 -145,4.684265613555908 -146,4.676271438598633 -147,4.668126583099365 -148,4.659569263458252 -149,4.650478363037109 -150,4.640774726867676 -151,4.629734992980957 -152,4.615130424499512 -153,4.580550193786621 -154,5.027108669281006 -155,4.757719039916992 -156,4.7475905418396 -157,4.693667411804199 -158,4.696273326873779 -159,4.751673698425293 -160,4.751963138580322 -161,4.427925109863281 -162,4.180473804473877 -163,4.182477951049805 -164,4.221771717071533 -165,4.270496845245361 -166,4.313242435455322 -167,4.307394027709961 -168,4.159936428070068 -169,4.165384769439697 -170,4.1608662605285645 -171,4.147212982177734 -172,4.127928256988525 -173,4.105685234069824 -174,4.081830024719238 -175,4.057365894317627 -176,4.033291339874268 -177,4.010456085205078 -178,3.989356517791748 -179,3.9701995849609375 -180,3.9530813694000244 -181,3.9416332244873047 
-182,3.932459592819214
-183,3.9153475761413574
-184,3.9058966636657715
-185,3.8965837955474854
-186,3.8872954845428467
-187,3.8779711723327637
-188,3.8686039447784424
-189,3.859208345413208
-190,3.8498082160949707
-191,3.8404321670532227
-192,3.8311023712158203
-193,3.821845054626465
-194,3.812687397003174
-195,3.8036420345306396
-196,3.7947230339050293
-197,3.785936117172241
-198,3.7772748470306396
-199,3.768718719482422
-200,3.760244369506836
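Every training_log.csv removed in this diff shares the same two-column Epoch,Loss layout, with nan rows marking diverged runs. A small reader of the kind a sweep-comparison script might use (illustrative only; the path below is one of the files deleted in this diff):

import csv
import math

def read_log(path):
    # return the finite (epoch, loss) pairs, dropping nan/inf rows
    with open(path) as f:
        rows = [(int(r["Epoch"]), float(r["Loss"])) for r in csv.DictReader(f)]
    return [(epoch, loss) for epoch, loss in rows if math.isfinite(loss)]

history = read_log("training/base_loss_learning_rate_sweep/three/lr_0.005/training_log.csv")
best_epoch, best_loss = min(history, key=lambda row: row[1])
print(best_epoch, best_loss)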
diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.010/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.010/training_config.txt
deleted file mode 100644
index b9757ea..0000000
--- a/training/base_loss_learning_rate_sweep/three/lr_0.010/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.01
-Weight Decay: 0
-
-Loss Function Name: three
-Loss Function Exponent: 3
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-        loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                           / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.010/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.010/training_log.csv
deleted file mode 100644
index bb17e8e..0000000
--- a/training/base_loss_learning_rate_sweep/three/lr_0.010/training_log.csv
+++ /dev/null
@@ -1,202 +0,0 @@
-Epoch,Loss
-0,147.53147888183594
-1,109.24370574951172
-2,85.5264663696289
-3,66.57756805419922
-4,50.69388198852539
-5,36.517330169677734
-6,22.68756866455078
-7,18.7763729095459
-8,13.895877838134766
-9,11.474885940551758
-10,12.26622486114502
-11,9.147258758544922
-12,8.742733001708984
-13,7.613509178161621
-14,6.480799198150635
-15,6.046586513519287
-16,5.657448768615723
-17,5.3297529220581055
-18,4.5756306648254395
-19,4.182915687561035
-20,3.8239517211914062
-21,3.407334566116333
-22,3.118173360824585
-23,2.8219661712646484
-24,2.5441553592681885
-25,2.4098269939422607
-26,2.3352437019348145
-27,2.158601999282837
-28,1.9049497842788696
-29,1.772861361503601
-30,1.653112530708313
-31,1.5475298166275024
-32,1.496275782585144
-33,1.4432001113891602
-34,1.363749623298645
-35,1.3610270023345947
-36,1.3373435735702515
-37,1.2963467836380005
-38,1.22366464138031
-39,1.2211970090866089
-40,1.1863609552383423
-41,1.1634314060211182
-42,1.14309823513031
-43,1.1399548053741455
-44,1.1157571077346802
-45,1.0814701318740845
-46,1.0721626281738281
-47,1.061773657798767
-48,1.0481191873550415
-49,1.0290981531143188
-50,0.9985044598579407
-51,0.9677425622940063
-52,0.9805786609649658
-53,0.9813370108604431
-54,0.9730110764503479
-55,0.9408488273620605
-56,0.9374524354934692
-57,0.9349033832550049
-58,0.9321332573890686
-59,0.9286718964576721
-60,0.92417311668396
-61,0.9180570244789124
-62,0.909235417842865
-63,0.8981465697288513
-64,0.891947865486145
-65,0.8952312469482422
-66,0.900342583656311
-67,0.9025272727012634
-68,0.9015012383460999
-69,0.8981264233589172
-70,0.8937088251113892
-71,0.8889014720916748
-72,0.8838589191436768
-73,0.8783838152885437 -74,0.8715256452560425 -75,0.8604124784469604 -76,0.8423553705215454 -77,0.7858025431632996 -78,0.7712984681129456 -79,0.7636986970901489 -80,0.7567034959793091 -81,0.7477204203605652 -82,0.7345601916313171 -83,0.7262639403343201 -84,0.7202565670013428 -85,0.7180958986282349 -86,0.7150704264640808 -87,0.7121201753616333 -88,0.7092071175575256 -89,0.7063138484954834 -90,0.703436017036438 -91,0.7005761861801147 -92,0.6977412104606628 -93,0.694949209690094 -94,0.6922426819801331 -95,0.6897289156913757 -96,0.6877004504203796 -97,0.6868786811828613 -98,0.6911046504974365 -99,0.6858518123626709 -100,0.6878769993782043 -101,0.6896923780441284 -102,0.6910361051559448 -103,0.6919516324996948 -104,0.6925090551376343 -105,0.6927673816680908 -106,0.6927770972251892 -107,0.6925755739212036 -108,0.6921940445899963 -109,0.6916576027870178 -110,0.6909874081611633 -111,0.6902029514312744 -112,0.6893202662467957 -113,0.6883524656295776 -114,0.6873096227645874 -115,0.6862016320228577 -116,0.6850365996360779 -117,0.683820903301239 -118,0.6825596690177917 -119,0.6812569499015808 -120,0.6799156665802002 -121,0.6785378456115723 -122,0.6771290302276611 -123,0.6756935715675354 -124,0.6742368340492249 -125,0.6727684736251831 -126,0.6713014841079712 -127,0.6698574423789978 -128,0.6684738397598267 -129,0.6672196984291077 -130,0.6662269234657288 -131,0.6656746864318848 -132,0.6655579805374146 -133,0.663720428943634 -134,0.6644368171691895 -135,0.6662309169769287 -136,0.6680290102958679 -137,0.669519305229187 -138,0.670682430267334 -139,0.6715476512908936 -140,0.6721495985984802 -141,0.672519862651825 -142,0.6726855635643005 -143,0.6726687550544739 -144,0.6724886894226074 -145,0.6721631288528442 -146,0.6717068552970886 -147,0.6711336970329285 -148,0.6704566478729248 -149,0.6696869730949402 -150,0.6688344478607178 -151,0.6679095029830933 -152,0.66692054271698 -153,0.6658756732940674 -154,0.6647809147834778 -155,0.6636438369750977 -156,0.6624708771705627 -157,0.6612688302993774 -158,0.6600440740585327 -159,0.6588034629821777 -160,0.6575546264648438 -161,0.6563074588775635 -162,0.655072808265686 -163,0.6538663506507874 -164,0.6527112722396851 -165,0.6516430974006653 -166,0.6507189869880676 -167,0.6500269770622253 -168,0.6496553421020508 -169,0.6495773792266846 -170,0.6491881608963013 -171,0.6614606380462646 -172,0.6484742760658264 -173,0.6476728916168213 -174,0.6474364399909973 -175,0.6474927067756653 -176,0.6476133465766907 -177,0.6476906538009644 -178,0.6476829051971436 -179,0.6475734114646912 -180,0.6473600268363953 -181,0.6470468044281006 -182,0.6466417908668518 -183,0.6461539268493652 -184,0.6455960869789124 -185,0.6449822783470154 -186,0.6443297863006592 -187,0.6436615586280823 -188,0.6430061459541321 -189,0.642396092414856 -190,0.641864538192749 -191,0.6414269208908081 -192,0.641068160533905 -193,0.6407302618026733 -194,0.6400712728500366 -195,0.6458998322486877 -196,0.6392392516136169 -197,0.6392699480056763 -198,0.6396658420562744 -199,0.6399355530738831 -200,0.6395043730735779 diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.020/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.020/training_config.txt deleted file mode 100644 index 565c9e4..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.020/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function Name: 
three -Loss Function Exponent: 3 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.020/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.020/training_log.csv deleted file mode 100644 index 2b149aa..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,147.53147888183594 -1,89.54231262207031 -2,50.60892105102539 -3,24.473318099975586 -4,13.490397453308105 -5,10.333541870117188 -6,8.688423156738281 -7,8.258033752441406 -8,7.813135623931885 -9,6.3613152503967285 -10,6.103447437286377 -11,4.812254905700684 -12,3.968592643737793 -13,3.3208086490631104 -14,2.801744222640991 -15,2.239187240600586 -16,1.9917300939559937 -17,1.679150938987732 -18,1.556692123413086 -19,1.4872453212738037 -20,1.3305772542953491 -21,1.244823694229126 -22,1.147659420967102 -23,1.1299208402633667 -24,1.1658549308776855 -25,1.1343648433685303 -26,1.0245505571365356 -27,0.9059932231903076 -28,0.8096622228622437 -29,0.7774941325187683 -30,0.7591992020606995 -31,0.7379881143569946 -32,0.7209556698799133 
-33,0.707085132598877 -34,0.6982727646827698 -35,0.6948640942573547 -36,0.7245545387268066 -37,0.7250595688819885 -38,0.7145333290100098 -39,0.6904298067092896 -40,0.6902416348457336 -41,0.6910455822944641 -42,0.6918412446975708 -43,0.692431628704071 -44,0.6928741931915283 -45,0.6929153800010681 -46,0.6923276782035828 -47,0.6911664009094238 -48,0.6895283460617065 -49,0.6876181364059448 -50,0.6856474876403809 -51,0.6835974454879761 -52,0.680701732635498 -53,0.676016628742218 -54,0.6746599078178406 -55,0.6721251606941223 -56,0.669608473777771 -57,0.6671828627586365 -58,0.664853572845459 -59,0.6626406908035278 -60,0.6605706810951233 -61,0.6586681008338928 -62,0.656938910484314 -63,0.6553813815116882 -64,0.6539803743362427 -65,0.652720034122467 -66,0.6515927314758301 -67,0.6506068706512451 -68,0.6497755646705627 -69,0.6490979194641113 -70,0.6485306620597839 -71,0.6479799151420593 -72,0.6473246812820435 -73,0.6464654803276062 -74,0.6454038619995117 -75,0.6442487835884094 -76,0.6431291699409485 -77,0.6421290040016174 -78,0.6412748694419861 -79,0.6405513286590576 -80,0.6399205327033997 -81,0.6393381357192993 -82,0.63876873254776 -83,0.6381888389587402 -84,0.6375863552093506 -85,0.6369593739509583 -86,0.6363153457641602 -87,0.635667085647583 -88,0.6350286602973938 -89,0.6344123482704163 -90,0.6338265538215637 -91,0.633270263671875 -92,0.6327348947525024 -93,0.6322120428085327 -94,0.6316937208175659 -95,0.6311742067337036 -96,0.6306474804878235 -97,0.6301106214523315 -98,0.6295672655105591 -99,0.6290278434753418 -100,0.6285021305084229 -101,0.627996027469635 -102,0.6275100111961365 -103,0.6270411014556885 -104,0.6265836358070374 -105,0.6261312961578369 -106,0.6256784200668335 -107,0.6252227425575256 -108,0.6247633695602417 -109,0.6243017315864563 -110,0.6238415241241455 -111,0.6233857274055481 -112,0.6229366660118103 -113,0.6224961280822754 -114,0.6220628619194031 -115,0.621633768081665 -116,0.6212075352668762 -117,0.6207827925682068 -118,0.6203577518463135 -119,0.6199334859848022 -120,0.6195107698440552 -121,0.6190905570983887 -122,0.6186737418174744 -123,0.6182597875595093 -124,0.6178476810455322 -125,0.6174371242523193 -126,0.617026686668396 -127,0.6166163682937622 -128,0.6162061095237732 -129,0.6157961487770081 -130,0.6153870224952698 -131,0.6149793267250061 -132,0.6145726442337036 -133,0.6141659617424011 -134,0.6137582659721375 -135,0.6133479475975037 -136,0.6129328608512878 -137,0.6125103235244751 -138,0.6120744943618774 -139,0.6116126775741577 -140,0.6110830903053284 -141,0.6102741956710815 -142,0.6081164479255676 -143,0.6086042523384094 -144,0.6079257130622864 -145,0.6071854829788208 -146,0.6069789528846741 -147,0.6072627902030945 -148,0.6060351729393005 -149,0.6072588562965393 -150,0.6068220138549805 -151,0.6051309704780579 -152,0.6047847867012024 -153,0.605591893196106 -154,0.6046950817108154 -155,0.6055837869644165 -156,0.6056721210479736 -157,0.6052873134613037 -158,0.6045794486999512 -159,0.6031411290168762 -160,0.6039466857910156 -161,0.6040088534355164 -162,0.603682279586792 -163,0.6032419204711914 -164,0.6024231910705566 -165,0.6020128130912781 -166,0.6020244359970093 -167,0.6007804274559021 -168,0.6008185148239136 -169,0.6008376479148865 -170,0.5997441411018372 -171,0.6001589894294739 -172,0.6002739667892456 -173,0.5999934077262878 -174,0.5993895530700684 -175,0.5982042551040649 -176,0.5988638401031494 -177,0.5987363457679749 -178,0.5977555513381958 -179,0.5976547598838806 -180,0.5979189276695251 -181,0.5971102714538574 -182,0.596393883228302 -183,0.5964828133583069 
-184,0.5955618023872375 -185,0.5958382487297058 -186,0.595636248588562 -187,0.5948234796524048 -188,0.5952358245849609 -189,0.5946877598762512 -190,0.5942102670669556 -191,0.594269335269928 -192,0.5935224294662476 -193,0.5937354564666748 -194,0.5934563279151917 -195,0.5927099585533142 -196,0.5927499532699585 -197,0.5921125411987305 -198,0.592311680316925 -199,0.5917604565620422 -200,0.5916938781738281 diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.040/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.040/training_config.txt deleted file mode 100644 index 007c90c..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.040/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function Name: three -Loss Function Exponent: 3 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.040/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.040/training_log.csv deleted file mode 100644 index 0d9977d..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,147.53147888183594 -1,61.889183044433594 -2,13.478277206420898 -3,8.701150894165039 -4,5.69813346862793 -5,3.459804058074951 -6,2.048839807510376 -7,1.3876768350601196 -8,1.0806347131729126 -9,0.897577166557312 -10,0.8893500566482544 -11,0.6875256299972534 -12,0.6368137001991272 -13,0.6754326820373535 -14,0.7336571216583252 -15,0.5804811120033264 -16,0.5311198234558105 -17,0.4636698365211487 -18,0.40959757566452026 -19,0.3870234489440918 -20,0.373278945684433 -21,0.3600274622440338 -22,0.346781462430954 -23,0.3324364423751831 -24,0.31375300884246826 -25,0.26459455490112305 -26,0.2560074031352997 -27,0.24919670820236206 -28,0.2434045672416687 -29,0.23827405273914337 -30,0.23361413180828094 -31,0.22932517528533936 -32,0.22534912824630737 -33,0.2216290831565857 -34,0.21812771260738373 -35,0.21481186151504517 -36,0.21166011691093445 -37,0.2086547464132309 -38,0.20577536523342133 -39,0.20300228893756866 -40,0.20031435787677765 -41,0.19768989086151123 -42,0.19511356949806213 -43,0.1925642490386963 -44,0.1900162249803543 -45,0.18743611872196198 -46,0.18472492694854736 -47,0.18125998973846436 -48,0.17635875940322876 -49,0.17268790304660797 -50,0.16906924545764923 -51,0.16930198669433594 -52,0.16988690197467804 -53,0.17025889456272125 -54,0.17024606466293335 -55,0.17000198364257812 -56,0.16958087682724 -57,0.1688809096813202 -58,0.16648250818252563 -59,0.1652597337961197 -60,0.16469107568264008 -61,0.16413259506225586 -62,0.16361771523952484 -63,0.16316281259059906 -64,0.16275405883789062 -65,0.16235439479351044 -66,0.16193629801273346 -67,0.1614908128976822 -68,0.16101329028606415 -69,0.16049957275390625 -70,0.15994475781917572 
-71,0.1593400537967682 -72,0.15866926312446594 -73,0.15790125727653503 -74,0.1569727659225464 -75,0.15576262772083282 -76,0.1542929857969284 -77,0.1536364108324051 -78,0.1537025421857834 -79,0.153691366314888 -80,0.1535898596048355 -81,0.15346787869930267 -82,0.15339697897434235 -83,0.15322181582450867 -84,0.15286403894424438 -85,0.15237069129943848 -86,0.1518835872411728 -87,0.15143916010856628 -88,0.15098941326141357 -89,0.15052196383476257 -90,0.15001645684242249 -91,0.14948585629463196 -92,0.1490151435136795 -93,0.14874674379825592 -94,0.14860497415065765 -95,0.14830227196216583 -96,0.14717978239059448 -97,0.14740601181983948 -98,0.1473228931427002 -99,0.147007018327713 -100,0.1474684774875641 -101,0.14774246513843536 -102,0.14744913578033447 -103,0.14659425616264343 -104,0.14575739204883575 -105,0.1457654982805252 -106,0.14559097588062286 -107,0.14534974098205566 -108,0.14492972195148468 -109,0.1438419222831726 -110,0.14470960199832916 -111,0.144733726978302 -112,0.14459660649299622 -113,0.14442700147628784 -114,0.14425869286060333 -115,0.14411741495132446 -116,0.1439569592475891 -117,0.1437646895647049 -118,0.14353972673416138 -119,0.1432849019765854 -120,0.14300404489040375 -121,0.1427011936903 -122,0.14238131046295166 -123,0.14204902946949005 -124,0.14170652627944946 -125,0.1413402408361435 -126,0.14083008468151093 -127,0.13945825397968292 -128,0.1401495337486267 -129,0.1401454359292984 -130,0.139977365732193 -131,0.1397622674703598 -132,0.13951590657234192 -133,0.1392187923192978 -134,0.13872754573822021 -135,0.1387503743171692 -136,0.13786190748214722 -137,0.13804280757904053 -138,0.13782404363155365 -139,0.13719162344932556 -140,0.13806094229221344 -141,0.13760913908481598 -142,0.13692930340766907 -143,0.13693687319755554 -144,0.13673539459705353 -145,0.13646315038204193 -146,0.13608421385288239 -147,0.1352730691432953 -148,0.13574638962745667 -149,0.13586118817329407 -150,0.13572508096694946 -151,0.135501891374588 -152,0.13521628081798553 -153,0.13487455248832703 -154,0.134367898106575 -155,0.1331874281167984 -156,0.13359472155570984 -157,0.13347920775413513 -158,0.13304705917835236 -159,0.13227267563343048 -160,0.13239967823028564 -161,0.13178926706314087 -162,0.1316775530576706 -163,0.13127964735031128 -164,0.13104258477687836 -165,0.13092240691184998 -166,0.13059143722057343 -167,0.13055159151554108 -168,0.13013993203639984 -169,0.1299675852060318 -170,0.12965676188468933 -171,0.12940610945224762 -172,0.12916229665279388 -173,0.12892252206802368 -174,0.12869685888290405 -175,0.12843453884124756 -176,0.128244549036026 -177,0.1279943436384201 -178,0.12772971391677856 -179,0.12752960622310638 -180,0.1272066831588745 -181,0.1269792765378952 -182,0.1266913115978241 -183,0.12645691633224487 -184,0.126183420419693 -185,0.1258871704339981 -186,0.12558011710643768 -187,0.12522074580192566 -188,0.1248604878783226 -189,0.12444020807743073 -190,0.12404164671897888 -191,0.1237025111913681 -192,0.12342319637537003 -193,0.12313538789749146 -194,0.12291362136602402 -195,0.12263574451208115 -196,0.12241033464670181 -197,0.12214463949203491 -198,0.12189940363168716 -199,0.12166843563318253 -200,0.12141196429729462 diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.050/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.050/training_config.txt deleted file mode 100644 index fec2d53..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.050/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function Name: three -Loss Function Exponent: 3 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.050/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.050/training_log.csv deleted file mode 100644 index 5170791..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,147.53147888183594 -1,49.38374328613281 -2,9.883437156677246 -3,7.7730865478515625 -4,3.966827392578125 -5,2.3384008407592773 -6,1.1803959608078003 -7,0.9546282887458801 -8,0.6302970051765442 -9,0.5511708855628967 -10,0.4737107455730438 -11,0.39367106556892395 -12,0.3285837173461914 -13,0.29891568422317505 -14,0.28980550169944763 -15,0.28040894865989685 -16,0.27275967597961426 -17,0.2671482264995575 -18,0.26174643635749817 -19,0.25682973861694336 -20,0.2523464560508728 -21,0.2482086718082428 -22,0.24435675144195557 -23,0.2407427877187729 -24,0.23733288049697876 
-25,0.23409005999565125 -26,0.23098799586296082 -27,0.22800330817699432 -28,0.22512100636959076 -29,0.22232505679130554 -30,0.2195998579263687 -31,0.2169293612241745 -32,0.21428662538528442 -33,0.21164104342460632 -34,0.20893913507461548 -35,0.20610634982585907 -36,0.20307715237140656 -37,0.20008835196495056 -38,0.19693796336650848 -39,0.1938217729330063 -40,0.19205866754055023 -41,0.18976977467536926 -42,0.18881723284721375 -43,0.18756036460399628 -44,0.18607331812381744 -45,0.18446975946426392 -46,0.18276140093803406 -47,0.1809314787387848 -48,0.1789252609014511 -49,0.17674823105335236 -50,0.1747535616159439 -51,0.1729772835969925 -52,0.17243020236492157 -53,0.1724272519350052 -54,0.1723446547985077 -55,0.17209097743034363 -56,0.1716892272233963 -57,0.17117752134799957 -58,0.17058202624320984 -59,0.16991694271564484 -60,0.16918790340423584 -61,0.16839613020420074 -62,0.16753913462162018 -63,0.16661109030246735 -64,0.1656014323234558 -65,0.16449753940105438 -66,0.16329257190227509 -67,0.16200877726078033 -68,0.16076873242855072 -69,0.15985804796218872 -70,0.15935787558555603 -71,0.1590120643377304 -72,0.15853393077850342 -73,0.15788157284259796 -74,0.15707197785377502 -75,0.1560978889465332 -76,0.15494637191295624 -77,0.1535000205039978 -78,0.1503230631351471 -79,0.1496018022298813 -80,0.1489233821630478 -81,0.1480015218257904 -82,0.14704084396362305 -83,0.14664000272750854 -84,0.14625360071659088 -85,0.14580558240413666 -86,0.1453394740819931 -87,0.14484670758247375 -88,0.1444169282913208 -89,0.1442405730485916 -90,0.14385351538658142 -91,0.14319409430027008 -92,0.14269447326660156 -93,0.14232300221920013 -94,0.14194777607917786 -95,0.14156542718410492 -96,0.14117881655693054 -97,0.14077776670455933 -98,0.1403605341911316 -99,0.13993032276630402 -100,0.13949273526668549 -101,0.13905949890613556 -102,0.13863052427768707 -103,0.13820163905620575 -104,0.13776953518390656 -105,0.13733573257923126 -106,0.13692735135555267 -107,0.13655193150043488 -108,0.13614597916603088 -109,0.13570275902748108 -110,0.13527414202690125 -111,0.1348796933889389 -112,0.13449369370937347 -113,0.13410396873950958 -114,0.1337064802646637 -115,0.133301243185997 -116,0.1328914314508438 -117,0.1324811726808548 -118,0.13207249343395233 -119,0.13166563212871552 -120,0.1312604695558548 -121,0.1308520883321762 -122,0.13043507933616638 -123,0.13000747561454773 -124,0.12957166135311127 -125,0.12912921607494354 -126,0.12868034839630127 -127,0.12822659313678741 -128,0.1277703195810318 -129,0.12731759250164032 -130,0.12687629461288452 -131,0.126443549990654 -132,0.1260305792093277 -133,0.12563546001911163 -134,0.12525206804275513 -135,0.12486792355775833 -136,0.12447546422481537 -137,0.12407630681991577 -138,0.12367702275514603 -139,0.12328283488750458 -140,0.12289576232433319 -141,0.12251769751310349 -142,0.12214875966310501 -143,0.12178688496351242 -144,0.1214316263794899 -145,0.12108229845762253 -146,0.12074175477027893 -147,0.1204126849770546 -148,0.12009528279304504 -149,0.11978597939014435 -150,0.11948051303625107 -151,0.1191760003566742 -152,0.11887326091527939 -153,0.11857637017965317 -154,0.1182861328125 -155,0.117998868227005 -156,0.11771219968795776 -157,0.11742742359638214 -158,0.11714763939380646 -159,0.11687366664409637 -160,0.1166023388504982 -161,0.11633169651031494 -162,0.11606293171644211 -163,0.11579781025648117 -164,0.11553613096475601 -165,0.11527584493160248 -166,0.11501646041870117 -167,0.11475992202758789 -168,0.11450687050819397 -169,0.1142563670873642 -170,0.11400721222162247 -171,0.11375962942838669 
-172,0.11351469159126282 -173,0.11327215284109116 -174,0.1130310520529747 -175,0.11279108375310898 -176,0.11255288869142532 -177,0.11231688410043716 -178,0.11208285391330719 -179,0.11185038834810257 -180,0.11161942034959793 -181,0.11139056086540222 -182,0.1111641526222229 -183,0.11094007641077042 -184,0.11071810871362686 -185,0.11049866676330566 -186,0.11028193682432175 -187,0.11006785929203033 -188,0.10985635966062546 -189,0.1096477285027504 -190,0.10944215208292007 -191,0.10923948884010315 -192,0.10903988033533096 -193,0.10884362459182739 -194,0.10865335166454315 -195,0.10847313702106476 -196,0.10829785466194153 -197,0.10812648385763168 -198,0.10795858502388 -199,0.10779375582933426 -200,0.10763172805309296 diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.080/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.080/training_config.txt deleted file mode 100644 index 060d3da..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.080/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function Name: three -Loss Function Exponent: 3 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
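As a quick sanity check of the normalized loss defined in this config — a minimal standalone sketch, assuming only torch and math, with the function body copied from the source above — the endpoint behavior the docstring claims can be verified numerically:

import math
import torch

def normalized_loss(theta, desired_theta, exponent, min_val=0.01, delta=1.0):
    # Maps |theta - desired_theta| on [0, 2*pi] to [min_val, 1]; the shift
    # 'delta' keeps the gradient finite at zero error for exponents < 1.
    error = torch.abs(theta - desired_theta)
    numerator = (error + delta) ** exponent - delta ** exponent
    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
    return min_val + (1 - min_val) * (numerator / denominator)

zero = torch.tensor(0.0)
print(normalized_loss(zero, zero, exponent=3))                        # tensor(0.0100) = min_val
print(normalized_loss(torch.tensor(2 * math.pi), zero, exponent=3))  # tensor(1.)

At zero error the numerator vanishes, leaving min_val; at an error of 2π the numerator equals the denominator, giving exactly 1 — matching the two cases listed in the docstring.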
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.080/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.080/training_log.csv deleted file mode 100644 index 75d4d65..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,147.53147888183594 -1,24.505306243896484 -2,6.410512924194336 -3,2.733731269836426 -4,1.4315212965011597 -5,1.0418634414672852 -6,0.665600061416626 -7,0.727537214756012 -8,0.6443448662757874 -9,0.613862156867981 -10,0.5200310945510864 -11,0.33874306082725525 -12,0.3034406900405884 -13,0.27003705501556396 -14,0.21495728194713593 -15,0.20133540034294128 -16,0.1919333040714264 -17,0.18235407769680023 -18,0.1771046221256256 -19,0.17260506749153137 -20,0.1685747355222702 -21,0.16484284400939941 -22,0.16131845116615295 -23,0.1579580307006836 -24,0.15474024415016174 -25,0.15174894034862518 -26,0.1492457389831543 -27,0.1473120003938675 -28,0.14569981396198273 -29,0.1442432701587677 -30,0.14283010363578796 -31,0.1414346545934677 -32,0.14005567133426666 -33,0.13869927823543549 -34,0.137383371591568 -35,0.13613851368427277 -36,0.13495348393917084 -37,0.13381707668304443 -38,0.13272279500961304 -39,0.131663516163826 -40,0.1306336522102356 -41,0.12963013350963593 -42,0.12864874303340912 -43,0.12768599390983582 -44,0.12673717737197876 -45,0.1257985532283783 -46,0.12486670911312103 -47,0.12393756955862045 -48,0.12301095575094223 -49,0.1220785453915596 -50,0.1211458221077919 -51,0.120223768055439 -52,0.11932004988193512 -53,0.11845008283853531 -54,0.11763690412044525 -55,0.11690358817577362 -56,0.11623931676149368 -57,0.11570744216442108 -58,0.11528457701206207 -59,0.11497184634208679 -60,0.11472407728433609 -61,0.11449175328016281 -62,0.1142411008477211 -63,0.11395263671875 -64,0.11361762881278992 -65,0.11323646456003189 -66,0.11281508207321167 -67,0.112362802028656 -68,0.11189188808202744 -69,0.11141545325517654 -70,0.1109469085931778 
-71,0.1104973554611206 -72,0.11007476598024368 -73,0.10968858748674393 -74,0.1093430146574974 -75,0.10903207957744598 -76,0.108742356300354 -77,0.10846622288227081 -78,0.10819900035858154 -79,0.10793625563383102 -80,0.10767379403114319 -81,0.107407346367836 -82,0.10713570564985275 -83,0.10685975104570389 -84,0.10658196359872818 -85,0.10630549490451813 -86,0.10603433847427368 -87,0.10577067732810974 -88,0.10551515966653824 -89,0.10526822507381439 -90,0.10503028333187103 -91,0.10480064898729324 -92,0.1045793890953064 -93,0.10436360538005829 -94,0.10415033251047134 -95,0.10393760353326797 -96,0.10372426360845566 -97,0.10350983589887619 -98,0.10329454392194748 -99,0.10307928174734116 -100,0.10286516696214676 -101,0.10265340656042099 -102,0.10244497656822205 -103,0.10224100947380066 -104,0.10204168409109116 -105,0.10184688121080399 -106,0.10165476053953171 -107,0.10146483033895493 -108,0.10127689689397812 -109,0.10109034180641174 -110,0.10090503096580505 -111,0.10072030127048492 -112,0.10053526610136032 -113,0.10035048425197601 -114,0.10016658157110214 -115,0.09998368471860886 -116,0.09980209916830063 -117,0.09962238371372223 -118,0.0994446724653244 -119,0.09926871955394745 -120,0.0990942195057869 -121,0.0989205464720726 -122,0.09874755889177322 -123,0.09857501834630966 -124,0.09840314090251923 -125,0.09823252260684967 -126,0.09806326776742935 -127,0.09789599478244781 -128,0.0977308452129364 -129,0.09756768494844437 -130,0.09740594029426575 -131,0.09724541753530502 -132,0.09708614647388458 -133,0.09692834317684174 -134,0.09677246958017349 -135,0.09661896526813507 -136,0.09646770358085632 -137,0.09631822258234024 -138,0.09617022424936295 -139,0.09602431207895279 -140,0.09588088095188141 -141,0.09574058651924133 -142,0.09560465812683105 -143,0.09547336399555206 -144,0.09534686803817749 -145,0.09522436559200287 -146,0.09510553628206253 -147,0.09499043226242065 -148,0.09488172829151154 -149,0.09477830678224564 -150,0.09467927366495132 -151,0.0945829376578331 -152,0.094488725066185 -153,0.0943954661488533 -154,0.09430254250764847 -155,0.09420987963676453 -156,0.09411720931529999 -157,0.09402399510145187 -158,0.09393101930618286 -159,0.09383871406316757 -160,0.09374791383743286 -161,0.09365922212600708 -162,0.09357374906539917 -163,0.09349185228347778 -164,0.09341508150100708 -165,0.0933440625667572 -166,0.0932781770825386 -167,0.09321699291467667 -168,0.09316087514162064 -169,0.09310930967330933 -170,0.09306097030639648 -171,0.09301543980836868 -172,0.09297257661819458 -173,0.0929323211312294 -174,0.09289462119340897 -175,0.09285932034254074 -176,0.09282620251178741 -177,0.09279521554708481 -178,0.09276639670133591 -179,0.09273981302976608 -180,0.09271489083766937 -181,0.09269103407859802 -182,0.09266803413629532 -183,0.09264592826366425 -184,0.09262464195489883 -185,0.0926041379570961 -186,0.09258625656366348 -187,0.09257137030363083 -188,0.09255745261907578 -189,0.09254395961761475 -190,0.0925305038690567 -191,0.0925169363617897 -192,0.09250322729349136 -193,0.09248952567577362 -194,0.09247592091560364 -195,0.09246288239955902 -196,0.09244988113641739 -197,0.09243660420179367 -198,0.09242291003465652 -199,0.0924089103937149 -200,0.09239476919174194 diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.100/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.100/training_config.txt deleted file mode 100644 index 2249400..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.100/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function Name: three -Loss Function Exponent: 3 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.100/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.100/training_log.csv deleted file mode 100644 index 3782c0c..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,147.53147888183594 -1,20.368268966674805 -2,2.623342275619507 -3,0.937842845916748 -4,0.559163510799408 -5,0.3518153131008148 -6,0.2935752272605896 -7,0.263658344745636 -8,0.24404558539390564 -9,0.22789669036865234 -10,0.21508629620075226 -11,0.20351079106330872 -12,0.19169792532920837 -13,0.1785695105791092 -14,0.16288414597511292 -15,0.15001147985458374 -16,0.14242303371429443 -17,0.13712136447429657 -18,0.13303790986537933 -19,0.1297117918729782 -20,0.12692910432815552 -21,0.12456830590963364 -22,0.122531957924366 -23,0.12075760960578918 
-24,0.11920218914747238 -25,0.11782732605934143 -26,0.11659909039735794 -27,0.11549389362335205 -28,0.11449304223060608 -29,0.11358234286308289 -30,0.11275024712085724 -31,0.11198650300502777 -32,0.11128462105989456 -33,0.11064581573009491 -34,0.11005768179893494 -35,0.10950709879398346 -36,0.10898908972740173 -37,0.10850075632333755 -38,0.10803938657045364 -39,0.10760244727134705 -40,0.10718820989131927 -41,0.10679440200328827 -42,0.10641912370920181 -43,0.10606023669242859 -44,0.10571552067995071 -45,0.10538417845964432 -46,0.10506671667098999 -47,0.10476227104663849 -48,0.10446879267692566 -49,0.10418497025966644 -50,0.10391079634428024 -51,0.10364516079425812 -52,0.10338763892650604 -53,0.10313723981380463 -54,0.10289382934570312 -55,0.1026574894785881 -56,0.10242815315723419 -57,0.102205291390419 -58,0.1019885465502739 -59,0.1017773300409317 -60,0.10157124698162079 -61,0.10136878490447998 -62,0.10117033123970032 -63,0.10097711533308029 -64,0.10078846663236618 -65,0.10060421377420425 -66,0.10042478144168854 -67,0.1002553254365921 -68,0.10009609162807465 -69,0.09994007647037506 -70,0.09978669881820679 -71,0.09963604062795639 -72,0.09948998689651489 -73,0.09934582561254501 -74,0.09920284897089005 -75,0.09906037151813507 -76,0.09891878068447113 -77,0.09877783060073853 -78,0.09863831847906113 -79,0.09850224107503891 -80,0.09836794435977936 -81,0.09823581576347351 -82,0.09810669720172882 -83,0.09798003733158112 -84,0.09785474836826324 -85,0.09773033857345581 -86,0.0976070761680603 -87,0.09748490899801254 -88,0.09736377000808716 -89,0.09724371135234833 -90,0.09712541103363037 -91,0.09700808674097061 -92,0.09689151495695114 -93,0.09677572548389435 -94,0.09666064381599426 -95,0.0965464860200882 -96,0.09643294662237167 -97,0.09632059186697006 -98,0.09620906412601471 -99,0.09609843045473099 -100,0.0959889218211174 -101,0.09588056057691574 -102,0.0957731157541275 -103,0.09566651284694672 -104,0.09556092321872711 -105,0.09545592963695526 -106,0.09535156190395355 -107,0.09524749219417572 -108,0.09514375776052475 -109,0.09504066407680511 -110,0.09493810683488846 -111,0.09483654797077179 -112,0.09473580867052078 -113,0.0946366935968399 -114,0.09453964978456497 -115,0.09444456547498703 -116,0.09435153752565384 -117,0.09426054358482361 -118,0.09417162835597992 -119,0.09408469498157501 -120,0.09399976581335068 -121,0.09391685575246811 -122,0.0938357412815094 -123,0.09375655651092529 -124,0.09367910772562027 -125,0.0936032235622406 -126,0.09352898597717285 -127,0.09345623105764389 -128,0.09338516741991043 -129,0.09331551939249039 -130,0.09324714541435242 -131,0.09318027645349503 -132,0.09311452507972717 -133,0.09305001050233841 -134,0.09298644959926605 -135,0.09292355924844742 -136,0.09286152571439743 -137,0.09280011802911758 -138,0.09273926913738251 -139,0.09267863631248474 -140,0.09261644631624222 -141,0.09255143254995346 -142,0.0924832671880722 -143,0.09241289645433426 -144,0.09234067797660828 -145,0.09226701408624649 -146,0.09219276905059814 -147,0.09211807698011398 -148,0.09204357117414474 -149,0.09197179228067398 -150,0.09190444648265839 -151,0.09184033423662186 -152,0.09178034216165543 -153,0.09172471612691879 -154,0.09167342633008957 -155,0.09162560850381851 -156,0.09158032387495041 -157,0.09153597801923752 -158,0.09149181097745895 -159,0.09144540876150131 -160,0.09139588475227356 -161,0.09134306758642197 -162,0.09128706157207489 -163,0.09122785925865173 -164,0.09116511791944504 -165,0.09109950810670853 -166,0.09103085100650787 -167,0.09096063673496246 -168,0.09089035540819168 
-169,0.0908210426568985 -170,0.09075411409139633 -171,0.09069229662418365 -172,0.09063621610403061 -173,0.09058523923158646 -174,0.09053868055343628 -175,0.09049627929925919 -176,0.09045777469873428 -177,0.09042306989431381 -178,0.09039163589477539 -179,0.09036365896463394 -180,0.09033755958080292 -181,0.09031248092651367 -182,0.09028870612382889 -183,0.0902661383152008 -184,0.090244822204113 -185,0.09022457152605057 -186,0.0902053713798523 -187,0.09018705040216446 -188,0.09016996622085571 -189,0.09015391767024994 -190,0.09013885259628296 -191,0.09012465178966522 -192,0.09011135995388031 -193,0.0900995135307312 -194,0.0900883600115776 -195,0.09007782489061356 -196,0.09006799012422562 -197,0.09005894511938095 -198,0.09005112200975418 -199,0.09004388749599457 -200,0.09003690630197525 diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.125/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.125/training_config.txt deleted file mode 100644 index f2f300a..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.125/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function Name: three -Loss Function Exponent: 3 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
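The loss_fn wrapper quoted in these configs reduces a whole batch of simulated trajectories to one scalar. A minimal sketch of the tensor shapes involved: the state layout (theta at index 0, desired_theta at index 3) comes from the config itself, the 22 cases and 1000 time points match the training setup above, while the all-zero trajectory and the explicit base_loss_fn argument are illustrative assumptions — the original wrapper closes over base_loss_fn instead.

import torch

def loss_fn(state_traj, base_loss_fn):
    # state_traj: [batch_size, t_points, state_dim]
    theta = state_traj[:, :, 0]          # [batch_size, t_points]
    desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
    return torch.mean(base_loss_fn(theta, desired_theta))

state_traj = torch.zeros(22, 1000, 4)  # 22 training cases x 1000 time points
print(loss_fn(state_traj, lambda t, d: torch.abs(t - d)))  # tensor(0.) for a perfect trajectory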
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.125/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.125/training_log.csv deleted file mode 100644 index e894673..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,147.53147888183594 -1,17.04619598388672 -2,2.1139488220214844 -3,1.3610737323760986 -4,0.3703656792640686 -5,0.3105773627758026 -6,0.2218484729528427 -7,0.20185601711273193 -8,0.18967145681381226 -9,0.18124249577522278 -10,0.17440980672836304 -11,0.16702833771705627 -12,0.163111612200737 -13,0.16040419042110443 -14,0.15795490145683289 -15,0.15586569905281067 -16,0.15409044921398163 -17,0.15248005092144012 -18,0.1510806679725647 -19,0.14984261989593506 -20,0.1486951857805252 -21,0.14762164652347565 -22,0.14660972356796265 -23,0.14564433693885803 -24,0.14472050964832306 -25,0.14383713901042938 -26,0.14299926161766052 -27,0.1422143429517746 -28,0.14148393273353577 -29,0.14080075919628143 -30,0.14016717672348022 -31,0.13957923650741577 -32,0.13903573155403137 -33,0.13854610919952393 -34,0.13810037076473236 -35,0.1376768946647644 -36,0.13726015388965607 -37,0.13683919608592987 -38,0.13640572130680084 -39,0.13595491647720337 -40,0.13548555970191956 -41,0.13499923050403595 -42,0.13449746370315552 -43,0.13398385047912598 -44,0.1334628164768219 -45,0.1329386681318283 -46,0.1324169933795929 -47,0.13190250098705292 -48,0.13139861822128296 -49,0.13090713322162628 -50,0.13042554259300232 -51,0.12995284795761108 -52,0.1294877976179123 -53,0.12902846932411194 -54,0.1285737156867981 -55,0.12812234461307526 -56,0.12767267227172852 -57,0.12722326815128326 -58,0.1267733871936798 -59,0.12632319331169128 -60,0.1258726716041565 -61,0.12542226910591125 -62,0.12497236579656601 -63,0.12452317029237747 -64,0.12407509237527847 -65,0.12362959235906601 -66,0.12318704277276993 -67,0.12274840474128723 -68,0.1223142221570015 -69,0.1218840479850769 
-70,0.12145834416151047 -71,0.12103720754384995 -72,0.12061998248100281 -73,0.12020689994096756 -74,0.11979806423187256 -75,0.1193937212228775 -76,0.11899416148662567 -77,0.11859853565692902 -78,0.11820798367261887 -79,0.1178244948387146 -80,0.11745106428861618 -81,0.11708461493253708 -82,0.11672402173280716 -83,0.11636846512556076 -84,0.11601816862821579 -85,0.11567417532205582 -86,0.1153353825211525 -87,0.1150006353855133 -88,0.1146698072552681 -89,0.1143423542380333 -90,0.11401791870594025 -91,0.11369691044092178 -92,0.11337940394878387 -93,0.11306533962488174 -94,0.1127546951174736 -95,0.11244726926088333 -96,0.1121438518166542 -97,0.11184399574995041 -98,0.11154766380786896 -99,0.1112549751996994 -100,0.11096585541963577 -101,0.11068028956651688 -102,0.11039837449789047 -103,0.11011993139982224 -104,0.1098448783159256 -105,0.10957295447587967 -106,0.10930367559194565 -107,0.10903722047805786 -108,0.10877374559640884 -109,0.1085132583975792 -110,0.10825570672750473 -111,0.10800117999315262 -112,0.10774946957826614 -113,0.10750077664852142 -114,0.10725349932909012 -115,0.10700394958257675 -116,0.1067536324262619 -117,0.10650283098220825 -118,0.1062515527009964 -119,0.10600017011165619 -120,0.10574855655431747 -121,0.10549675673246384 -122,0.1052449494600296 -123,0.10499316453933716 -124,0.1047416552901268 -125,0.1044905036687851 -126,0.10424131155014038 -127,0.10399526357650757 -128,0.10375232994556427 -129,0.10351192206144333 -130,0.1032727062702179 -131,0.1030346006155014 -132,0.1027974858880043 -133,0.10256166756153107 -134,0.10232730209827423 -135,0.10209321975708008 -136,0.10186053067445755 -137,0.10163012892007828 -138,0.10140229016542435 -139,0.10117577761411667 -140,0.10095108300447464 -141,0.1007266491651535 -142,0.10050177574157715 -143,0.10027620196342468 -144,0.10004936903715134 -145,0.09982242435216904 -146,0.09959477931261063 -147,0.09936582297086716 -148,0.09913647174835205 -149,0.09890652447938919 -150,0.0986761599779129 -151,0.09844354540109634 -152,0.09821131825447083 -153,0.09797973930835724 -154,0.0977495089173317 -155,0.09752264618873596 -156,0.09729883819818497 -157,0.09707944840192795 -158,0.09686412662267685 -159,0.09665383398532867 -160,0.09644815325737 -161,0.09624791145324707 -162,0.09605303406715393 -163,0.09586351364850998 -164,0.09567975252866745 -165,0.0955011174082756 -166,0.09532763808965683 -167,0.09515974670648575 -168,0.0949963852763176 -169,0.09483765810728073 -170,0.09468452632427216 -171,0.09453678876161575 -172,0.09439501166343689 -173,0.09425998479127884 -174,0.09413006156682968 -175,0.09400517493486404 -176,0.09388554096221924 -177,0.09377146512269974 -178,0.09366209805011749 -179,0.0935571938753128 -180,0.09345638751983643 -181,0.09335893392562866 -182,0.0932646170258522 -183,0.09317336976528168 -184,0.09308450669050217 -185,0.09299614280462265 -186,0.09290990978479385 -187,0.09282476454973221 -188,0.09274005144834518 -189,0.09265594929456711 -190,0.09257321059703827 -191,0.09249336272478104 -192,0.09241456538438797 -193,0.09233681112527847 -194,0.09225966781377792 -195,0.09218357503414154 -196,0.09210839122533798 -197,0.0920340046286583 -198,0.0919608548283577 -199,0.09188887476921082 -200,0.09181830286979675 diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.160/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.160/training_config.txt deleted file mode 100644 index 1f1cd28..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.160/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller 
path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function Name: three -Loss Function Exponent: 3 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.160/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.160/training_log.csv deleted file mode 100644 index 012b024..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,147.53147888183594 -1,14.44062614440918 -2,1.5345511436462402 -3,1.574082374572754 -4,2.364678382873535 -5,3.5714778900146484 -6,3.767116069793701 -7,2.4967336654663086 -8,1.8373973369598389 -9,1.3303762674331665 -10,1.0878313779830933 -11,0.9219632744789124 -12,0.7901384830474854 -13,0.7026668787002563 -14,0.6340333819389343 -15,0.5720100402832031 -16,0.5125773549079895 -17,0.45785725116729736 -18,0.4138236343860626 -19,0.37905532121658325 -20,0.3513503074645996 -21,0.32893645763397217 -22,0.3104279041290283 -23,0.29458674788475037 -24,0.28077203035354614 
-25,0.26850178837776184 -26,0.25743943452835083 -27,0.24740386009216309 -28,0.2382323294878006 -29,0.22980117797851562 -30,0.22204934060573578 -31,0.21490973234176636 -32,0.2082923948764801 -33,0.20210835337638855 -34,0.19628764688968658 -35,0.19079318642616272 -36,0.185631662607193 -37,0.18083010613918304 -38,0.1765543818473816 -39,0.17264465987682343 -40,0.16901591420173645 -41,0.16571122407913208 -42,0.16272492706775665 -43,0.159999817609787 -44,0.1574663519859314 -45,0.15511536598205566 -46,0.152947336435318 -47,0.15093815326690674 -48,0.14908970892429352 -49,0.14737173914909363 -50,0.14576759934425354 -51,0.14426615834236145 -52,0.1428573727607727 -53,0.141532301902771 -54,0.14028435945510864 -55,0.1391046643257141 -56,0.13798542320728302 -57,0.13692308962345123 -58,0.1359146237373352 -59,0.13495498895645142 -60,0.13403944671154022 -61,0.13316434621810913 -62,0.13232645392417908 -63,0.13152270019054413 -64,0.13075067102909088 -65,0.1300080418586731 -66,0.12929289042949677 -67,0.1286042332649231 -68,0.12794072926044464 -69,0.1273013800382614 -70,0.12668423354625702 -71,0.12608762085437775 -72,0.12551145255565643 -73,0.12495549768209457 -74,0.1244179904460907 -75,0.12389785051345825 -76,0.12339253723621368 -77,0.12290142476558685 -78,0.12242502719163895 -79,0.12196214497089386 -80,0.12151151150465012 -81,0.12107215821743011 -82,0.1206437349319458 -83,0.12022526562213898 -84,0.1198166087269783 -85,0.11941796541213989 -86,0.11902830004692078 -87,0.11864801496267319 -88,0.11827611178159714 -89,0.11791212111711502 -90,0.1175556555390358 -91,0.11720623075962067 -92,0.11686396598815918 -93,0.11652778834104538 -94,0.11619746685028076 -95,0.11587277054786682 -96,0.11555358022451401 -97,0.11524023115634918 -98,0.11493203788995743 -99,0.11462870985269547 -100,0.11433015763759613 -101,0.11403607577085495 -102,0.11374567449092865 -103,0.11345835030078888 -104,0.11317475140094757 -105,0.1128954067826271 -106,0.11261968314647675 -107,0.11234769225120544 -108,0.11207914352416992 -109,0.11181416362524033 -110,0.11155267059803009 -111,0.11129461228847504 -112,0.11103979498147964 -113,0.11078807711601257 -114,0.11053954809904099 -115,0.11029403656721115 -116,0.1100514680147171 -117,0.10981203615665436 -118,0.1095757931470871 -119,0.109342560172081 -120,0.10911247879266739 -121,0.10888518393039703 -122,0.10866060853004456 -123,0.10843861103057861 -124,0.10821922868490219 -125,0.10800205171108246 -126,0.10778727382421494 -127,0.10757546126842499 -128,0.10736621171236038 -129,0.10715953260660172 -130,0.1069551333785057 -131,0.10675321519374847 -132,0.1065535768866539 -133,0.10635634511709213 -134,0.1061612069606781 -135,0.10596839338541031 -136,0.10577768832445145 -137,0.10558927804231644 -138,0.10540272295475006 -139,0.10521826148033142 -140,0.10503588616847992 -141,0.1048555001616478 -142,0.10467708855867386 -143,0.1045004352927208 -144,0.10432572662830353 -145,0.10415279120206833 -146,0.10398188978433609 -147,0.10381273180246353 -148,0.10364542156457901 -149,0.1034797951579094 -150,0.10331598669290543 -151,0.10315392911434174 -152,0.10299340635538101 -153,0.10283485054969788 -154,0.10267788171768188 -155,0.1025225818157196 -156,0.1023688092827797 -157,0.10221661627292633 -158,0.10206537693738937 -159,0.10191517323255539 -160,0.10176628828048706 -161,0.10161865502595901 -162,0.10147237032651901 -163,0.1013273075222969 -164,0.10118351876735687 -165,0.10104121267795563 -166,0.1009000688791275 -167,0.10076043009757996 -168,0.1006220281124115 -169,0.10048504173755646 -170,0.10034948587417603 
-171,0.10021503269672394 -172,0.10008178651332855 -173,0.09994978457689285 -174,0.09981892257928848 -175,0.09968918561935425 -176,0.09956017136573792 -177,0.09943148493766785 -178,0.09930344671010971 -179,0.09917609393596649 -180,0.0990496352314949 -181,0.09892404079437256 -182,0.09879925847053528 -183,0.09867613762617111 -184,0.09855435788631439 -185,0.09843365848064423 -186,0.09831421822309494 -187,0.09819620847702026 -188,0.09807981550693512 -189,0.09796473383903503 -190,0.09785107523202896 -191,0.09773893654346466 -192,0.09762811660766602 -193,0.09751884639263153 -194,0.09741129726171494 -195,0.09730508923530579 -196,0.09720038622617722 -197,0.09709714353084564 -198,0.09699515998363495 -199,0.0968947559595108 -200,0.09679577499628067 diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.200/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.200/training_config.txt deleted file mode 100644 index 6d31de4..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.200/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function Name: three -Loss Function Exponent: 3 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
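Every training_log.csv deleted in this sweep shares the two-column Epoch,Loss schema, and several of the higher-learning-rate logs contain nan rows. A minimal stdlib sketch for pulling the lowest finite loss out of such a file (the path argument is a placeholder):

import csv
import math

def best_epoch(path):
    # Scan an "Epoch,Loss" CSV, skipping non-finite rows, and return the
    # (epoch, loss) pair with the smallest loss.
    best_e, best_loss = -1, math.inf
    with open(path, newline="") as f:
        for row in csv.DictReader(f):
            loss = float(row["Loss"])
            if math.isfinite(loss) and loss < best_loss:
                best_e, best_loss = int(row["Epoch"]), loss
    return best_e, best_loss

For the lr_0.100 log above, where the loss decreases monotonically, this returns (200, 0.09003690630197525).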
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.200/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.200/training_log.csv deleted file mode 100644 index 3699e6b..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,147.53147888183594 -1,7.1923089027404785 -2,1.3630146980285645 -3,1.4669691324234009 -4,1.3981761932373047 -5,1.2811387777328491 -6,1.1048579216003418 -7,0.8880125284194946 -8,0.7028480172157288 -9,0.5548998117446899 -10,0.45158639550209045 -11,0.37468522787094116 -12,0.30224737524986267 -13,0.23981453478336334 -14,0.2025785893201828 -15,0.176930770277977 -16,0.16242149472236633 -17,0.1552114188671112 -18,0.15179499983787537 -19,0.1513359546661377 -20,0.15324696898460388 -21,0.15615767240524292 -22,0.15753372013568878 -23,0.15581579506397247 -24,0.15098413825035095 -25,0.1443473994731903 -26,0.1380985528230667 -27,0.13214530050754547 -28,0.12698999047279358 -29,0.12271617352962494 -30,0.11915422976016998 -31,0.11617802083492279 -32,0.11374644935131073 -33,0.11180446296930313 -34,0.11018339544534683 -35,0.10887078195810318 -36,0.10781173408031464 -37,0.10696474462747574 -38,0.10625839233398438 -39,0.10568127781152725 -40,0.10526400804519653 -41,0.10492631047964096 -42,0.10462316870689392 -43,0.10433662682771683 -44,0.10404827445745468 -45,0.10375210642814636 -46,0.10344769060611725 -47,0.10313159227371216 -48,0.10280676186084747 -49,0.10247761756181717 -50,0.10214687138795853 -51,0.10181647539138794 -52,0.10148736834526062 -53,0.10116428136825562 -54,0.10084817558526993 -55,0.10053876042366028 -56,0.10024060308933258 -57,0.09995370358228683 -58,0.09967750310897827 -59,0.09941631555557251 -60,0.09917033463716507 -61,0.09893912076950073 -62,0.0987258329987526 -63,0.09852813184261322 -64,0.0983477458357811 -65,0.09819300472736359 -66,0.09806132316589355 -67,0.09794136136770248 -68,0.09782999753952026 -69,0.09772562980651855 
-70,0.09762778878211975 -71,0.09753423184156418 -72,0.09744446724653244 -73,0.09736266732215881 -74,0.09728448837995529 -75,0.09720921516418457 -76,0.09713583439588547 -77,0.09706861525774002 -78,0.09700202196836472 -79,0.09693614393472672 -80,0.09687086939811707 -81,0.0968068540096283 -82,0.09674299508333206 -83,0.09668013453483582 -84,0.09661808609962463 -85,0.09655730426311493 -86,0.09649812430143356 -87,0.0964420735836029 -88,0.0963921770453453 -89,0.09634794294834137 -90,0.09630567580461502 -91,0.09626380354166031 -92,0.09622256457805634 -93,0.09618175029754639 -94,0.09614066779613495 -95,0.09609957784414291 -96,0.09605849534273148 -97,0.09601734578609467 -98,0.09597556293010712 -99,0.09593351930379868 -100,0.09589219838380814 -101,0.0958508774638176 -102,0.0958094373345375 -103,0.0957682803273201 -104,0.09572713822126389 -105,0.09568632394075394 -106,0.09564538300037384 -107,0.0956042930483818 -108,0.09556379914283752 -109,0.09552361816167831 -110,0.0954836755990982 -111,0.0954439714550972 -112,0.09540393948554993 -113,0.09536430239677429 -114,0.09532621502876282 -115,0.09528928250074387 -116,0.09525258839130402 -117,0.09521514177322388 -118,0.09517718106508255 -119,0.09513964504003525 -120,0.09510258585214615 -121,0.09506647288799286 -122,0.09503168612718582 -123,0.09499716013669968 -124,0.09496325999498367 -125,0.09492933005094528 -126,0.09489522129297256 -127,0.0948612168431282 -128,0.09482740610837936 -129,0.09479363262653351 -130,0.09475961327552795 -131,0.09472515434026718 -132,0.09469017386436462 -133,0.09465500712394714 -134,0.09462007135152817 -135,0.09458478540182114 -136,0.09454911947250366 -137,0.09451298415660858 -138,0.09447626024484634 -139,0.09443949908018112 -140,0.09440214186906815 -141,0.09436382353305817 -142,0.09432525187730789 -143,0.09428603202104568 -144,0.0942460298538208 -145,0.09420385956764221 -146,0.09416112303733826 -147,0.09411749988794327 -148,0.09407281130552292 -149,0.09402760863304138 -150,0.09398207813501358 -151,0.09393589198589325 -152,0.09388948976993561 -153,0.09384212642908096 -154,0.09379448741674423 -155,0.09374605864286423 -156,0.093697689473629 -157,0.09364961087703705 -158,0.09360207617282867 -159,0.09355366975069046 -160,0.09350476413965225 -161,0.09345553070306778 -162,0.09340644627809525 -163,0.09335780888795853 -164,0.09330926090478897 -165,0.09326096624135971 -166,0.09321317076683044 -167,0.09316395968198776 -168,0.09311359375715256 -169,0.09306338429450989 -170,0.09301353991031647 -171,0.09296412765979767 -172,0.0929148867726326 -173,0.09286505728960037 -174,0.09281504154205322 -175,0.09276421368122101 -176,0.0927140861749649 -177,0.0926644429564476 -178,0.09261535853147507 -179,0.09256567060947418 -180,0.09251854568719864 -181,0.09246842563152313 -182,0.09241921454668045 -183,0.09237038344144821 -184,0.0923209860920906 -185,0.09227190911769867 -186,0.09222352504730225 -187,0.09217185527086258 -188,0.09211958944797516 -189,0.09206704050302505 -190,0.09201356023550034 -191,0.09196165949106216 -192,0.09190767258405685 -193,0.09184983372688293 -194,0.09179215133190155 -195,0.09173577278852463 -196,0.0916815772652626 -197,0.09162799268960953 -198,0.09156874567270279 -199,0.09151103347539902 -200,0.09145573526620865 diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.250/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.250/training_config.txt deleted file mode 100644 index fd9ef1b..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.250/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base 
controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function Name: three -Loss Function Exponent: 3 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.250/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.250/training_log.csv deleted file mode 100644 index 181200d..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,147.53147888183594 -1,5.780508518218994 -2,3.324618339538574 -3,0.4295419156551361 -4,0.46255117654800415 -5,0.4545014500617981 -6,0.44154101610183716 -7,0.42240846157073975 -8,0.39699503779411316 -9,0.36556339263916016 -10,0.3318781852722168 -11,0.300488144159317 -12,0.27250710129737854 -13,0.2458430379629135 -14,0.22127285599708557 -15,0.2009476274251938 -16,0.18679006397724152 -17,0.1763410121202469 -18,0.16969071328639984 -19,0.16557079553604126 -20,0.16340507566928864 -21,0.16270850598812103 -22,0.162660151720047 -23,0.1627495437860489 
-24,0.16255071759223938 -25,0.1616535633802414 -26,0.15996932983398438 -27,0.15736481547355652 -28,0.1541445553302765 -29,0.15055306255817413 -30,0.14678747951984406 -31,0.14302852749824524 -32,0.1393599808216095 -33,0.13606247305870056 -34,0.13317948579788208 -35,0.13070103526115417 -36,0.12858539819717407 -37,0.12682612240314484 -38,0.12526129186153412 -39,0.13007280230522156 -40,0.1281004697084427 -41,0.12661676108837128 -42,0.12718547880649567 -43,0.12784788012504578 -44,0.12852336466312408 -45,0.12907560169696808 -46,0.12913067638874054 -47,0.1291954517364502 -48,0.12929387390613556 -49,0.1294492781162262 -50,0.12965387105941772 -51,0.12990495562553406 -52,0.13018929958343506 -53,0.13046793639659882 -54,0.13073891401290894 -55,0.13098064064979553 -56,0.1311839520931244 -57,0.13134315609931946 -58,0.13145504891872406 -59,0.13151778280735016 -60,0.13153290748596191 -61,0.13150133192539215 -62,0.13142593204975128 -63,0.13131175935268402 -64,0.13116195797920227 -65,0.1309826374053955 -66,0.1307801753282547 -67,0.13055790960788727 -68,0.13031895458698273 -69,0.1300722062587738 -70,0.1298285722732544 -71,0.12958677113056183 -72,0.1293487846851349 -73,0.12911555171012878 -74,0.1288941353559494 -75,0.12867987155914307 -76,0.1284763514995575 -77,0.12828849256038666 -78,0.1281125694513321 -79,0.12794631719589233 -80,0.12778912484645844 -81,0.12764354050159454 -82,0.12750668823719025 -83,0.1273823082447052 -84,0.1272742748260498 -85,0.12718437612056732 -86,0.12711201608181 -87,0.12704910337924957 -88,0.12699584662914276 -89,0.12695035338401794 -90,0.12691694498062134 -91,0.12689781188964844 -92,0.12690219283103943 -93,0.12690693140029907 -94,0.12691012024879456 -95,0.12691327929496765 -96,0.12691554427146912 -97,0.12691889703273773 -98,0.12692223489284515 -99,0.1269281804561615 -100,0.12693411111831665 -101,0.12693887948989868 -102,0.12694242596626282 -103,0.12694697082042694 -104,0.12694258987903595 -105,0.12693631649017334 -106,0.126921147108078 -107,0.12690596282482147 -108,0.12689170241355896 -109,0.12686999142169952 -110,0.12687860429286957 -111,0.12688525021076202 -112,0.12688998878002167 -113,0.12689432501792908 -114,0.12689915299415588 -115,0.12690205872058868 -116,0.12690497934818268 -117,0.12689919769763947 -118,0.12689173221588135 -119,0.1268838495016098 -120,0.12687711417675018 -121,0.1268712431192398 -122,0.12686464190483093 -123,0.12685662508010864 -124,0.1268506795167923 -125,0.12684577703475952 -126,0.1268419772386551 -127,0.12683947384357452 -128,0.12683537602424622 -129,0.12682995200157166 -130,0.12682726979255676 -131,0.12681996822357178 -132,0.12681110203266144 -133,0.12680204212665558 -134,0.12679317593574524 -135,0.1267860233783722 -136,0.12677933275699615 -137,0.12677493691444397 -138,0.12677288055419922 -139,0.12677551805973053 -140,0.1267797350883484 -141,0.12678197026252747 -142,0.12678401172161102 -143,0.12678514420986176 -144,0.12678439915180206 -145,0.1267843246459961 -146,0.12678633630275726 -147,0.12678560614585876 -148,0.12678669393062592 -149,0.12679138779640198 -150,0.12678754329681396 -151,0.12677425146102905 -152,0.12676280736923218 -153,0.12675783038139343 -154,0.12675324082374573 -155,0.1267496645450592 -156,0.12674547731876373 -157,0.1267407387495041 -158,0.12673547863960266 -159,0.12673096358776093 -160,0.12672652304172516 -161,0.1267232596874237 -162,0.12672218680381775 -163,0.12672105431556702 -164,0.1267203539609909 -165,0.1267191767692566 -166,0.12671765685081482 -167,0.12671852111816406 -168,0.12671908736228943 -169,0.12671983242034912 
-170,0.12672021985054016 -171,0.1267220824956894 -172,0.1267249435186386 -173,0.12672962248325348 -174,0.12673717737197876 -175,0.1267501562833786 -176,0.12675954401493073 -177,0.12676186859607697 -178,0.12676280736923218 -179,0.12676186859607697 -180,0.12675967812538147 -181,0.12675702571868896 -182,0.12675295770168304 -183,0.1267489194869995 -184,0.12674438953399658 -185,0.12673918902873993 -186,0.12673868238925934 -187,0.12673334777355194 -188,0.12672820687294006 -189,0.12672336399555206 -190,0.12671774625778198 -191,0.12670527398586273 -192,0.12669548392295837 -193,0.12669238448143005 -194,0.12669022381305695 -195,0.1266878843307495 -196,0.1266847848892212 -197,0.1266811043024063 -198,0.12667584419250488 -199,0.1266712248325348 -200,0.1266639679670334 diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.300/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.300/training_config.txt deleted file mode 100644 index 08d02fc..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.300/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function Name: three -Loss Function Exponent: 3 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
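The lr_0.300 through lr_0.600 runs that follow all diverge to nan within the first few dozen epochs. A small guard of the kind commonly added to training loops — an illustrative sketch, not something these training scripts are known to include — stops as soon as the loss is non-finite instead of logging nan rows:

import torch

def loss_is_usable(loss):
    # Halt training on a diverged (nan/inf) loss.
    if not torch.isfinite(loss):
        print("Loss diverged; stopping early.")
        return False
    return True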
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.300/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.300/training_log.csv deleted file mode 100644 index 9b48a92..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.300/training_log.csv +++ /dev/null @@ -1,21 +0,0 @@ -Epoch,Loss -0,147.53147888183594 -1,4.539264678955078 -2,1.4096364974975586 -3,1.5117967128753662 -4,1.5491447448730469 -5,1.518785834312439 -6,1.451342225074768 -7,1.370967149734497 -8,1.290228009223938 -9,1.2073137760162354 -10,1.1160789728164673 -11,0.9943879842758179 -12,0.8324974179267883 -13,0.6939564943313599 -14,0.5710135102272034 -15,0.4994618594646454 -16,nan -17,nan -18,nan -19,nan diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.400/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.400/training_config.txt deleted file mode 100644 index bd9cb93..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.400/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function Name: three -Loss Function Exponent: 3 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the 
error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.400/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.400/training_log.csv deleted file mode 100644 index 576e4be..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.400/training_log.csv +++ /dev/null @@ -1,37 +0,0 @@ -Epoch,Loss -0,147.53147888183594 -1,2.8993139266967773 -2,1.4062789678573608 -3,1.350577473640442 -4,1.0906323194503784 -5,0.8987999558448792 -6,0.751189112663269 -7,0.6213540434837341 -8,0.445861279964447 -9,0.36491069197654724 -10,0.5330243110656738 -11,1601.400390625 -12,14291.423828125 -13,20591.9921875 -14,18912.591796875 -15,24395.0078125 -16,21650.771484375 -17,12940.263671875 -18,6977.15625 -19,2635.4169921875 -20,778.6220092773438 -21,206.97682189941406 -22,83.3564453125 -23,40.714534759521484 -24,30.70235824584961 -25,29.35940933227539 -26,30.878244400024414 -27,58.30175018310547 -28,29.131345748901367 -29,18.61041831970215 -30,4.101516246795654 -31,1.8870397806167603 -32,nan -33,nan -34,nan -35,nan diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.500/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.500/training_config.txt deleted file mode 100644 index c5bb634..0000000 --- a/training/base_loss_learning_rate_sweep/three/lr_0.500/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function Name: three -Loss Function Exponent: 3 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return 
-
-Specific Base Loss Function Source Code:
-def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-    loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                       / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.500/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.500/training_log.csv
deleted file mode 100644
index 0d3c318..0000000
--- a/training/base_loss_learning_rate_sweep/three/lr_0.500/training_log.csv
+++ /dev/null
@@ -1,13 +0,0 @@
-Epoch,Loss
-0,147.53147888183594
-1,2.6925153732299805
-2,1.0060514211654663
-3,1.0119602680206299
-4,0.8628219366073608
-5,0.6910416483879089
-6,0.5470321774482727
-7,0.4616629183292389
-8,nan
-9,nan
-10,nan
-11,nan
diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.600/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.600/training_config.txt
deleted file mode 100644
index da1b7b5..0000000
--- a/training/base_loss_learning_rate_sweep/three/lr_0.600/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.6
-Weight Decay: 0
-
-Loss Function Name: three
-Loss Function Exponent: 3
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-    loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                       / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.600/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.600/training_log.csv
deleted file mode 100644
index 311c936..0000000
--- a/training/base_loss_learning_rate_sweep/three/lr_0.600/training_log.csv
+++ /dev/null
@@ -1,11 +0,0 @@
-Epoch,Loss
-0,147.53147888183594
-1,6.612940311431885
-2,1.295833945274353
-3,1.258835792541504
-4,1.0151455402374268
-5,0.856471598148346
-6,nan
-7,nan
-8,nan
-9,nan
diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.700/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.700/training_config.txt
deleted file mode 100644
index 27185f9..0000000
--- a/training/base_loss_learning_rate_sweep/three/lr_0.700/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.7
-Weight Decay: 0
-
-Loss Function Name: three
-Loss Function Exponent: 3
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-    loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                       / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.700/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.700/training_log.csv
deleted file mode 100644
index c611bba..0000000
--- a/training/base_loss_learning_rate_sweep/three/lr_0.700/training_log.csv
+++ /dev/null
@@ -1,11 +0,0 @@
-Epoch,Loss
-0,147.53147888183594
-1,44.23457717895508
-2,1.3076660633087158
-3,1.1377251148223877
-4,1.322084665298462
-5,1.390428066253662
-6,nan
-7,nan
-8,nan
-9,nan
diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.800/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.800/training_config.txt
deleted file mode 100644
index 36d80d4..0000000
--- a/training/base_loss_learning_rate_sweep/three/lr_0.800/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.8
-Weight Decay: 0
-
-Loss Function Name: three
-Loss Function Exponent: 3
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-    loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                       / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.800/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.800/training_log.csv
deleted file mode 100644
index 6b85744..0000000
--- a/training/base_loss_learning_rate_sweep/three/lr_0.800/training_log.csv
+++ /dev/null
@@ -1,10 +0,0 @@
-Epoch,Loss
-0,147.53147888183594
-1,1054.843994140625
-2,11.821290016174316
-3,2.5290067195892334
-4,1.7929779291152954
-5,nan
-6,nan
-7,nan
-8,nan
diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.900/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_0.900/training_config.txt
deleted file mode 100644
index ce17a73..0000000
--- a/training/base_loss_learning_rate_sweep/three/lr_0.900/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.9
-Weight Decay: 0
-
-Loss Function Name: three
-Loss Function Exponent: 3
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-    loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                       / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/three/lr_0.900/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_0.900/training_log.csv
deleted file mode 100644
index 4d45253..0000000
--- a/training/base_loss_learning_rate_sweep/three/lr_0.900/training_log.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Epoch,Loss
-0,147.53147888183594
-1,73695.9921875
-2,156941.203125
-3,16.276460647583008
-4,nan
-5,nan
-6,nan
-7,nan
diff --git a/training/base_loss_learning_rate_sweep/three/lr_1.000/training_config.txt b/training/base_loss_learning_rate_sweep/three/lr_1.000/training_config.txt
deleted file mode 100644
index 17b6cca..0000000
--- a/training/base_loss_learning_rate_sweep/three/lr_1.000/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 1
-Weight Decay: 0
-
-Loss Function Name: three
-Loss Function Exponent: 3
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def three_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=3, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-    loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                       / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/three/lr_1.000/training_log.csv b/training/base_loss_learning_rate_sweep/three/lr_1.000/training_log.csv
deleted file mode 100644
index 9b943ce..0000000
--- a/training/base_loss_learning_rate_sweep/three/lr_1.000/training_log.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Epoch,Loss
-0,147.53147888183594
-1,117119.7890625
-2,375530.71875
-3,375530.71875
-4,375530.71875
-5,375530.71875
-6,375530.71875
-7,375530.71875
diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.003/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.003/training_config.txt
deleted file mode 100644
index 4c1f065..0000000
--- a/training/base_loss_learning_rate_sweep/two/lr_0.003/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.0025
-Weight Decay: 0
-
-Loss Function Name: two
-Loss Function Exponent: 2
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-    loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                       / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.003/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.003/training_log.csv
deleted file mode 100644
index d05960e..0000000
--- a/training/base_loss_learning_rate_sweep/two/lr_0.003/training_log.csv
+++ /dev/null
@@ -1,202 +0,0 @@
-Epoch,Loss
-0,21.691205978393555
-1,20.564037322998047
-2,19.724693298339844
-3,18.429758071899414
-4,18.090593338012695
-5,17.58329200744629
-6,16.964824676513672
-7,16.51142120361328
-8,16.08953094482422
-9,15.590412139892578
-10,15.041303634643555
-11,14.367298126220703
-12,13.842450141906738
-13,13.457271575927734
-14,12.962154388427734
-15,12.276519775390625
-16,11.888554573059082
-17,11.571123123168945
-18,10.74780559539795
-19,9.695568084716797
-20,9.40937328338623
-21,9.251943588256836
-22,9.177083969116211
-23,9.0237398147583
-24,8.631869316101074
-25,7.083112716674805
-26,6.594664096832275
-27,6.566620826721191
-28,6.430863857269287
-29,6.0926713943481445
-30,5.72641134262085
-31,5.434191703796387
-32,5.1378984451293945
-33,4.806995868682861
-34,4.6786417961120605
-35,4.71548318862915
-36,4.654504776000977
-37,4.773532867431641
-38,4.641533374786377
-39,4.3438897132873535
-40,4.255399227142334
-41,4.228004455566406
-42,4.175965785980225
-43,3.7084462642669678
-44,3.6091246604919434
-45,3.6275432109832764
-46,3.6970601081848145
-47,3.662468671798706
-48,3.659083127975464
-49,3.650935411453247
-50,3.6388018131256104
-51,3.604776620864868
-52,3.5169169902801514
-53,3.5330934524536133
-54,3.5369303226470947
-55,3.536367177963257
-56,3.5328967571258545
-57,3.5270004272460938
-58,3.5187699794769287
-59,3.5081326961517334
-60,3.4950575828552246
-61,3.479818105697632
-62,3.4628984928131104
-63,3.4447007179260254
-64,3.424955368041992
-65,3.401184558868408
-66,3.522491931915283
-67,3.520526170730591
-68,3.5136728286743164
-69,3.506718635559082
-70,3.500173330307007
-71,3.4939868450164795
-72,3.4879603385925293
-73,3.481914758682251
-74,3.475764274597168
-75,3.469522714614868
-76,3.463327407836914
-77,3.4574310779571533
-78,3.4522311687469482
-79,3.448176145553589
-80,3.4455926418304443
-81,3.4441044330596924
-82,3.441805362701416
-83,3.4373743534088135
-84,3.4325919151306152
-85,3.4288408756256104
-86,3.426032781600952
-87,3.4236490726470947
-88,3.421257734298706
-89,3.4186158180236816
-90,3.415663242340088
-91,3.4124531745910645
-92,3.409135341644287
-93,3.405916213989258
-94,3.4029757976531982
-95,3.4004123210906982
-96,3.3981380462646484
-97,3.395916223526001
-98,3.393507480621338
-99,3.3909006118774414
-100,3.3882532119750977
-101,3.385742425918579
-102,3.3833959102630615
-103,3.3811864852905273
-104,3.3790266513824463
-105,3.376847267150879
-106,3.374606132507324
-107,3.3722705841064453
-108,3.3698232173919678
-109,3.3672380447387695
-110,3.3644630908966064
-111,3.361370325088501
-112,3.3576743602752686
-113,3.352635383605957
-114,3.3435258865356445
-115,3.282620429992676
-116,3.309007406234741
-117,3.3631160259246826
-118,3.406191110610962
-119,3.4349300861358643
-120,3.441728353500366
-121,3.4303977489471436
-122,3.4108970165252686
-123,3.391838788986206
-124,3.3876657485961914
-125,3.524972915649414
-126,3.4887588024139404
-127,3.4518589973449707
-128,3.436656951904297
-129,3.4354844093322754
-130,3.443324565887451
-131,3.4570698738098145
-132,3.466158390045166
-133,3.452591896057129
-134,3.4355173110961914
-135,3.421612501144409
-136,3.408684730529785
-137,3.322885751724243
-138,3.3016271591186523
-139,3.2655177116394043
-140,3.272704601287842
-141,3.3515474796295166
-142,3.413788080215454
-143,3.371983289718628
-144,3.396075963973999
-145,3.3951401710510254
-146,3.3057162761688232
-147,3.315040111541748
-148,3.341364622116089
-149,3.365978240966797
-150,3.382396697998047
-151,3.381476879119873
-152,3.370551109313965
-153,3.3745412826538086
-154,3.339231491088867
-155,3.3590738773345947
-156,3.367382526397705
-157,3.3716213703155518
-158,3.3724560737609863
-159,3.369946002960205
-160,3.364367961883545
-161,3.3564655780792236
-162,3.345754623413086
-163,3.3805224895477295
-164,3.328571081161499
-165,3.3196520805358887
-166,3.3097751140594482
-167,3.299886703491211
-168,3.2899482250213623
-169,3.280015707015991
-170,3.2701001167297363
-171,3.260225772857666
-172,3.2503440380096436
-173,3.2403345108032227
-174,3.230020523071289
-175,3.2190141677856445
-176,3.206294059753418
-177,3.256826639175415
-178,3.225956439971924
-179,3.2165307998657227
-180,3.2078068256378174
-181,3.199047565460205
-182,3.1901895999908447
-183,3.1814308166503906
-184,3.173206329345703
-185,3.1679704189300537
-186,3.1757335662841797
-187,3.173560380935669
-188,3.1691431999206543
-189,3.163329839706421
-190,3.1490039825439453
-191,3.2223713397979736
-192,3.215132474899292
-193,3.2086520195007324
-194,3.2009055614471436
-195,3.185243606567383
-196,3.178255081176758
-197,3.1751999855041504
-198,3.172595500946045
-199,3.170057535171509
-200,3.1674883365631104
diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.005/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.005/training_config.txt
deleted file mode 100644
index 755e47a..0000000
--- a/training/base_loss_learning_rate_sweep/two/lr_0.005/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.005
-Weight Decay: 0
-
-Loss Function Name: two
-Loss Function Exponent: 2
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-    loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                       / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.005/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.005/training_log.csv
deleted file mode 100644
index 5ac7b88..0000000
--- a/training/base_loss_learning_rate_sweep/two/lr_0.005/training_log.csv
+++ /dev/null
@@ -1,202 +0,0 @@
-Epoch,Loss
-0,21.691205978393555
-1,20.075885772705078
-2,17.661388397216797
-3,16.90958023071289
-4,16.260440826416016
-5,15.04672908782959
-6,13.67845630645752
-7,12.521065711975098
-8,11.367539405822754
-9,11.20869255065918
-10,9.654285430908203
-11,9.529512405395508
-12,8.725348472595215
-13,6.860241413116455
-14,6.604360103607178
-15,6.538692474365234
-16,5.559205055236816
-17,4.864736080169678
-18,5.088016510009766
-19,4.720940113067627
-20,4.294004440307617
-21,4.0059332847595215
-22,3.6414794921875
-23,3.640334129333496
-24,3.48673677444458
-25,3.405416965484619
-26,3.1760709285736084
-27,3.1705210208892822
-28,3.159052610397339
-29,3.1472108364105225
-30,3.1239590644836426
-31,3.1526477336883545
-32,3.176949977874756
-33,3.1543173789978027
-34,3.1797804832458496
-35,3.1749467849731445
-36,3.100041627883911
-37,3.0040173530578613
-38,2.9998927116394043
-39,2.9121663570404053
-40,2.8600029945373535
-41,2.7870547771453857
-42,2.6726789474487305
-43,2.5784780979156494
-44,2.404287576675415
-45,2.3601958751678467
-46,2.332000970840454
-47,2.292973756790161
-48,2.2651867866516113
-49,2.247370481491089
-50,2.2358269691467285
-51,2.226667881011963
-52,2.217062473297119
-53,2.204580545425415
-54,2.1874775886535645
-55,2.1649115085601807
-56,2.1375863552093506
-57,2.111809492111206
-58,2.018016815185547
-59,1.9440269470214844
-60,1.901923418045044
-61,1.8689854145050049
-62,1.8419536352157593
-63,1.8197524547576904
-64,1.7753554582595825
-65,1.7596241235733032
-66,1.7410701513290405
-67,1.7211079597473145
-68,1.6993048191070557
-69,1.6729251146316528
-70,1.6497776508331299
-71,1.6312235593795776
-72,1.7070116996765137
-73,1.6790317296981812
-74,1.643073558807373
-75,1.6199216842651367
-76,1.6007466316223145
-77,1.5844275951385498
-78,1.5683647394180298
-79,1.5500024557113647
-80,1.5226436853408813
-81,1.5463547706604004
-82,1.5534920692443848
-83,1.5524039268493652
-84,1.5496562719345093
-85,1.546350121498108
-86,1.5428398847579956
-87,1.5392624139785767
-88,1.5356731414794922
-89,1.5321067571640015
-90,1.5285848379135132
-91,1.525118112564087
-92,1.5217175483703613
-93,1.5183910131454468
-94,1.5151379108428955
-95,1.5119614601135254
-96,1.5088638067245483
-97,1.50584876537323
-98,1.5029218196868896
-99,1.5000925064086914
-100,1.49738347530365
-101,1.494852066040039
-102,1.4926522970199585
-103,1.491101861000061
-104,1.4901537895202637
-105,1.4876450300216675
-106,1.4844614267349243
-107,1.4819825887680054
-108,1.4799093008041382
-109,1.4779813289642334
-110,1.4760940074920654
-111,1.4742143154144287
-112,1.4723347425460815
-113,1.4704551696777344
-114,1.4685779809951782
-115,1.4667073488235474
-116,1.464840054512024
-117,1.4629638195037842
-118,1.4610642194747925
-119,1.459129810333252
-120,1.4571603536605835
-121,1.4551796913146973
-122,1.4532281160354614
-123,1.4513580799102783
-124,1.4496265649795532
-125,1.4480476379394531
-126,1.4465028047561646
-127,1.4448004961013794
-128,1.4429569244384766
-129,1.4411348104476929
-130,1.439393162727356
-131,1.4377104043960571
-132,1.4360506534576416
-133,1.4343916177749634
-134,1.4327205419540405
-135,1.4310328960418701
-136,1.4293291568756104
-137,1.4276152849197388
-138,1.4259032011032104
-139,1.424216866493225
-140,1.422593116760254
-141,1.4210830926895142
-142,1.41973078250885
-143,1.418521761894226
-144,1.4171332120895386
-145,1.4134360551834106
-146,1.4161573648452759
-147,1.4180680513381958
-148,1.4197266101837158
-149,1.420973777770996
-150,1.4218358993530273
-151,1.4223332405090332
-152,1.4224731922149658
-153,1.4222657680511475
-154,1.4217361211776733
-155,1.4209239482879639
-156,1.4198805093765259
-157,1.4186588525772095
-158,1.4172967672348022
-159,1.415812373161316
-160,1.414218544960022
-161,1.412543535232544
-162,1.4108120203018188
-163,1.4090362787246704
-164,1.4072121381759644
-165,1.4053272008895874
-166,1.4033726453781128
-167,1.4013497829437256
-168,1.3992786407470703
-169,1.3972413539886475
-170,1.3955057859420776
-171,1.3946846723556519
-172,1.3903288841247559
-173,1.401596188545227
-174,1.3893415927886963
-175,1.3876420259475708
-176,1.3995884656906128
-177,1.400559425354004
-178,1.3975962400436401
-179,1.3833341598510742
-180,1.3890771865844727
-181,1.3937559127807617
-182,1.3974545001983643
-183,1.4003814458847046
-184,1.4026663303375244
-185,1.4043567180633545
-186,1.4054489135742188
-187,1.4059219360351562
-188,1.4057884216308594
-189,1.4051079750061035
-190,1.403971552848816
-191,1.4024710655212402
-192,1.4006885290145874
-193,1.3987116813659668
-194,1.3966315984725952
-195,1.3945146799087524
-196,1.392398476600647
-197,1.3902997970581055
-198,1.388218641281128
-199,1.3861337900161743
-200,1.3839915990829468
diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.010/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.010/training_config.txt
deleted file mode 100644
index 70e0ea8..0000000
--- a/training/base_loss_learning_rate_sweep/two/lr_0.010/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.01
-Weight Decay: 0
-
-Loss Function Name: two
-Loss Function Exponent: 2
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-    loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                       / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.010/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.010/training_log.csv
deleted file mode 100644
index cd3dec2..0000000
--- a/training/base_loss_learning_rate_sweep/two/lr_0.010/training_log.csv
+++ /dev/null
@@ -1,202 +0,0 @@
-Epoch,Loss
-0,21.691205978393555
-1,18.665231704711914
-2,15.29311466217041
-3,14.807978630065918
-4,12.672126770019531
-5,10.498634338378906
-6,8.943449974060059
-7,7.288576602935791
-8,6.188742160797119
-9,4.794300079345703
-10,4.62605619430542
-11,4.050909519195557
-12,3.9393346309661865
-13,3.3346853256225586
-14,3.230642795562744
-15,3.0607783794403076
-16,2.914734363555908
-17,2.756934881210327
-18,2.6350862979888916
-19,2.58740234375
-20,2.4211575984954834
-21,2.153869390487671
-22,1.9753179550170898
-23,1.7552814483642578
-24,1.6274471282958984
-25,1.538541316986084
-26,1.3722654581069946
-27,1.255153775215149
-28,1.1547369956970215
-29,1.1184303760528564
-30,1.0793176889419556
-31,1.0556201934814453
-32,1.0032397508621216
-33,0.9732299447059631
-34,1.0029256343841553
-35,0.9476487040519714
-36,0.9335277080535889
-37,0.9169016480445862
-38,0.8801356554031372
-39,0.8674600720405579
-40,0.8611049652099609
-41,0.8466100096702576
-42,0.8391693234443665
-43,0.8331607580184937
-44,0.827642560005188
-45,0.8222489953041077
-46,0.8156353235244751
-47,0.7914356589317322
-48,0.7164923548698425
-49,0.7043221592903137
-50,0.6970568299293518
-51,0.6920491456985474
-52,0.6892895102500916
-53,0.6939749717712402
-54,0.6964327692985535
-55,0.6967371106147766
-56,0.676153838634491
-57,0.6653692126274109
-58,0.6608772873878479
-59,0.6558541655540466
-60,0.6565554141998291
-61,0.6544573307037354
-62,0.651495635509491
-63,0.6478056311607361
-64,0.6429248452186584
-65,0.6357221007347107
-66,0.6250190138816833
-67,0.6170826554298401
-68,0.6122126579284668
-69,0.6041489839553833
-70,0.602590024471283
-71,0.6010245680809021
-72,0.5993435978889465
-73,0.5976153016090393
-74,0.5958854556083679
-75,0.5941746234893799
-76,0.592485249042511
-77,0.5908131003379822
-78,0.589156448841095
-79,0.5875140428543091
-80,0.5858850479125977
-81,0.5842713713645935
-82,0.5826778411865234
-83,0.5810753107070923
-84,0.5793201327323914
-85,0.5762677788734436
-86,0.573045551776886
-87,0.5714084506034851
-88,0.5699263215065002
-89,0.5685434341430664
-90,0.5672104358673096
-91,0.565925657749176
-92,0.5646339654922485
-93,0.5633368492126465
-94,0.5620458722114563
-95,0.5607838034629822
-96,0.5596059560775757
-97,0.5575584173202515
-98,0.5594088435173035
-99,0.5582560300827026
-100,0.5569114089012146
-101,0.5556536912918091
-102,0.5545925498008728
-103,0.5536397099494934
-104,0.5526143312454224
-105,0.551443338394165
-106,0.5500890612602234
-107,0.5486648678779602
-108,0.548046886920929
-109,0.5482403039932251
-110,0.5482054352760315
-111,0.5479090213775635
-112,0.5474227666854858
-113,0.5467984080314636
-114,0.5460562705993652
-115,0.5452044010162354
-116,0.5442578196525574
-117,0.5432870984077454
-118,0.5424299836158752
-119,0.5420901775360107
-120,0.5419108271598816
-121,0.5416446924209595
-122,0.5412623882293701
-123,0.5407642126083374
-124,0.540161669254303
-125,0.53947514295578
-126,0.5387412309646606
-127,0.53806072473526
-128,0.5376566052436829
-129,0.5372533202171326
-130,0.5368210673332214
-131,0.5363478064537048
-132,0.5358344316482544
-133,0.5352874398231506
-134,0.5347153544425964
-135,0.5341275930404663
-136,0.5335313677787781
-137,0.5329295992851257
-138,0.5323213934898376
-139,0.531697154045105
-140,0.5310319662094116
-141,0.5302083492279053
-142,0.5271647572517395
-143,0.5313200950622559
-144,0.5317578315734863
-145,0.5319174528121948
-146,0.531742513179779
-147,0.5308258533477783
-148,0.5276018977165222
-149,0.5300766229629517
-150,0.5305317640304565
-151,0.530086874961853
-152,0.5259300470352173
-153,0.5208826065063477
-154,0.521938681602478
-155,0.521561324596405
-156,0.5212000012397766
-157,0.5210130214691162
-158,0.5208994746208191
-159,0.5207879543304443
-160,0.5206498503684998
-161,0.5204783082008362
-162,0.52027428150177
-163,0.5200399160385132
-164,0.5197679996490479
-165,0.5194545388221741
-166,0.519110381603241
-167,0.5187591910362244
-168,0.518393337726593
-169,0.5180084109306335
-170,0.5175989866256714
-171,0.5171648859977722
-172,0.5167069435119629
-173,0.5162284970283508
-174,0.5157318711280823
-175,0.5152170062065125
-176,0.5146777629852295
-177,0.5141012668609619
-178,0.5134599208831787
-179,0.5126898288726807
-180,0.5116218328475952
-181,0.509799599647522
-182,0.5089432001113892
-183,0.5098844170570374
-184,0.5102179646492004
-185,0.5100787281990051
-186,0.5095716118812561
-187,0.5087321400642395
-188,0.5075458288192749
-189,0.5063366293907166
-190,0.5071024298667908
-191,0.5074867010116577
-192,0.5071399211883545
-193,0.5061051845550537
-194,0.505000650882721
-195,0.5052773356437683
-196,0.5053768754005432
-197,0.5051398873329163
-198,0.504616379737854
-199,0.5039287805557251
-200,0.5037427544593811
diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.020/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.020/training_config.txt
deleted file mode 100644
index 0daa266..0000000
--- a/training/base_loss_learning_rate_sweep/two/lr_0.020/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.02
-Weight Decay: 0
-
-Loss Function Name: two
-Loss Function Exponent: 2
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-    loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                       / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.020/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.020/training_log.csv
deleted file mode 100644
index e611866..0000000
--- a/training/base_loss_learning_rate_sweep/two/lr_0.020/training_log.csv
+++ /dev/null
@@ -1,202 +0,0 @@
-Epoch,Loss
-0,21.691205978393555
-1,17.12166976928711
-2,12.21277141571045
-3,7.614363670349121
-4,4.982464790344238
-5,4.23364782333374
-6,3.326111078262329
-7,3.2576987743377686
-8,2.951340913772583
-9,2.55849552154541
-10,2.296405076980591
-11,2.0251595973968506
-12,1.7001954317092896
-13,1.487138271331787
-14,1.3947497606277466
-15,1.1529626846313477
-16,1.1049602031707764
-17,1.0208032131195068
-18,0.8717604875564575
-19,0.7401155233383179
-20,0.7123733758926392
-21,0.6973929405212402
-22,0.6904362440109253
-23,0.664880633354187
-24,0.645022988319397
-25,0.5867667198181152
-26,0.562764585018158
-27,0.5480054616928101
-28,0.5352126955986023
-29,0.528016984462738
-30,0.5248285531997681
-31,0.5210938453674316
-32,0.5184431672096252
-33,0.515066921710968
-34,0.510270357131958
-35,0.5028725862503052
-36,0.49406126141548157
-37,0.48449745774269104
-38,0.4237695336341858
-39,0.40649887919425964
-40,0.39076948165893555
-41,0.38211843371391296
-42,0.3831266462802887
-43,0.383687824010849
-44,0.3834299147129059
-45,0.38220086693763733
-46,0.3803441822528839
-47,0.37805357575416565
-48,0.3753228783607483
-49,0.3720089793205261
-50,0.36869820952415466
-51,0.36728689074516296
-52,0.3654286563396454
-53,0.36240696907043457
-54,0.3616048991680145
-55,0.3588103950023651
-56,0.3593069911003113
-57,0.3580869138240814
-58,0.3559212386608124
-59,0.3540875017642975
-60,0.35266634821891785
-61,0.3490530550479889
-62,0.34813082218170166
-63,0.348886102437973
-64,0.34798464179039
-65,0.34696510434150696
-66,0.34578847885131836
-67,0.34431886672973633
-68,0.3428109288215637
-69,0.3411131203174591
-70,0.33826014399528503
-71,0.3342168927192688
-72,0.3324713110923767
-73,0.3341618776321411
-74,0.33314093947410583
-75,0.32808569073677063
-76,0.3281790316104889
-77,0.32801932096481323
-78,0.3248886168003082
-79,0.29713010787963867
-80,0.2937963008880615
-81,0.292640745639801
-82,0.2916780114173889
-83,0.2903364896774292
-84,0.2885505259037018
-85,0.2865915894508362
-86,0.2855454981327057
-87,0.2847203016281128
-88,0.28325292468070984
-89,0.2812402844429016
-90,0.27944594621658325
-91,0.2777632176876068
-92,0.27458566427230835
-93,0.2714833915233612
-94,0.2731453776359558
-95,0.2714672088623047
-96,0.26608479022979736
-97,0.2684484124183655
-98,0.27319538593292236
-99,0.27424192428588867
-100,0.27403494715690613
-101,0.2737354040145874
-102,0.2731333374977112
-103,0.2721075415611267
-104,0.27068793773651123
-105,0.26890993118286133
-106,0.2664487659931183
-107,0.25987565517425537
-108,0.2591119110584259
-109,0.2599741816520691
-110,0.25923803448677063
-111,0.25738200545310974
-112,0.2548366189002991
-113,0.25381651520729065
-114,0.2511498034000397
-115,0.24967467784881592
-116,0.2480269819498062
-117,0.24856272339820862
-118,0.25169992446899414
-119,0.2502487003803253
-120,0.24486298859119415
-121,0.2455894947052002
-122,0.2451235055923462
-123,0.24576336145401
-124,0.2461337000131607
-125,0.2454993575811386
-126,0.24403809010982513
-127,0.24093252420425415
-128,0.2403275966644287
-129,0.2393583059310913
-130,0.23661325871944427
-131,0.23925787210464478
-132,0.23886898159980774
-133,0.23620818555355072
-134,0.23643478751182556
-135,0.23740409314632416
-136,0.23351633548736572
-137,0.23100270330905914
-138,0.2332555502653122
-139,0.23375281691551208
-140,0.233177050948143
-141,0.23224826157093048
-142,0.2314518392086029
-143,0.23037265241146088
-144,0.22865763306617737
-145,0.2260662317276001
-146,0.2218974232673645
-147,0.2193489372730255
-148,0.22185546159744263
-149,0.22121721506118774
-150,0.2186966836452484
-151,0.2160397320985794
-152,0.21592310070991516
-153,0.21686118841171265
-154,0.21618203818798065
-155,0.21500036120414734
-156,0.21465551853179932
-157,0.21475139260292053
-158,0.2142840027809143
-159,0.21315839886665344
-160,0.21180354058742523
-161,0.2109728753566742
-162,0.21051988005638123
-163,0.20959725975990295
-164,0.2070598602294922
-165,0.20611387491226196
-166,0.20568431913852692
-167,0.20381656289100647
-168,0.20414483547210693
-169,0.2025056779384613
-170,0.2014809548854828
-171,0.19942861795425415
-172,0.2018030732870102
-173,0.2018403559923172
-174,0.2013106644153595
-175,0.20078293979167938
-176,0.19978688657283783
-177,0.1978873908519745
-178,0.19847829639911652
-179,0.19912606477737427
-180,0.1987447589635849
-181,0.19807371497154236
-182,0.19696272909641266
-183,0.1944030523300171
-184,0.19455544650554657
-185,0.19507256150245667
-186,0.19472822546958923
-187,0.19380217790603638
-188,0.1921953409910202
-189,0.1901564747095108
-190,0.19102327525615692
-191,0.19117511808872223
-192,0.18924574553966522
-193,0.18737661838531494
-194,0.18763373792171478
-195,0.18711164593696594
-196,0.185543030500412
-197,0.1839313805103302
-198,0.18400879204273224
-199,0.18200473487377167
-200,0.18027444183826447
diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.040/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.040/training_config.txt
deleted file mode 100644
index b83333b..0000000
--- a/training/base_loss_learning_rate_sweep/two/lr_0.040/training_config.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.04
-Weight Decay: 0
-
-Loss Function Name: two
-Loss Function Exponent: 2
-
-Current Loss Function (wrapper) Source Code:
-    def loss_fn(state_traj):
-        theta = state_traj[:, :, 0]  # [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # [batch_size, t_points]
-        return torch.mean(base_loss_fn(theta, desired_theta))
-
-Specific Base Loss Function Source Code:
-def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor:
-    return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val)
-
-Normalized Loss Function Source Code:
-def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor:
-    """
-    Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π]
-    to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1,
-    a shift 'delta' is added.
-
-    The loss is given by:
-
-    loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent)
-                                       / ((2π + delta)^exponent - delta^exponent) )
-
-    so that:
-      - When error = 0: loss = min_val
-      - When error = 2π: loss = 1
-    """
-    error = torch.abs(theta - desired_theta)
-    numerator = (error + delta) ** exponent - delta ** exponent
-    denominator = (2 * math.pi + delta) ** exponent - delta ** exponent
-    return min_val + (1 - min_val) * (numerator / denominator)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.040/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.040/training_log.csv
deleted file mode 100644
index c003861..0000000
--- a/training/base_loss_learning_rate_sweep/two/lr_0.040/training_log.csv
+++ /dev/null
@@ -1,202 +0,0 @@
-Epoch,Loss
-0,21.691205978393555
-1,14.310356140136719
-2,5.681100845336914
-3,3.4240944385528564
-4,2.766697645187378
-5,2.0649821758270264
-6,1.6613152027130127
-7,1.5917893648147583
-8,1.5197221040725708
-9,1.2292368412017822
-10,1.0990136861801147
-11,1.0189948081970215
-12,0.9253038167953491
-13,0.7540449500083923
-14,0.7402397394180298
-15,0.6641564965248108
-16,0.6490633487701416
-17,0.6269697546958923
-18,0.6427996754646301
-19,0.6242321133613586
-20,0.6304505467414856
-21,0.6389805674552917
-22,0.6415343284606934
-23,0.6434311270713806
-24,0.639963686466217
-25,0.633034884929657
-26,0.625182569026947
-27,0.6158474683761597
-28,0.6076483726501465
-29,0.6122167706489563
-30,0.6100512146949768
-31,0.6011407375335693
-32,0.6008696556091309
-33,0.6005237102508545
-34,0.5986940264701843
-35,0.5953053832054138
-36,0.5906150341033936
-37,0.585258960723877
-38,0.5812813639640808
-39,0.5805040001869202
-40,0.5766493082046509
-41,0.5699041485786438
-42,0.5641922950744629
-43,0.5597607493400574
-44,0.561497688293457
-45,0.5542179346084595
-46,0.556590735912323
-47,0.5547016859054565
-48,0.5509213209152222
-49,0.5450443029403687
-50,0.5437198877334595
-51,0.5430049896240234
-52,0.5390960574150085
-53,0.5293514132499695
-54,0.5126516819000244
-55,0.5081446170806885
-56,0.5075410008430481
-57,0.5154198408126831
-58,0.5208664536476135
-59,0.5225343704223633
-60,0.5207017064094543
-61,0.515509843826294
-62,0.507744312286377
-63,0.5019105076789856
-64,0.49853286147117615
-65,0.4962589740753174
-66,0.4982979893684387
-67,0.49839556217193604
-68,0.4960469603538513
-69,0.49294334650039673
-70,0.4894433617591858
-71,0.48927122354507446
-72,0.48829782009124756 -73,0.4861129820346832 -74,0.4851459562778473 -75,0.48276686668395996 -76,0.4799552261829376 -77,0.47309476137161255 -78,0.47273921966552734 -79,0.4715736508369446 -80,0.4700401723384857 -81,0.46819281578063965 -82,0.46625325083732605 -83,0.4631190598011017 -84,0.46144166588783264 -85,0.459852397441864 -86,0.45862141251564026 -87,0.4576609432697296 -88,0.4563014507293701 -89,0.45422065258026123 -90,0.4437532424926758 -91,0.44106724858283997 -92,0.4400392770767212 -93,0.43945959210395813 -94,0.4381023645401001 -95,0.4360572099685669 -96,0.4339757561683655 -97,0.43247634172439575 -98,0.4312255084514618 -99,0.42973265051841736 -100,0.4285830557346344 -101,0.42767956852912903 -102,0.42652618885040283 -103,0.42445704340934753 -104,0.42451855540275574 -105,0.4220004379749298 -106,0.42078301310539246 -107,0.41925692558288574 -108,0.41778871417045593 -109,0.41603586077690125 -110,0.4143750071525574 -111,0.4129955470561981 -112,0.4113566279411316 -113,0.4098411202430725 -114,0.40788689255714417 -115,0.4071962833404541 -116,0.4038810729980469 -117,0.40129393339157104 -118,0.39228516817092896 -119,0.39088401198387146 -120,0.38876810669898987 -121,0.3873245418071747 -122,0.38553157448768616 -123,0.38392817974090576 -124,0.3824731111526489 -125,0.3805159032344818 -126,0.3790084421634674 -127,0.3771871030330658 -128,0.37524986267089844 -129,0.37358757853507996 -130,0.37181776762008667 -131,0.3699096739292145 -132,0.3680841326713562 -133,0.3661484718322754 -134,0.36435672640800476 -135,0.36252471804618835 -136,0.360492467880249 -137,0.35857272148132324 -138,0.35642021894454956 -139,0.35425883531570435 -140,0.3522050976753235 -141,0.3495684862136841 -142,0.3471395671367645 -143,0.3438229560852051 -144,0.33983999490737915 -145,0.335986852645874 -146,0.33430925011634827 -147,0.3201839029788971 -148,0.35618242621421814 -149,0.332665354013443 -150,0.32116052508354187 -151,0.31318899989128113 -152,0.31384438276290894 -153,0.2937667667865753 -154,0.27451956272125244 -155,0.3066142201423645 -156,0.3074060380458832 -157,0.31013062596321106 -158,0.31025147438049316 -159,0.30931025743484497 -160,0.3119201958179474 -161,0.31010767817497253 -162,0.30739977955818176 -163,0.30503353476524353 -164,0.3021712005138397 -165,0.29819491505622864 -166,0.2929995656013489 -167,0.2872278392314911 -168,0.2754034399986267 -169,0.2647288739681244 -170,0.2599852979183197 -171,0.26087647676467896 -172,0.25538700819015503 -173,0.24412061274051666 -174,0.239671990275383 -175,0.23857912421226501 -176,0.2344299703836441 -177,0.23471768200397491 -178,0.2355511337518692 -179,0.2356719672679901 -180,0.2359921932220459 -181,0.2321462780237198 -182,0.23070205748081207 -183,0.23022405803203583 -184,0.22633877396583557 -185,0.22700616717338562 -186,0.22777463495731354 -187,0.22375860810279846 -188,0.22496135532855988 -189,0.22301453351974487 -190,0.22840243577957153 -191,0.22258919477462769 -192,0.2229127734899521 -193,0.22273606061935425 -194,0.22407062351703644 -195,0.22488830983638763 -196,0.2205236554145813 -197,0.2323855608701706 -198,0.2252998948097229 -199,0.232336163520813 -200,0.2375572770833969 diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.050/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.050/training_config.txt deleted file mode 100644 index ab24b42..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.050/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth 
-Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.050/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.050/training_log.csv deleted file mode 100644 index 85f9215..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,13.19515609741211 -2,4.713284969329834 -3,2.7768259048461914 -4,1.814987063407898 -5,1.0590540170669556 -6,0.8914084434509277 -7,0.7306835651397705 -8,0.5287058353424072 -9,0.49588876962661743 -10,0.4313264489173889 -11,0.3848385214805603 -12,0.37796124815940857 -13,0.3701876997947693 -14,0.3610330820083618 -15,0.3507320284843445 -16,0.33589786291122437 -17,0.32408636808395386 -18,0.3158630132675171 -19,0.3050376772880554 -20,0.29473739862442017 -21,0.2836450934410095 -22,0.26905474066734314 -23,0.23832744359970093 -24,0.22265535593032837 -25,0.21210142970085144 -26,0.20372265577316284 -27,0.19599413871765137 
-28,0.18877393007278442 -29,0.1819170117378235 -30,0.1752941608428955 -31,0.16873151063919067 -32,0.1619967520236969 -33,0.1546313762664795 -34,0.1457904428243637 -35,0.14107051491737366 -36,0.14225472509860992 -37,0.14051851630210876 -38,0.1381148099899292 -39,0.13832655549049377 -40,0.13744589686393738 -41,0.13536980748176575 -42,0.13216112554073334 -43,0.1279500126838684 -44,0.12300903350114822 -45,0.11779603362083435 -46,0.11633569002151489 -47,0.11690420657396317 -48,0.1167764812707901 -49,0.1161152794957161 -50,0.11504752188920975 -51,0.11423332989215851 -52,0.11397671699523926 -53,0.11296313256025314 -54,0.11093896627426147 -55,0.10823925584554672 -56,0.10603450983762741 -57,0.10399739444255829 -58,0.10312243551015854 -59,0.1037670224905014 -60,0.10367591679096222 -61,0.10253869742155075 -62,0.10171964019536972 -63,0.1014353558421135 -64,0.10083411633968353 -65,0.10035395622253418 -66,0.10000462830066681 -67,0.09932716190814972 -68,0.09817427396774292 -69,0.09674292802810669 -70,0.09603694081306458 -71,0.09636027365922928 -72,0.09625784307718277 -73,0.09567655622959137 -74,0.09504040330648422 -75,0.09462793916463852 -76,0.09422142803668976 -77,0.09368667751550674 -78,0.09300214052200317 -79,0.0925210639834404 -80,0.09256341308355331 -81,0.0924762710928917 -82,0.0921408161520958 -83,0.09167338162660599 -84,0.09115228056907654 -85,0.090723916888237 -86,0.09040657430887222 -87,0.09012024849653244 -88,0.08993197977542877 -89,0.08974374085664749 -90,0.08950069546699524 -91,0.08917958289384842 -92,0.08882919698953629 -93,0.08849718421697617 -94,0.08823038637638092 -95,0.08800381422042847 -96,0.08779969066381454 -97,0.08756275475025177 -98,0.08742008358240128 -99,0.08726584911346436 -100,0.08706841617822647 -101,0.08697571605443954 -102,0.08686704188585281 -103,0.0867471843957901 -104,0.08665399998426437 -105,0.0865611657500267 -106,0.08645081520080566 -107,0.086342953145504 -108,0.08625062555074692 -109,0.08616377413272858 -110,0.08607695251703262 -111,0.08598378300666809 -112,0.0858839675784111 -113,0.08578420430421829 -114,0.0856889933347702 -115,0.085596963763237 -116,0.08550658077001572 -117,0.08541401475667953 -118,0.08531733602285385 -119,0.085220567882061 -120,0.08512702584266663 -121,0.08503606170415878 -122,0.08494646102190018 -123,0.08485540002584457 -124,0.08476275205612183 -125,0.08467107266187668 -126,0.0845814049243927 -127,0.08449431508779526 -128,0.08440756797790527 -129,0.08431882411241531 -130,0.08423057198524475 -131,0.08414608985185623 -132,0.08406764268875122 -133,0.08399923145771027 -134,0.08392121642827988 -135,0.083851657807827 -136,0.08378428965806961 -137,0.0837232768535614 -138,0.08366261422634125 -139,0.08360248059034348 -140,0.0835430920124054 -141,0.08348395675420761 -142,0.08342470228672028 -143,0.0833648070693016 -144,0.08330482989549637 -145,0.08324458450078964 -146,0.08318445086479187 -147,0.08312395215034485 -148,0.08306387811899185 -149,0.08300416171550751 -150,0.08294523507356644 -151,0.0828869491815567 -152,0.08283070474863052 -153,0.08277744054794312 -154,0.08273044973611832 -155,0.08266918361186981 -156,0.08262109011411667 -157,0.08256766945123672 -158,0.08251870423555374 -159,0.08246874809265137 -160,0.08242134749889374 -161,0.08237594366073608 -162,0.08233026415109634 -163,0.08228500187397003 -164,0.08224166184663773 -165,0.0821976289153099 -166,0.08215533941984177 -167,0.08211194723844528 -168,0.08207079768180847 -169,0.08202912658452988 -170,0.08198853582143784 -171,0.08194766938686371 -172,0.08190745115280151 -173,0.08186890929937363 
-174,0.08182983100414276 -175,0.08179204910993576 -176,0.08175699412822723 -177,0.08175452053546906 -178,0.08168826252222061 -179,0.08170409500598907 -180,0.08162420243024826 -181,0.08168081194162369 -182,0.08158943802118301 -183,0.08156263083219528 -184,0.08154187351465225 -185,0.08145435899496078 -186,0.08145509660243988 -187,0.08136317878961563 -188,0.08136434853076935 -189,0.08129870891571045 -190,0.08127731829881668 -191,0.08124851435422897 -192,0.08119411766529083 -193,0.08116733282804489 -194,0.08112870901823044 -195,0.08110311627388 -196,0.08110497146844864 -197,0.08104947209358215 -198,0.08104976266622543 -199,0.0809747502207756 -200,0.08098162710666656 diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.080/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.080/training_config.txt deleted file mode 100644 index 054124d..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.080/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.080/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.080/training_log.csv deleted file mode 100644 index 5104842..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,10.050004959106445 -2,3.9865951538085938 -3,14.750490188598633 -4,9.92122745513916 -5,4.110358238220215 -6,2.1884841918945312 -7,1.2535295486450195 -8,1.0108343362808228 -9,0.8432074785232544 -10,0.7911471128463745 -11,0.7635271549224854 -12,0.7468434572219849 -13,0.7304384708404541 -14,0.7140855193138123 -15,0.7051256895065308 -16,0.6913886666297913 -17,0.6736634373664856 -18,0.6521274447441101 -19,0.6266146302223206 -20,0.5949405431747437 -21,0.5524168014526367 -22,0.4974791705608368 -23,0.4307902753353119 -24,0.39369985461235046 -25,0.36880815029144287 -26,0.3503513038158417 -27,0.33333510160446167 -28,0.3176591694355011 -29,0.30230483412742615 -30,0.28411707282066345 -31,0.2614802122116089 -32,0.24894054234027863 -33,0.23896805942058563 -34,0.2295353263616562 -35,0.22039863467216492 -36,0.21134403347969055 -37,0.20222294330596924 -38,0.19341401755809784 -39,0.18541204929351807 -40,0.17926456034183502 -41,0.17715030908584595 -42,0.177435964345932 -43,0.17776967585086823 -44,0.17776435613632202 -45,0.17762286961078644 -46,0.1775110512971878 -47,0.1771710366010666 -48,0.17652206122875214 -49,0.17555075883865356 -50,0.17425526678562164 -51,0.1726488322019577 -52,0.17075635492801666 -53,0.16860827803611755 -54,0.1662379652261734 -55,0.16368046402931213 -56,0.1609664410352707 -57,0.15815238654613495 -58,0.15551155805587769 -59,0.15306806564331055 -60,0.15075108408927917 -61,0.14858373999595642 -62,0.14654572308063507 -63,0.14459013938903809 -64,0.1426924765110016 -65,0.14092665910720825 -66,0.1393478959798813 -67,0.13790929317474365 -68,0.13663123548030853 -69,0.13534702360630035 -70,0.13399526476860046 
-71,0.13254493474960327 -72,0.13102948665618896 -73,0.12949198484420776 -74,0.12792176008224487 -75,0.12635628879070282 -76,0.12479381263256073 -77,0.1232738271355629 -78,0.12177500128746033 -79,0.12032704800367355 -80,0.11896765232086182 -81,0.11775286495685577 -82,0.11667823046445847 -83,0.11575751006603241 -84,0.11497203260660172 -85,0.11426928639411926 -86,0.1135140135884285 -87,0.11269080638885498 -88,0.11181101202964783 -89,0.11090521514415741 -90,0.11001034080982208 -91,0.1091843694448471 -92,0.10847384482622147 -93,0.10783315449953079 -94,0.10722135752439499 -95,0.10660207271575928 -96,0.10595127940177917 -97,0.10526221990585327 -98,0.10454530268907547 -99,0.10383027791976929 -100,0.1031658798456192 -101,0.10258091986179352 -102,0.10200227797031403 -103,0.10141457617282867 -104,0.1008119210600853 -105,0.10019705444574356 -106,0.09958287328481674 -107,0.09898657351732254 -108,0.09846162796020508 -109,0.09798359870910645 -110,0.0974748507142067 -111,0.09693452715873718 -112,0.09644564241170883 -113,0.09600941091775894 -114,0.09558697789907455 -115,0.09515818953514099 -116,0.09471239894628525 -117,0.09430617094039917 -118,0.09403613209724426 -119,0.09370869398117065 -120,0.09341242164373398 -121,0.09312574565410614 -122,0.09278889745473862 -123,0.0924963504076004 -124,0.092190220952034 -125,0.09188707917928696 -126,0.09158039093017578 -127,0.09127935022115707 -128,0.090989850461483 -129,0.0907171443104744 -130,0.09045181423425674 -131,0.09021534025669098 -132,0.08999188989400864 -133,0.08978008478879929 -134,0.08957155793905258 -135,0.08937012404203415 -136,0.08917392790317535 -137,0.08898181468248367 -138,0.08879392594099045 -139,0.0886121392250061 -140,0.08843370527029037 -141,0.08825885504484177 -142,0.08808968961238861 -143,0.08792535215616226 -144,0.0877659022808075 -145,0.08761254698038101 -146,0.08746664226055145 -147,0.08732631802558899 -148,0.0871882438659668 -149,0.08705343306064606 -150,0.0869223102927208 -151,0.08679462969303131 -152,0.08667109161615372 -153,0.08654429763555527 -154,0.08641837537288666 -155,0.08629259467124939 -156,0.08616811037063599 -157,0.08604659885168076 -158,0.08592817932367325 -159,0.08580879867076874 -160,0.08568999916315079 -161,0.08557123690843582 -162,0.08545228838920593 -163,0.08533408492803574 -164,0.085214763879776 -165,0.08509450405836105 -166,0.08497463166713715 -167,0.08485487848520279 -168,0.0847366526722908 -169,0.08462103456258774 -170,0.0845077708363533 -171,0.08439785242080688 -172,0.08429182320833206 -173,0.08418922126293182 -174,0.08409056812524796 -175,0.08399581164121628 -176,0.08390478044748306 -177,0.08381807804107666 -178,0.08373513072729111 -179,0.08365549892187119 -180,0.08357898890972137 -181,0.0835052877664566 -182,0.08343402296304703 -183,0.08336503803730011 -184,0.08329810202121735 -185,0.08323323726654053 -186,0.08317023515701294 -187,0.0831090584397316 -188,0.08304953575134277 -189,0.08299129456281662 -190,0.08293448388576508 -191,0.08287905156612396 -192,0.08282822370529175 -193,0.08278077095746994 -194,0.08273422718048096 -195,0.08268824219703674 -196,0.08264283090829849 -197,0.08259787410497665 -198,0.0825534388422966 -199,0.08250915259122849 -200,0.08246493339538574 diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.100/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.100/training_config.txt deleted file mode 100644 index abc9e57..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.100/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.100/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.100/training_log.csv deleted file mode 100644 index 7a0f51f..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,9.866242408752441 -2,6.551309108734131 -3,12.094765663146973 -4,2.755216598510742 -5,1.012027621269226 -6,0.5684673190116882 -7,0.5784069895744324 -8,0.6189914345741272 -9,0.6023730635643005 -10,0.6634504199028015 -11,0.647130012512207 -12,0.6110135316848755 -13,0.5493007302284241 -14,0.45733681321144104 -15,0.40010741353034973 -16,0.37839627265930176 -17,0.36141282320022583 -18,0.3283330202102661 -19,0.29965752363204956 -20,0.2805231809616089 -21,0.26310715079307556 -22,0.24751071631908417 -23,0.23351997137069702 -24,0.22074635326862335 
-25,0.2092989832162857 -26,0.19895438849925995 -27,0.18934519588947296 -28,0.18010859191417694 -29,0.17125672101974487 -30,0.16323944926261902 -31,0.15782079100608826 -32,0.15627005696296692 -33,0.15514475107192993 -34,0.1540473997592926 -35,0.15298175811767578 -36,0.15169069170951843 -37,0.15005750954151154 -38,0.14803051948547363 -39,0.14573577046394348 -40,0.14329814910888672 -41,0.1408708095550537 -42,0.13851384818553925 -43,0.13629908859729767 -44,0.13431327044963837 -45,0.1325596272945404 -46,0.13103380799293518 -47,0.12974680960178375 -48,0.128649041056633 -49,0.12764497101306915 -50,0.12666621804237366 -51,0.12569214403629303 -52,0.12470632046461105 -53,0.12369441241025925 -54,0.12266404926776886 -55,0.12162184715270996 -56,0.12057064473628998 -57,0.11950982362031937 -58,0.11843586713075638 -59,0.11735217273235321 -60,0.11627671867609024 -61,0.11522600799798965 -62,0.11423292011022568 -63,0.11333660036325455 -64,0.11252232640981674 -65,0.11176829785108566 -66,0.11104602366685867 -67,0.11033309996128082 -68,0.10961343348026276 -69,0.10887890309095383 -70,0.10812897235155106 -71,0.1073685884475708 -72,0.10660456120967865 -73,0.10584454983472824 -74,0.10509315878152847 -75,0.10435163229703903 -76,0.10362011939287186 -77,0.10290432721376419 -78,0.1022108942270279 -79,0.10154552012681961 -80,0.10091695934534073 -81,0.10032261162996292 -82,0.09972332417964935 -83,0.09917040169239044 -84,0.09867815673351288 -85,0.0980769693851471 -86,0.09760535508394241 -87,0.09716914594173431 -88,0.09665411710739136 -89,0.0962718278169632 -90,0.09582984447479248 -91,0.09543152898550034 -92,0.0950280949473381 -93,0.09459136426448822 -94,0.09417364746332169 -95,0.09374881535768509 -96,0.09331277757883072 -97,0.09289739280939102 -98,0.09250352531671524 -99,0.09210096299648285 -100,0.091685950756073 -101,0.09127423912286758 -102,0.0908626914024353 -103,0.09044487029314041 -104,0.09005295485258102 -105,0.08968589454889297 -106,0.08929850906133652 -107,0.0889439806342125 -108,0.08858013898134232 -109,0.08821860700845718 -110,0.08784308284521103 -111,0.08747894316911697 -112,0.08712726086378098 -113,0.08675676584243774 -114,0.08643866330385208 -115,0.08602070063352585 -116,0.0856718048453331 -117,0.08529121428728104 -118,0.08496466279029846 -119,0.08464116603136063 -120,0.08430425822734833 -121,0.08398211747407913 -122,0.08362484723329544 -123,0.08331184089183807 -124,0.08301008492708206 -125,0.08271173387765884 -126,0.08241498470306396 -127,0.08214841783046722 -128,0.08192415535449982 -129,0.08173775672912598 -130,0.08154819160699844 -131,0.08136582374572754 -132,0.0812024250626564 -133,0.08113662898540497 -134,0.08093281835317612 -135,0.08096813410520554 -136,0.08075915277004242 -137,0.08062538504600525 -138,0.0805727168917656 -139,0.08038872480392456 -140,0.08016136288642883 -141,0.08005303889513016 -142,0.07998205721378326 -143,0.07990286499261856 -144,0.0796881839632988 -145,0.07961846143007278 -146,0.07943589985370636 -147,0.07936151325702667 -148,0.07927913963794708 -149,0.07907472550868988 -150,0.0790715217590332 -151,0.07896584272384644 -152,0.07872854918241501 -153,0.07870516926050186 -154,0.07856268435716629 -155,0.07850341498851776 -156,0.07838878035545349 -157,0.07834567874670029 -158,0.07829195261001587 -159,0.07809586822986603 -160,0.07807298749685287 -161,0.0780276507139206 -162,0.07792622596025467 -163,0.07791305333375931 -164,0.07781549543142319 -165,0.07781342417001724 -166,0.07778618484735489 -167,0.07776560634374619 -168,0.07769616693258286 -169,0.07764147967100143 -170,0.07765515148639679 
-171,0.07761867344379425 -172,0.07758860290050507 -173,0.07751385122537613 -174,0.0775194838643074 -175,0.07747389376163483 -176,0.07750319689512253 -177,0.07739298045635223 -178,0.07749655097723007 -179,0.07753390073776245 -180,0.07739248871803284 -181,0.07735291868448257 -182,0.07736434787511826 -183,0.07721799612045288 -184,0.07722271233797073 -185,0.07717741280794144 -186,0.07718940824270248 -187,0.07710695266723633 -188,0.07712157070636749 -189,0.07708965986967087 -190,0.07708762586116791 -191,0.07698899507522583 -192,0.07698260247707367 -193,0.07690473645925522 -194,0.07690025866031647 -195,0.07691823691129684 -196,0.07683362066745758 -197,0.07688046246767044 -198,0.07678339630365372 -199,0.07685886323451996 -200,0.0768178403377533 diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.125/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.125/training_config.txt deleted file mode 100644 index e442507..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.125/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.125/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.125/training_log.csv deleted file mode 100644 index f373192..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,10.449976921081543 -2,2.188481092453003 -3,1.01656973361969 -4,0.7231359481811523 -5,0.8153721690177917 -6,0.7970715761184692 -7,0.775906503200531 -8,0.7374328970909119 -9,0.6277788281440735 -10,0.5835492014884949 -11,0.5449193120002747 -12,0.5087894201278687 -13,0.4682491421699524 -14,0.3989998996257782 -15,0.3549303710460663 -16,0.32654595375061035 -17,0.30215272307395935 -18,0.2788771092891693 -19,0.25601303577423096 -20,0.23329195380210876 -21,0.21101735532283783 -22,0.1932363212108612 -23,0.17903298139572144 -24,0.166754812002182 -25,0.15581873059272766 -26,0.14595329761505127 -27,0.1369420886039734 -28,0.1287131905555725 -29,0.12136217206716537 -30,0.1150636225938797 -31,0.11070865392684937 -32,0.10864625871181488 -33,0.10925622284412384 -34,0.11104800552129745 -35,0.1129193902015686 -36,0.11457253247499466 -37,0.11587279289960861 -38,0.11675533652305603 -39,0.11720172315835953 -40,0.11721903830766678 -41,0.11683165282011032 -42,0.11607860028743744 -43,0.11500804871320724 -44,0.11367643624544144 -45,0.11214244365692139 -46,0.11046826094388962 -47,0.10870902985334396 -48,0.10692138969898224 -49,0.10516407340765 -50,0.1035253033041954 -51,0.10223789513111115 -52,0.10146591812372208 -53,0.10092280805110931 -54,0.10051953047513962 -55,0.10019447654485703 -56,0.09989504516124725 -57,0.09958432614803314 -58,0.09935128688812256 -59,0.09910930693149567 -60,0.09878268092870712 -61,0.09836625307798386 -62,0.09786705672740936 -63,0.09730282425880432 -64,0.0967111736536026 -65,0.09612653404474258 -66,0.09556813538074493 -67,0.09505433589220047 -68,0.09460587799549103 -69,0.09426931291818619 
-70,0.09412765502929688 -71,0.09400119632482529 -72,0.09380847960710526 -73,0.09354796260595322 -74,0.09322237968444824 -75,0.09284265339374542 -76,0.09242159873247147 -77,0.09197573363780975 -78,0.0915411189198494 -79,0.09123296290636063 -80,0.09102661162614822 -81,0.09083868563175201 -82,0.09064892679452896 -83,0.09045301377773285 -84,0.09022737294435501 -85,0.08996156603097916 -86,0.08967017382383347 -87,0.08936651796102524 -88,0.08904989063739777 -89,0.08872821927070618 -90,0.08841229975223541 -91,0.08811736851930618 -92,0.08790051192045212 -93,0.08771894872188568 -94,0.08750651776790619 -95,0.08725138008594513 -96,0.08695881068706512 -97,0.08666227012872696 -98,0.08642448484897614 -99,0.08621899783611298 -100,0.08600621670484543 -101,0.08577422052621841 -102,0.08552246540784836 -103,0.08526809513568878 -104,0.08501774817705154 -105,0.08475276082754135 -106,0.08450331538915634 -107,0.08427372574806213 -108,0.08403977006673813 -109,0.08379349857568741 -110,0.0835639014840126 -111,0.08335087448358536 -112,0.0831231102347374 -113,0.0828854963183403 -114,0.08266863226890564 -115,0.08246739953756332 -116,0.08226912468671799 -117,0.08208362013101578 -118,0.0819086953997612 -119,0.08173760771751404 -120,0.0815557986497879 -121,0.08138862997293472 -122,0.08125539124011993 -123,0.08110102266073227 -124,0.08094001561403275 -125,0.08080988377332687 -126,0.08067283779382706 -127,0.080544114112854 -128,0.08040566742420197 -129,0.08029353618621826 -130,0.0801626443862915 -131,0.08003361523151398 -132,0.07990339398384094 -133,0.0797770544886589 -134,0.07966198772192001 -135,0.07954435795545578 -136,0.07942357659339905 -137,0.0793139785528183 -138,0.07920447736978531 -139,0.07909829914569855 -140,0.07900017499923706 -141,0.07890139520168304 -142,0.07880792766809464 -143,0.07872088253498077 -144,0.07863802462816238 -145,0.07854899764060974 -146,0.07846622169017792 -147,0.0783926248550415 -148,0.07831791788339615 -149,0.0782490149140358 -150,0.07818614691495895 -151,0.07812480628490448 -152,0.07806875556707382 -153,0.07801186293363571 -154,0.07795248925685883 -155,0.0778985396027565 -156,0.07785353064537048 -157,0.07780912518501282 -158,0.07776906341314316 -159,0.07774055004119873 -160,0.07771391421556473 -161,0.07768813520669937 -162,0.07766732573509216 -163,0.07764589786529541 -164,0.0776304304599762 -165,0.077620729804039 -166,0.07760438323020935 -167,0.07760811597108841 -168,0.07758241146802902 -169,0.0775744840502739 -170,0.0775594562292099 -171,0.07756081223487854 -172,0.07753480225801468 -173,0.07752543687820435 -174,0.07750523835420609 -175,0.07750259339809418 -176,0.07747220993041992 -177,0.0774705708026886 -178,0.07746081799268723 -179,0.0774397999048233 -180,0.07741320878267288 -181,0.07739659398794174 -182,0.07739219069480896 -183,0.07737443596124649 -184,0.07736518234014511 -185,0.07735192775726318 -186,0.07733079046010971 -187,0.07731952518224716 -188,0.07729831337928772 -189,0.0772944986820221 -190,0.07727612555027008 -191,0.0772637352347374 -192,0.07726196199655533 -193,0.07724828273057938 -194,0.07724374532699585 -195,0.07722921669483185 -196,0.07721245288848877 -197,0.07720202952623367 -198,0.07719330489635468 -199,0.07718267291784286 -200,0.07716970145702362 diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.160/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.160/training_config.txt deleted file mode 100644 index 59c02fc..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.160/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller 
path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.160/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.160/training_log.csv deleted file mode 100644 index 2fb9384..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,11.771977424621582 -2,0.9449580907821655 -3,0.6472031474113464 -4,0.4114108979701996 -5,0.25818097591400146 -6,0.2076980620622635 -7,0.18263892829418182 -8,0.1620780974626541 -9,0.1455094814300537 -10,0.1367628127336502 -11,0.1313227266073227 -12,0.12737129628658295 -13,0.12410755455493927 -14,0.12125017493963242 -15,0.11874782294034958 -16,0.11645694077014923 -17,0.11427890509366989 -18,0.11218243092298508 -19,0.11013960093259811 -20,0.10811711847782135 -21,0.10611512511968613 -22,0.1041736975312233 -23,0.10232244431972504 
-24,0.10056092590093613 -25,0.09889098256826401 -26,0.09736037254333496 -27,0.09600582718849182 -28,0.09489627182483673 -29,0.09448721259832382 -30,0.09434470534324646 -31,0.09416183084249496 -32,0.09391596168279648 -33,0.09359394758939743 -34,0.09320365637540817 -35,0.09276310354471207 -36,0.09227577596902847 -37,0.09175524115562439 -38,0.09122749418020248 -39,0.09084266424179077 -40,0.0905475914478302 -41,0.09036138653755188 -42,0.09020166844129562 -43,0.09002914279699326 -44,0.08983802050352097 -45,0.08967400342226028 -46,0.08948761969804764 -47,0.08925294131040573 -48,0.08897380530834198 -49,0.08865126222372055 -50,0.08829950541257858 -51,0.08796397596597672 -52,0.08763866871595383 -53,0.08738883584737778 -54,0.0872240960597992 -55,0.08707883954048157 -56,0.08699637651443481 -57,0.08690934628248215 -58,0.08678985387086868 -59,0.0866389125585556 -60,0.08645699918270111 -61,0.08624540269374847 -62,0.08601023256778717 -63,0.08575429767370224 -64,0.08547990769147873 -65,0.08520112931728363 -66,0.08501458913087845 -67,0.08486029505729675 -68,0.08471424132585526 -69,0.08457019925117493 -70,0.08442281931638718 -71,0.08427044749259949 -72,0.0841159000992775 -73,0.08395896106958389 -74,0.0838024690747261 -75,0.08364972472190857 -76,0.08349346369504929 -77,0.08333498239517212 -78,0.0831790417432785 -79,0.08303629606962204 -80,0.08290328830480576 -81,0.08278342336416245 -82,0.08269499242305756 -83,0.08259612321853638 -84,0.08248187601566315 -85,0.08235718309879303 -86,0.08226147294044495 -87,0.08217278867959976 -88,0.08208872377872467 -89,0.08200857043266296 -90,0.08193477243185043 -91,0.08186593651771545 -92,0.08179710060358047 -93,0.08173908293247223 -94,0.08169154077768326 -95,0.08164035528898239 -96,0.08158513903617859 -97,0.08152782917022705 -98,0.08147680759429932 -99,0.08142988383769989 -100,0.0813819169998169 -101,0.08133042603731155 -102,0.08127597719430923 -103,0.08121935278177261 -104,0.08116165548563004 -105,0.08110339939594269 -106,0.08104997873306274 -107,0.08099457621574402 -108,0.08093411475419998 -109,0.08087349683046341 -110,0.08081663399934769 -111,0.08075974136590958 -112,0.08070282638072968 -113,0.08064673095941544 -114,0.0805911123752594 -115,0.08053591847419739 -116,0.08048196882009506 -117,0.0804283544421196 -118,0.08037656545639038 -119,0.08032553642988205 -120,0.08027423918247223 -121,0.08022350817918777 -122,0.08017408102750778 -123,0.080123670399189 -124,0.08007435500621796 -125,0.08002631366252899 -126,0.07997727394104004 -127,0.07992956787347794 -128,0.07988105714321136 -129,0.07983367145061493 -130,0.07978486269712448 -131,0.07973785698413849 -132,0.07968936860561371 -133,0.07964104413986206 -134,0.07959478348493576 -135,0.07954686880111694 -136,0.07949957251548767 -137,0.07945423573255539 -138,0.07940515875816345 -139,0.07936165481805801 -140,0.07931243628263474 -141,0.07926201075315475 -142,0.07921212166547775 -143,0.07916226983070374 -144,0.07911182194948196 -145,0.0790630578994751 -146,0.07901104539632797 -147,0.07896202802658081 -148,0.07891059666872025 -149,0.07885807752609253 -150,0.07880744338035583 -151,0.07875745743513107 -152,0.078707255423069 -153,0.07865744084119797 -154,0.07860564440488815 -155,0.07855749875307083 -156,0.07850691676139832 -157,0.0784531682729721 -158,0.07840351015329361 -159,0.07835636287927628 -160,0.078306183218956 -161,0.07826515287160873 -162,0.07821108400821686 -163,0.07816506922245026 -164,0.07811275124549866 -165,0.07805557548999786 -166,0.07800109684467316 -167,0.07794199883937836 -168,0.07788743078708649 
-169,0.0778307393193245 -170,0.07777879387140274 -171,0.0777263268828392 -172,0.07767587155103683 -173,0.07762610167264938 -174,0.0775768905878067 -175,0.07753342390060425 -176,0.07748981565237045 -177,0.07744640856981277 -178,0.07741151750087738 -179,0.0773765966296196 -180,0.07734114676713943 -181,0.07730741798877716 -182,0.07728324830532074 -183,0.07725919783115387 -184,0.07723304629325867 -185,0.07720968127250671 -186,0.07718578726053238 -187,0.07716881483793259 -188,0.07714616507291794 -189,0.07712893187999725 -190,0.0771111473441124 -191,0.07709550112485886 -192,0.0770811215043068 -193,0.07706660032272339 -194,0.0770520493388176 -195,0.07703987509012222 -196,0.07702609896659851 -197,0.07702037692070007 -198,0.07701080292463303 -199,0.07699379324913025 -200,0.07699070125818253 diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.200/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.200/training_config.txt deleted file mode 100644 index 5ef5d9d..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.200/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.200/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.200/training_log.csv deleted file mode 100644 index 153bf7f..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,14.626718521118164 -2,1.5407180786132812 -3,1.867661952972412 -4,1.3577916622161865 -5,0.5540692210197449 -6,0.5083990693092346 -7,0.4601894021034241 -8,0.4085840582847595 -9,0.3567515015602112 -10,0.3176203668117523 -11,0.28865769505500793 -12,0.264487087726593 -13,0.24258753657341003 -14,0.2221667766571045 -15,0.2026125192642212 -16,0.18424679338932037 -17,0.1680942177772522 -18,0.15423016250133514 -19,0.1421658843755722 -20,0.13154029846191406 -21,0.12219236046075821 -22,0.11408962309360504 -23,0.10765315592288971 -24,0.10305962711572647 -25,0.10031109303236008 -26,0.10116572678089142 -27,0.10298101603984833 -28,0.10483507066965103 -29,0.10649231821298599 -30,0.10778355598449707 -31,0.10863274335861206 -32,0.10901626944541931 -33,0.10893478244543076 -34,0.10842786729335785 -35,0.10756296664476395 -36,0.10644642263650894 -37,0.10511276870965958 -38,0.1036549061536789 -39,0.10212471336126328 -40,0.10057145357131958 -41,0.09905949234962463 -42,0.0981813445687294 -43,0.09760746359825134 -44,0.09716139733791351 -45,0.09678704291582108 -46,0.09645549952983856 -47,0.09610418230295181 -48,0.09569960832595825 -49,0.09522753953933716 -50,0.0946861132979393 -51,0.09408431500196457 -52,0.09344450384378433 -53,0.09278896450996399 -54,0.09214156866073608 -55,0.09164424985647202 -56,0.09128047525882721 -57,0.09099990874528885 -58,0.09090930223464966 -59,0.09100446105003357 -60,0.0910431444644928 -61,0.09093780815601349 -62,0.09068816900253296 -63,0.0903080552816391 -64,0.08983363211154938 -65,0.08930724859237671 -66,0.08876814693212509 -67,0.08842705190181732 -68,0.08822368085384369 -69,0.0881521999835968 
-70,0.0880812257528305 -71,0.08797071129083633 -72,0.08779458701610565 -73,0.08754751086235046 -74,0.08724120259284973 -75,0.08689787238836288 -76,0.08656277507543564 -77,0.08631367981433868 -78,0.08607959747314453 -79,0.08586695045232773 -80,0.08572210371494293 -81,0.08549562841653824 -82,0.0852067768573761 -83,0.08496436476707458 -84,0.08479376882314682 -85,0.0846305787563324 -86,0.08445499837398529 -87,0.08426497131586075 -88,0.08407264202833176 -89,0.08389417827129364 -90,0.08372984826564789 -91,0.08357902616262436 -92,0.08342979848384857 -93,0.08328567445278168 -94,0.08315077424049377 -95,0.08302534371614456 -96,0.08290677517652512 -97,0.08279284834861755 -98,0.08267054706811905 -99,0.08254608511924744 -100,0.08245005458593369 -101,0.08235279470682144 -102,0.08224458247423172 -103,0.08212373405694962 -104,0.08199423551559448 -105,0.08186118304729462 -106,0.08174286782741547 -107,0.08163353055715561 -108,0.08152218908071518 -109,0.0814039558172226 -110,0.08128318190574646 -111,0.08116618543863297 -112,0.08105706423521042 -113,0.08096680045127869 -114,0.0808737650513649 -115,0.08077903091907501 -116,0.08067511022090912 -117,0.08056788891553879 -118,0.08047181367874146 -119,0.08040350675582886 -120,0.08027961105108261 -121,0.08017463237047195 -122,0.08006597310304642 -123,0.07997225224971771 -124,0.07984527945518494 -125,0.07972817122936249 -126,0.0796014815568924 -127,0.07948664575815201 -128,0.07936953753232956 -129,0.07924515008926392 -130,0.07913320511579514 -131,0.0790223553776741 -132,0.07891803234815598 -133,0.07881642878055573 -134,0.07871995866298676 -135,0.07862641662359238 -136,0.07854481041431427 -137,0.07846709340810776 -138,0.0783914104104042 -139,0.07832375168800354 -140,0.07825954258441925 -141,0.07819891721010208 -142,0.0781421810388565 -143,0.0780881717801094 -144,0.07803279906511307 -145,0.0780021995306015 -146,0.07794872671365738 -147,0.07791666686534882 -148,0.0778774693608284 -149,0.07783576101064682 -150,0.07781136780977249 -151,0.07777907699346542 -152,0.07775373756885529 -153,0.07772689312696457 -154,0.07769836485385895 -155,0.07766888290643692 -156,0.07764027267694473 -157,0.07763855904340744 -158,0.07760161906480789 -159,0.0775827169418335 -160,0.07755860686302185 -161,0.07753869891166687 -162,0.07752276957035065 -163,0.07750643789768219 -164,0.07749020308256149 -165,0.0774715468287468 -166,0.07744919508695602 -167,0.07742331176996231 -168,0.07740145921707153 -169,0.07737919688224792 -170,0.07737446576356888 -171,0.07733500748872757 -172,0.07732431590557098 -173,0.07729962468147278 -174,0.07727193832397461 -175,0.07726912945508957 -176,0.07723745703697205 -177,0.07722503691911697 -178,0.07720784842967987 -179,0.07718850672245026 -180,0.07718317210674286 -181,0.07716058939695358 -182,0.07716796547174454 -183,0.07714550197124481 -184,0.07712268829345703 -185,0.07712540775537491 -186,0.07711385190486908 -187,0.07709141820669174 -188,0.07708138227462769 -189,0.07706872373819351 -190,0.07705778628587723 -191,0.07705109566450119 -192,0.07703948020935059 -193,0.07702457159757614 -194,0.07700957357883453 -195,0.07701383531093597 -196,0.07698957622051239 -197,0.07698714733123779 -198,0.07698770612478256 -199,0.0769636332988739 -200,0.07694884389638901 diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.250/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.250/training_config.txt deleted file mode 100644 index 81f45a2..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.250/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base 
controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.250/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.250/training_log.csv deleted file mode 100644 index a733670..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,28.099000930786133 -2,4.192780017852783 -3,1.2437902688980103 -4,0.9413572549819946 -5,0.7466213703155518 -6,0.6353301405906677 -7,0.558120846748352 -8,0.504294753074646 -9,0.46673721075057983 -10,0.43968895077705383 -11,0.4202134311199188 -12,0.40510520339012146 -13,0.3921518921852112 -14,0.3802344501018524 -15,0.3685072362422943 -16,0.3568040132522583 -17,0.34523722529411316 -18,0.3337801992893219 -19,0.3226439654827118 -20,0.31186315417289734 -21,0.3014376163482666 -22,0.2912413775920868 -23,0.28156688809394836 
-24,0.2725730240345001 -25,0.2642178535461426 -26,0.25646018981933594 -27,0.24924881756305695 -28,0.2425307333469391 -29,0.23627716302871704 -30,0.2304316610097885 -31,0.2249101847410202 -32,0.2195269614458084 -33,0.21432450413703918 -34,0.20927244424819946 -35,0.2043636292219162 -36,0.19952623546123505 -37,0.19463607668876648 -38,0.18980683386325836 -39,0.18508699536323547 -40,0.18056705594062805 -41,0.17609956860542297 -42,0.17171673476696014 -43,0.1674415022134781 -44,0.16326406598091125 -45,0.15919345617294312 -46,0.155239537358284 -47,0.15140680968761444 -48,0.14770804345607758 -49,0.1441541165113449 -50,0.14073574542999268 -51,0.1374070644378662 -52,0.13418741524219513 -53,0.13107609748840332 -54,0.12809661030769348 -55,0.12525798380374908 -56,0.12262566387653351 -57,0.12016387283802032 -58,0.11785707622766495 -59,0.11567762494087219 -60,0.11361764371395111 -61,0.11163441836833954 -62,0.10972019284963608 -63,0.1079055517911911 -64,0.10621373355388641 -65,0.10463652014732361 -66,0.10317583382129669 -67,0.1018395647406578 -68,0.10065237432718277 -69,0.09968551248311996 -70,0.09892682731151581 -71,0.09833718091249466 -72,0.0978766456246376 -73,0.09753107279539108 -74,0.09738953411579132 -75,0.0973445400595665 -76,0.09727530926465988 -77,0.09717754274606705 -78,0.09704720973968506 -79,0.09687984734773636 -80,0.09667819738388062 -81,0.09645168483257294 -82,0.09619873017072678 -83,0.09592226147651672 -84,0.0956270843744278 -85,0.09531646966934204 -86,0.0949932187795639 -87,0.09465700387954712 -88,0.09430960565805435 -89,0.09395450353622437 -90,0.0935961902141571 -91,0.09324105829000473 -92,0.09289342164993286 -93,0.09256009012460709 -94,0.09225786477327347 -95,0.09199847280979156 -96,0.09180312603712082 -97,0.09165541082620621 -98,0.0915306881070137 -99,0.09141601622104645 -100,0.09130813181400299 -101,0.0912022739648819 -102,0.09110245853662491 -103,0.09100258350372314 -104,0.0908994972705841 -105,0.09079203754663467 -106,0.09067831933498383 -107,0.09056177735328674 -108,0.09044384956359863 -109,0.09033039212226868 -110,0.09021976590156555 -111,0.09010942280292511 -112,0.09000056982040405 -113,0.08989281952381134 -114,0.08979625254869461 -115,0.08971001207828522 -116,0.08963502198457718 -117,0.08956757187843323 -118,0.08949761092662811 -119,0.0894228145480156 -120,0.08934463560581207 -121,0.08926272392272949 -122,0.08917803317308426 -123,0.08909162878990173 -124,0.08900462836027145 -125,0.08891774713993073 -126,0.08883446455001831 -127,0.0887540727853775 -128,0.08867377787828445 -129,0.08859236538410187 -130,0.08850986510515213 -131,0.08842650055885315 -132,0.08834380656480789 -133,0.08826221525669098 -134,0.08818143606185913 -135,0.08810199797153473 -136,0.0880238488316536 -137,0.08794653415679932 -138,0.08786987513303757 -139,0.08779380470514297 -140,0.08771730959415436 -141,0.08763956278562546 -142,0.08756255358457565 -143,0.08748630434274673 -144,0.08741151541471481 -145,0.08733803778886795 -146,0.08726593852043152 -147,0.08719464391469955 -148,0.08712347596883774 -149,0.0870518907904625 -150,0.08698022365570068 -151,0.08690871298313141 -152,0.08683745563030243 -153,0.08676659315824509 -154,0.08669614791870117 -155,0.08662645518779755 -156,0.08655683696269989 -157,0.08648744225502014 -158,0.08641884475946426 -159,0.08635058254003525 -160,0.08628259599208832 -161,0.08621465414762497 -162,0.08614637702703476 -163,0.08607789129018784 -164,0.08600965142250061 -165,0.0859418734908104 -166,0.08587449789047241 -167,0.08580734580755234 -168,0.08574036508798599 -169,0.0856735110282898 
-170,0.08560679852962494 -171,0.08554041385650635 -172,0.08547434955835342 -173,0.0854082927107811 -174,0.08534213155508041 -175,0.08527546375989914 -176,0.08520901948213577 -177,0.08514375239610672 -178,0.08507877588272095 -179,0.0850139930844307 -180,0.08494920283555984 -181,0.08488461375236511 -182,0.08482015877962112 -183,0.08475621789693832 -184,0.08469261229038239 -185,0.08462932705879211 -186,0.08456624299287796 -187,0.08450344204902649 -188,0.08444098383188248 -189,0.08437859266996384 -190,0.08431658893823624 -191,0.08425473421812057 -192,0.08419319987297058 -193,0.08413217961788177 -194,0.08407142758369446 -195,0.08401088416576385 -196,0.08395043015480042 -197,0.0838901475071907 -198,0.08382994681596756 -199,0.08376968652009964 -200,0.0837097242474556 diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.300/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.300/training_config.txt deleted file mode 100644 index 20dffe4..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.300/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. 
- - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.300/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.300/training_log.csv deleted file mode 100644 index 5f1fe42..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.300/training_log.csv +++ /dev/null @@ -1,31 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,75.63182830810547 -2,1.0909863710403442 -3,3.9092202186584473 -4,1.2715548276901245 -5,1.0624412298202515 -6,0.965272843837738 -7,0.9000266194343567 -8,0.8429201245307922 -9,0.7952187657356262 -10,0.7556275129318237 -11,0.7224379181861877 -12,0.6931800246238708 -13,0.6671028733253479 -14,0.6434755325317383 -15,0.6221373081207275 -16,0.6026766896247864 -17,0.5846193432807922 -18,0.5677444338798523 -19,0.552042543888092 -20,0.5375548005104065 -21,0.5243050456047058 -22,0.5124533176422119 -23,0.5018178224563599 -24,0.49238041043281555 -25,0.48409557342529297 -26,nan -27,nan -28,nan -29,nan diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.400/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.400/training_config.txt deleted file mode 100644 index 3946805..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.400/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss 
Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.400/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.400/training_log.csv deleted file mode 100644 index 55b72bd..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.400/training_log.csv +++ /dev/null @@ -1,25 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,881.1924438476562 -2,0.9227116703987122 -3,3430.841552734375 -4,1.1833546161651611 -5,1.0231198072433472 -6,0.9658364653587341 -7,0.9486367106437683 -8,0.9491131901741028 -9,0.9530090689659119 -10,0.9554793834686279 -11,0.9576630592346191 -12,0.9585234522819519 -13,0.9577469825744629 -14,0.9542115330696106 -15,0.9062563180923462 -16,3557.96875 -17,3760.626953125 -18,3760.656005859375 -19,3760.656005859375 -20,3760.656005859375 -21,3760.656005859375 -22,3760.656005859375 -23,3760.656005859375 diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.500/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.500/training_config.txt deleted file mode 100644 index 8592c11..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.500/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return 
torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.500/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.500/training_log.csv deleted file mode 100644 index 9616617..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.500/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,2046.052001953125 -2,3681.0537109375 -3,3700.03515625 -4,3499.96142578125 -5,2646.196044921875 -6,1338.6163330078125 -7,616.2645263671875 -8,123.25399780273438 -9,1.85293710231781 -10,1.3222061395645142 -11,1.1258697509765625 -12,1.009116530418396 -13,2.917672872543335 -14,6.997063159942627 -15,16.355575561523438 -16,21.68296241760254 -17,25.324031829833984 -18,27.002670288085938 -19,24.507274627685547 -20,20.66794204711914 -21,17.72010612487793 -22,15.846626281738281 -23,13.150153160095215 -24,11.60444450378418 -25,17.55567741394043 -26,24.598764419555664 -27,39.050453186035156 -28,61.36845397949219 -29,121.62574005126953 -30,470.5775451660156 -31,1820.2630615234375 -32,203.5693359375 -33,13.516998291015625 -34,3.2930965423583984 -35,1.71328866481781 -36,1.3845927715301514 -37,1.1823383569717407 -38,1.0650956630706787 -39,0.9932780265808105 -40,0.9436513781547546 -41,0.9059554934501648 -42,0.8749644160270691 -43,0.8491905927658081 
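Every training_config.txt in this sweep repeats the same 22 training cases. As a minimal sketch (variable names assumed here, not taken from the training script), the list maps directly onto a batched initial-state tensor, with the target angle riding along as a constant fourth state component:

import math
import torch

# Four representative rows from the "Training Cases" lists above;
# each row is [theta0, omega0, alpha0, desired_theta].
training_cases = [
    [ math.pi / 6, 0.0, 0.0, 0.0],   # released from +30 degrees
    [-math.pi / 6, 0.0, 0.0, 0.0],   # released from -30 degrees
    [0.0, 2 * math.pi, 0.0, 0.0],    # spinning at +2*pi rad/s
    [0.0, 0.0, 0.0, math.pi / 2],    # step command to +90 degrees
]
y0 = torch.tensor(training_cases)    # shape [batch_size, 4]

The full list splits into three groups — nonzero initial angle, nonzero initial rate, nonzero commanded angle — plus combined cases mixing them.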
-44,0.8273236751556396 -45,0.8080269694328308 -46,0.7906347513198853 -47,0.7749884724617004 -48,0.7609072327613831 -49,0.7418361902236938 -50,0.7308418154716492 -51,0.7205377221107483 -52,0.707227885723114 -53,0.6991121768951416 -54,0.6925564408302307 -55,0.6861046552658081 -56,0.679161548614502 -57,0.6735496520996094 -58,0.6684987545013428 -59,0.6637231707572937 -60,0.6592541933059692 -61,0.6561152338981628 -62,0.6534518599510193 -63,0.6509631276130676 -64,0.6486241221427917 -65,0.646418035030365 -66,0.6443150639533997 -67,0.6421751379966736 -68,0.6408573389053345 -69,0.6397050023078918 -70,0.6386656761169434 -71,0.6377168893814087 -72,0.6368198394775391 -73,0.6359508037567139 -74,0.6351059079170227 -75,0.6342821717262268 -76,0.6334778070449829 -77,0.6326900124549866 -78,0.631916344165802 -79,0.6311543583869934 -80,0.6304032206535339 -81,0.6296611428260803 -82,0.6289267539978027 -83,0.6281986236572266 -84,0.6274764537811279 -85,0.6267585158348083 -86,0.6260426640510559 -87,0.6253277063369751 -88,0.6246123909950256 -89,0.6238964200019836 -90,0.6231803894042969 -91,0.6224641799926758 -92,0.6217469573020935 -93,0.6210285425186157 -94,0.6203079223632812 -95,0.6195851564407349 -96,0.6188592910766602 -97,0.6181296110153198 -98,0.6173945069313049 -99,0.616654098033905 -100,0.6159103512763977 -101,0.6151636838912964 -102,0.6144132614135742 -103,0.6136588454246521 -104,0.6128926873207092 -105,0.6121138334274292 -106,0.611326277256012 -107,0.6105316877365112 -108,0.6097307801246643 -109,0.608923077583313 -110,0.608108401298523 -111,0.6072854995727539 -112,0.6064571142196655 -113,0.6056236028671265 -114,0.6047850847244263 -115,0.6039416193962097 -116,0.6030928492546082 -117,0.6022390127182007 -118,0.6013792753219604 -119,0.6005146503448486 -120,0.5996452569961548 -121,0.598771333694458 -122,0.5978928208351135 -123,0.5970100164413452 -124,0.5961228013038635 -125,0.5952317714691162 -126,0.5943368673324585 -127,0.5934386253356934 -128,0.5925369262695312 -129,0.5916318893432617 -130,0.5907235145568848 -131,0.5898122191429138 -132,0.5888976454734802 -133,0.5879799127578735 -134,0.5870588421821594 -135,0.586134672164917 -136,0.585207462310791 -137,0.5842771530151367 -138,0.583343505859375 -139,0.5824068784713745 -140,0.5814676284790039 -141,0.5805256366729736 -142,0.579581081867218 -143,0.5786340832710266 -144,0.5776841044425964 -145,0.5767313838005066 -146,0.5757757425308228 -147,0.5748172998428345 -148,0.5738565325737 -149,0.5728932023048401 -150,0.5719277858734131 -151,0.5709598064422607 -152,0.5699887871742249 -153,0.5690154433250427 -154,0.5680399537086487 -155,0.5670620799064636 -156,0.5660816431045532 -157,0.5650985836982727 -158,0.5641133189201355 -159,0.5631260275840759 -160,0.5621364116668701 -161,0.5611444711685181 -162,0.5601505041122437 -163,0.5591539144515991 -164,0.5581552982330322 -165,0.5571543574333191 -166,0.5561510324478149 -167,0.555145263671875 -168,0.554137110710144 -169,0.5531267523765564 -170,0.5521138310432434 -171,0.5510985851287842 -172,0.5500809550285339 -173,0.549060583114624 -174,0.5480380654335022 -175,0.5470128059387207 -176,0.5459846258163452 -177,0.5449532866477966 -178,0.5439193248748779 -179,0.5428826212882996 -180,0.5418429970741272 -181,0.5408008694648743 -182,0.5397560596466064 -183,0.5387085676193237 -184,0.5376585721969604 -185,0.536605954170227 -186,0.5355501770973206 -187,0.5344908833503723 -188,0.5334278345108032 -189,0.5323597192764282 -190,0.5312874913215637 -191,0.5302112698554993 -192,0.5291309356689453 -193,0.5280475616455078 -194,0.5269599556922913 
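Because the exponent here is 2, a logged mean loss can be inverted back to a rough typical angle error, treating the batch mean as if it were concentrated at a single error value. A back-of-envelope sketch, pure algebra on the normalized_loss above:

import math

def error_from_loss(loss, min_val=0.01, delta=1.0):
    # Invert loss = min_val + (1 - min_val) * ((e + d)^2 - d^2) / ((2*pi + d)^2 - d^2) for e.
    span = (2 * math.pi + delta) ** 2 - delta ** 2
    inner = delta ** 2 + (loss - min_val) / (1 - min_val) * span
    return math.sqrt(inner) - delta

print(error_from_loss(0.52))      # ~4.3 rad: this lr_0.500 run never really tracks
print(error_from_loss(3747.517))  # ~443 rad: the plateaued runs have spun away entirely

Under the same crude reading, the sub-0.1 losses of the smaller learning rates correspond to roughly a 1.2 rad typical error, presumably dominated by the transient portion of each trajectory.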
-195,0.5258677005767822 -196,0.5247712731361389 -197,0.5236703157424927 -198,0.5225650072097778 -199,0.5214549899101257 -200,0.5203410983085632 diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.600/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.600/training_config.txt deleted file mode 100644 index c8e409c..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.600/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.600/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.600/training_log.csv deleted file mode 100644 index e3c1acf..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.600/training_log.csv +++ /dev/null @@ -1,11 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,2830.217529296875 -2,3735.615966796875 -3,3747.221435546875 
-4,3747.517333984375 -5,3747.517333984375 -6,3747.517333984375 -7,3747.517333984375 -8,3747.517333984375 -9,3747.517333984375 diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.700/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.700/training_config.txt deleted file mode 100644 index 417b597..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.700/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.700/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.700/training_log.csv deleted file mode 100644 index c0448b4..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,3264.40625 -2,3746.637939453125 -3,3747.517333984375 -4,3747.517333984375 
-5,3747.517333984375 -6,3747.517333984375 -7,3747.517333984375 -8,3747.517333984375 diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.800/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.800/training_config.txt deleted file mode 100644 index b627a37..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.800/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.800/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.800/training_log.csv deleted file mode 100644 index 945a8c6..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.800/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,3531.03173828125 -2,3747.399169921875 -3,3747.517333984375 -4,3747.517333984375 -5,3747.517333984375 -6,3747.517333984375 
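A pattern worth noting in the logs above: lr_0.300 reaches nan, while lr_0.400, lr_0.600, lr_0.700, and lr_0.800 freeze at a constant plateau once the controller blows up, after which the optimizer never recovers. A small helper for flagging such dead runs across the sweep; the glob path assumes the training_files layout this diff moves to:

import csv
import math
from pathlib import Path

def run_is_dead(log_path):
    # Heuristic: any nan, or a final loss no better than epoch 0,
    # matches the frozen and diverged logs in this sweep.
    with open(log_path, newline="") as f:
        losses = [float(row["Loss"]) for row in csv.DictReader(f)]
    return any(math.isnan(x) for x in losses) or losses[-1] >= losses[0]

sweep = Path("training_files/base_loss_learning_rate_sweep/two")
for log in sorted(sweep.glob("lr_*/training_log.csv")):
    print(log.parent.name, "dead" if run_is_dead(log) else "ok")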
-7,3747.517333984375 -8,3747.517333984375 diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.900/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_0.900/training_config.txt deleted file mode 100644 index 72a82ed..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.900/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_0.900/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_0.900/training_log.csv deleted file mode 100644 index 80f4f70..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,3655.16845703125 -2,3747.517333984375 -3,3747.517333984375 -4,3747.517333984375 -5,3747.517333984375 -6,3747.517333984375 -7,3747.517333984375 diff --git 
a/training/base_loss_learning_rate_sweep/two/lr_1.000/training_config.txt b/training/base_loss_learning_rate_sweep/two/lr_1.000/training_config.txt deleted file mode 100644 index 6796538..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_1.000/training_config.txt +++ /dev/null @@ -1,63 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function Name: two -Loss Function Exponent: 2 - -Current Loss Function (wrapper) Source Code: - def loss_fn(state_traj): - theta = state_traj[:, :, 0] # [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # [batch_size, t_points] - return torch.mean(base_loss_fn(theta, desired_theta)) - -Specific Base Loss Function Source Code: -def square_loss(theta: torch.Tensor, desired_theta: torch.Tensor, min_val: float = 0.01) -> torch.Tensor: - return normalized_loss(theta, desired_theta, exponent=2, min_val=min_val) - -Normalized Loss Function Source Code: -def normalized_loss(theta: torch.Tensor, desired_theta: torch.Tensor, exponent: float, min_val: float = 0.01, delta: float = 1) -> torch.Tensor: - """ - Computes a normalized loss that maps the error (|theta - desired_theta|) on [0, 2π] - to the range [min_val, 1]. To avoid an infinite gradient at error=0 for exponents < 1, - a shift 'delta' is added. - - The loss is given by: - - loss = min_val + (1 - min_val) * ( ((error + delta)^exponent - delta^exponent) - / ((2π + delta)^exponent - delta^exponent) ) - - so that: - - When error = 0: loss = min_val - - When error = 2π: loss = 1 - """ - error = torch.abs(theta - desired_theta) - numerator = (error + delta) ** exponent - delta ** exponent - denominator = (2 * math.pi + delta) ** exponent - delta ** exponent - return min_val + (1 - min_val) * (numerator / denominator) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/base_loss_learning_rate_sweep/two/lr_1.000/training_log.csv b/training/base_loss_learning_rate_sweep/two/lr_1.000/training_log.csv deleted file mode 100644 index 0fcb48a..0000000 --- a/training/base_loss_learning_rate_sweep/two/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,21.691205978393555 -1,3711.6171875 -2,3693.16943359375 -3,0.30928659439086914 -4,nan -5,nan -6,nan -7,nan diff --git a/training/base_loss_learning_rate_sweep_training.py b/training/base_loss_learning_rate_sweep_training.py index f9a05f3..8e75e7a 100644 
--- a/training/base_loss_learning_rate_sweep_training.py +++ b/training/base_loss_learning_rate_sweep_training.py @@ -30,7 +30,7 @@ t_start, t_end, t_points = 0, 10, 1000 t_span = torch.linspace(t_start, t_end, t_points, device=device) # Output directory setup -base_output_dir = "base_loss_learning_rate_sweep" +base_output_dir = "training_files/base_loss_learning_rate_sweep" os.makedirs(base_output_dir, exist_ok=True) # Weight decay and training parameters diff --git a/training/time_weighting/constant/training_config.txt b/training/time_weighting/constant/training_config.txt deleted file mode 100644 index 49f7d3a..0000000 --- a/training/time_weighting/constant/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0.0001 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting/constant/training_log.csv b/training/time_weighting/constant/training_log.csv deleted file mode 100644 index b31fc07..0000000 --- a/training/time_weighting/constant/training_log.csv +++ /dev/null @@ -1,1002 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,490.75341796875 -2,312.3319396972656 -3,207.35940551757812 -4,141.8478240966797 -5,153.2593536376953 -6,183.9130859375 -7,182.892333984375 -8,184.9663848876953 -9,190.65370178222656 -10,177.59759521484375 -11,150.35174560546875 -12,120.16986083984375 -13,92.91024780273438 -14,63.549137115478516 -15,50.680484771728516 -16,42.914878845214844 -17,30.969497680664062 -18,24.494678497314453 -19,22.79924964904785 -20,19.7607479095459 -21,18.79807472229004 
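Two details in the time-weighted loss above are easy to misread. First, the constant weight function returns torch.ones_like regardless of min_val, so the "[min_weight, 1]" comment describes the weight family in general, not this baseline. Second, weights.view(-1, 1) only broadcasts against theta if the trajectory is time-major: the inline size comments say [batch_size, t_points], but the arithmetic works out for a [t_points, batch_size, state] layout such as the one torchdiffeq's odeint returns. A shape sketch under that assumption:

import torch

t_points, batch_size = 1000, 22
state_traj = torch.zeros(t_points, batch_size, 4)   # assumed time-major layout
theta = state_traj[:, :, 0]                         # [t_points, batch_size]
desired_theta = state_traj[:, :, 3]                 # [t_points, batch_size]
weights = torch.ones(t_points).view(-1, 1)          # [t_points, 1]
loss = torch.mean(weights * (theta - desired_theta) ** 2)  # broadcasts over batch

With a [batch_size, t_points] theta, the same expression would raise a size-mismatch error whenever batch_size != t_points, so the comments rather than the code are likely the stale part.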
-22,18.196372985839844 -23,17.862667083740234 -24,17.69695472717285 -25,17.735563278198242 -26,18.34331512451172 -27,18.648853302001953 -28,13.696430206298828 -29,12.618926048278809 -30,12.0142822265625 -31,11.488611221313477 -32,11.000466346740723 -33,10.527975082397461 -34,10.033746719360352 -35,9.576258659362793 -36,9.359408378601074 -37,9.17169189453125 -38,8.99147891998291 -39,8.816200256347656 -40,8.655688285827637 -41,8.513349533081055 -42,8.380597114562988 -43,8.270284652709961 -44,8.198171615600586 -45,8.131336212158203 -46,8.058463096618652 -47,7.983811855316162 -48,7.910038948059082 -49,7.83903694152832 -50,7.7720746994018555 -51,7.7088165283203125 -52,7.646589279174805 -53,7.585124969482422 -54,7.522336006164551 -55,7.4578351974487305 -56,7.392258644104004 -57,7.325303077697754 -58,7.256715774536133 -59,7.186795234680176 -60,7.118039131164551 -61,7.0556464195251465 -62,7.001034736633301 -63,6.95070219039917 -64,6.900872230529785 -65,6.849946022033691 -66,6.797647953033447 -67,6.7439727783203125 -68,6.6889448165893555 -69,6.632674217224121 -70,6.575161457061768 -71,6.516720294952393 -72,6.458007335662842 -73,6.4001641273498535 -74,6.344671726226807 -75,6.292289733886719 -76,6.242043495178223 -77,6.192115783691406 -78,6.1412153244018555 -79,6.088791370391846 -80,6.034894943237305 -81,5.979804515838623 -82,5.923988342285156 -83,5.868124485015869 -84,5.812781810760498 -85,5.7580461502075195 -86,5.703456878662109 -87,5.648416042327881 -88,5.592501640319824 -89,5.535559177398682 -90,5.477576732635498 -91,5.418453216552734 -92,5.358047008514404 -93,5.295431137084961 -94,5.22777795791626 -95,5.145219802856445 -96,4.997491836547852 -97,4.885484218597412 -98,4.8027729988098145 -99,4.723220348358154 -100,4.643039703369141 -101,4.561824798583984 -102,4.488615989685059 -103,4.427394866943359 -104,4.3675360679626465 -105,4.306655406951904 -106,4.240554332733154 -107,4.155074119567871 -108,4.087523937225342 -109,4.03827428817749 -110,3.99094557762146 -111,3.9451541900634766 -112,3.9008193016052246 -113,3.857811689376831 -114,3.8157002925872803 -115,3.7744975090026855 -116,3.7341666221618652 -117,3.6947989463806152 -118,3.656316041946411 -119,3.618558168411255 -120,3.58121657371521 -121,3.5438358783721924 -122,3.505573034286499 -123,3.4649970531463623 -124,3.4194233417510986 -125,3.367537021636963 -126,3.3268792629241943 -127,3.2960045337677 -128,3.2701289653778076 -129,3.2467875480651855 -130,3.2252397537231445 -131,3.2051424980163574 -132,3.186323404312134 -133,3.1686110496520996 -134,3.151928663253784 -135,3.136169672012329 -136,3.1212592124938965 -137,3.1071107387542725 -138,3.093606948852539 -139,3.0806643962860107 -140,3.0682475566864014 -141,3.056366443634033 -142,3.045596122741699 -143,3.035923719406128 -144,3.0270395278930664 -145,3.0186846256256104 -146,3.010817527770996 -147,3.003373622894287 -148,2.9962880611419678 -149,2.9894936084747314 -150,2.9829318523406982 -151,2.97655987739563 -152,2.9703593254089355 -153,2.9643146991729736 -154,2.958420515060425 -155,2.952662467956543 -156,2.947028160095215 -157,2.9415042400360107 -158,2.9360709190368652 -159,2.9307186603546143 -160,2.925443649291992 -161,2.9202420711517334 -162,2.9151203632354736 -163,2.910083770751953 -164,2.905130386352539 -165,2.900256633758545 -166,2.8954641819000244 -167,2.8907523155212402 -168,2.886113166809082 -169,2.8815395832061768 -170,2.8770229816436768 -171,2.8725624084472656 -172,2.868152379989624 -173,2.8637893199920654 -174,2.859473705291748 -175,2.8551924228668213 -176,2.8509345054626465 
-177,2.846712112426758 -178,2.842529773712158 -179,2.8383846282958984 -180,2.8342769145965576 -181,2.830204486846924 -182,2.8261666297912598 -183,2.822165012359619 -184,2.8182032108306885 -185,2.814279556274414 -186,2.8103959560394287 -187,2.8065545558929443 -188,2.802751302719116 -189,2.7989871501922607 -190,2.7952659130096436 -191,2.7915844917297363 -192,2.7879436016082764 -193,2.7843596935272217 -194,2.7809019088745117 -195,2.777543783187866 -196,2.7742998600006104 -197,2.7711868286132812 -198,2.76821231842041 -199,2.7653567790985107 -200,2.762613296508789 -201,2.7599780559539795 -202,2.7574517726898193 -203,2.7550241947174072 -204,2.7526872158050537 -205,2.750434160232544 -206,2.748260259628296 -207,2.746161460876465 -208,2.7441294193267822 -209,2.7421586513519287 -210,2.7402470111846924 -211,2.7383921146392822 -212,2.736588954925537 -213,2.7348344326019287 -214,2.7331271171569824 -215,2.731464147567749 -216,2.729841709136963 -217,2.7282509803771973 -218,2.726694345474243 -219,2.7251687049865723 -220,2.723675489425659 -221,2.72220778465271 -222,2.7207674980163574 -223,2.719362497329712 -224,2.717999219894409 -225,2.7166695594787598 -226,2.715369939804077 -227,2.7141005992889404 -228,2.7128615379333496 -229,2.711651563644409 -230,2.7104685306549072 -231,2.7093148231506348 -232,2.708189010620117 -233,2.7070963382720947 -234,2.7060344219207764 -235,2.705000400543213 -236,2.7039904594421387 -237,2.7030014991760254 -238,2.702031135559082 -239,2.701078414916992 -240,2.700141429901123 -241,2.6992177963256836 -242,2.698310613632202 -243,2.697417736053467 -244,2.696540594100952 -245,2.6956772804260254 -246,2.6948282718658447 -247,2.693993330001831 -248,2.693169116973877 -249,2.692354679107666 -250,2.6915512084960938 -251,2.6907551288604736 -252,2.689970016479492 -253,2.6891934871673584 -254,2.688424825668335 -255,2.6876649856567383 -256,2.686913013458252 -257,2.6861672401428223 -258,2.68542742729187 -259,2.68469500541687 -260,2.6839683055877686 -261,2.6832470893859863 -262,2.6825318336486816 -263,2.6818180084228516 -264,2.6811089515686035 -265,2.680405616760254 -266,2.679706573486328 -267,2.679011106491089 -268,2.6783196926116943 -269,2.6776342391967773 -270,2.676954984664917 -271,2.676284074783325 -272,2.675619602203369 -273,2.6749632358551025 -274,2.674314260482788 -275,2.673673391342163 -276,2.673041820526123 -277,2.6724178791046143 -278,2.671802043914795 -279,2.671194314956665 -280,2.670593738555908 -281,2.67000150680542 -282,2.669417142868042 -283,2.668840169906616 -284,2.668271064758301 -285,2.6677098274230957 -286,2.66715669631958 -287,2.6666152477264404 -288,2.666081428527832 -289,2.6655545234680176 -290,2.665034532546997 -291,2.664520025253296 -292,2.6640119552612305 -293,2.66351056098938 -294,2.6630144119262695 -295,2.6625235080718994 -296,2.6620378494262695 -297,2.661557197570801 -298,2.6610805988311768 -299,2.6606087684631348 -300,2.660140037536621 -301,2.6596760749816895 -302,2.6592161655426025 -303,2.6587600708007812 -304,2.6583075523376465 -305,2.6578586101531982 -306,2.6574130058288574 -307,2.656970739364624 -308,2.656531572341919 -309,2.6560957431793213 -310,2.655663251876831 -311,2.6552340984344482 -312,2.6548075675964355 -313,2.6543848514556885 -314,2.6539647579193115 -315,2.653547525405884 -316,2.6531336307525635 -317,2.6527230739593506 -318,2.652315139770508 -319,2.6519100666046143 -320,2.651507616043091 -321,2.6511080265045166 -322,2.650712013244629 -323,2.6503193378448486 -324,2.649928569793701 -325,2.649540901184082 -326,2.649155616760254 -327,2.648772716522217 
-328,2.648392677307129 -329,2.648014783859253 -330,2.647639036178589 -331,2.647265672683716 -332,2.6468944549560547 -333,2.6465256214141846 -334,2.646158218383789 -335,2.6457929611206055 -336,2.6454291343688965 -337,2.6450681686401367 -338,2.644709348678589 -339,2.644352674484253 -340,2.6439971923828125 -341,2.6436431407928467 -342,2.643291711807251 -343,2.642941474914551 -344,2.642594814300537 -345,2.6422531604766846 -346,2.6419126987457275 -347,2.641573905944824 -348,2.641235828399658 -349,2.640899419784546 -350,2.6405646800994873 -351,2.6402316093444824 -352,2.639899253845215 -353,2.6395692825317383 -354,2.639240026473999 -355,2.6389124393463135 -356,2.6385860443115234 -357,2.63826060295105 -358,2.637936592102051 -359,2.6376142501831055 -360,2.637293815612793 -361,2.636974811553955 -362,2.636657476425171 -363,2.636341094970703 -364,2.6360270977020264 -365,2.635713815689087 -366,2.635401725769043 -367,2.6350910663604736 -368,2.6347808837890625 -369,2.634472608566284 -370,2.6341655254364014 -371,2.6338589191436768 -372,2.6335532665252686 -373,2.633249521255493 -374,2.6329474449157715 -375,2.6326465606689453 -376,2.632347345352173 -377,2.632049083709717 -378,2.6317524909973145 -379,2.6314573287963867 -380,2.6311633586883545 -381,2.6308701038360596 -382,2.6305785179138184 -383,2.6302874088287354 -384,2.629998207092285 -385,2.6297101974487305 -386,2.6294238567352295 -387,2.629138946533203 -388,2.628857374191284 -389,2.628579616546631 -390,2.628307580947876 -391,2.6280391216278076 -392,2.6277737617492676 -393,2.627511501312256 -394,2.627250909805298 -395,2.6269917488098145 -396,2.6267354488372803 -397,2.6264805793762207 -398,2.626227617263794 -399,2.625977039337158 -400,2.6257283687591553 -401,2.6254825592041016 -402,2.625239610671997 -403,2.6249990463256836 -404,2.6247599124908447 -405,2.6245224475860596 -406,2.624286651611328 -407,2.6240525245666504 -408,2.6238203048706055 -409,2.623589515686035 -410,2.6233601570129395 -411,2.6231319904327393 -412,2.6229054927825928 -413,2.6226797103881836 -414,2.622455596923828 -415,2.622232675552368 -416,2.6220109462738037 -417,2.6217901706695557 -418,2.621570587158203 -419,2.621352195739746 -420,2.6211349964141846 -421,2.6209185123443604 -422,2.6207027435302734 -423,2.620488166809082 -424,2.620274305343628 -425,2.620060682296753 -426,2.6198480129241943 -427,2.619636058807373 -428,2.61942458152771 -429,2.619213342666626 -430,2.6190028190612793 -431,2.618792772293091 -432,2.6185834407806396 -433,2.618375062942505 -434,2.6181671619415283 -435,2.617959976196289 -436,2.617753505706787 -437,2.6175482273101807 -438,2.6173431873321533 -439,2.6171398162841797 -440,2.616936683654785 -441,2.616734504699707 -442,2.6165332794189453 -443,2.616333246231079 -444,2.6161346435546875 -445,2.6159372329711914 -446,2.615741014480591 -447,2.6155457496643066 -448,2.615351676940918 -449,2.6151585578918457 -450,2.61496639251709 -451,2.6147756576538086 -452,2.614586353302002 -453,2.6143980026245117 -454,2.614210605621338 -455,2.614023208618164 -456,2.613837242126465 -457,2.613652229309082 -458,2.6134684085845947 -459,2.6132850646972656 -460,2.613102674484253 -461,2.6129212379455566 -462,2.6127400398254395 -463,2.6125595569610596 -464,2.6123805046081543 -465,2.612201690673828 -466,2.6120243072509766 -467,2.611847400665283 -468,2.6116714477539062 -469,2.6114962100982666 -470,2.6113216876983643 -471,2.61114764213562 -472,2.6109750270843506 -473,2.610801935195923 -474,2.6106295585632324 -475,2.6104578971862793 -476,2.6102871894836426 -477,2.610116720199585 -478,2.6099469661712646 
-479,2.609778881072998 -480,2.6096103191375732 -481,2.609442710876465 -482,2.6092758178710938 -483,2.6091089248657227 -484,2.608943223953247 -485,2.6087775230407715 -486,2.6086127758026123 -487,2.6084482669830322 -488,2.6082842350006104 -489,2.608121871948242 -490,2.607959032058716 -491,2.6077969074249268 -492,2.607635736465454 -493,2.607475519180298 -494,2.607316255569458 -495,2.6071572303771973 -496,2.606999158859253 -497,2.606841564178467 -498,2.606684684753418 -499,2.6065280437469482 -500,2.606372594833374 -501,2.606217622756958 -502,2.6060631275177 -503,2.6059093475341797 -504,2.6057558059692383 -505,2.6056034564971924 -506,2.6054511070251465 -507,2.6052989959716797 -508,2.6051480770111084 -509,2.604997158050537 -510,2.6048471927642822 -511,2.6046969890594482 -512,2.6045479774475098 -513,2.6043992042541504 -514,2.6042511463165283 -515,2.6041038036346436 -516,2.6039562225341797 -517,2.6038105487823486 -518,2.6036672592163086 -519,2.6035256385803223 -520,2.6033859252929688 -521,2.6032466888427734 -522,2.6031088829040527 -523,2.6029717922210693 -524,2.6028356552124023 -525,2.6026999950408936 -526,2.602565050125122 -527,2.602431297302246 -528,2.6022980213165283 -529,2.6021664142608643 -530,2.6020350456237793 -531,2.6019043922424316 -532,2.601774215698242 -533,2.601644992828369 -534,2.601516008377075 -535,2.6013879776000977 -536,2.601259708404541 -537,2.6011335849761963 -538,2.601008653640747 -539,2.600883960723877 -540,2.600759506225586 -541,2.6006362438201904 -542,2.6005125045776367 -543,2.6003894805908203 -544,2.600266933441162 -545,2.600144624710083 -546,2.600022315979004 -547,2.599900960922241 -548,2.5997793674468994 -549,2.599658489227295 -550,2.5995371341705322 -551,2.599416494369507 -552,2.5992963314056396 -553,2.5991764068603516 -554,2.5990562438964844 -555,2.5989370346069336 -556,2.598818302154541 -557,2.5987000465393066 -558,2.5985817909240723 -559,2.598464012145996 -560,2.598346471786499 -561,2.598229169845581 -562,2.5981125831604004 -563,2.5979959964752197 -564,2.597879648208618 -565,2.597763776779175 -566,2.5976481437683105 -567,2.5975327491760254 -568,2.5974175930023193 -569,2.5973031520843506 -570,2.5971884727478027 -571,2.597074270248413 -572,2.5969603061676025 -573,2.59684681892395 -574,2.596733570098877 -575,2.596621036529541 -576,2.596508264541626 -577,2.59639573097229 -578,2.5962836742401123 -579,2.596172332763672 -580,2.5960607528686523 -581,2.595949649810791 -582,2.5958385467529297 -583,2.5957279205322266 -584,2.5956175327301025 -585,2.5955076217651367 -586,2.595398187637329 -587,2.5952885150909424 -588,2.595179319381714 -589,2.5950703620910645 -590,2.594961404800415 -591,2.594853162765503 -592,2.59474515914917 -593,2.594637393951416 -594,2.594529390335083 -595,2.5944223403930664 -596,2.5943148136138916 -597,2.594208002090454 -598,2.594101667404175 -599,2.5939958095550537 -600,2.5938901901245117 -601,2.5937843322753906 -602,2.593679666519165 -603,2.5935747623443604 -604,2.5934700965881348 -605,2.5933663845062256 -606,2.5932626724243164 -607,2.5931594371795654 -608,2.5930564403533936 -609,2.5929532051086426 -610,2.59285044670105 -611,2.5927484035491943 -612,2.5926461219787598 -613,2.5925443172454834 -614,2.592442750930786 -615,2.592341423034668 -616,2.592240333557129 -617,2.592139482498169 -618,2.592038631439209 -619,2.5919384956359863 -620,2.5918385982513428 -621,2.5917389392852783 -622,2.591639280319214 -623,2.5915398597717285 -624,2.591440439224243 -625,2.591341972351074 -626,2.5912435054779053 -627,2.5911450386047363 -628,2.5910468101501465 -629,2.5909488201141357 
-630,2.590851306915283 -631,2.5907535552978516 -632,2.590656042098999 -633,2.5905585289001465 -634,2.590461492538452 -635,2.590364694595337 -636,2.59026837348938 -637,2.5901718139648438 -638,2.590075969696045 -639,2.589980125427246 -640,2.5898842811584473 -641,2.5897886753082275 -642,2.589693784713745 -643,2.5895988941192627 -644,2.5895044803619385 -645,2.589409828186035 -646,2.589315891265869 -647,2.5892221927642822 -648,2.589128255844116 -649,2.5890347957611084 -650,2.5889415740966797 -651,2.588848352432251 -652,2.5887560844421387 -653,2.58866286277771 -654,2.5885703563690186 -655,2.588477849960327 -656,2.588385820388794 -657,2.588294267654419 -658,2.588202476501465 -659,2.5881106853485107 -660,2.588019609451294 -661,2.587928533554077 -662,2.5878381729125977 -663,2.587747097015381 -664,2.5876572132110596 -665,2.5875673294067383 -666,2.587477207183838 -667,2.5873873233795166 -668,2.5872983932495117 -669,2.587209701538086 -670,2.5871217250823975 -671,2.58703351020813 -672,2.5869457721710205 -673,2.5868587493896484 -674,2.586771011352539 -675,2.586684226989746 -676,2.586596965789795 -677,2.586510181427002 -678,2.586423635482788 -679,2.5863373279571533 -680,2.5862507820129395 -681,2.586165189743042 -682,2.5860793590545654 -683,2.5859932899475098 -684,2.585907459259033 -685,2.5858218669891357 -686,2.5857367515563965 -687,2.5856516361236572 -688,2.5855660438537598 -689,2.585481643676758 -690,2.5853965282440186 -691,2.5853121280670166 -692,2.5852277278900146 -693,2.5851433277130127 -694,2.5850589275360107 -695,2.584974765777588 -696,2.584890604019165 -697,2.5848071575164795 -698,2.584723472595215 -699,2.58463978767395 -700,2.5845565795898438 -701,2.58447265625 -702,2.5843899250030518 -703,2.5843067169189453 -704,2.584223508834839 -705,2.5841405391693115 -706,2.584057569503784 -707,2.583974599838257 -708,2.583892345428467 -709,2.5838096141815186 -710,2.5837268829345703 -711,2.5836446285247803 -712,2.583561897277832 -713,2.583479642868042 -714,2.5833969116210938 -715,2.583314895629883 -716,2.583233118057251 -717,2.583150863647461 -718,2.583069324493408 -719,2.5829880237579346 -720,2.5829057693481445 -721,2.582824230194092 -722,2.582742929458618 -723,2.5826611518859863 -724,2.582580327987671 -725,2.582498788833618 -726,2.5824174880981445 -727,2.58233642578125 -728,2.5822551250457764 -729,2.582174301147461 -730,2.5820930004119873 -731,2.582012176513672 -732,2.5819313526153564 -733,2.58185076713562 -734,2.5817699432373047 -735,2.5816893577575684 -736,2.581608772277832 -737,2.5815281867980957 -738,2.5814478397369385 -739,2.5813677310943604 -740,2.581287145614624 -741,2.5812065601348877 -742,2.5811266899108887 -743,2.5810463428497314 -744,2.580965995788574 -745,2.580886125564575 -746,2.580806255340576 -747,2.580726385116577 -748,2.5806467533111572 -749,2.580566644668579 -750,2.580487012863159 -751,2.5804073810577393 -752,2.5803279876708984 -753,2.5802481174468994 -754,2.5801687240600586 -755,2.580089569091797 -756,2.580010175704956 -757,2.5799310207366943 -758,2.5798518657684326 -759,2.579773187637329 -760,2.5796942710876465 -761,2.579615354537964 -762,2.5795369148254395 -763,2.5794589519500732 -764,2.5793821811676025 -765,2.579306125640869 -766,2.5792298316955566 -767,2.5791544914245605 -768,2.5790791511535645 -769,2.5790042877197266 -770,2.5789294242858887 -771,2.57885479927063 -772,2.57878041267395 -773,2.5787057876586914 -774,2.578631639480591 -775,2.578557252883911 -776,2.5784828662872314 -777,2.578409194946289 -778,2.5783348083496094 -779,2.578260898590088 -780,2.5781869888305664 
-781,2.578113317489624 -782,2.5780394077301025 -783,2.577965259552002 -784,2.5778918266296387 -785,2.5778186321258545 -786,2.577744960784912 -787,2.577671527862549 -788,2.5775983333587646 -789,2.5775251388549805 -790,2.5774521827697754 -791,2.5773797035217285 -792,2.5773067474365234 -793,2.5772337913513184 -794,2.5771613121032715 -795,2.5770883560180664 -796,2.5770163536071777 -797,2.576943874359131 -798,2.576871871948242 -799,2.5767996311187744 -800,2.576727867126465 -801,2.5766561031341553 -802,2.576584577560425 -803,2.5765128135681152 -804,2.5764412879943848 -805,2.5763700008392334 -806,2.576298713684082 -807,2.5762276649475098 -808,2.5761561393737793 -809,2.5760860443115234 -810,2.576014995574951 -811,2.575944662094116 -812,2.575873613357544 -813,2.575802803039551 -814,2.575732707977295 -815,2.575662136077881 -816,2.575591802597046 -817,2.575521945953369 -818,2.575451612472534 -819,2.5753817558288574 -820,2.5753121376037598 -821,2.575241804122925 -822,2.575172185897827 -823,2.5751020908355713 -824,2.5750324726104736 -825,2.574962615966797 -826,2.5748934745788574 -827,2.574824094772339 -828,2.5747547149658203 -829,2.5746850967407227 -830,2.5746161937713623 -831,2.574547290802002 -832,2.5744783878326416 -833,2.5744097232818604 -834,2.574340581893921 -835,2.5742721557617188 -836,2.5742032527923584 -837,2.5741348266601562 -838,2.5740668773651123 -839,2.5739986896514893 -840,2.5739312171936035 -841,2.5738637447357178 -842,2.573796033859253 -843,2.5737287998199463 -844,2.5736613273620605 -845,2.573593854904175 -846,2.5735270977020264 -847,2.573460102081299 -848,2.5733933448791504 -849,2.573326587677002 -850,2.5732598304748535 -851,2.5731935501098633 -852,2.573127269744873 -853,2.573060989379883 -854,2.57299542427063 -855,2.572929859161377 -856,2.572864055633545 -857,2.572798728942871 -858,2.5727334022521973 -859,2.5726683139801025 -860,2.572603225708008 -861,2.572538137435913 -862,2.5724732875823975 -863,2.572408437728882 -864,2.5723445415496826 -865,2.572279453277588 -866,2.5722150802612305 -867,2.572150945663452 -868,2.572086811065674 -869,2.5720226764678955 -870,2.5719592571258545 -871,2.571895122528076 -872,2.571831226348877 -873,2.571767807006836 -874,2.571704149246216 -875,2.571640729904175 -876,2.571577787399292 -877,2.57151460647583 -878,2.5714516639709473 -879,2.5713889598846436 -880,2.57132625579834 -881,2.571263313293457 -882,2.5712006092071533 -883,2.5711376667022705 -884,2.571075677871704 -885,2.5710132122039795 -886,2.570950984954834 -887,2.5708887577056885 -888,2.570826530456543 -889,2.5707645416259766 -890,2.5707027912139893 -891,2.5706405639648438 -892,2.5705788135528564 -893,2.5705173015594482 -894,2.570456027984619 -895,2.570394515991211 -896,2.570333480834961 -897,2.570272207260132 -898,2.570211410522461 -899,2.570150375366211 -900,2.57008957862854 -901,2.5700290203094482 -902,2.5699684619903564 -903,2.5699079036712646 -904,2.569847583770752 -905,2.5697875022888184 -906,2.5697274208068848 -907,2.569667339324951 -908,2.5696072578430176 -909,2.569546937942505 -910,2.5694873332977295 -911,2.569427490234375 -912,2.5693676471710205 -913,2.5693085193634033 -914,2.569248914718628 -915,2.5691893100738525 -916,2.569129705429077 -917,2.569070339202881 -918,2.5690112113952637 -919,2.5689518451690674 -920,2.5688929557800293 -921,2.568834066390991 -922,2.568774938583374 -923,2.568715810775757 -924,2.568657398223877 -925,2.5685982704162598 -926,2.568540096282959 -927,2.5684814453125 -928,2.56842303276062 -929,2.568364143371582 -930,2.5683059692382812 -931,2.5682480335235596 
-932,2.568189859390259 -933,2.568131685256958 -934,2.5680735111236572 -935,2.5680155754089355 -936,2.567957639694214 -937,2.5678999423980713 -938,2.567842721939087 -939,2.5677852630615234 -940,2.567727565765381 -941,2.5676703453063965 -942,2.567613363265991 -943,2.567556142807007 -944,2.5674989223480225 -945,2.567441940307617 -946,2.5673844814300537 -947,2.5673279762268066 -948,2.5672709941864014 -949,2.567214250564575 -950,2.567157506942749 -951,2.567100763320923 -952,2.567044496536255 -953,2.566988229751587 -954,2.56693172454834 -955,2.566875457763672 -956,2.566819429397583 -957,2.566763401031494 -958,2.566707134246826 -959,2.566650867462158 -960,2.5665950775146484 -961,2.5665392875671387 -962,2.56648325920105 -963,2.566427707672119 -964,2.5663721561431885 -965,2.566316843032837 -966,2.5662612915039062 -967,2.5662059783935547 -968,2.566150665283203 -969,2.5660955905914307 -970,2.5660407543182373 -971,2.5659854412078857 -972,2.565930128097534 -973,2.565875291824341 -974,2.5658206939697266 -975,2.5657660961151123 -976,2.565711498260498 -977,2.5656566619873047 -978,2.5656018257141113 -979,2.565547466278076 -980,2.565492868423462 -981,2.5654382705688477 -982,2.5653839111328125 -983,2.5653295516967773 -984,2.5652756690979004 -985,2.5652213096618652 -986,2.565167188644409 -987,2.565113067626953 -988,2.565058946609497 -989,2.56500506401062 -990,2.564950942993164 -991,2.564897298812866 -992,2.564842939376831 -993,2.564789295196533 -994,2.564735174179077 -995,2.5646815299987793 -996,2.5646281242370605 -997,2.5645742416381836 -998,2.5645203590393066 -999,2.564466953277588 -1000,2.564413547515869
diff --git a/training/time_weighting/cubic/training_config.txt b/training/time_weighting/cubic/training_config.txt
deleted file mode 100644
index b776dc3..0000000
--- a/training/time_weighting/cubic/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0]  # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # Size: [batch_size, t_points]
-
-        min_weight = 0.01  # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight)  # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1)  # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**3) * t_span**3
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
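The deleted config above records the time-weighted MSE used throughout these runs: per-timestep weights from weight_fn scale the squared tracking error before averaging. One caveat: the inline size comments do not quite line up, since weights.view(-1, 1) yields a [t_points, 1] tensor, which broadcasts correctly only if state_traj is laid out [t_points, batch, state] (the convention of torchdiffeq.odeint). A minimal standalone sketch under that assumption, with random stand-in trajectories rather than the repository's actual simulation:

    import torch

    # Hypothetical reconstruction; "cubic" mirrors the weight function
    # recorded in the deleted config above.
    def cubic(t_span: torch.Tensor, t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
        t_max = t_max if t_max is not None else t_span[-1]
        return min_val + ((1 - min_val) / t_max**3) * t_span**3

    t_span = torch.linspace(0, 10, 1000)   # Time Span: 0 to 10, Points: 1000
    state_traj = torch.randn(1000, 22, 4)  # assumed [t_points, batch, state] layout
    theta = state_traj[:, :, 0]            # [t_points, batch]
    desired_theta = state_traj[:, :, 3]    # [t_points, batch]

    weights = cubic(t_span).view(-1, 1)    # [t_points, 1], broadcasts across the batch
    loss = torch.mean(weights * (theta - desired_theta) ** 2)  # scalar

With the cubic weighting and min_val = 0.01, errors near t = 0 contribute roughly 1% as much as errors at t_max = 10, pushing the optimizer to prioritize late-time tracking.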
diff --git a/training/time_weighting/cubic/training_log.csv b/training/time_weighting/cubic/training_log.csv
deleted file mode 100644
index f78e343..0000000
--- a/training/time_weighting/cubic/training_log.csv
+++ /dev/null
@@ -1,1002 +0,0 @@
-Epoch,Loss
-0,593.6863403320312 -1,214.33438110351562 -2,237.43614196777344 -3,49.46141052246094 -4,23.531375885009766 -5,11.65247631072998 -6,9.426081657409668 -7,7.7791361808776855 -8,9.611245155334473 -9,11.173761367797852 -10,14.406468391418457 -11,17.57773780822754 -12,13.362654685974121 -13,10.811542510986328 -14,10.507197380065918 -15,7.70914363861084 -16,7.279587268829346 -17,6.9148101806640625 -18,6.663288116455078 -19,6.3922529220581055 -20,6.167629241943359 -21,5.95806884765625 -22,5.7451934814453125 -23,5.522685527801514 -24,5.292733192443848 -25,5.025609493255615 -26,4.645634174346924 -27,4.257270812988281 -28,3.923229694366455 -29,3.6351733207702637 -30,3.3592307567596436 -31,3.086090087890625 -32,2.893432140350342 -33,2.74357533454895 -34,2.6197261810302734 -35,2.516815423965454 -36,2.427947759628296 -37,2.347137689590454 -38,2.272465229034424 -39,2.2025327682495117 -40,2.1368093490600586 -41,2.074849843978882 -42,2.015951633453369 -43,1.959746241569519 -44,1.9059433937072754 -45,1.8542544841766357 -46,1.8044408559799194 -47,1.7561099529266357 -48,1.7086862325668335 -49,1.6613430976867676 -50,1.6129086017608643 -51,1.5618224143981934 -52,1.5061633586883545 -53,1.4433941841125488 -54,1.3723069429397583 -55,1.2933753728866577 -56,1.210503339767456 -57,1.1299470663070679 -58,1.0542397499084473 -59,0.9841561317443848 -60,0.9186469316482544 -61,0.856884777545929 -62,0.7986710667610168 -63,0.7446183562278748 -64,0.6958078742027283 -65,0.6536332368850708 -66,0.6188480257987976 -67,0.5911473035812378 -68,0.569055438041687 -69,0.5503751039505005 -70,0.5332661271095276 -71,0.5165595412254333 -72,0.499622106552124 -73,0.4821142256259918 -74,0.4638543128967285 -75,0.44476833939552307 -76,0.4248579442501068 -77,0.4041653871536255 -78,0.38282710313796997 -79,0.3611437976360321 -80,0.3396911025047302 -81,0.31925851106643677 -82,0.30029240250587463 -83,0.2826981544494629 -84,0.266729474067688 -85,0.2519337832927704 -86,0.23798805475234985 -87,0.22459623217582703 -88,0.21157923340797424 -89,0.19883213937282562 -90,0.18636658787727356 -91,0.1744658350944519 -92,0.16359281539916992 -93,0.15435166656970978 -94,0.14706863462924957 -95,0.14160384237766266 -96,0.13783575594425201 -97,0.13551218807697296 -98,0.13431991636753082 -99,0.13390858471393585 -100,0.13397987186908722 -101,0.13430465757846832 -102,0.1346970498561859 -103,0.13501915335655212 -104,0.13517342507839203 -105,0.1351000815629959 -106,0.13476981222629547 -107,0.13417813181877136 -108,0.13334056735038757 -109,0.13228900730609894 -110,0.1310657113790512 -111,0.1297154277563095 -112,0.12828809022903442 -113,0.12682974338531494 -114,0.12538066506385803 -115,0.12397485226392746 -116,0.1226358637213707 -117,0.1213812604546547 -118,0.12022686004638672 -119,0.11917923390865326 -120,0.11824005842208862 -121,0.11740638315677643 -122,0.11667182296514511 -123,0.11602900922298431 -124,0.11546817421913147 -125,0.11497705429792404 -126,0.11454189568758011 -127,0.11414941400289536 -128,0.11378684639930725 -129,0.11344145238399506 -130,0.11310279369354248 -131,0.11276298016309738 -132,0.11241460591554642 -133,0.11205243319272995 -134,0.11167415976524353 -135,0.11127930879592896 -136,0.11086881905794144 -137,0.11044592410326004 -138,0.11001485586166382 -139,0.10958050936460495 -140,0.10914750397205353 -141,0.10872027277946472 -142,0.1083030253648758 -143,0.1078990027308464 -144,0.10750984400510788 -145,0.10713617503643036 -146,0.10677757859230042 -147,0.10643260926008224 -148,0.10609973222017288 -149,0.10577689111232758 -150,0.10546249151229858 -151,0.10515475273132324 -152,0.10485231876373291 -153,0.10455404222011566 -154,0.10425905883312225 -155,0.1039666160941124 -156,0.10367639362812042 -157,0.10338819772005081 -158,0.10310204327106476 -159,0.10281779617071152 -160,0.10253539681434631 -161,0.10225510597229004 -162,0.10197712481021881 -163,0.10170132666826248 -164,0.10142756998538971 -165,0.10115586221218109 -166,0.10088653117418289 -167,0.10061963647603989 -168,0.10035506635904312 -169,0.10009269416332245 -170,0.0998324304819107 -171,0.09957455098628998 -172,0.09931881725788116 -173,0.09906551986932755 -174,0.09881449490785599 -175,0.09856568276882172 -176,0.09831875562667847 -177,0.09807358682155609 -178,0.09783024340867996 -179,0.09758831560611725 -180,0.0973479226231575 -181,0.09710897505283356 -182,0.09687142819166183 -183,0.09663527458906174 -184,0.09640074521303177 -185,0.09616760164499283 -186,0.09593614935874939 -187,0.09570594131946564 -188,0.09547741711139679 -189,0.09525028616189957 -190,0.09502479434013367 -191,0.09480060636997223 -192,0.09457798302173615 -193,0.0943567231297493 -194,0.09413706511259079 -195,0.09391883760690689 -196,0.09370198100805283 -197,0.09348652511835098 -198,0.09327255934476852 -199,0.0930599570274353 -200,0.09284865856170654 -201,0.09263885021209717 -202,0.09243036061525345 -203,0.09222321957349777 -204,0.09201755374670029 -205,0.09181308001279831 -206,0.09160992503166199 -207,0.09140808880329132 -208,0.09120746701955795 -209,0.09100804477930069 -210,0.09080974012613297 -211,0.09061256051063538 -212,0.09041636437177658 -213,0.09022129327058792 -214,0.09002736955881119 -215,0.08983450382947922 -216,0.08964266628026962 -217,0.08945190161466599 -218,0.08926210552453995 -219,0.08907334506511688 -220,0.08888552337884903 -221,0.08869880437850952 -222,0.08851289749145508 -223,0.08832800388336182 -224,0.08814410865306854 -225,0.08796116709709167 -226,0.08777912706136703 -227,0.08759807050228119 -228,0.0874180719256401 -229,0.08723890036344528 -230,0.08706070482730865 -231,0.08688344806432724 -232,0.08670710027217865 -233,0.0865316241979599 -234,0.08635714650154114 -235,0.08618365228176117 -236,0.08601100742816925 -237,0.08583922684192657 -238,0.08566824346780777 -239,0.08549816161394119 -240,0.08532901853322983 -241,0.08516069501638412 -242,0.08499325066804886 -243,0.08482667803764343 -244,0.08466079831123352 -245,0.08449582010507584 -246,0.08433162420988083 -247,0.0841682180762291 -248,0.08400553464889526 -249,0.08384378254413605 -250,0.08368270099163055 -251,0.08352246135473251 -252,0.08336304873228073 -253,0.08320427685976028 -254,0.08304628729820251 -255,0.08288910239934921 -256,0.08273260295391083 -257,0.08257684856653214 -258,0.08242179453372955 -259,0.08226757496595383 -260,0.08211392164230347 -261,0.08196106553077698 -262,0.0818088948726654
-263,0.08165740221738815 -264,0.08150666207075119 -265,0.08135657012462616 -266,0.08120722323656082 -267,0.0810585618019104 -268,0.08091048896312714 -269,0.08076324313879013 -270,0.0806165337562561 -271,0.08047045767307281 -272,0.08032503724098206 -273,0.08018012344837189 -274,0.08003586530685425 -275,0.07989220321178436 -276,0.07974903285503387 -277,0.07960646599531174 -278,0.07946445792913437 -279,0.07932287454605103 -280,0.07918168604373932 -281,0.07904097437858582 -282,0.07890066504478455 -283,0.07876069843769073 -284,0.07862121611833572 -285,0.07848204672336578 -286,0.07834342867136002 -287,0.07820525765419006 -288,0.0780675858259201 -289,0.07793053239583969 -290,0.07779398560523987 -291,0.07765816897153854 -292,0.07752318680286407 -293,0.07738912850618362 -294,0.07725626975297928 -295,0.0771242156624794 -296,0.07699288427829742 -297,0.07686200737953186 -298,0.07673157006502151 -299,0.07660159468650818 -300,0.07647208124399185 -301,0.07634319365024567 -302,0.07621457427740097 -303,0.0760861337184906 -304,0.07595793902873993 -305,0.07583003491163254 -306,0.07570239901542664 -307,0.07557525485754013 -308,0.07544851303100586 -309,0.07532225549221039 -310,0.07519645243883133 -311,0.07507113367319107 -312,0.07494638115167618 -313,0.07482228428125381 -314,0.07469867169857025 -315,0.07457569986581802 -316,0.07445333153009415 -317,0.07433163374662399 -318,0.0742107480764389 -319,0.07409095019102097 -320,0.07397204637527466 -321,0.07385407388210297 -322,0.07373703271150589 -323,0.07362093031406403 -324,0.07350568473339081 -325,0.073391392827034 -326,0.07327800244092941 -327,0.07316553592681885 -328,0.0730539858341217 -329,0.07294338941574097 -330,0.07283368706703186 -331,0.07272496819496155 -332,0.07261691242456436 -333,0.07250964641571045 -334,0.07240329682826996 -335,0.07229763269424438 -336,0.07219253480434418 -337,0.07208803296089172 -338,0.07198404520750046 -339,0.07188062369823456 -340,0.07177773118019104 -341,0.07167541235685349 -342,0.0715736374258995 -343,0.07147223502397537 -344,0.07137129455804825 -345,0.07127080112695694 -346,0.07117067277431488 -347,0.07107093930244446 -348,0.0709715485572815 -349,0.07087258994579315 -350,0.07077404111623764 -351,0.07067576795816422 -352,0.07057784497737885 -353,0.07048024237155914 -354,0.07038285583257675 -355,0.07028581947088242 -356,0.07018903642892838 -357,0.0700925812125206 -358,0.0699964240193367 -359,0.06990056484937668 -360,0.06980497390031815 -361,0.06970968097448349 -362,0.06961461156606674 -363,0.06951986253261566 -364,0.06942541897296906 -365,0.06933122128248215 -366,0.06923732906579971 -367,0.06914366781711578 -368,0.06905028223991394 -369,0.0689571276307106 -370,0.06886419653892517 -371,0.06877154856920242 -372,0.06867914646863937 -373,0.06858701258897781 -374,0.06849510222673416 -375,0.0684034451842308 -376,0.06831205636262894 -377,0.06822086125612259 -378,0.06812994927167892 -379,0.06803927570581436 -380,0.06794878840446472 -381,0.06785851716995239 -382,0.06776853650808334 -383,0.06767874211072922 -384,0.06758919358253479 -385,0.06749989092350006 -386,0.06741079688072205 -387,0.06732185930013657 -388,0.06723319739103317 -389,0.0671447291970253 -390,0.06705654412508011 -391,0.06696860492229462 -392,0.06688083708286285 -393,0.06679333746433258 -394,0.0667060911655426 -395,0.06661905348300934 -396,0.06653223931789398 -397,0.06644563376903534 -398,0.06635928899049759 -399,0.06627311557531357 -400,0.06618719547986984 -401,0.06610159575939178 -402,0.06601620465517044 -403,0.06593102961778641 -404,0.06584610044956207 
-405,0.06576132029294968 -406,0.06567683070898056 -407,0.06559258699417114 -408,0.06550853699445724 -409,0.06542465090751648 -410,0.06534107774496078 -411,0.06525775045156479 -412,0.06517472863197327 -413,0.06509195268154144 -414,0.06500951945781708 -415,0.06492728739976883 -416,0.06484536081552505 -417,0.06476368010044098 -418,0.0646822601556778 -419,0.06460116058588028 -420,0.06452032178640366 -421,0.0644397884607315 -422,0.06435956060886383 -423,0.06427959352731705 -424,0.06419995427131653 -425,0.06412055343389511 -426,0.06404147297143936 -427,0.0639626607298851 -428,0.06388413161039352 -429,0.06380598992109299 -430,0.06372809410095215 -431,0.06365040689706802 -432,0.0635729655623436 -433,0.06349573284387589 -434,0.06341873854398727 -435,0.06334193795919418 -436,0.063265360891819 -437,0.06318904459476471 -438,0.06311295181512833 -439,0.06303704530000687 -440,0.06296137720346451 -441,0.06288589537143707 -442,0.06281059235334396 -443,0.06273549050092697 -444,0.06266053766012192 -445,0.06258579343557358 -446,0.06251119822263718 -447,0.062436774373054504 -448,0.06236256659030914 -449,0.06228850036859512 -450,0.0622146874666214 -451,0.062140997499227524 -452,0.06206749379634857 -453,0.061994172632694244 -454,0.06192104145884514 -455,0.06184810400009155 -456,0.06177531182765961 -457,0.06170269101858139 -458,0.0616302527487278 -459,0.06155795603990555 -460,0.06148584932088852 -461,0.06141394004225731 -462,0.06134221702814102 -463,0.06127067282795906 -464,0.06119930371642113 -465,0.06112809479236603 -466,0.06105709448456764 -467,0.0609862394630909 -468,0.060915522277355194 -469,0.060845036059617996 -470,0.06077469885349274 -471,0.0607045441865921 -472,0.0606345497071743 -473,0.060564808547496796 -474,0.060495201498270035 -475,0.060425765812397 -476,0.060356538742780685 -477,0.0602874718606472 -478,0.06021859869360924 -479,0.0601499117910862 -480,0.06008138135075569 -481,0.06001298874616623 -482,0.05994478985667229 -483,0.05987676978111267 -484,0.0598088875412941 -485,0.05974116548895836 -486,0.05967362970113754 -487,0.05960625037550926 -488,0.05953893065452576 -489,0.059471823275089264 -490,0.05940483137965202 -491,0.05933796241879463 -492,0.05927121639251709 -493,0.059204597026109695 -494,0.05913808196783066 -495,0.05907166749238968 -496,0.05900539830327034 -497,0.05893925204873085 -498,0.0588732473552227 -499,0.05880729481577873 -500,0.058741431683301926 -501,0.05867564678192139 -502,0.058609914034605026 -503,0.058544207364320755 -504,0.058478571474552155 -505,0.05841294303536415 -506,0.05834738537669182 -507,0.058281827718019485 -508,0.05821629613637924 -509,0.05815077945590019 -510,0.05808522179722786 -511,0.05801966041326523 -512,0.05795405060052872 -513,0.057888396084308624 -514,0.057822730392217636 -515,0.05775703489780426 -516,0.05769128352403641 -517,0.05762545019388199 -518,0.057559527456760406 -519,0.057493653148412704 -520,0.05742781236767769 -521,0.05736202001571655 -522,0.05729632079601288 -523,0.05723075196146965 -524,0.057165272533893585 -525,0.0571000911295414 -526,0.05703505501151085 -527,0.056970249861478806 -528,0.056905876845121384 -529,0.05684199184179306 -530,0.05677846446633339 -531,0.05671539902687073 -532,0.05665271729230881 -533,0.05659037083387375 -534,0.05652831494808197 -535,0.05646662414073944 -536,0.05640515685081482 -537,0.056343983858823776 -538,0.05628311261534691 -539,0.056222524493932724 -540,0.05616221949458122 -541,0.05610206723213196 -542,0.056042153388261795 -543,0.055982425808906555 -544,0.05592290312051773 -545,0.05586359277367592 
-546,0.05580441653728485 -547,0.05574539303779602 -548,0.05568651854991913 -549,0.05562777817249298 -550,0.05556919798254967 -551,0.055510763078927994 -552,0.055452488362789154 -553,0.05539432168006897 -554,0.05533629283308983 -555,0.05527834966778755 -556,0.05522054061293602 -557,0.05516282096505165 -558,0.055105213075876236 -559,0.05504767969250679 -560,0.05499020591378212 -561,0.054932866245508194 -562,0.05487562716007233 -563,0.05481843650341034 -564,0.05476135388016701 -565,0.05470433831214905 -566,0.05464741587638855 -567,0.054590582847595215 -568,0.05453390255570412 -569,0.054477278143167496 -570,0.054420825093984604 -571,0.05436452850699425 -572,0.054308343678712845 -573,0.05425236374139786 -574,0.054196588695049286 -575,0.0541410967707634 -576,0.05408574268221855 -577,0.05403055623173714 -578,0.05397549271583557 -579,0.053920578211545944 -580,0.053865790367126465 -581,0.053811170160770416 -582,0.053756676614284515 -583,0.05370231345295906 -584,0.05364809185266495 -585,0.0535939559340477 -586,0.053539976477622986 -587,0.05348610132932663 -588,0.05343236029148102 -589,0.05337869003415108 -590,0.05332518368959427 -591,0.0532718300819397 -592,0.053218595683574677 -593,0.053165461868047714 -594,0.05311243236064911 -595,0.053059548139572144 -596,0.05300676450133324 -597,0.05295409634709358 -598,0.052901554852724075 -599,0.052849166095256805 -600,0.05279693752527237 -601,0.05274483934044838 -602,0.05269288271665573 -603,0.052641142159700394 -604,0.05258951708674431 -605,0.05253805220127106 -606,0.05248672142624855 -607,0.052435535937547684 -608,0.05238455906510353 -609,0.05233374610543251 -610,0.05228312686085701 -611,0.052232783287763596 -612,0.052182674407958984 -613,0.0521329790353775 -614,0.052083827555179596 -615,0.05203479528427124 -616,0.05198590084910393 -617,0.05193713307380676 -618,0.051888518035411835 -619,0.05184001103043556 -620,0.05179162696003914 -621,0.051743362098932266 -622,0.05169520527124405 -623,0.05164718255400658 -624,0.05159924924373627 -625,0.05155137926340103 -626,0.05150365084409714 -627,0.051455989480018616 -628,0.05140846222639084 -629,0.05136099085211754 -630,0.051313597708940506 -631,0.051266323775053024 -632,0.051219142973423004 -633,0.05117206275463104 -634,0.05112504959106445 -635,0.051078129559755325 -636,0.05103131756186485 -637,0.05098453536629677 -638,0.05093793943524361 -639,0.050891391932964325 -640,0.050844959914684296 -641,0.05079863965511322 -642,0.05075245723128319 -643,0.05070636048913002 -644,0.050660405308008194 -645,0.050614528357982635 -646,0.05056878551840782 -647,0.05052312836050987 -648,0.050477590411901474 -649,0.05043216794729233 -650,0.05038688704371452 -651,0.0503416508436203 -652,0.0502965934574604 -653,0.05025164410471916 -654,0.05020682141184807 -655,0.050162091851234436 -656,0.05011748895049095 -657,0.050073035061359406 -658,0.050028685480356216 -659,0.04998454824090004 -660,0.04994048550724983 -661,0.04989655688405037 -662,0.04985271021723747 -663,0.04980890825390816 -664,0.04976523667573929 -665,0.04972163960337639 -666,0.04967811703681946 -667,0.04963463172316551 -668,0.04959125444293022 -669,0.0495479516685009 -670,0.04950468987226486 -671,0.0494614914059639 -672,0.0494183748960495 -673,0.049375344067811966 -674,0.04933243617415428 -675,0.04928954690694809 -676,0.04924676567316055 -677,0.04920404776930809 -678,0.0491614043712616 -679,0.04911882430315018 -680,0.049076344817876816 -681,0.04903387278318405 -682,0.048991505056619644 -683,0.048949163407087326 -684,0.04890693351626396 -685,0.048864834010601044 
-686,0.048822853714227676 -687,0.048780977725982666 -688,0.04873929172754288 -689,0.04869772121310234 -690,0.04865630716085434 -691,0.048615001142024994 -692,0.0485738180577755 -693,0.04853276535868645 -694,0.04849180579185486 -695,0.04845095053315163 -696,0.04841022938489914 -697,0.04836966097354889 -698,0.048329152166843414 -699,0.04828875884413719 -700,0.04824848473072052 -701,0.04820825159549713 -702,0.04816810041666031 -703,0.04812802001833916 -704,0.048088036477565765 -705,0.04804816097021103 -706,0.04800841212272644 -707,0.04796876013278961 -708,0.04792926460504532 -709,0.047889869660139084 -710,0.04785055294632912 -711,0.0478113554418087 -712,0.047772228717803955 -713,0.047733187675476074 -714,0.04769422113895416 -715,0.04765532165765762 -716,0.04761651158332825 -717,0.04757773503661156 -718,0.04753905162215233 -719,0.04750044271349907 -720,0.04746190458536148 -721,0.047423433512449265 -722,0.047385022044181824 -723,0.047346651554107666 -724,0.047308359295129776 -725,0.04727012664079666 -726,0.047231946140527725 -727,0.04719381034374237 -728,0.04715571179986 -729,0.04711771383881569 -730,0.047079745680093765 -731,0.04704182595014572 -732,0.047003984451293945 -733,0.04696618765592575 -734,0.046928443014621735 -735,0.0468907430768013 -736,0.04685306176543236 -737,0.046815425157547 -738,0.04677781090140343 -739,0.04674021899700165 -740,0.04670269787311554 -741,0.04666519910097122 -742,0.04662776738405228 -743,0.04659038782119751 -744,0.046553079038858414 -745,0.046515848487615585 -746,0.04647863656282425 -747,0.04644147679209709 -748,0.04640435054898262 -749,0.04636728763580322 -750,0.04633026197552681 -751,0.04629328101873398 -752,0.046256352216005325 -753,0.04621947184205055 -754,0.046182628720998764 -755,0.046145882457494736 -756,0.046109188348054886 -757,0.046072542667388916 -758,0.046035971492528915 -759,0.04599941894412041 -760,0.04596293345093727 -761,0.0459265299141407 -762,0.04589013382792473 -763,0.045853834599256516 -764,0.04581757262349129 -765,0.04578135907649994 -766,0.045745205134153366 -767,0.04570911079645157 -768,0.045673079788684845 -769,0.04563707485795021 -770,0.04560111090540886 -771,0.04556521773338318 -772,0.045529358088970184 -773,0.045493584126234055 -774,0.04545784741640091 -775,0.04542215168476105 -776,0.045386504381895065 -777,0.045350898057222366 -778,0.04531534016132355 -779,0.045279838144779205 -780,0.04524436965584755 -781,0.04520893841981888 -782,0.04517354443669319 -783,0.04513820633292198 -784,0.04510289058089256 -785,0.04506761580705643 -786,0.04503236711025238 -787,0.044997118413448334 -788,0.04496191814541817 -789,0.044926729053258896 -790,0.044891607016325 -791,0.04485646262764931 -792,0.044821351766586304 -793,0.04478627070784569 -794,0.044751133769750595 -795,0.0447160005569458 -796,0.04468091204762459 -797,0.04464584216475487 -798,0.044610828161239624 -799,0.044575802981853485 -800,0.044540829956531525 -801,0.04450591653585434 -802,0.044471025466918945 -803,0.044436175376176834 -804,0.044401347637176514 -805,0.044366564601659775 -806,0.04433183744549751 -807,0.04429714009165764 -808,0.04426248371601105 -809,0.04422784969210625 -810,0.04419330507516861 -811,0.044158779084682465 -812,0.04412432760000229 -813,0.04409000650048256 -814,0.044055841863155365 -815,0.04402182996273041 -816,0.043987974524497986 -817,0.043954212218523026 -818,0.0439206138253212 -819,0.04388708248734474 -820,0.04385364055633545 -821,0.04382026940584183 -822,0.04378698393702507 -823,0.04375380277633667 -824,0.043720681220293045 -825,0.043687641620635986 
-826,0.043654654175043106 -827,0.043621763586997986 -828,0.043588943779468536 -829,0.04355619102716446 -830,0.04352350905537605 -831,0.0434909351170063 -832,0.04345840960741043 -833,0.04342598840594292 -834,0.04339362308382988 -835,0.043361350893974304 -836,0.0433291457593441 -837,0.04329700395464897 -838,0.04326493293046951 -839,0.04323294758796692 -840,0.04320106655359268 -841,0.04316922649741173 -842,0.043137457221746445 -843,0.04310574755072594 -844,0.0430741161108017 -845,0.04304258152842522 -846,0.0430111326277256 -847,0.042979735881090164 -848,0.0429484061896801 -849,0.0429171547293663 -850,0.042885955423116684 -851,0.04285484179854393 -852,0.042823776602745056 -853,0.042792778462171555 -854,0.042761847376823425 -855,0.042730994522571564 -856,0.04270023852586746 -857,0.042669542133808136 -858,0.042638931423425674 -859,0.042608391493558884 -860,0.042577896267175674 -861,0.04254746809601784 -862,0.04251712188124657 -863,0.04248682036995888 -864,0.042456574738025665 -865,0.04242643713951111 -866,0.04239634796977043 -867,0.04236632585525513 -868,0.0423363521695137 -869,0.04230644181370735 -870,0.04227659851312637 -871,0.042246777564287186 -872,0.042217012494802475 -873,0.042187318205833435 -874,0.042157698422670364 -875,0.04212814196944237 -876,0.042098622769117355 -877,0.04206917807459831 -878,0.04203981161117554 -879,0.04201049730181694 -880,0.041981250047683716 -881,0.041952021420001984 -882,0.04192284867167473 -883,0.04189373925328255 -884,0.04186463728547096 -885,0.041835635900497437 -886,0.0418066531419754 -887,0.04177771508693695 -888,0.041748836636543274 -889,0.041720014065504074 -890,0.04169122129678726 -891,0.04166248440742493 -892,0.04163377359509468 -893,0.04160511493682861 -894,0.04157648980617523 -895,0.04154789447784424 -896,0.04151935875415802 -897,0.04149087890982628 -898,0.04146244004368782 -899,0.04143403097987175 -900,0.04140566289424896 -901,0.04137735813856125 -902,0.041349105536937714 -903,0.041320882737636566 -904,0.04129267483949661 -905,0.041264526546001434 -906,0.041236404329538345 -907,0.041208308190107346 -908,0.04118027165532112 -909,0.041152261197566986 -910,0.04112434387207031 -911,0.04109645634889603 -912,0.04106862470507622 -913,0.0410408116877079 -914,0.041013024747371674 -915,0.04098529368638992 -916,0.040957581251859665 -917,0.040929943323135376 -918,0.040902309119701385 -919,0.040874727070331573 -920,0.04084720090031624 -921,0.040819745510816574 -922,0.04079229012131691 -923,0.04076489806175232 -924,0.04073754698038101 -925,0.0407102108001709 -926,0.04068291559815407 -927,0.04065566882491112 -928,0.040628451853990555 -929,0.040601275861263275 -930,0.04057414084672928 -931,0.04054706543684006 -932,0.04052002355456352 -933,0.04049304127693176 -934,0.0404660739004612 -935,0.04043915495276451 -936,0.04041225463151932 -937,0.040385399013757706 -938,0.04035858064889908 -939,0.040331799536943436 -940,0.04030505195260048 -941,0.04027833044528961 -942,0.040251683443784714 -943,0.0402250774204731 -944,0.04019851237535477 -945,0.040171995759010315 -946,0.040145501494407654 -947,0.04011902958154678 -948,0.040092602372169495 -949,0.040066227316856384 -950,0.04003986716270447 -951,0.04001353308558464 -952,0.03998724743723869 -953,0.03996100649237633 -954,0.03993482515215874 -955,0.03990872949361801 -956,0.039882760494947433 -957,0.03985686972737312 -958,0.0398310162127018 -959,0.03980521857738495 -960,0.039779435843229294 -961,0.03975368291139603 -962,0.03972798213362694 -963,0.03970231115818024 -964,0.039676688611507416 -965,0.039651110768318176 
-966,0.0396256148815155 -967,0.03960011526942253 -968,0.03957466408610344 -969,0.03954925388097763 -970,0.039523858577013016 -971,0.039498526602983475 -972,0.03947319835424423 -973,0.039447929710149765 -974,0.039422739297151566 -975,0.039397578686475754 -976,0.039372459053993225 -977,0.03934738412499428 -978,0.03932232782244682 -979,0.03929731249809265 -980,0.03927233815193176 -981,0.03924740105867386 -982,0.03922249749302864 -983,0.03919762372970581 -984,0.03917279094457626 -985,0.039148006588220596 -986,0.03912324458360672 -987,0.03909852355718613 -988,0.039073847234249115 -989,0.039049215614795685 -990,0.03902461752295494 -991,0.039000023156404495 -992,0.03897546976804733 -993,0.03895094618201256 -994,0.038926463574171066 -995,0.038902003318071365 -996,0.038877591490745544 -997,0.03885319456458092 -998,0.038828831166028976 -999,0.038804490119218826 -1000,0.03878021240234375
diff --git a/training/time_weighting/cubic_mirrored/training_config.txt b/training/time_weighting/cubic_mirrored/training_config.txt
deleted file mode 100644
index 7d62d3d..0000000
--- a/training/time_weighting/cubic_mirrored/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0]  # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # Size: [batch_size, t_points]
-
-        min_weight = 0.01  # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight)  # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1)  # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
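All three time_weighting configs in this range are identical except for the weight function, so the runs isolate how the weighting profile shapes training. Note the naming mismatch: the cubic_mirrored directory records a function called inverse, which decays from 1 at t = 0 to min_val at t_max, the opposite emphasis of cubic; the logs reflect that difference, with cubic reaching a loss near 0.039 by epoch 1000 while cubic_mirrored plateaus near 2.055. A small sketch of the three profiles (cubic_root appears in a later hunk), simplified from the recorded definitions:

    import torch

    def cubic(t, t_max, min_val=0.01):
        # Grows ~t**3: late-trajectory errors dominate the loss.
        return min_val + ((1 - min_val) / t_max**3) * t**3

    def inverse(t, t_max, min_val=0.01):
        # (1/min_val)**(1/1) in the recorded code is just 1/min_val;
        # decays from 1 at t=0 to min_val at t=t_max.
        return ((1 / min_val - 1) * t / t_max + 1) ** -1

    def cubic_root(t, t_max, min_val=0.01):
        # Grows ~t**(1/3): rises steeply at first, then flattens toward 1.
        return min_val + ((1 - min_val) / t_max ** (1 / 3)) * t ** (1 / 3)

    t = torch.linspace(0, 10, 6)
    for fn in (cubic, inverse, cubic_root):
        print(f"{fn.__name__:>10}:", [round(w.item(), 3) for w in fn(t, t_max=10.0)])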
diff --git a/training/time_weighting/cubic_mirrored/training_log.csv b/training/time_weighting/cubic_mirrored/training_log.csv
deleted file mode 100644
index 3ddbd8e..0000000
--- a/training/time_weighting/cubic_mirrored/training_log.csv
+++ /dev/null
@@ -1,1002 +0,0 @@
-Epoch,Loss
-0,53.319801330566406 -1,32.82256317138672 -2,10.985063552856445 -3,6.004786014556885 -4,3.984433174133301 -5,3.275408983230591 -6,2.8717541694641113 -7,2.618776798248291 -8,2.4742774963378906 -9,2.385310173034668 -10,2.3269171714782715 -11,2.2835094928741455 -12,2.245662212371826 -13,2.2125556468963623 -14,2.186497449874878 -15,2.169370174407959 -16,2.1642227172851562 -17,2.160360336303711 -18,2.1635751724243164 -19,2.1681160926818848 -20,2.1582682132720947 -21,2.137511730194092 -22,2.1190366744995117 -23,2.1137144565582275 -24,2.113424777984619 -25,2.116406202316284 -26,2.120547294616699 -27,2.124138593673706 -28,2.125739097595215 -29,2.1245477199554443 -30,2.1207549571990967 -31,2.1154823303222656 -32,2.1106090545654297 -33,2.107555866241455 -34,2.1062443256378174 -35,2.1066181659698486 -36,2.1080892086029053 -37,2.1078131198883057 -38,2.105426549911499 -39,2.103058338165283 -40,2.101863384246826 -41,2.1013569831848145 -42,2.101085662841797 -43,2.1010048389434814 -44,2.101060152053833 -45,2.1011264324188232 -46,2.1010560989379883 -47,2.1007747650146484 -48,2.1003336906433105 -49,2.099883556365967 -50,2.0994107723236084 -51,2.0990042686462402 -52,2.09867000579834 -53,2.098357915878296 -54,2.098051071166992 -55,2.0977301597595215 -56,2.097378969192505 -57,2.0970404148101807 -58,2.0967049598693848 -59,2.096404552459717 -60,2.09611439704895 -61,2.09584379196167 -62,2.0956037044525146 -63,2.0953872203826904 -64,2.095177173614502 -65,2.0949926376342773 -66,2.094820022583008 -67,2.094639539718628 -68,2.094454526901245 -69,2.09427809715271 -70,2.0940933227539062 -71,2.0938901901245117 -72,2.093674659729004 -73,2.0934505462646484 -74,2.0932252407073975 -75,2.0930020809173584 -76,2.0927937030792236 -77,2.09260892868042 -78,2.092432975769043 -79,2.0922646522521973 -80,2.0921003818511963 -81,2.0919392108917236 -82,2.091778516769409 -83,2.091614007949829 -84,2.0914463996887207 -85,2.091278314590454 -86,2.091109275817871 -87,2.0909321308135986 -88,2.0907530784606934 -89,2.0905802249908447 -90,2.0904128551483154 -91,2.0902469158172607 -92,2.090078592300415 -93,2.089909553527832 -94,2.0897421836853027 -95,2.0895752906799316 -96,2.0894103050231934 -97,2.089245557785034 -98,2.089080572128296 -99,2.0889177322387695 -100,2.088757038116455 -101,2.0885982513427734 -102,2.08844256401062 -103,2.088290214538574 -104,2.0881428718566895 -105,2.0879993438720703 -106,2.0878562927246094 -107,2.0877130031585693 -108,2.087570905685425 -109,2.087430000305176 -110,2.0872881412506104 -111,2.087148427963257 -112,2.087010622024536 -113,2.086874485015869 -114,2.0867397785186768 -115,2.08660626411438 -116,2.0864760875701904 -117,2.08634614944458 -118,2.086216926574707 -119,2.0860891342163086 -120,2.085961103439331 -121,2.085832118988037 -122,2.0857040882110596 -123,2.0855774879455566 -124,2.085451364517212 -125,2.0853257179260254 -126,2.0851998329162598 -127,2.085073709487915 -128,2.084946632385254 -129,2.0848193168640137 -130,2.084693431854248 -131,2.084569215774536 -132,2.084446668624878 -133,2.0843262672424316 -134,2.0842061042785645 -135,2.0840845108032227 -136,2.0839645862579346 -137,2.0838446617126465 -138,2.0837273597717285 -139,2.0836081504821777 -140,2.0834851264953613 -141,2.083364963531494 -142,2.083238124847412 -143,2.083113670349121 -144,2.0829854011535645 -145,2.0828568935394287 -146,2.0827250480651855 -147,2.08259654045105 -148,2.0824778079986572 -149,2.082366943359375 -150,2.0822627544403076
-151,2.082163095474243 -152,2.082063674926758 -153,2.081965684890747 -154,2.0818660259246826 -155,2.0817642211914062 -156,2.0816609859466553 -157,2.081554651260376 -158,2.081441879272461 -159,2.0813283920288086 -160,2.0812151432037354 -161,2.081105947494507 -162,2.0809855461120605 -163,2.08084774017334 -164,2.080728054046631 -165,2.0805888175964355 -166,2.08046817779541 -167,2.0803213119506836 -168,2.0801641941070557 -169,2.080005407333374 -170,2.0798540115356445 -171,2.0796799659729004 -172,2.079512357711792 -173,2.0793232917785645 -174,2.0791428089141846 -175,2.078932523727417 -176,2.0787160396575928 -177,2.078495502471924 -178,2.0782463550567627 -179,2.0779902935028076 -180,2.077723503112793 -181,2.077434778213501 -182,2.077134370803833 -183,2.0768232345581055 -184,2.07649564743042 -185,2.076303005218506 -186,2.076242446899414 -187,2.075925827026367 -188,2.075637102127075 -189,2.0750620365142822 -190,2.074932336807251 -191,2.0745067596435547 -192,2.074233055114746 -193,2.0738632678985596 -194,2.0737390518188477 -195,2.073198080062866 -196,2.072784900665283 -197,2.072303533554077 -198,2.0723915100097656 -199,2.0718448162078857 -200,2.0713272094726562 -201,2.0712058544158936 -202,2.0708255767822266 -203,2.070326089859009 -204,2.070749044418335 -205,2.0697290897369385 -206,2.0706405639648438 -207,2.0698511600494385 -208,2.070237874984741 -209,2.0690784454345703 -210,2.0691683292388916 -211,2.0690486431121826 -212,2.068784713745117 -213,2.0682120323181152 -214,2.0679712295532227 -215,2.0680415630340576 -216,2.067613363265991 -217,2.067464828491211 -218,2.066946268081665 -219,2.0670628547668457 -220,2.0665671825408936 -221,2.0663199424743652 -222,2.066619634628296 -223,2.065951108932495 -224,2.065990686416626 -225,2.0657730102539062 -226,2.0655455589294434 -227,2.0653600692749023 -228,2.065537452697754 -229,2.0655124187469482 -230,2.0650830268859863 -231,2.0652575492858887 -232,2.0648550987243652 -233,2.0647506713867188 -234,2.0646719932556152 -235,2.064518690109253 -236,2.0642778873443604 -237,2.064239025115967 -238,2.064117193222046 -239,2.0639500617980957 -240,2.0639395713806152 -241,2.0641701221466064 -242,2.063779592514038 -243,2.0636534690856934 -244,2.063657522201538 -245,2.063389539718628 -246,2.0631980895996094 -247,2.063109874725342 -248,2.0630149841308594 -249,2.062901020050049 -250,2.0629827976226807 -251,2.0631070137023926 -252,2.0629093647003174 -253,2.062650680541992 -254,2.062880754470825 -255,2.0626001358032227 -256,2.0625078678131104 -257,2.0623056888580322 -258,2.0623080730438232 -259,2.062114953994751 -260,2.0621302127838135 -261,2.0620501041412354 -262,2.0620594024658203 -263,2.0618414878845215 -264,2.061894178390503 -265,2.061953544616699 -266,2.062004327774048 -267,2.0616629123687744 -268,2.061681032180786 -269,2.061685800552368 -270,2.0614089965820312 -271,2.0616025924682617 -272,2.0614616870880127 -273,2.06130051612854 -274,2.06141996383667 -275,2.0611822605133057 -276,2.061370611190796 -277,2.0610532760620117 -278,2.0610198974609375 -279,2.0610361099243164 -280,2.0610110759735107 -281,2.0609700679779053 -282,2.060819149017334 -283,2.0607035160064697 -284,2.0608103275299072 -285,2.0605757236480713 -286,2.060657024383545 -287,2.0604374408721924 -288,2.0604264736175537 -289,2.0613348484039307 -290,2.0608534812927246 -291,2.060333013534546 -292,2.0607309341430664 -293,2.060499906539917 -294,2.060230255126953 -295,2.0604734420776367 -296,2.0604031085968018 -297,2.059976100921631 -298,2.0599160194396973 -299,2.0599300861358643 -300,2.05983304977417 -301,2.059760808944702 
-302,2.0600192546844482 -303,2.0602450370788574 -304,2.05989670753479 -305,2.059809446334839 -306,2.060045003890991 -307,2.059584140777588 -308,2.0599489212036133 -309,2.0607190132141113 -310,2.0597856044769287 -311,2.059579372406006 -312,2.0599465370178223 -313,2.059499979019165 -314,2.059608221054077 -315,2.0594797134399414 -316,2.0592846870422363 -317,2.0599098205566406 -318,2.0593881607055664 -319,2.059145450592041 -320,2.059359073638916 -321,2.0595390796661377 -322,2.059138536453247 -323,2.059330940246582 -324,2.059115171432495 -325,2.05906081199646 -326,2.059546947479248 -327,2.0591652393341064 -328,2.0588951110839844 -329,2.0590739250183105 -330,2.058880567550659 -331,2.0588462352752686 -332,2.059016704559326 -333,2.0591509342193604 -334,2.058741569519043 -335,2.058897018432617 -336,2.0588345527648926 -337,2.0586204528808594 -338,2.059021234512329 -339,2.0591423511505127 -340,2.0586135387420654 -341,2.058791160583496 -342,2.058790922164917 -343,2.058445930480957 -344,2.0587284564971924 -345,2.059095859527588 -346,2.0584311485290527 -347,2.0586891174316406 -348,2.059112787246704 -349,2.05835223197937 -350,2.058715581893921 -351,2.058396339416504 -352,2.058333396911621 -353,2.058743953704834 -354,2.058224678039551 -355,2.058225154876709 -356,2.0583183765411377 -357,2.0580480098724365 -358,2.0582497119903564 -359,2.0580499172210693 -360,2.0580220222473145 -361,2.0579471588134766 -362,2.0579895973205566 -363,2.057893753051758 -364,2.0578601360321045 -365,2.0579020977020264 -366,2.058149576187134 -367,2.057844400405884 -368,2.057927131652832 -369,2.0579168796539307 -370,2.057694911956787 -371,2.0579562187194824 -372,2.0581939220428467 -373,2.0576906204223633 -374,2.05794358253479 -375,2.057770252227783 -376,2.0576536655426025 -377,2.05774188041687 -378,2.05749773979187 -379,2.0576610565185547 -380,2.0580806732177734 -381,2.057467222213745 -382,2.057492256164551 -383,2.0575571060180664 -384,2.0574748516082764 -385,2.057497262954712 -386,2.0574469566345215 -387,2.0573911666870117 -388,2.0573909282684326 -389,2.057344913482666 -390,2.0575222969055176 -391,2.0573856830596924 -392,2.057333469390869 -393,2.0574398040771484 -394,2.0572121143341064 -395,2.0573041439056396 -396,2.0578744411468506 -397,2.0573503971099854 -398,2.057454824447632 -399,2.057539463043213 -400,2.057141065597534 -401,2.0574445724487305 -402,2.0575668811798096 -403,2.0571160316467285 -404,2.0574238300323486 -405,2.0571086406707764 -406,2.0572593212127686 -407,2.057206392288208 -408,2.0570578575134277 -409,2.0571067333221436 -410,2.0570878982543945 -411,2.056983232498169 -412,2.0570731163024902 -413,2.056967258453369 -414,2.057023286819458 -415,2.0569467544555664 -416,2.0569863319396973 -417,2.056959390640259 -418,2.056892156600952 -419,2.05690336227417 -420,2.0568125247955322 -421,2.0568172931671143 -422,2.056790351867676 -423,2.0570077896118164 -424,2.0573348999023438 -425,2.0570952892303467 -426,2.0568976402282715 -427,2.0570335388183594 -428,2.0569064617156982 -429,2.056945323944092 -430,2.05690598487854 -431,2.0567848682403564 -432,2.0568156242370605 -433,2.056702136993408 -434,2.0566959381103516 -435,2.0570971965789795 -436,2.0570976734161377 -437,2.0568323135375977 -438,2.0567855834960938 -439,2.0570318698883057 -440,2.0567469596862793 -441,2.0568342208862305 -442,2.0568225383758545 -443,2.056654214859009 -444,2.0567989349365234 -445,2.05708646774292 -446,2.056849718093872 -447,2.056657314300537 -448,2.0568323135375977 -449,2.0568275451660156 -450,2.056567668914795 -451,2.0566604137420654 -452,2.0569825172424316 
-453,2.0567665100097656 -454,2.0566062927246094 -455,2.056762933731079 -456,2.056783676147461 -457,2.0566158294677734 -458,2.0566813945770264 -459,2.0565860271453857 -460,2.0565261840820312 -461,2.056508779525757 -462,2.056417226791382 -463,2.0568220615386963 -464,2.056866407394409 -465,2.056584119796753 -466,2.0565900802612305 -467,2.0567309856414795 -468,2.0564959049224854 -469,2.056612491607666 -470,2.056584596633911 -471,2.056448221206665 -472,2.056537628173828 -473,2.0564072132110596 -474,2.056396245956421 -475,2.056415557861328 -476,2.056429862976074 -477,2.056575298309326 -478,2.0564815998077393 -479,2.0563700199127197 -480,2.056619644165039 -481,2.0564124584198 -482,2.056389093399048 -483,2.0565237998962402 -484,2.0563244819641113 -485,2.0563933849334717 -486,2.0569236278533936 -487,2.05651593208313 -488,2.0564305782318115 -489,2.0566065311431885 -490,2.056248426437378 -491,2.056518316268921 -492,2.057117462158203 -493,2.0564863681793213 -494,2.0565991401672363 -495,2.056772470474243 -496,2.056272029876709 -497,2.056562900543213 -498,2.056288719177246 -499,2.056382417678833 -500,2.056527614593506 -501,2.056344985961914 -502,2.056248903274536 -503,2.0564255714416504 -504,2.056238889694214 -505,2.0563199520111084 -506,2.056307077407837 -507,2.0562148094177246 -508,2.0563082695007324 -509,2.056145668029785 -510,2.0562760829925537 -511,2.057020902633667 -512,2.056565761566162 -513,2.0563814640045166 -514,2.0568649768829346 -515,2.0562100410461426 -516,2.056499481201172 -517,2.0566749572753906 -518,2.056117534637451 -519,2.056537389755249 -520,2.0561540126800537 -521,2.0563290119171143 -522,2.056333065032959 -523,2.056124448776245 -524,2.0561885833740234 -525,2.056255578994751 -526,2.0560667514801025 -527,2.0562520027160645 -528,2.0560805797576904 -529,2.0560948848724365 -530,2.0561180114746094 -531,2.0559768676757812 -532,2.0561153888702393 -533,2.0565943717956543 -534,2.0562727451324463 -535,2.0562264919281006 -536,2.0565083026885986 -537,2.056061029434204 -538,2.056259870529175 -539,2.056140661239624 -540,2.0560495853424072 -541,2.0561516284942627 -542,2.0559580326080322 -543,2.056110382080078 -544,2.0560214519500732 -545,2.0560262203216553 -546,2.055920362472534 -547,2.0559709072113037 -548,2.055910587310791 -549,2.0558834075927734 -550,2.0559210777282715 -551,2.055856466293335 -552,2.0558745861053467 -553,2.0559396743774414 -554,2.0562586784362793 -555,2.0560526847839355 -556,2.0559921264648438 -557,2.0562236309051514 -558,2.055922031402588 -559,2.056016445159912 -560,2.0560359954833984 -561,2.055849552154541 -562,2.0559802055358887 -563,2.0558347702026367 -564,2.0558700561523438 -565,2.0561153888702393 -566,2.0562381744384766 -567,2.055943012237549 -568,2.055950880050659 -569,2.0560483932495117 -570,2.055846929550171 -571,2.055981159210205 -572,2.055903911590576 -573,2.055838108062744 -574,2.055896043777466 -575,2.0557756423950195 -576,2.055861234664917 -577,2.0558996200561523 -578,2.0560309886932373 -579,2.0558269023895264 -580,2.0558371543884277 -581,2.0559580326080322 -582,2.0557830333709717 -583,2.05584454536438 -584,2.0558369159698486 -585,2.0557358264923096 -586,2.055809736251831 -587,2.05600643157959 -588,2.055776596069336 -589,2.0558249950408936 -590,2.055818557739258 -591,2.0556869506835938 -592,2.0558829307556152 -593,2.0565497875213623 -594,2.05611252784729 -595,2.0559723377227783 -596,2.056457757949829 -597,2.0558395385742188 -598,2.056096315383911 -599,2.0559213161468506 -600,2.055823802947998 -601,2.055969715118408 -602,2.055677652359009 -603,2.055971145629883 
-604,2.055919647216797 -605,2.0558199882507324 -606,2.0557491779327393 -607,2.055811882019043 -608,2.0556838512420654 -609,2.055742025375366 -610,2.0557703971862793 -611,2.0556859970092773 -612,2.055723190307617 -613,2.055643081665039 -614,2.0556492805480957 -615,2.055659055709839 -616,2.055640697479248 -617,2.0558605194091797 -618,2.0558104515075684 -619,2.0556395053863525 -620,2.055769443511963 -621,2.0557825565338135 -622,2.055675506591797 -623,2.055816888809204 -624,2.0556938648223877 -625,2.0557005405426025 -626,2.0557198524475098 -627,2.0556015968322754 -628,2.0556485652923584 -629,2.0555801391601562 -630,2.055553913116455 -631,2.0559134483337402 -632,2.0564136505126953 -633,2.056236743927002 -634,2.055675506591797 -635,2.056166887283325 -636,2.05611515045166 -637,2.055891275405884 -638,2.0563132762908936 -639,2.0558340549468994 -640,2.0560922622680664 -641,2.055826425552368 -642,2.05588436126709 -643,2.0558841228485107 -644,2.0556726455688477 -645,2.055908679962158 -646,2.055675506591797 -647,2.0556814670562744 -648,2.0565171241760254 -649,2.056055784225464 -650,2.0556108951568604 -651,2.056023359298706 -652,2.0558114051818848 -653,2.0557734966278076 -654,2.0559520721435547 -655,2.0556588172912598 -656,2.0558202266693115 -657,2.0556585788726807 -658,2.0556790828704834 -659,2.055715799331665 -660,2.055560350418091 -661,2.055680751800537 -662,2.0556082725524902 -663,2.055610418319702 -664,2.055511474609375 -665,2.0555765628814697 -666,2.0555365085601807 -667,2.0555050373077393 -668,2.0555412769317627 -669,2.055483102798462 -670,2.0555062294006348 -671,2.0555717945098877 -672,2.0559072494506836 -673,2.055699348449707 -674,2.055628538131714 -675,2.0558390617370605 -676,2.055581569671631 -677,2.0556817054748535 -678,2.0556864738464355 -679,2.055511474609375 -680,2.055617094039917 -681,2.0555169582366943 -682,2.0555217266082764 -683,2.0555906295776367 -684,2.055867910385132 -685,2.0555474758148193 -686,2.0555925369262695 -687,2.05570912361145 -688,2.055485486984253 -689,2.055595874786377 -690,2.0555472373962402 -691,2.055476427078247 -692,2.0555577278137207 -693,2.0554208755493164 -694,2.0554773807525635 -695,2.0557005405426025 -696,2.0559239387512207 -697,2.0556414127349854 -698,2.0555946826934814 -699,2.0558340549468994 -700,2.0555474758148193 -701,2.0556459426879883 -702,2.055664300918579 -703,2.0554747581481934 -704,2.05560040473938 -705,2.0554659366607666 -706,2.0554733276367188 -707,2.055528402328491 -708,2.05576753616333 -709,2.055476188659668 -710,2.0555217266082764 -711,2.0556185245513916 -712,2.055433988571167 -713,2.05549955368042 -714,2.0554897785186768 -715,2.055393934249878 -716,2.055514097213745 -717,2.055854082107544 -718,2.0555100440979004 -719,2.0555379390716553 -720,2.055710792541504 -721,2.055417060852051 -722,2.055534839630127 -723,2.055509090423584 -724,2.0553817749023438 -725,2.0554842948913574 -726,2.0553700923919678 -727,2.0553700923919678 -728,2.0556893348693848 -729,2.055893659591675 -730,2.055565595626831 -731,2.055523157119751 -732,2.055814743041992 -733,2.0554800033569336 -734,2.0555989742279053 -735,2.0556187629699707 -736,2.0554285049438477 -737,2.055561065673828 -738,2.0554120540618896 -739,2.055418014526367 -740,2.0554659366607666 -741,2.0553174018859863 -742,2.0554215908050537 -743,2.055645227432251 -744,2.0557444095611572 -745,2.0554888248443604 -746,2.0554680824279785 -747,2.05560302734375 -748,2.055427312850952 -749,2.0555102825164795 -750,2.0554792881011963 -751,2.0553512573242188 -752,2.055461883544922 -753,2.0553536415100098 -754,2.0553369522094727 
-755,2.0553934574127197 -756,2.0555145740509033 -757,2.0553159713745117 -758,2.0553534030914307 -759,2.0554168224334717 -760,2.0552735328674316 -761,2.0553462505340576 -762,2.055666446685791 -763,2.0553715229034424 -764,2.0553791522979736 -765,2.055539846420288 -766,2.0552914142608643 -767,2.0553691387176514 -768,2.055396556854248 -769,2.055239200592041 -770,2.0553550720214844 -771,2.0557701587677 -772,2.0554397106170654 -773,2.0553765296936035 -774,2.055629014968872 -775,2.055319309234619 -776,2.055407762527466 -777,2.0553946495056152 -778,2.0552501678466797 -779,2.055363655090332 -780,2.055551290512085 -781,2.055260181427002 -782,2.0553407669067383 -783,2.0553972721099854 -784,2.0552220344543457 -785,2.055328369140625 -786,2.0559680461883545 -787,2.055534601211548 -788,2.0554680824279785 -789,2.0557641983032227 -790,2.0553441047668457 -791,2.0555267333984375 -792,2.055438280105591 -793,2.055274486541748 -794,2.0554494857788086 -795,2.055257797241211 -796,2.0552773475646973 -797,2.055560827255249 -798,2.055598020553589 -799,2.0552964210510254 -800,2.0553908348083496 -801,2.055471897125244 -802,2.0552473068237305 -803,2.055389404296875 -804,2.055290460586548 -805,2.0552685260772705 -806,2.055295705795288 -807,2.055203437805176 -808,2.0552451610565186 -809,2.0552878379821777 -810,2.0554563999176025 -811,2.055274248123169 -812,2.0552327632904053 -813,2.055382013320923 -814,2.0552284717559814 -815,2.0552499294281006 -816,2.0552895069122314 -817,2.0551650524139404 -818,2.0552144050598145 -819,2.0551910400390625 -820,2.055138111114502 -821,2.0552115440368652 -822,2.055210828781128 -823,2.0551488399505615 -824,2.0552258491516113 -825,2.0551862716674805 -826,2.055168390274048 -827,2.0551788806915283 -828,2.055182456970215 -829,2.05522084236145 -830,2.0551671981811523 -831,2.055152177810669 -832,2.055211305618286 -833,2.055149793624878 -834,2.0551345348358154 -835,2.0551681518554688 -836,2.055117130279541 -837,2.055144786834717 -838,2.0551109313964844 -839,2.0550990104675293 -840,2.0551974773406982 -841,2.0551562309265137 -842,2.055201768875122 -843,2.055147647857666 -844,2.055168390274048 -845,2.0551745891571045 -846,2.055148124694824 -847,2.0551517009735107 -848,2.0551323890686035 -849,2.0551106929779053 -850,2.0551044940948486 -851,2.0550949573516846 -852,2.055175304412842 -853,2.0556719303131104 -854,2.055795192718506 -855,2.0552525520324707 -856,2.0554821491241455 -857,2.0556931495666504 -858,2.0553791522979736 -859,2.0556540489196777 -860,2.0555896759033203 -861,2.055422782897949 -862,2.0555508136749268 -863,2.055495262145996 -864,2.0552589893341064 -865,2.055474281311035 -866,2.0552656650543213 -867,2.055191993713379 -868,2.0553250312805176 -869,2.0552151203155518 -870,2.055556297302246 -871,2.0552444458007812 -872,2.0552515983581543 -873,2.0553858280181885 -874,2.0552539825439453 -875,2.0552523136138916 -876,2.055211305618286 -877,2.0553014278411865 -878,2.0551092624664307 -879,2.055232524871826 -880,2.0551810264587402 -881,2.0553462505340576 -882,2.055314779281616 -883,2.055135726928711 -884,2.0554099082946777 -885,2.055203914642334 -886,2.05523681640625 -887,2.0552310943603516 -888,2.0552070140838623 -889,2.055148124694824 -890,2.0551390647888184 -891,2.0551934242248535 -892,2.055302619934082 -893,2.055337429046631 -894,2.0550737380981445 -895,2.0552937984466553 -896,2.05523943901062 -897,2.055157423019409 -898,2.0552825927734375 -899,2.055178165435791 -900,2.0551795959472656 -901,2.05513596534729 -902,2.05519962310791 -903,2.0551178455352783 -904,2.0559377670288086 
-905,2.056199312210083 -906,2.0551252365112305 -907,2.0558557510375977 -908,2.05611252784729 -909,2.055269956588745 -910,2.0562984943389893 -911,2.055844783782959 -912,2.0554356575012207 -913,2.056135416030884 -914,2.055398464202881 -915,2.0554442405700684 -916,2.055725336074829 -917,2.055102825164795 -918,2.055410623550415 -919,2.0554981231689453 -920,2.0551233291625977 -921,2.0551998615264893 -922,2.0550537109375 -923,2.055180072784424 -924,2.0550732612609863 -925,2.055147409439087 -926,2.0551087856292725 -927,2.0550501346588135 -928,2.0551257133483887 -929,2.0550124645233154 -930,2.055055856704712 -931,2.05500864982605 -932,2.0550789833068848 -933,2.0554537773132324 -934,2.0555624961853027 -935,2.055021286010742 -936,2.0556297302246094 -937,2.0555412769317627 -938,2.0551841259002686 -939,2.0556046962738037 -940,2.0554556846618652 -941,2.055182695388794 -942,2.0554022789001465 -943,2.055401563644409 -944,2.0550851821899414 -945,2.0562245845794678 -946,2.056216239929199 -947,2.055053949356079 -948,2.0562658309936523 -949,2.056168794631958 -950,2.0553126335144043 -951,2.0565543174743652 -952,2.055710554122925 -953,2.0554919242858887 -954,2.056098222732544 -955,2.0552098751068115 -956,2.0554652214050293 -957,2.0556063652038574 -958,2.0549967288970947 -959,2.055492639541626 -960,2.055835485458374 -961,2.0552289485931396 -962,2.0552940368652344 -963,2.055541515350342 -964,2.0550458431243896 -965,2.055245876312256 -966,2.0551695823669434 -967,2.054990530014038 -968,2.0551865100860596 -969,2.055352210998535 -970,2.0550265312194824 -971,2.0550918579101562 -972,2.055138111114502 -973,2.0550220012664795 -974,2.055014133453369 -975,2.0550432205200195 -976,2.054982900619507 -977,2.0549302101135254 -978,2.054971218109131 -979,2.0548956394195557 -980,2.0549447536468506 -981,2.0550003051757812 -982,2.0551886558532715 -983,2.0551609992980957 -984,2.0550148487091064 -985,2.0550997257232666 -986,2.055227041244507 -987,2.0550284385681152 -988,2.055014133453369 -989,2.055161476135254 -990,2.054929256439209 -991,2.054957628250122 -992,2.0550529956817627 -993,2.054870843887329 -994,2.054975986480713 -995,2.0552730560302734 -996,2.0551564693450928 -997,2.0550577640533447 -998,2.055100440979004 -999,2.055159330368042 -1000,2.0551464557647705
diff --git a/training/time_weighting/cubic_root/training_config.txt b/training/time_weighting/cubic_root/training_config.txt
deleted file mode 100644
index 8bf709c..0000000
--- a/training/time_weighting/cubic_root/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0]  # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3]  # Size: [batch_size, t_points]
-
-        min_weight = 0.01  # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight)  # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1)  # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
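For reference, each deleted training_log.csv spans epochs 0-1000 (1002 lines including the header). If the runs need to be compared after this cleanup, the logs can be recovered from git history and plotted; a sketch assuming the three files have been checked out locally (the filenames and commit placeholder are illustrative, not paths from the repository):

    import pandas as pd
    import matplotlib.pyplot as plt

    # Recover the deleted logs first, e.g.:
    #   git show <commit>~1:training/time_weighting/cubic/training_log.csv > cubic.csv
    for name in ("cubic", "cubic_mirrored", "cubic_root"):
        df = pd.read_csv(f"{name}.csv")  # columns: Epoch, Loss
        plt.semilogy(df["Epoch"], df["Loss"], label=name)

    plt.xlabel("Epoch")
    plt.ylabel("Weighted MSE loss")
    plt.legend()
    plt.savefig("time_weighting_loss_comparison.png")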
-94,1.6732447147369385 -95,1.662967324256897 -96,1.6527365446090698 -97,1.6425807476043701 -98,1.6325258016586304 -99,1.6226006746292114 -100,1.6128342151641846 -101,1.6032302379608154 -102,1.5937778949737549 -103,1.5844647884368896 -104,1.5752677917480469 -105,1.5661566257476807 -106,1.5571343898773193 -107,1.5481972694396973 -108,1.5393567085266113 -109,1.530625343322754 -110,1.5220052003860474 -111,1.5134971141815186 -112,1.505105972290039 -113,1.4968204498291016 -114,1.48863685131073 -115,1.480558156967163 -116,1.4725959300994873 -117,1.4647555351257324 -118,1.457045555114746 -119,1.4494673013687134 -120,1.442007064819336 -121,1.434664011001587 -122,1.427437424659729 -123,1.4203137159347534 -124,1.4132962226867676 -125,1.4063827991485596 -126,1.39957594871521 -127,1.392884373664856 -128,1.386305332183838 -129,1.3798342943191528 -130,1.3734647035598755 -131,1.3671900033950806 -132,1.3609954118728638 -133,1.354870319366455 -134,1.348832130432129 -135,1.3428786993026733 -136,1.3370122909545898 -137,1.331228494644165 -138,1.3255186080932617 -139,1.3198776245117188 -140,1.31429123878479 -141,1.308756709098816 -142,1.3032773733139038 -143,1.2978607416152954 -144,1.2925114631652832 -145,1.2872297763824463 -146,1.2820124626159668 -147,1.276859164237976 -148,1.2717289924621582 -149,1.2665756940841675 -150,1.26143217086792 -151,1.2563081979751587 -152,1.2512109279632568 -153,1.246135950088501 -154,1.2410897016525269 -155,1.236074447631836 -156,1.2310867309570312 -157,1.2261301279067993 -158,1.2212023735046387 -159,1.2163134813308716 -160,1.2114677429199219 -161,1.2066590785980225 -162,1.2018927335739136 -163,1.1971721649169922 -164,1.1925042867660522 -165,1.1878777742385864 -166,1.1832987070083618 -167,1.1787718534469604 -168,1.174290657043457 -169,1.169861912727356 -170,1.1654812097549438 -171,1.1611614227294922 -172,1.15690016746521 -173,1.152706265449524 -174,1.1485880613327026 -175,1.1445385217666626 -176,1.140566349029541 -177,1.1366692781448364 -178,1.1328518390655518 -179,1.1291165351867676 -180,1.1254699230194092 -181,1.1219000816345215 -182,1.1184104681015015 -183,1.1149920225143433 -184,1.1116409301757812 -185,1.1083697080612183 -186,1.1051955223083496 -187,1.1021323204040527 -188,1.0991960763931274 -189,1.0963915586471558 -190,1.0937139987945557 -191,1.0911564826965332 -192,1.0887101888656616 -193,1.08636474609375 -194,1.0841104984283447 -195,1.0819432735443115 -196,1.0798640251159668 -197,1.0778627395629883 -198,1.0759369134902954 -199,1.0740876197814941 -200,1.0723094940185547 -201,1.0705980062484741 -202,1.0689465999603271 -203,1.067348837852478 -204,1.0658016204833984 -205,1.064301609992981 -206,1.0628422498703003 -207,1.0614192485809326 -208,1.06002938747406 -209,1.0586740970611572 -210,1.0573493242263794 -211,1.056058645248413 -212,1.05479896068573 -213,1.0535701513290405 -214,1.0523693561553955 -215,1.051194429397583 -216,1.0500473976135254 -217,1.0489234924316406 -218,1.0478241443634033 -219,1.046746015548706 -220,1.0456864833831787 -221,1.0446468591690063 -222,1.0436252355575562 -223,1.0426212549209595 -224,1.0416339635849 -225,1.0406625270843506 -226,1.0397082567214966 -227,1.0387680530548096 -228,1.0378426313400269 -229,1.0369315147399902 -230,1.036032795906067 -231,1.0351454019546509 -232,1.034273386001587 -233,1.0334147214889526 -234,1.0325700044631958 -235,1.031740427017212 -236,1.0309257507324219 -237,1.0301345586776733 -238,1.029380202293396 -239,1.0286917686462402 -240,1.0280870199203491 -241,1.027538776397705 -242,1.0270482301712036 -243,1.0265858173370361 
-244,1.0261386632919312 -245,1.0257049798965454 -246,1.0252799987792969 -247,1.0248632431030273 -248,1.0244522094726562 -249,1.0240455865859985 -250,1.0236425399780273 -251,1.0232415199279785 -252,1.022842288017273 -253,1.022443175315857 -254,1.0220472812652588 -255,1.021653413772583 -256,1.0212615728378296 -257,1.020871877670288 -258,1.0204843282699585 -259,1.020101547241211 -260,1.0197219848632812 -261,1.0193471908569336 -262,1.0189777612686157 -263,1.0186138153076172 -264,1.0182554721832275 -265,1.017905354499817 -266,1.0175621509552002 -267,1.0172266960144043 -268,1.0168983936309814 -269,1.016576886177063 -270,1.0162622928619385 -271,1.0159536600112915 -272,1.0156502723693848 -273,1.0153534412384033 -274,1.0150617361068726 -275,1.0147746801376343 -276,1.014492154121399 -277,1.01421320438385 -278,1.0139387845993042 -279,1.0136682987213135 -280,1.0134012699127197 -281,1.0131378173828125 -282,1.012877345085144 -283,1.0126200914382935 -284,1.0123652219772339 -285,1.0121129751205444 -286,1.0118625164031982 -287,1.0116134881973267 -288,1.0113674402236938 -289,1.0111231803894043 -290,1.0108808279037476 -291,1.0106405019760132 -292,1.010401725769043 -293,1.010167121887207 -294,1.0099338293075562 -295,1.0097030401229858 -296,1.0094743967056274 -297,1.0092476606369019 -298,1.0090230703353882 -299,1.0087997913360596 -300,1.0085781812667847 -301,1.0083587169647217 -302,1.0081400871276855 -303,1.0079232454299927 -304,1.00770902633667 -305,1.0074964761734009 -306,1.0072855949401855 -307,1.0070769786834717 -308,1.0068711042404175 -309,1.0066719055175781 -310,1.006477952003479 -311,1.0062875747680664 -312,1.0061007738113403 -313,1.005916714668274 -314,1.0057348012924194 -315,1.0055556297302246 -316,1.0053786039352417 -317,1.005203127861023 -318,1.0050287246704102 -319,1.004855990409851 -320,1.0046848058700562 -321,1.0045143365859985 -322,1.0043442249298096 -323,1.0041753053665161 -324,1.0040063858032227 -325,1.0038374662399292 -326,1.0036685466766357 -327,1.0034998655319214 -328,1.0033305883407593 -329,1.0031611919403076 -330,1.0029911994934082 -331,1.002821445465088 -332,1.0026516914367676 -333,1.0024826526641846 -334,1.0023143291473389 -335,1.0021469593048096 -336,1.001981496810913 -337,1.0018177032470703 -338,1.0016552209854126 -339,1.0014961957931519 -340,1.0013389587402344 -341,1.001184105873108 -342,1.0010335445404053 -343,1.0008816719055176 -344,1.000728726387024 -345,1.0005754232406616 -346,1.00042724609375 -347,1.0002824068069458 -348,1.000139832496643 -349,1.0000026226043701 -350,0.9998689889907837 -351,0.9997372031211853 -352,0.999607264995575 -353,0.9994797706604004 -354,0.9993555545806885 -355,0.9992329478263855 -356,0.9991106390953064 -357,0.9989877343177795 -358,0.9988647699356079 -359,0.9987419247627258 -360,0.9986206293106079 -361,0.9985028505325317 -362,0.9983858466148376 -363,0.99826979637146 -364,0.9981549978256226 -365,0.9980422854423523 -366,0.9979296922683716 -367,0.9978163838386536 -368,0.997702419757843 -369,0.9975885152816772 -370,0.9974762201309204 -371,0.9973668456077576 -372,0.9972590804100037 -373,0.9971528053283691 -374,0.9970489144325256 -375,0.9969457387924194 -376,0.9968426823616028 -377,0.9967395067214966 -378,0.9966366291046143 -379,0.9965343475341797 -380,0.9964325428009033 -381,0.9963324069976807 -382,0.9962339997291565 -383,0.9961373209953308 -384,0.996042788028717 -385,0.9959486126899719 -386,0.9958547353744507 -387,0.9957610964775085 -388,0.9956675171852112 -389,0.9955740571022034 -390,0.9954813718795776 -391,0.9953897595405579 -392,0.9952991604804993 
-393,0.9952100515365601 -394,0.9951217770576477 -395,0.9950350522994995 -396,0.9949488043785095 -397,0.9948625564575195 -398,0.9947761297225952 -399,0.9946895241737366 -400,0.9946029186248779 -401,0.9945162534713745 -402,0.9944303035736084 -403,0.9943447113037109 -404,0.994259774684906 -405,0.9941761493682861 -406,0.9940937757492065 -407,0.9940118193626404 -408,0.9939296841621399 -409,0.9938471913337708 -410,0.9937645792961121 -411,0.9936816692352295 -412,0.993598997592926 -413,0.9935172200202942 -414,0.9934359788894653 -415,0.993355393409729 -416,0.9932747483253479 -417,0.9931947588920593 -418,0.9931147694587708 -419,0.9930347800254822 -420,0.9929556250572205 -421,0.9928768873214722 -422,0.9927982687950134 -423,0.9927198886871338 -424,0.9926415085792542 -425,0.9925636649131775 -426,0.9924860000610352 -427,0.9924094676971436 -428,0.9923332929611206 -429,0.9922573566436768 -430,0.992181658744812 -431,0.9921062588691711 -432,0.9920312762260437 -433,0.9919569492340088 -434,0.9918839931488037 -435,0.9918116331100464 -436,0.9917391538619995 -437,0.9916667342185974 -438,0.9915931224822998 -439,0.9915198087692261 -440,0.9914461970329285 -441,0.9913725256919861 -442,0.9912999868392944 -443,0.9912283420562744 -444,0.9911579489707947 -445,0.9910879135131836 -446,0.9910176396369934 -447,0.9909477233886719 -448,0.9908776879310608 -449,0.9908075332641602 -450,0.9907376766204834 -451,0.9906678199768066 -452,0.9905980229377747 -453,0.99052894115448 -454,0.9904605746269226 -455,0.9903925061225891 -456,0.9903255701065063 -457,0.9902592301368713 -458,0.990193247795105 -459,0.990127682685852 -460,0.9900621175765991 -461,0.9900007843971252 -462,0.9899412989616394 -463,0.9898828864097595 -464,0.9898251891136169 -465,0.9897675514221191 -466,0.9897098541259766 -467,0.989652156829834 -468,0.9895943999290466 -469,0.9895360469818115 -470,0.9894774556159973 -471,0.9894186854362488 -472,0.9893597960472107 -473,0.9893015027046204 -474,0.9892439842224121 -475,0.9891865253448486 -476,0.9891295433044434 -477,0.989072859287262 -478,0.9890167117118835 -479,0.9889603853225708 -480,0.9889046549797058 -481,0.9888487458229065 -482,0.9887932538986206 -483,0.988738477230072 -484,0.9886844158172607 -485,0.9886307716369629 -486,0.9885774254798889 -487,0.988524317741394 -488,0.988471508026123 -489,0.9884190559387207 -490,0.9883668422698975 -491,0.9883149862289429 -492,0.9882631301879883 -493,0.9882116317749023 -494,0.988160252571106 -495,0.9881095290184021 -496,0.9880592226982117 -497,0.9880090355873108 -498,0.9879589676856995 -499,0.9879087209701538 -500,0.9878582954406738 -501,0.9878078103065491 -502,0.9877572059631348 -503,0.9877064228057861 -504,0.9876559972763062 -505,0.9876061677932739 -506,0.9875562787055969 -507,0.9875065684318542 -508,0.9874568581581116 -509,0.987407386302948 -510,0.987358033657074 -511,0.9873086810112 -512,0.9872597455978394 -513,0.9872108697891235 -514,0.9871619939804077 -515,0.9871131777763367 -516,0.9870644807815552 -517,0.9870155453681946 -518,0.9869667887687683 -519,0.9869177937507629 -520,0.9868687987327576 -521,0.9868199825286865 -522,0.9867717623710632 -523,0.9867231249809265 -524,0.9866746068000793 -525,0.9866262674331665 -526,0.9865776896476746 -527,0.9865295886993408 -528,0.9864817261695862 -529,0.986433744430542 -530,0.9863861203193665 -531,0.9863382577896118 -532,0.9862905740737915 -533,0.9862428903579712 -534,0.9861952066421509 -535,0.9861472845077515 -536,0.9860997200012207 -537,0.9860520362854004 -538,0.9860040545463562 -539,0.9859561324119568 -540,0.9859082698822021 
-541,0.9858607053756714 -542,0.9858132004737854 -543,0.9857661724090576 -544,0.9857190251350403 -545,0.9856720566749573 -546,0.9856253266334534 -547,0.9855784773826599 -548,0.9855316281318665 -549,0.9854845404624939 -550,0.9854376912117004 -551,0.9853912591934204 -552,0.9853448867797852 -553,0.9852985739707947 -554,0.9852524995803833 -555,0.985206127166748 -556,0.985159695148468 -557,0.9851132035255432 -558,0.9850662350654602 -559,0.9850194454193115 -560,0.9849728941917419 -561,0.9849265813827515 -562,0.9848814010620117 -563,0.984836220741272 -564,0.984792172908783 -565,0.9847475290298462 -566,0.9847020506858826 -567,0.9846555590629578 -568,0.9846082925796509 -569,0.9845612645149231 -570,0.9845154285430908 -571,0.9844696521759033 -572,0.984424352645874 -573,0.9843783974647522 -574,0.9843317866325378 -575,0.9842848181724548 -576,0.9842388033866882 -577,0.9841935634613037 -578,0.9841486215591431 -579,0.9841042160987854 -580,0.9840595722198486 -581,0.9840145707130432 -582,0.9839695692062378 -583,0.9839245676994324 -584,0.9838792681694031 -585,0.9838337302207947 -586,0.9837880730628967 -587,0.9837422966957092 -588,0.983695924282074 -589,0.9836496710777283 -590,0.9836038947105408 -591,0.9835576415061951 -592,0.9835076332092285 -593,0.9834541082382202 -594,0.9833984375 -595,0.9833416938781738 -596,0.9832847118377686 -597,0.983228325843811 -598,0.9831724166870117 -599,0.9831181764602661 -600,0.9830651879310608 -601,0.9830135107040405 -602,0.9829630851745605 -603,0.9829137921333313 -604,0.982864499092102 -605,0.9828155040740967 -606,0.9827666878700256 -607,0.982718825340271 -608,0.9826716780662537 -609,0.9826250076293945 -610,0.9825787544250488 -611,0.9825329184532166 -612,0.9824874997138977 -613,0.9824422001838684 -614,0.982396125793457 -615,0.9823498129844666 -616,0.9823036193847656 -617,0.9822573065757751 -618,0.9822107553482056 -619,0.9821646213531494 -620,0.9821189641952515 -621,0.9820727705955505 -622,0.9820264577865601 -623,0.9819804430007935 -624,0.9819344878196716 -625,0.981887936592102 -626,0.9818414449691772 -627,0.9817948937416077 -628,0.9817482233047485 -629,0.9817016124725342 -630,0.9816551804542542 -631,0.9816087484359741 -632,0.981563150882721 -633,0.9815175533294678 -634,0.981471836566925 -635,0.9814271926879883 -636,0.9813841581344604 -637,0.9813402891159058 -638,0.9812960624694824 -639,0.9812521934509277 -640,0.9812089204788208 -641,0.9811652898788452 -642,0.9811217784881592 -643,0.9810790419578552 -644,0.9810366630554199 -645,0.9809940457344055 -646,0.9809508323669434 -647,0.9809075593948364 -648,0.9808642864227295 -649,0.9808208346366882 -650,0.9807780385017395 -651,0.980735719203949 -652,0.9806936979293823 -653,0.9806519150733948 -654,0.9806104302406311 -655,0.980567991733551 -656,0.9805258512496948 -657,0.9804838299751282 -658,0.9804417490959167 -659,0.9803991317749023 -660,0.9803569912910461 -661,0.9803154468536377 -662,0.980273425579071 -663,0.980231523513794 -664,0.9801896214485168 -665,0.9801483750343323 -666,0.9801057577133179 -667,0.9800630211830139 -668,0.9800207614898682 -669,0.9799791574478149 -670,0.9799374341964722 -671,0.9798951745033264 -672,0.9798532724380493 -673,0.9798116087913513 -674,0.979770302772522 -675,0.9797292947769165 -676,0.9796868562698364 -677,0.9796448945999146 -678,0.9796028733253479 -679,0.9795616269111633 -680,0.9795206785202026 -681,0.9794785976409912 -682,0.979436993598938 -683,0.9793957471847534 -684,0.9793550968170166 -685,0.979314923286438 -686,0.9792730808258057 -687,0.9792311787605286 -688,0.9791899919509888 -689,0.9791495203971863 
-690,0.9791104197502136 -691,0.9790698885917664 -692,0.9790278673171997 -693,0.9789857268333435 -694,0.9789445400238037 -695,0.978903591632843 -696,0.9788643717765808 -697,0.9788236021995544 -698,0.9787814021110535 -699,0.978739857673645 -700,0.9786995649337769 -701,0.9786593317985535 -702,0.9786207675933838 -703,0.9785810708999634 -704,0.9785395264625549 -705,0.9784969091415405 -706,0.9784561395645142 -707,0.9784160256385803 -708,0.9783768653869629 -709,0.9783378839492798 -710,0.9782970547676086 -711,0.9782564640045166 -712,0.9782161116600037 -713,0.9781764149665833 -714,0.9781372547149658 -715,0.9780964255332947 -716,0.9780548810958862 -717,0.9780144691467285 -718,0.9779747128486633 -719,0.9779360890388489 -720,0.977898359298706 -721,0.9778602719306946 -722,0.9778218269348145 -723,0.9777831435203552 -724,0.9777445197105408 -725,0.9777064919471741 -726,0.9776688814163208 -727,0.9776313900947571 -728,0.9775930643081665 -729,0.9775559306144714 -730,0.9775182604789734 -731,0.9774801135063171 -732,0.9774418473243713 -733,0.9774023294448853 -734,0.9773613214492798 -735,0.977321207523346 -736,0.9772819876670837 -737,0.9772428870201111 -738,0.977204442024231 -739,0.9771649241447449 -740,0.9771244525909424 -741,0.9770833849906921 -742,0.9770426154136658 -743,0.9770022034645081 -744,0.9769622683525085 -745,0.9769218564033508 -746,0.9768804311752319 -747,0.9768390655517578 -748,0.9767982363700867 -749,0.9767576456069946 -750,0.9767175912857056 -751,0.9766768217086792 -752,0.9766349196434021 -753,0.976593017578125 -754,0.9765517711639404 -755,0.9765108227729797 -756,0.976470410823822 -757,0.9764295816421509 -758,0.9763878583908081 -759,0.9763455986976624 -760,0.9763033390045166 -761,0.9762616157531738 -762,0.9762201905250549 -763,0.9761781692504883 -764,0.9761354923248291 -765,0.9760927557945251 -766,0.9760504961013794 -767,0.9760094285011292 -768,0.975967526435852 -769,0.9759242534637451 -770,0.9758809208869934 -771,0.9758382439613342 -772,0.9757963418960571 -773,0.97575443983078 -774,0.9757108688354492 -775,0.9756677746772766 -776,0.9756252765655518 -777,0.9755828380584717 -778,0.97553950548172 -779,0.9754956364631653 -780,0.9754523038864136 -781,0.9754091501235962 -782,0.9753655195236206 -783,0.9753218293190002 -784,0.9752784967422485 -785,0.9752352833747864 -786,0.975191593170166 -787,0.9751477241516113 -788,0.9751049280166626 -789,0.9750619530677795 -790,0.9750189781188965 -791,0.974976122379303 -792,0.9749336838722229 -793,0.9748917818069458 -794,0.9748495817184448 -795,0.9748068451881409 -796,0.9747642874717712 -797,0.9747222065925598 -798,0.9746801257133484 -799,0.9746379852294922 -800,0.9745959639549255 -801,0.9745545387268066 -802,0.9745123386383057 -803,0.9744700789451599 -804,0.9744282960891724 -805,0.9743868112564087 -806,0.9743460416793823 -807,0.9743053913116455 -808,0.9742649793624878 -809,0.9742230772972107 -810,0.9741821885108948 -811,0.9741423726081848 -812,0.9741036295890808 -813,0.9740648865699768 -814,0.9740229845046997 -815,0.9739823937416077 -816,0.973942756652832 -817,0.9739041328430176 -818,0.9738662242889404 -819,0.9738273024559021 -820,0.9737858772277832 -821,0.9737459421157837 -822,0.9737071990966797 -823,0.9736696481704712 -824,0.9736326336860657 -825,0.9735919833183289 -826,0.973551869392395 -827,0.9735122323036194 -828,0.9734736084938049 -829,0.9734358787536621 -830,0.9733966588973999 -831,0.9733567833900452 -832,0.9733179807662964 -833,0.9732803702354431 -834,0.973243772983551 -835,0.9732034206390381 -836,0.9731636047363281 -837,0.9731249809265137 
-838,0.9730881452560425 -839,0.9730502367019653 -840,0.9730097651481628 -841,0.9729709029197693 -842,0.9729335904121399 -843,0.9728949666023254 -844,0.9728541970252991 -845,0.9728134870529175 -846,0.9727743864059448 -847,0.972732424736023 -848,0.9726884961128235 -849,0.9726470112800598 -850,0.9726085066795349 -851,0.9725669622421265 -852,0.9725236892700195 -853,0.9724826216697693 -854,0.9724440574645996 -855,0.9724026322364807 -856,0.9723581075668335 -857,0.9723177552223206 -858,0.972280740737915 -859,0.9722402095794678 -860,0.9722007513046265 -861,0.9721589088439941 -862,0.9721240401268005 -863,0.9720854163169861 -864,0.9720414876937866 -865,0.9720038771629333 -866,0.9719631671905518 -867,0.9719323515892029 -868,0.9718955159187317 -869,0.9718523621559143 -870,0.9718146920204163 -871,0.9717739820480347 -872,0.9717448353767395 -873,0.9717136025428772 -874,0.9716742634773254 -875,0.9716296792030334 -876,0.9715954661369324 -877,0.9715543389320374 -878,0.9715271592140198 -879,0.9715010523796082 -880,0.9714651703834534 -881,0.9714208841323853 -882,0.9713800549507141 -883,0.9713448286056519 -884,0.9713121652603149 -885,0.9712838530540466 -886,0.9712488651275635 -887,0.9712077379226685 -888,0.9711694717407227 -889,0.9711344838142395 -890,0.9711055755615234 -891,0.9710726141929626 -892,0.9710344672203064 -893,0.9710002541542053 -894,0.970966637134552 -895,0.9709355235099792 -896,0.970899224281311 -897,0.9708660244941711 -898,0.9708309769630432 -899,0.9707981944084167 -900,0.9707592129707336 -901,0.970731794834137 -902,0.9706949591636658 -903,0.9706668853759766 -904,0.9706294536590576 -905,0.9705886244773865 -906,0.9705538153648376 -907,0.9705197215080261 -908,0.9704883098602295 -909,0.9704538583755493 -910,0.9704223871231079 -911,0.9703918695449829 -912,0.970360517501831 -913,0.9703220129013062 -914,0.9702877998352051 -915,0.9702542424201965 -916,0.9702247977256775 -917,0.9701899886131287 -918,0.9701569676399231 -919,0.9701244831085205 -920,0.9700940847396851 -921,0.9700579047203064 -922,0.9700253009796143 -923,0.969992458820343 -924,0.9699633121490479 -925,0.9699286222457886 -926,0.9698948860168457 -927,0.9698629379272461 -928,0.9698328971862793 -929,0.9697980284690857 -930,0.9697672128677368 -931,0.9697376489639282 -932,0.969707727432251 -933,0.9696716070175171 -934,0.9696425795555115 -935,0.9696101546287537 -936,0.96958327293396 -937,0.9695488214492798 -938,0.9695168733596802 -939,0.9694852828979492 -940,0.9694557785987854 -941,0.9694222211837769 -942,0.9693911671638489 -943,0.9693630933761597 -944,0.9693313241004944 -945,0.9693012237548828 -946,0.9692727327346802 -947,0.9692434072494507 -948,0.9692091345787048 -949,0.9691787362098694 -950,0.9691526293754578 -951,0.9691223502159119 -952,0.9690874218940735 -953,0.9690574407577515 -954,0.9690330028533936 -955,0.9690022468566895 -956,0.9689672589302063 -957,0.9689375162124634 -958,0.9689106941223145 -959,0.968878984451294 -960,0.9688495993614197 -961,0.9688233137130737 -962,0.9687944054603577 -963,0.9687609076499939 -964,0.9687316417694092 -965,0.9687047004699707 -966,0.9686732888221741 -967,0.9686447381973267 -968,0.9686177968978882 -969,0.9685904383659363 -970,0.9685563445091248 -971,0.968530535697937 -972,0.968501627445221 -973,0.9684756994247437 -974,0.9684423804283142 -975,0.9684156775474548 -976,0.9683876037597656 -977,0.9683617949485779 -978,0.9683277606964111 -979,0.9683034420013428 -980,0.9682755470275879 -981,0.9682525992393494 -982,0.9682177901268005 -983,0.9681909680366516 -984,0.9681602716445923 -985,0.9681358337402344 
-986,0.9681033492088318 -987,0.9680788516998291 -988,0.9680535197257996 -989,0.9680317044258118 -990,0.9679985642433167 -991,0.9679653644561768 -992,0.9679375886917114 -993,0.9679133296012878 -994,0.9678831696510315 -995,0.9678567051887512 -996,0.9678340554237366 -997,0.9678078889846802 -998,0.9677746891975403 -999,0.9677491188049316 -1000,0.9677274227142334 diff --git a/training/time_weighting/cubic_root_mirrored/training_config.txt b/training/time_weighting/cubic_root_mirrored/training_config.txt deleted file mode 100644 index fd7afc3..0000000 --- a/training/time_weighting/cubic_root_mirrored/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0.0001 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting/cubic_root_mirrored/training_log.csv b/training/time_weighting/cubic_root_mirrored/training_log.csv deleted file mode 100644 index bfd074c..0000000 --- a/training/time_weighting/cubic_root_mirrored/training_log.csv +++ /dev/null @@ -1,1002 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,292.2589416503906 -2,175.0437469482422 -3,82.3321304321289 -4,41.70814895629883 -5,25.63973045349121 -6,20.604887008666992 -7,23.526865005493164 -8,22.734716415405273 -9,21.758644104003906 -10,21.368209838867188 -11,20.580547332763672 -12,19.803377151489258 -13,18.91210174560547 -14,18.09048080444336 -15,17.106117248535156 -16,15.424467086791992 -17,13.826833724975586 
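The mirrored variant whose config was just deleted differs from cubic_root only in being evaluated on (t_max - t). A directly transcribed sketch, with an illustrative driver matching the configs' "Time Span: 0 to 10, Points: 1000":

import torch

def cubic_root_mirrored(t_span, t_max=None, min_val=0.01):
    # Same curve as cubic_root but reflected in time: the weight starts at
    # 1.0 and decays to min_val, so early trajectory error dominates.
    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
    t_max = t_max if t_max is not None else t_span[-1]
    return min_val + ((1 - min_val) / t_max ** (1 / 3)) * (t_max - t_span) ** (1 / 3)

t = torch.linspace(0, 10, 1000)
w = cubic_root_mirrored(t)
print(round(w[0].item(), 3), round(w[-1].item(), 3))  # 1.0 at t=0, 0.01 at t=t_max

The two logs are consistent with that emphasis: cubic_root ends near 0.968 after 1000 epochs while the mirrored run plateaus near 2.458, plausibly because the mirrored weights barely penalize error late in the trajectory, where the setpoint should be held.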
-18,12.259550094604492 -19,10.754024505615234 -20,9.787008285522461 -21,9.015267372131348 -22,8.288488388061523 -23,7.385382652282715 -24,6.623676776885986 -25,6.1179656982421875 -26,5.821076393127441 -27,5.602984428405762 -28,5.388195514678955 -29,5.2909159660339355 -30,5.278778553009033 -31,5.247122287750244 -32,5.204382419586182 -33,5.150393962860107 -34,5.082691192626953 -35,4.99971342086792 -36,4.902540683746338 -37,4.793982028961182 -38,4.677857875823975 -39,4.557766914367676 -40,4.436788558959961 -41,4.3175740242004395 -42,4.203248500823975 -43,4.095767021179199 -44,3.996610164642334 -45,3.9075517654418945 -46,3.8333373069763184 -47,3.7809972763061523 -48,3.7495334148406982 -49,3.730388879776001 -50,3.715834617614746 -51,3.700629949569702 -52,3.68172550201416 -53,3.6577584743499756 -54,3.6286768913269043 -55,3.5953142642974854 -56,3.5590600967407227 -57,3.5216877460479736 -58,3.485125780105591 -59,3.451225757598877 -60,3.4212779998779297 -61,3.395691156387329 -62,3.3739724159240723 -63,3.355052947998047 -64,3.3377058506011963 -65,3.3208651542663574 -66,3.3037705421447754 -67,3.285963773727417 -68,3.2672507762908936 -69,3.247678279876709 -70,3.227468490600586 -71,3.2069761753082275 -72,3.1866402626037598 -73,3.1668646335601807 -74,3.147984266281128 -75,3.1301605701446533 -76,3.1134133338928223 -77,3.0975823402404785 -78,3.082444667816162 -79,3.067737579345703 -80,3.0531959533691406 -81,3.038651704788208 -82,3.0240156650543213 -83,3.0092949867248535 -84,2.9945316314697266 -85,2.9798202514648438 -86,2.965283155441284 -87,2.9510326385498047 -88,2.9371554851531982 -89,2.9237027168273926 -90,2.9106738567352295 -91,2.898017406463623 -92,2.885636806488037 -93,2.8733303546905518 -94,2.861069679260254 -95,2.848824977874756 -96,2.8365707397460938 -97,2.824328899383545 -98,2.8121328353881836 -99,2.8000295162200928 -100,2.788088321685791 -101,2.7763352394104004 -102,2.764827251434326 -103,2.753589391708374 -104,2.74265456199646 -105,2.732060194015503 -106,2.721780300140381 -107,2.7118141651153564 -108,2.702164888381958 -109,2.692887783050537 -110,2.684251070022583 -111,2.676523447036743 -112,2.6695380210876465 -113,2.663193464279175 -114,2.657423257827759 -115,2.652148485183716 -116,2.647289991378784 -117,2.64278244972229 -118,2.638549327850342 -119,2.6344966888427734 -120,2.6305506229400635 -121,2.6266520023345947 -122,2.6227526664733887 -123,2.618805408477783 -124,2.6147561073303223 -125,2.6105754375457764 -126,2.60606050491333 -127,2.6013004779815674 -128,2.596435308456421 -129,2.59155011177063 -130,2.5867042541503906 -131,2.5819435119628906 -132,2.5772976875305176 -133,2.5728001594543457 -134,2.5684807300567627 -135,2.5643699169158936 -136,2.5604984760284424 -137,2.556884765625 -138,2.5535502433776855 -139,2.5505006313323975 -140,2.5477359294891357 -141,2.5452566146850586 -142,2.5430572032928467 -143,2.5411250591278076 -144,2.5394392013549805 -145,2.537975311279297 -146,2.536712884902954 -147,2.5356273651123047 -148,2.534696578979492 -149,2.533897876739502 -150,2.533205509185791 -151,2.532601833343506 -152,2.5320613384246826 -153,2.5315678119659424 -154,2.5311148166656494 -155,2.5306906700134277 -156,2.530273675918579 -157,2.5298569202423096 -158,2.5294363498687744 -159,2.5290045738220215 -160,2.528562068939209 -161,2.5281083583831787 -162,2.527644395828247 -163,2.527169942855835 -164,2.526686906814575 -165,2.5261998176574707 -166,2.5257089138031006 -167,2.5252132415771484 -168,2.52472186088562 -169,2.5242409706115723 -170,2.523776054382324 -171,2.523350238800049 -172,2.5229501724243164 
-173,2.522573709487915 -174,2.522221326828003 -175,2.5218863487243652 -176,2.5215675830841064 -177,2.521261215209961 -178,2.520967960357666 -179,2.520684242248535 -180,2.5204076766967773 -181,2.5201382637023926 -182,2.5198748111724854 -183,2.519615411758423 -184,2.5193591117858887 -185,2.5191051959991455 -186,2.5188541412353516 -187,2.5186045169830322 -188,2.518354892730713 -189,2.51810622215271 -190,2.5178580284118652 -191,2.517610788345337 -192,2.5173654556274414 -193,2.517123222351074 -194,2.516885757446289 -195,2.5166525840759277 -196,2.5164248943328857 -197,2.5162010192871094 -198,2.515981435775757 -199,2.5157666206359863 -200,2.515557289123535 -201,2.5153520107269287 -202,2.515150308609009 -203,2.5149524211883545 -204,2.5147578716278076 -205,2.5145671367645264 -206,2.5143795013427734 -207,2.514194965362549 -208,2.514012336730957 -209,2.5138320922851562 -210,2.513653516769409 -211,2.5134763717651367 -212,2.5133020877838135 -213,2.513129711151123 -214,2.512958288192749 -215,2.5127875804901123 -216,2.512618064880371 -217,2.512448787689209 -218,2.5122804641723633 -219,2.512113571166992 -220,2.5119473934173584 -221,2.51178240776062 -222,2.5116183757781982 -223,2.5114548206329346 -224,2.5112924575805664 -225,2.5111305713653564 -226,2.510970115661621 -227,2.510810375213623 -228,2.510651111602783 -229,2.5104925632476807 -230,2.510338068008423 -231,2.510185480117798 -232,2.5100347995758057 -233,2.509884834289551 -234,2.509735584259033 -235,2.5095877647399902 -236,2.5094404220581055 -237,2.509293794631958 -238,2.509148120880127 -239,2.5090036392211914 -240,2.5088603496551514 -241,2.508718729019165 -242,2.508578062057495 -243,2.5084385871887207 -244,2.508300542831421 -245,2.508164167404175 -246,2.508028507232666 -247,2.507894277572632 -248,2.5077614784240723 -249,2.507628917694092 -250,2.5074973106384277 -251,2.507366418838501 -252,2.5072357654571533 -253,2.507106065750122 -254,2.5069773197174072 -255,2.5068509578704834 -256,2.506726026535034 -257,2.5066020488739014 -258,2.506478786468506 -259,2.5063557624816895 -260,2.5062334537506104 -261,2.5061118602752686 -262,2.505990982055664 -263,2.505871057510376 -264,2.5057504177093506 -265,2.5056300163269043 -266,2.5055105686187744 -267,2.505391836166382 -268,2.5052733421325684 -269,2.505155563354492 -270,2.505037546157837 -271,2.504920482635498 -272,2.504802942276001 -273,2.5046863555908203 -274,2.5045695304870605 -275,2.504453182220459 -276,2.504337787628174 -277,2.5042216777801514 -278,2.504106044769287 -279,2.503990650177002 -280,2.503875970840454 -281,2.5037620067596436 -282,2.503647804260254 -283,2.5035343170166016 -284,2.50342059135437 -285,2.5033071041107178 -286,2.5031943321228027 -287,2.503082513809204 -288,2.5029706954956055 -289,2.502859354019165 -290,2.502748727798462 -291,2.502638816833496 -292,2.5025293827056885 -293,2.502420425415039 -294,2.502311944961548 -295,2.502203941345215 -296,2.502095937728882 -297,2.5019893646240234 -298,2.501882791519165 -299,2.501776695251465 -300,2.50167179107666 -301,2.5015673637390137 -302,2.5014634132385254 -303,2.5013601779937744 -304,2.5012571811676025 -305,2.501154661178589 -306,2.501052141189575 -307,2.5009498596191406 -308,2.5008480548858643 -309,2.500746488571167 -310,2.500645399093628 -311,2.500545024871826 -312,2.500446081161499 -313,2.5003468990325928 -314,2.5002479553222656 -315,2.500149965286255 -316,2.500051736831665 -317,2.4999544620513916 -318,2.4998576641082764 -319,2.499760389328003 -320,2.4996635913848877 -321,2.4995670318603516 -322,2.4994704723358154 -323,2.4993743896484375 
-324,2.4992780685424805 -325,2.4991817474365234 -326,2.499086380004883 -327,2.498991012573242 -328,2.4988956451416016 -329,2.49880051612854 -330,2.4987058639526367 -331,2.4986109733581543 -332,2.498516798019409 -333,2.498422622680664 -334,2.498328924179077 -335,2.4982354640960693 -336,2.4981417655944824 -337,2.4980485439300537 -338,2.497955322265625 -339,2.4978623390197754 -340,2.497769355773926 -341,2.497676372528076 -342,2.497584104537964 -343,2.4974918365478516 -344,2.4973995685577393 -345,2.497307777404785 -346,2.497215509414673 -347,2.497123956680298 -348,2.4970321655273438 -349,2.4969406127929688 -350,2.4968490600585938 -351,2.4967575073242188 -352,2.496666193008423 -353,2.496574640274048 -354,2.49648380279541 -355,2.4963927268981934 -356,2.4963021278381348 -357,2.496211290359497 -358,2.4961211681365967 -359,2.496030569076538 -360,2.4959402084350586 -361,2.4958505630493164 -362,2.495760440826416 -363,2.4956705570220947 -364,2.4955809116363525 -365,2.4954912662506104 -366,2.4954020977020264 -367,2.4953129291534424 -368,2.4952237606048584 -369,2.4951374530792236 -370,2.4950528144836426 -371,2.494967222213745 -372,2.4948816299438477 -373,2.4947962760925293 -374,2.494710683822632 -375,2.4946253299713135 -376,2.494539499282837 -377,2.4944541454315186 -378,2.4943692684173584 -379,2.49428391456604 -380,2.4941980838775635 -381,2.4941132068634033 -382,2.494028091430664 -383,2.493943214416504 -384,2.493859052658081 -385,2.4937753677368164 -386,2.4936916828155518 -387,2.493608236312866 -388,2.4935247898101807 -389,2.493441104888916 -390,2.4933571815490723 -391,2.4932734966278076 -392,2.4931893348693848 -393,2.49310564994812 -394,2.4930214881896973 -395,2.4929375648498535 -396,2.492854118347168 -397,2.492770195007324 -398,2.492687225341797 -399,2.4926042556762695 -400,2.492521047592163 -401,2.492438316345215 -402,2.4923555850982666 -403,2.4922733306884766 -404,2.4921905994415283 -405,2.492108106613159 -406,2.492025852203369 -407,2.491943836212158 -408,2.491861343383789 -409,2.491779088973999 -410,2.491696834564209 -411,2.491614580154419 -412,2.491532564163208 -413,2.4914498329162598 -414,2.4913673400878906 -415,2.4912853240966797 -416,2.4912025928497314 -417,2.4911201000213623 -418,2.4910385608673096 -419,2.490957021713257 -420,2.4908764362335205 -421,2.4907960891723633 -422,2.490715503692627 -423,2.490635395050049 -424,2.490556001663208 -425,2.4904768466949463 -426,2.490398406982422 -427,2.4903199672698975 -428,2.490241765975952 -429,2.490163803100586 -430,2.490086555480957 -431,2.4900095462799072 -432,2.4899330139160156 -433,2.489856481552124 -434,2.4897797107696533 -435,2.489703416824341 -436,2.48962664604187 -437,2.4895501136779785 -438,2.4894745349884033 -439,2.4893991947174072 -440,2.4893250465393066 -441,2.489250898361206 -442,2.4891774654388428 -443,2.4891045093536377 -444,2.4890317916870117 -445,2.488959312438965 -446,2.488887310028076 -447,2.4888155460357666 -448,2.4887442588806152 -449,2.488673210144043 -450,2.4886014461517334 -451,2.4885308742523193 -452,2.488459825515747 -453,2.488389492034912 -454,2.488319158554077 -455,2.4882490634918213 -456,2.4881787300109863 -457,2.4881086349487305 -458,2.4880387783050537 -459,2.487969160079956 -460,2.4878990650177 -461,2.4878294467926025 -462,2.487759828567505 -463,2.4876904487609863 -464,2.4876208305358887 -465,2.48755145072937 -466,2.4874820709228516 -467,2.487413167953491 -468,2.4873440265655518 -469,2.4872748851776123 -470,2.4872055053710938 -471,2.4871368408203125 -472,2.487067937850952 -473,2.486999034881592 -474,2.4869303703308105 
-475,2.4868619441986084 -476,2.486793279647827 -477,2.486724853515625 -478,2.4866561889648438 -479,2.4865880012512207 -480,2.4865193367004395 -481,2.4864509105682373 -482,2.4863827228546143 -483,2.486314058303833 -484,2.486246109008789 -485,2.486177921295166 -486,2.4861092567443848 -487,2.48604154586792 -488,2.485973596572876 -489,2.485905647277832 -490,2.485837697982788 -491,2.485769748687744 -492,2.4857020378112793 -493,2.4856343269348145 -494,2.4855663776397705 -495,2.4854984283447266 -496,2.485430955886841 -497,2.485363245010376 -498,2.485295295715332 -499,2.485227584838867 -500,2.4851598739624023 -501,2.4850921630859375 -502,2.4850244522094727 -503,2.4849562644958496 -504,2.484888792037964 -505,2.484821081161499 -506,2.484753131866455 -507,2.484685182571411 -508,2.4846174716949463 -509,2.4845495223999023 -510,2.4844815731048584 -511,2.4844138622283936 -512,2.4843456745147705 -513,2.4842777252197266 -514,2.4842097759246826 -515,2.4841418266296387 -516,2.484074115753174 -517,2.484005928039551 -518,2.483938217163086 -519,2.483870029449463 -520,2.483802318572998 -521,2.483734607696533 -522,2.4836673736572266 -523,2.48360013961792 -524,2.483532428741455 -525,2.4834651947021484 -526,2.483398199081421 -527,2.483330726623535 -528,2.4832637310028076 -529,2.483196496963501 -530,2.4831292629241943 -531,2.4830617904663086 -532,2.482994318008423 -533,2.482927083969116 -534,2.4828600883483887 -535,2.482792615890503 -536,2.4827253818511963 -537,2.4826576709747314 -538,2.482590436935425 -539,2.482522964477539 -540,2.4824557304382324 -541,2.482388734817505 -542,2.4823217391967773 -543,2.48225474357605 -544,2.4821882247924805 -545,2.482121229171753 -546,2.4820547103881836 -547,2.4819881916046143 -548,2.481921434402466 -549,2.4818546772003174 -550,2.4817886352539062 -551,2.481722116470337 -552,2.4816555976867676 -553,2.4815893173217773 -554,2.481523036956787 -555,2.481456756591797 -556,2.4813907146453857 -557,2.4813246726989746 -558,2.4812588691711426 -559,2.4811930656433105 -560,2.4811272621154785 -561,2.481062173843384 -562,2.480997085571289 -563,2.4809329509735107 -564,2.480867624282837 -565,2.4808037281036377 -566,2.480738878250122 -567,2.4806747436523438 -568,2.4806110858917236 -569,2.4805469512939453 -570,2.480483055114746 -571,2.480419397354126 -572,2.4803555011749268 -573,2.4802918434143066 -574,2.4802277088165283 -575,2.4801642894744873 -576,2.4801008701324463 -577,2.480037212371826 -578,2.479973793029785 -579,2.479910373687744 -580,2.4798471927642822 -581,2.4797842502593994 -582,2.4797215461730957 -583,2.47965931892395 -584,2.4795970916748047 -585,2.4795358180999756 -586,2.4794743061065674 -587,2.4794130325317383 -588,2.47935152053833 -589,2.47929048538208 -590,2.4792287349700928 -591,2.4791672229766846 -592,2.4791061878204346 -593,2.4790444374084473 -594,2.478982925415039 -595,2.478922128677368 -596,2.47886061668396 -597,2.478799343109131 -598,2.4787380695343018 -599,2.4786770343780518 -600,2.478616714477539 -601,2.478555679321289 -602,2.4784951210021973 -603,2.4784348011016846 -604,2.4783742427825928 -605,2.478313684463501 -606,2.4782538414001465 -607,2.478193521499634 -608,2.478133201599121 -609,2.4780726432800293 -610,2.4780118465423584 -611,2.4779512882232666 -612,2.477890729904175 -613,2.477829933166504 -614,2.477768898010254 -615,2.477708339691162 -616,2.477647304534912 -617,2.4775867462158203 -618,2.4775259494781494 -619,2.4774656295776367 -620,2.477404832839966 -621,2.477344036102295 -622,2.477283477783203 -623,2.4772233963012695 -624,2.4771628379821777 -625,2.477102041244507 
-626,2.477041721343994 -627,2.4769811630249023 -628,2.4769208431243896 -629,2.476860523223877 -630,2.476799249649048 -631,2.476738929748535 -632,2.4766783714294434 -633,2.4766175746917725 -634,2.4765570163726807 -635,2.4764962196350098 -636,2.4764351844787598 -637,2.476374626159668 -638,2.4763143062591553 -639,2.4762537479400635 -640,2.4761931896209717 -641,2.476133108139038 -642,2.476072311401367 -643,2.4760119915008545 -644,2.4759514331817627 -645,2.475890874862671 -646,2.4758307933807373 -647,2.4757704734802246 -648,2.475710391998291 -649,2.4756503105163574 -650,2.475590467453003 -651,2.4755303859710693 -652,2.475470542907715 -653,2.475410223007202 -654,2.4753506183624268 -655,2.4752912521362305 -656,2.475231647491455 -657,2.475172519683838 -658,2.4751126766204834 -659,2.475052833557129 -660,2.4749934673309326 -661,2.474933624267578 -662,2.4748735427856445 -663,2.474813461303711 -664,2.4747536182403564 -665,2.474693775177002 -666,2.4746341705322266 -667,2.474574327468872 -668,2.4745144844055176 -669,2.474454641342163 -670,2.4743947982788086 -671,2.4743354320526123 -672,2.474275827407837 -673,2.4742164611816406 -674,2.4741573333740234 -675,2.474097728729248 -676,2.4740383625030518 -677,2.4739792346954346 -678,2.4739203453063965 -679,2.4738612174987793 -680,2.473802089691162 -681,2.473743200302124 -682,2.4736838340759277 -683,2.4736247062683105 -684,2.4735662937164307 -685,2.473508596420288 -686,2.4734508991241455 -687,2.473393201828003 -688,2.4733352661132812 -689,2.4732778072357178 -690,2.473219871520996 -691,2.4731619358062744 -692,2.4731040000915527 -693,2.4730453491210938 -694,2.472987413406372 -695,2.472928762435913 -696,2.4728705883026123 -697,2.4728124141693115 -698,2.4727540016174316 -699,2.4726953506469727 -700,2.4726388454437256 -701,2.4725821018218994 -702,2.4725234508514404 -703,2.472464084625244 -704,2.4724040031433105 -705,2.4723448753356934 -706,2.4722859859466553 -707,2.472226858139038 -708,2.4721689224243164 -709,2.472111463546753 -710,2.4720542430877686 -711,2.4719951152801514 -712,2.4719364643096924 -713,2.47187876701355 -714,2.471820831298828 -715,2.4717631340026855 -716,2.471705675125122 -717,2.471648693084717 -718,2.471590280532837 -719,2.4715323448181152 -720,2.4714748859405518 -721,2.471417188644409 -722,2.471360445022583 -723,2.4713034629821777 -724,2.471245288848877 -725,2.47118878364563 -726,2.471132516860962 -727,2.4710755348205566 -728,2.4710190296173096 -729,2.4709630012512207 -730,2.470906972885132 -731,2.4708502292633057 -732,2.470794439315796 -733,2.470738172531128 -734,2.470682382583618 -735,2.4706263542175293 -736,2.4705708026885986 -737,2.4705145359039307 -738,2.470458984375 -739,2.470404624938965 -740,2.4703500270843506 -741,2.4702951908111572 -742,2.470241069793701 -743,2.470187187194824 -744,2.4701335430145264 -745,2.4700801372528076 -746,2.470026731491089 -747,2.46997332572937 -748,2.4699201583862305 -749,2.46986722946167 -750,2.4698145389556885 -751,2.4697606563568115 -752,2.4697084426879883 -753,2.4696552753448486 -754,2.4696033000946045 -755,2.4695515632629395 -756,2.469498872756958 -757,2.4694454669952393 -758,2.469393730163574 -759,2.469341516494751 -760,2.469290018081665 -761,2.46923828125 -762,2.4691853523254395 -763,2.4691336154937744 -764,2.4690818786621094 -765,2.4690301418304443 -766,2.4689786434173584 -767,2.4689269065856934 -768,2.468874216079712 -769,2.468822717666626 -770,2.46877121925354 -771,2.468719720840454 -772,2.468667984008789 -773,2.468616485595703 -774,2.4685652256011963 -775,2.4685142040252686 -776,2.4684622287750244 
-777,2.4684109687805176 -778,2.468360185623169 -779,2.468308925628662 -780,2.468257427215576 -781,2.4682064056396484 -782,2.4681551456451416 -783,2.4681036472320557 -784,2.468052387237549 -785,2.468001127243042 -786,2.4679503440856934 -787,2.4678990840911865 -788,2.4678475856781006 -789,2.467796564102173 -790,2.467745304107666 -791,2.4676945209503174 -792,2.4676430225372314 -793,2.4675920009613037 -794,2.467540740966797 -795,2.4674899578094482 -796,2.4674389362335205 -797,2.4673879146575928 -798,2.467336893081665 -799,2.4672863483428955 -800,2.4672353267669678 -801,2.4671850204467773 -802,2.467134475708008 -803,2.467083692550659 -804,2.4670329093933105 -805,2.46698260307312 -806,2.4669320583343506 -807,2.46688175201416 -808,2.466831684112549 -809,2.4667811393737793 -810,2.466731071472168 -811,2.4666812419891357 -812,2.4666311740875244 -813,2.4665818214416504 -814,2.46653151512146 -815,2.4664816856384277 -816,2.4664320945739746 -817,2.4663820266723633 -818,2.4663326740264893 -819,2.466282844543457 -820,2.466233015060425 -821,2.466183662414551 -822,2.4661338329315186 -823,2.4660842418670654 -824,2.466034412384033 -825,2.46598482131958 -826,2.465934991836548 -827,2.4658854007720947 -828,2.4658358097076416 -829,2.4657864570617676 -830,2.4657368659973145 -831,2.4656870365142822 -832,2.4656381607055664 -833,2.4655885696411133 -834,2.4655401706695557 -835,2.465491533279419 -836,2.4654431343078613 -837,2.465394973754883 -838,2.4653477668762207 -839,2.465299606323242 -840,2.4652516841888428 -841,2.4652042388916016 -842,2.4651565551757812 -843,2.46510910987854 -844,2.4650611877441406 -845,2.465013265609741 -846,2.464965581893921 -847,2.464918375015259 -848,2.4648704528808594 -849,2.464823007583618 -850,2.464775800704956 -851,2.464728593826294 -852,2.464681386947632 -853,2.4646339416503906 -854,2.4645869731903076 -855,2.4645397663116455 -856,2.4644925594329834 -857,2.4644455909729004 -858,2.464398145675659 -859,2.4643514156341553 -860,2.4643044471740723 -861,2.4642574787139893 -862,2.4642105102539062 -863,2.4641637802124023 -864,2.4641165733337402 -865,2.4640698432922363 -866,2.4640231132507324 -867,2.4639766216278076 -868,2.4639294147491455 -869,2.4638829231262207 -870,2.4638359546661377 -871,2.463789224624634 -872,2.46374249458313 -873,2.463695526123047 -874,2.4636483192443848 -875,2.463601589202881 -876,2.463555097579956 -877,2.463508129119873 -878,2.4634616374969482 -879,2.4634149074554443 -880,2.4633681774139404 -881,2.463322877883911 -882,2.463275671005249 -883,2.463229179382324 -884,2.4631829261779785 -885,2.4631361961364746 -886,2.4630892276763916 -887,2.463043212890625 -888,2.4629976749420166 -889,2.462951421737671 -890,2.462904453277588 -891,2.4628589153289795 -892,2.4628121852874756 -893,2.462766170501709 -894,2.4627199172973633 -895,2.462672472000122 -896,2.46262788772583 -897,2.4625794887542725 -898,2.462533950805664 -899,2.4624874591827393 -900,2.4624412059783936 -901,2.4623944759368896 -902,2.462348461151123 -903,2.4623019695281982 -904,2.4622557163238525 -905,2.462209939956665 -906,2.4621639251708984 -907,2.4621171951293945 -908,2.462071657180786 -909,2.4620256423950195 -910,2.461979627609253 -911,2.4619336128234863 -912,2.4618871212005615 -913,2.461841344833374 -914,2.46179461479187 -915,2.461749315261841 -916,2.461703300476074 -917,2.4616572856903076 -918,2.461611032485962 -919,2.461564779281616 -920,2.4615187644958496 -921,2.461472749710083 -922,2.4614267349243164 -923,2.461381435394287 -924,2.4613356590270996 -925,2.4612903594970703 -926,2.4612438678741455 -927,2.461198568344116 
-928,2.4611523151397705 -929,2.4611079692840576 -930,2.4610629081726074 -931,2.46101713180542 -932,2.460970640182495 -933,2.4609243869781494 -934,2.4608795642852783 -935,2.4608349800109863 -936,2.460789680480957 -937,2.4607439041137695 -938,2.460698366165161 -939,2.4606552124023438 -940,2.460609197616577 -941,2.4605658054351807 -942,2.4605202674865723 -943,2.4604744911193848 -944,2.460430145263672 -945,2.4603848457336426 -946,2.460341691970825 -947,2.4602973461151123 -948,2.4602534770965576 -949,2.4602081775665283 -950,2.460164785385132 -951,2.460120677947998 -952,2.460076093673706 -953,2.4600350856781006 -954,2.4599900245666504 -955,2.459946632385254 -956,2.4599037170410156 -957,2.459859609603882 -958,2.4598164558410645 -959,2.4597737789154053 -960,2.459730386734009 -961,2.4596872329711914 -962,2.4596445560455322 -963,2.459601640701294 -964,2.4595584869384766 -965,2.459515333175659 -966,2.459472417831421 -967,2.4594295024871826 -968,2.4593868255615234 -969,2.459343910217285 -970,2.459301471710205 -971,2.459259033203125 -972,2.459216594696045 -973,2.4591739177703857 -974,2.4591314792633057 -975,2.459089756011963 -976,2.459047317504883 -977,2.4590048789978027 -978,2.458962917327881 -979,2.458921432495117 -980,2.4588797092437744 -981,2.4588377475738525 -982,2.4587960243225098 -983,2.458753824234009 -984,2.458712100982666 -985,2.4586703777313232 -986,2.4586288928985596 -987,2.4585869312286377 -988,2.458545207977295 -989,2.4585039615631104 -990,2.4584617614746094 -991,2.4584200382232666 -992,2.458378314971924 -993,2.458336353302002 -994,2.4582948684692383 -995,2.4582531452178955 -996,2.4582111835479736 -997,2.458169460296631 -998,2.4581282138824463 -999,2.4580867290496826 -1000,2.458045244216919 diff --git a/training/time_weighting/inverse/training_config.txt b/training/time_weighting/inverse/training_config.txt deleted file mode 100644 index 6cf228d..0000000 --- a/training/time_weighting/inverse/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0.0001 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, 
-1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting/inverse/training_log.csv b/training/time_weighting/inverse/training_log.csv deleted file mode 100644 index 7a1b89f..0000000 --- a/training/time_weighting/inverse/training_log.csv +++ /dev/null @@ -1,1002 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,7.908361434936523 -2,4.451809883117676 -3,1.9147961139678955 -4,1.2924294471740723 -5,0.9904596209526062 -6,0.876170814037323 -7,0.827053427696228 -8,0.7950173020362854 -9,0.7693766951560974 -10,0.7498175501823425 -11,0.7320955991744995 -12,0.7134718298912048 -13,0.6977483630180359 -14,0.6826232075691223 -15,0.6670342087745667 -16,0.6561163663864136 -17,0.6492725014686584 -18,0.6434455513954163 -19,0.6380093097686768 -20,0.6329172849655151 -21,0.6280857920646667 -22,0.6234145760536194 -23,0.6188551783561707 -24,0.6143704056739807 -25,0.6099040508270264 -26,0.6054681539535522 -27,0.6011141538619995 -28,0.596895694732666 -29,0.592815101146698 -30,0.5888841152191162 -31,0.5851663947105408 -32,0.5817279815673828 -33,0.5785888433456421 -34,0.5757927894592285 -35,0.5733144283294678 -36,0.5711034536361694 -37,0.5690423846244812 -38,0.5669951438903809 -39,0.5648757219314575 -40,0.5626640915870667 -41,0.5603988766670227 -42,0.5581351518630981 -43,0.5559177994728088 -44,0.5537612438201904 -45,0.5516664981842041 -46,0.5496274828910828 -47,0.5476110577583313 -48,0.5456361770629883 -49,0.5436911582946777 -50,0.541818380355835 -51,0.5401277542114258 -52,0.5386803150177002 -53,0.5375493764877319 -54,0.5367588996887207 -55,0.5362968444824219 -56,0.5360473990440369 -57,0.5358249545097351 -58,0.5358798503875732 -59,0.5362901091575623 -60,0.5360841751098633 -61,0.535875141620636 -62,0.5358033776283264 -63,0.5356138348579407 -64,0.5352828502655029 -65,0.5351241230964661 -66,0.5351008772850037 -67,0.5349565148353577 -68,0.5348551869392395 -69,0.5348978638648987 -70,0.5349027514457703 -71,0.5347902774810791 -72,0.5346595048904419 -73,0.5346115231513977 -74,0.5346399545669556 -75,0.5346632599830627 -76,0.5346543788909912 -77,0.534640908241272 -78,0.5346270799636841 -79,0.5345977544784546 -80,0.5345541834831238 -81,0.5345125198364258 -82,0.5344935059547424 -83,0.5344852805137634 -84,0.5344712138175964 -85,0.5344524383544922 -86,0.5344444513320923 -87,0.5344486236572266 -88,0.5344498157501221 -89,0.5344369411468506 -90,0.5344160199165344 -91,0.5343964099884033 -92,0.5343813300132751 -93,0.5343656539916992 -94,0.5343477725982666 -95,0.5343347787857056 -96,0.5343269109725952 -97,0.5343179106712341 -98,0.534304678440094 -99,0.5342910885810852 -100,0.5342813730239868 -101,0.5342739224433899 -102,0.5342658758163452 -103,0.5342570543289185 -104,0.5342492461204529 -105,0.5342420339584351 -106,0.5342330932617188 -107,0.5342227220535278 -108,0.5342136025428772 -109,0.5342064499855042 -110,0.534200131893158 -111,0.5341936945915222 -112,0.534188449382782 -113,0.5341840386390686 -114,0.5341784358024597 -115,0.5341717600822449 
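One naming wrinkle in the config above: the directory is time_weighting/inverse, but the function it records is inverse_squared. Its profile also decays from 1 to min_val; a transcribed sketch, with the endpoint check worked out in the comment:

import torch

def inverse_squared(t_span, t_max=None, min_val=0.01):
    # The inner linear term runs from 1 at t=0 to (1/min_val)**0.5 at t=t_max,
    # so the outer **-2 maps the endpoints to exactly 1.0 and min_val.
    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
    t_max = t_max if t_max is not None else t_span[-1]
    return (((1 / min_val) ** 0.5 - 1) / t_max * t_span + 1) ** -2

Note that raw loss values are not comparable across these logs (epoch 0 here is about 15.7 versus about 993 for cubic_root): each run's loss is averaged under its own weight profile.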
-116,0.5341652631759644 -117,0.5341593027114868 -118,0.5341529846191406 -119,0.5341465473175049 -120,0.5341410636901855 -121,0.5341352820396423 -122,0.5341283679008484 -123,0.5341213345527649 -124,0.5341147780418396 -125,0.5341086387634277 -126,0.534102737903595 -127,0.5340973138809204 -128,0.5340919494628906 -129,0.5340864658355713 -130,0.534080445766449 -131,0.5340745449066162 -132,0.5340690016746521 -133,0.5340632796287537 -134,0.5340576171875 -135,0.5340519547462463 -136,0.5340461730957031 -137,0.5340398550033569 -138,0.5340335369110107 -139,0.5340278148651123 -140,0.534022331237793 -141,0.5340172052383423 -142,0.5340126156806946 -143,0.5340080261230469 -144,0.5340032577514648 -145,0.5339988470077515 -146,0.5339945554733276 -147,0.5339903831481934 -148,0.5339863300323486 -149,0.5339820981025696 -150,0.5339773893356323 -151,0.5339723825454712 -152,0.5339674353599548 -153,0.5339624285697937 -154,0.5339576601982117 -155,0.5339528322219849 -156,0.5339480042457581 -157,0.5339434146881104 -158,0.533939003944397 -159,0.5339348912239075 -160,0.533931314945221 -161,0.5339276194572449 -162,0.5339239835739136 -163,0.5339203476905823 -164,0.5339168906211853 -165,0.5339133143424988 -166,0.5339097380638123 -167,0.5339061617851257 -168,0.5339024066925049 -169,0.5338987708091736 -170,0.5338951945304871 -171,0.5338917374610901 -172,0.5338883996009827 -173,0.5338850617408752 -174,0.5338817238807678 -175,0.5338786244392395 -176,0.5338754653930664 -177,0.5338721871376038 -178,0.5338690280914307 -179,0.5338658690452576 -180,0.5338627099990845 -181,0.5338595509529114 -182,0.5338565111160278 -183,0.5338535308837891 -184,0.5338507890701294 -185,0.5338480472564697 -186,0.5338456034660339 -187,0.5338431596755981 -188,0.5338410139083862 -189,0.5338389277458191 -190,0.5338369011878967 -191,0.5338348746299744 -192,0.5338330864906311 -193,0.5338314771652222 -194,0.5338301062583923 -195,0.5338286757469177 -196,0.533827006816864 -197,0.5338252782821655 -198,0.5338234901428223 -199,0.5338214635848999 -200,0.5338194370269775 -201,0.5338171124458313 -202,0.533814549446106 -203,0.5338118672370911 -204,0.533809244632721 -205,0.5338068008422852 -206,0.533804178237915 -207,0.5338016152381897 -208,0.5337989926338196 -209,0.5337963104248047 -210,0.533793568611145 -211,0.5337909460067749 -212,0.5337881445884705 -213,0.533785343170166 -214,0.5337824821472168 -215,0.5337796807289124 -216,0.5337768793106079 -217,0.5337742567062378 -218,0.533771812915802 -219,0.5337695479393005 -220,0.533767580986023 -221,0.5337660908699036 -222,0.5337647199630737 -223,0.5337633490562439 -224,0.5337619781494141 -225,0.533760666847229 -226,0.5337592959403992 -227,0.5337579250335693 -228,0.5337564945220947 -229,0.5337550640106201 -230,0.5337535738945007 -231,0.5337520837783813 -232,0.5337507724761963 -233,0.5337495803833008 -234,0.5337483882904053 -235,0.5337472558021545 -236,0.5337461829185486 -237,0.5337451696395874 -238,0.5337440967559814 -239,0.5337429642677307 -240,0.5337415933609009 -241,0.533740222454071 -242,0.5337386727333069 -243,0.533737063407898 -244,0.5337355136871338 -245,0.5337339639663696 -246,0.5337325930595398 -247,0.5337314009666443 -248,0.5337303876876831 -249,0.5337293744087219 -250,0.5337285995483398 -251,0.5337277054786682 -252,0.5337268114089966 -253,0.5337258577346802 -254,0.5337247848510742 -255,0.5337236523628235 -256,0.533722460269928 -257,0.5337210297584534 -258,0.5337196588516235 -259,0.5337181687355042 -260,0.5337166786193848 -261,0.5337153077125549 -262,0.5337138175964355 -263,0.5337125062942505 
-264,0.5337110757827759 -265,0.5337097644805908 -266,0.5337085723876953 -267,0.5337074995040894 -268,0.533706545829773 -269,0.5337057709693909 -270,0.5337049961090088 -271,0.5337041616439819 -272,0.5337033271789551 -273,0.5337022542953491 -274,0.5337011814117432 -275,0.5337000489234924 -276,0.5336989760398865 -277,0.5336977243423462 -278,0.5336965322494507 -279,0.5336953401565552 -280,0.5336940288543701 -281,0.5336925983428955 -282,0.5336911678314209 -283,0.5336899161338806 -284,0.5336886048316956 -285,0.5336872935295105 -286,0.533686101436615 -287,0.5336849093437195 -288,0.5336835980415344 -289,0.5336824059486389 -290,0.5336811542510986 -291,0.5336800217628479 -292,0.5336789488792419 -293,0.5336779356002808 -294,0.5336768627166748 -295,0.5336757898330688 -296,0.5336746573448181 -297,0.5336735844612122 -298,0.5336725115776062 -299,0.5336714386940002 -300,0.5336704254150391 -301,0.5336695909500122 -302,0.5336686968803406 -303,0.533667802810669 -304,0.5336667895317078 -305,0.5336659550666809 -306,0.5336649417877197 -307,0.5336640477180481 -308,0.5336628556251526 -309,0.533661961555481 -310,0.533660352230072 -311,0.5336588621139526 -312,0.5336572527885437 -313,0.5336552858352661 -314,0.5336535573005676 -315,0.53365159034729 -316,0.5336499214172363 -317,0.5336484313011169 -318,0.533646821975708 -319,0.5336455702781677 -320,0.5336441993713379 -321,0.5336430668830872 -322,0.533642053604126 -323,0.5336409211158752 -324,0.5336400866508484 -325,0.5336390137672424 -326,0.5336382985115051 -327,0.5336371064186096 -328,0.5336361527442932 -329,0.5336344838142395 -330,0.5336333513259888 -331,0.5336315631866455 -332,0.5336304306983948 -333,0.5336285829544067 -334,0.5336279273033142 -335,0.5336264371871948 -336,0.5336292386054993 -337,0.5336313247680664 -338,0.5336446762084961 -339,0.533629298210144 -340,0.5336222648620605 -341,0.5336108207702637 -342,0.5336068272590637 -343,0.5336053967475891 -344,0.5336054563522339 -345,0.5336090922355652 -346,0.5336113572120667 -347,0.5336223244667053 -348,0.5336143970489502 -349,0.5336130857467651 -350,0.5336008667945862 -351,0.5335957407951355 -352,0.5335924029350281 -353,0.5335915684700012 -354,0.5335940718650818 -355,0.5335958003997803 -356,0.5336045622825623 -357,0.5335996150970459 -358,0.5336024165153503 -359,0.5335874557495117 -360,0.5335808992385864 -361,0.5335742831230164 -362,0.5335713624954224 -363,0.5335697531700134 -364,0.5335689187049866 -365,0.5335721373558044 -366,0.5335756540298462 -367,0.5335925817489624 -368,0.5335754156112671 -369,0.5335699319839478 -370,0.5335583090782166 -371,0.5335533618927002 -372,0.533549427986145 -373,0.5335475206375122 -374,0.5335467457771301 -375,0.533545970916748 -376,0.5335496068000793 -377,0.5335513353347778 -378,0.5335657596588135 -379,0.533545970916748 -380,0.5335402488708496 -381,0.533534049987793 -382,0.5335308313369751 -383,0.5335281491279602 -384,0.5335263013839722 -385,0.5335275530815125 -386,0.5335305333137512 -387,0.5335522294044495 -388,0.533542275428772 -389,0.5335591435432434 -390,0.533515989780426 -391,0.5335181355476379 -392,0.5335599780082703 -393,0.533506453037262 -394,0.5335301160812378 -395,0.5335984230041504 -396,0.5335624814033508 -397,0.5335984826087952 -398,0.5335323810577393 -399,0.5335422158241272 -400,0.5335049033164978 -401,0.5334864258766174 -402,0.533507764339447 -403,0.5334847569465637 -404,0.5335200428962708 -405,0.5335205793380737 -406,0.533518373966217 -407,0.5335078835487366 -408,0.5334807634353638 -409,0.533492386341095 -410,0.5334815979003906 -411,0.5334779620170593 
-412,0.5334851741790771 -413,0.5334802269935608 -414,0.5334765911102295 -415,0.5334770679473877 -416,0.5334782600402832 -417,0.5334804654121399 -418,0.5334758758544922 -419,0.5334739089012146 -420,0.5334721207618713 -421,0.5334706902503967 -422,0.533471405506134 -423,0.5334721207618713 -424,0.5334824323654175 -425,0.5334783792495728 -426,0.5334910154342651 -427,0.5334601998329163 -428,0.5334577560424805 -429,0.5334771871566772 -430,0.5334668159484863 -431,0.5334728360176086 -432,0.5334479808807373 -433,0.5334511995315552 -434,0.5334779620170593 -435,0.5334423184394836 -436,0.5334816575050354 -437,0.5335400700569153 -438,0.5335309505462646 -439,0.5334370136260986 -440,0.533477246761322 -441,0.5335437059402466 -442,0.5335816740989685 -443,0.5334447026252747 -444,0.5336697697639465 -445,0.5334635376930237 -446,0.533582866191864 -447,0.533502995967865 -448,0.5334025025367737 -449,0.533573567867279 -450,0.5334376692771912 -451,0.5335515141487122 -452,0.5335073471069336 -453,0.5334045886993408 -454,0.5335460305213928 -455,0.533403754234314 -456,0.5334699153900146 -457,0.5334806442260742 -458,0.5334148406982422 -459,0.5334538221359253 -460,0.5334036946296692 -461,0.5334346294403076 -462,0.5334440469741821 -463,0.533405601978302 -464,0.5334429740905762 -465,0.5334040522575378 -466,0.5334295630455017 -467,0.5334076881408691 -468,0.5334334969520569 -469,0.5333941578865051 -470,0.5334073901176453 -471,0.5333864092826843 -472,0.5334150195121765 -473,0.5334035754203796 -474,0.5334026217460632 -475,0.5333895087242126 -476,0.5333828330039978 -477,0.5333994626998901 -478,0.5333837270736694 -479,0.5333964228630066 -480,0.5333943963050842 -481,0.5333824753761292 -482,0.5334042310714722 -483,0.5333842039108276 -484,0.5333737730979919 -485,0.5333834290504456 -486,0.5333651304244995 -487,0.5333675146102905 -488,0.5333717465400696 -489,0.5333539247512817 -490,0.5333523154258728 -491,0.5333541631698608 -492,0.5333435535430908 -493,0.5333386659622192 -494,0.5333369374275208 -495,0.5333281755447388 -496,0.5333229303359985 -497,0.5333254337310791 -498,0.5333228707313538 -499,0.5333117246627808 -500,0.5333272218704224 -501,0.5333349108695984 -502,0.5333155393600464 -503,0.5333507657051086 -504,0.5333429574966431 -505,0.533335268497467 -506,0.5332838296890259 -507,0.5333049893379211 -508,0.5333319902420044 -509,0.5333306789398193 -510,0.5332647562026978 -511,0.5333512425422668 -512,0.5333088636398315 -513,0.5333592295646667 -514,0.5332757234573364 -515,0.5332666635513306 -516,0.5332465171813965 -517,0.5332728028297424 -518,0.5332682728767395 -519,0.5332341194152832 -520,0.5332837104797363 -521,0.5332566499710083 -522,0.5332777500152588 -523,0.5332354307174683 -524,0.5332518815994263 -525,0.5332293510437012 -526,0.5332573652267456 -527,0.5332226157188416 -528,0.5332399010658264 -529,0.5332238078117371 -530,0.5332431197166443 -531,0.5332164168357849 -532,0.533210277557373 -533,0.5332163572311401 -534,0.5332164168357849 -535,0.5332269668579102 -536,0.5332154035568237 -537,0.5332126021385193 -538,0.5332223176956177 -539,0.5332213640213013 -540,0.5332069396972656 -541,0.5332173109054565 -542,0.5332170128822327 -543,0.5332083106040955 -544,0.5332239270210266 -545,0.5332117676734924 -546,0.5332178473472595 -547,0.5331835746765137 -548,0.5331886410713196 -549,0.5332087874412537 -550,0.5332010984420776 -551,0.5331821441650391 -552,0.5331724286079407 -553,0.5331881046295166 -554,0.5331680178642273 -555,0.5331881642341614 -556,0.533202588558197 -557,0.5331920981407166 -558,0.5331699848175049 -559,0.5331637263298035 
-560,0.5331887006759644 -561,0.5331618189811707 -562,0.5331862568855286 -563,0.5331900119781494 -564,0.5331919193267822 -565,0.5331462621688843 -566,0.5331738591194153 -567,0.5332019925117493 -568,0.5332098007202148 -569,0.5331388711929321 -570,0.5332573652267456 -571,0.5332021713256836 -572,0.5332541465759277 -573,0.5331735610961914 -574,0.533125638961792 -575,0.5332156419754028 -576,0.533156156539917 -577,0.5332451462745667 -578,0.5331687927246094 -579,0.5331242680549622 -580,0.5332731604576111 -581,0.53312748670578 -582,0.5332522392272949 -583,0.5332115292549133 -584,0.5331048369407654 -585,0.5332247018814087 -586,0.5330948233604431 -587,0.5331565737724304 -588,0.5331710577011108 -589,0.5331031084060669 -590,0.5331385135650635 -591,0.5330957174301147 -592,0.5331053137779236 -593,0.5331255197525024 -594,0.5330991148948669 -595,0.533108651638031 -596,0.5330966711044312 -597,0.5331032276153564 -598,0.5331051349639893 -599,0.5330920815467834 -600,0.5331002473831177 -601,0.533100426197052 -602,0.5330987572669983 -603,0.5330823659896851 -604,0.5330929756164551 -605,0.5331001281738281 -606,0.5330975651741028 -607,0.533099889755249 -608,0.5330812931060791 -609,0.5330783128738403 -610,0.5330870747566223 -611,0.5330952405929565 -612,0.5330790281295776 -613,0.5331013202667236 -614,0.5331288576126099 -615,0.5331118702888489 -616,0.533064067363739 -617,0.5331519246101379 -618,0.5331542491912842 -619,0.5332099795341492 -620,0.5330872535705566 -621,0.5331217646598816 -622,0.533039391040802 -623,0.533056914806366 -624,0.5330623388290405 -625,0.5330593585968018 -626,0.5330948829650879 -627,0.5330694317817688 -628,0.5331254601478577 -629,0.5330600738525391 -630,0.5330482125282288 -631,0.5330696105957031 -632,0.5330703854560852 -633,0.5331020951271057 -634,0.5330442786216736 -635,0.5330933332443237 -636,0.5330255031585693 -637,0.5330432057380676 -638,0.5330243110656738 -639,0.5330159068107605 -640,0.5330231785774231 -641,0.533031165599823 -642,0.5330619812011719 -643,0.5330116152763367 -644,0.5330842137336731 -645,0.5330634713172913 -646,0.5330973267555237 -647,0.5330213308334351 -648,0.5330548882484436 -649,0.5329979658126831 -650,0.5330200791358948 -651,0.5330220460891724 -652,0.5330085754394531 -653,0.5330115556716919 -654,0.5330163240432739 -655,0.5330249071121216 -656,0.5329814553260803 -657,0.5330226421356201 -658,0.5330206155776978 -659,0.533040463924408 -660,0.53298419713974 -661,0.5330554842948914 -662,0.5329925417900085 -663,0.5330305099487305 -664,0.5329842567443848 -665,0.5329853892326355 -666,0.5329538583755493 -667,0.5329693555831909 -668,0.5329843759536743 -669,0.5329700112342834 -670,0.532962441444397 -671,0.5329849720001221 -672,0.5329623818397522 -673,0.5329577922821045 -674,0.5329817533493042 -675,0.5329473614692688 -676,0.5329486727714539 -677,0.5329578518867493 -678,0.5329358577728271 -679,0.5329436659812927 -680,0.5329578518867493 -681,0.5329398512840271 -682,0.5329602956771851 -683,0.532975971698761 -684,0.5329791307449341 -685,0.5329247117042542 -686,0.5330293774604797 -687,0.5330694913864136 -688,0.5331434607505798 -689,0.5329517722129822 -690,0.5330378413200378 -691,0.5328987240791321 -692,0.5329580307006836 -693,0.5329782366752625 -694,0.5329005718231201 -695,0.5329961776733398 -696,0.5329257845878601 -697,0.5329722166061401 -698,0.532939612865448 -699,0.5329009294509888 -700,0.5329351425170898 -701,0.5328828692436218 -702,0.5329330563545227 -703,0.5329194664955139 -704,0.5328922867774963 -705,0.532914400100708 -706,0.5328907370567322 -707,0.5329148769378662 
-708,0.532886803150177 -709,0.5328996181488037 -710,0.5328764915466309 -711,0.5329096913337708 -712,0.5328902006149292 -713,0.532917320728302 -714,0.532927930355072 -715,0.5329143404960632 -716,0.5328717231750488 -717,0.5329341888427734 -718,0.5329579710960388 -719,0.5330227017402649 -720,0.5328763127326965 -721,0.5330239534378052 -722,0.532927393913269 -723,0.5330615043640137 -724,0.5329186916351318 -725,0.5328514575958252 -726,0.5330032706260681 -727,0.5328615307807922 -728,0.5330538749694824 -729,0.5329590439796448 -730,0.532849133014679 -731,0.5329838991165161 -732,0.532835841178894 -733,0.5329086184501648 -734,0.5329486131668091 -735,0.5328454971313477 -736,0.5328777432441711 -737,0.5328724384307861 -738,0.5328430533409119 -739,0.5329077839851379 -740,0.5328611731529236 -741,0.5328328013420105 -742,0.5328766107559204 -743,0.5328611731529236 -744,0.5329135060310364 -745,0.5328494906425476 -746,0.5328521132469177 -747,0.5328106880187988 -748,0.5328395366668701 -749,0.5328314304351807 -750,0.5328251719474792 -751,0.5328195095062256 -752,0.5328259468078613 -753,0.5328236222267151 -754,0.5328176617622375 -755,0.5328128933906555 -756,0.5328145623207092 -757,0.5328118801116943 -758,0.5328138470649719 -759,0.5327996015548706 -760,0.5328055024147034 -761,0.5328127145767212 -762,0.5328004360198975 -763,0.532802402973175 -764,0.532799482345581 -765,0.5328034162521362 -766,0.5327965617179871 -767,0.5327936410903931 -768,0.5327891111373901 -769,0.5327962040901184 -770,0.5327879190444946 -771,0.5327874422073364 -772,0.532783031463623 -773,0.5327817797660828 -774,0.5327724814414978 -775,0.5327770113945007 -776,0.5327735543251038 -777,0.5327709317207336 -778,0.532770037651062 -779,0.5327761173248291 -780,0.5327686667442322 -781,0.5327711701393127 -782,0.5327606797218323 -783,0.5327640175819397 -784,0.5327658653259277 -785,0.5327684879302979 -786,0.5327603220939636 -787,0.5327693819999695 -788,0.5327964425086975 -789,0.5327839255332947 -790,0.532767117023468 -791,0.5327374339103699 -792,0.5327468514442444 -793,0.5327407121658325 -794,0.5327498316764832 -795,0.5327783823013306 -796,0.5327553153038025 -797,0.532752513885498 -798,0.5327231884002686 -799,0.5327383875846863 -800,0.5327481627464294 -801,0.5327475070953369 -802,0.5327630043029785 -803,0.5327275991439819 -804,0.5327320098876953 -805,0.5327475070953369 -806,0.5327352285385132 -807,0.5327454209327698 -808,0.5327445864677429 -809,0.532746434211731 -810,0.5327154994010925 -811,0.5327035784721375 -812,0.5327175855636597 -813,0.5327083468437195 -814,0.5327253341674805 -815,0.532751202583313 -816,0.532734215259552 -817,0.5327244997024536 -818,0.5326954126358032 -819,0.5327003002166748 -820,0.5326858758926392 -821,0.5326919555664062 -822,0.5327044725418091 -823,0.5326814651489258 -824,0.5327125191688538 -825,0.5327550768852234 -826,0.5327398777008057 -827,0.5326789617538452 -828,0.5327390432357788 -829,0.5329201817512512 -830,0.5329448580741882 -831,0.5326907634735107 -832,0.5329617857933044 -833,0.5327876210212708 -834,0.5331041216850281 -835,0.532791018486023 -836,0.5326667428016663 -837,0.5329078435897827 -838,0.5326335430145264 -839,0.5328433513641357 -840,0.5328519940376282 -841,0.5326688289642334 -842,0.5327835083007812 -843,0.5327579975128174 -844,0.5326729416847229 -845,0.5327986478805542 -846,0.5327482223510742 -847,0.532671332359314 -848,0.5327870845794678 -849,0.5326620936393738 -850,0.5327721238136292 -851,0.5327152609825134 -852,0.5326424837112427 -853,0.5327694416046143 -854,0.5326777100563049 -855,0.5328274965286255 
-856,0.5326974391937256 -857,0.5326778888702393 -858,0.5326619744300842 -859,0.5326575040817261 -860,0.5327215194702148 -861,0.5326443314552307 -862,0.5326738953590393 -863,0.5326250195503235 -864,0.5326496362686157 -865,0.5326324701309204 -866,0.5326189398765564 -867,0.5326152443885803 -868,0.5326200723648071 -869,0.5326253175735474 -870,0.5325871109962463 -871,0.5326440334320068 -872,0.5326871275901794 -873,0.532707929611206 -874,0.5325793623924255 -875,0.532780110836029 -876,0.5328423976898193 -877,0.5331017374992371 -878,0.5326749086380005 -879,0.5326802134513855 -880,0.5328874588012695 -881,0.5326073169708252 -882,0.5330514311790466 -883,0.5328177809715271 -884,0.5325912833213806 -885,0.5328464508056641 -886,0.5326460599899292 -887,0.5326210856437683 -888,0.5328496098518372 -889,0.5327123403549194 -890,0.5325807332992554 -891,0.5327404141426086 -892,0.5325666069984436 -893,0.5326715707778931 -894,0.5327159762382507 -895,0.5325658917427063 -896,0.5326340794563293 -897,0.5326007008552551 -898,0.5326005220413208 -899,0.532658040523529 -900,0.5325965881347656 -901,0.5326038002967834 -902,0.5325936675071716 -903,0.5326001644134521 -904,0.5326241254806519 -905,0.5325798392295837 -906,0.5326475501060486 -907,0.5326381325721741 -908,0.5326940417289734 -909,0.5325639247894287 -910,0.5326101183891296 -911,0.5325092077255249 -912,0.532542884349823 -913,0.5325829982757568 -914,0.532555878162384 -915,0.5325654149055481 -916,0.5325438380241394 -917,0.532560408115387 -918,0.5325314998626709 -919,0.5325174331665039 -920,0.532526433467865 -921,0.5325611233711243 -922,0.5325525403022766 -923,0.532503604888916 -924,0.532495379447937 -925,0.5324997305870056 -926,0.532520055770874 -927,0.5324971079826355 -928,0.5325572490692139 -929,0.5326020121574402 -930,0.5326200127601624 -931,0.5325057506561279 -932,0.5327089428901672 -933,0.5328356027603149 -934,0.5330923199653625 -935,0.532627284526825 -936,0.5326129198074341 -937,0.5328517556190491 -938,0.5325050354003906 -939,0.532988965511322 -940,0.532723069190979 -941,0.5324777364730835 -942,0.5327807068824768 -943,0.5325810313224792 -944,0.5324954986572266 -945,0.5327465534210205 -946,0.5326344966888428 -947,0.5324852466583252 -948,0.5326828360557556 -949,0.5324603915214539 -950,0.5325776934623718 -951,0.532648503780365 -952,0.5324987173080444 -953,0.5325400829315186 -954,0.5325759053230286 -955,0.5325213670730591 -956,0.5326765179634094 -957,0.5325861573219299 -958,0.5325375199317932 -959,0.5326128602027893 -960,0.5325403213500977 -961,0.5327244400978088 -962,0.5325830578804016 -963,0.5325121283531189 -964,0.5326025485992432 -965,0.5326010584831238 -966,0.5329156517982483 -967,0.5325844883918762 -968,0.5324466228485107 -969,0.5327730178833008 -970,0.5324587821960449 -971,0.532749354839325 -972,0.5326852202415466 -973,0.532429575920105 -974,0.5326404571533203 -975,0.5324943661689758 -976,0.5324801802635193 -977,0.5326264500617981 -978,0.5325270891189575 -979,0.5324642062187195 -980,0.5325806140899658 -981,0.5324447751045227 -982,0.5325149893760681 -983,0.5325573682785034 -984,0.5324417352676392 -985,0.532492458820343 -986,0.5324585437774658 -987,0.5324519276618958 -988,0.5324977040290833 -989,0.5324528813362122 -990,0.5324669480323792 -991,0.5324319005012512 -992,0.5324699878692627 -993,0.5324579477310181 -994,0.5324413180351257 -995,0.5324161648750305 -996,0.5324227213859558 -997,0.532424807548523 -998,0.532414436340332 -999,0.5324140787124634 -1000,0.5324074625968933 diff --git a/training/time_weighting/inverse_cubed/training_config.txt 
b/training/time_weighting/inverse_cubed/training_config.txt
deleted file mode 100644
index ae26d39..0000000
--- a/training/time_weighting/inverse_cubed/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting/inverse_cubed/training_log.csv b/training/time_weighting/inverse_cubed/training_log.csv
deleted file mode 100644
index 4b6a935..0000000
--- a/training/time_weighting/inverse_cubed/training_log.csv
+++ /dev/null
@@ -1,1002 +0,0 @@
-Epoch,Loss -0,27.12971305847168 -1,15.918230056762695 -2,8.533187866210938 -3,4.231637954711914 -4,2.8468010425567627 -5,2.3463642597198486 -6,2.207174777984619 -7,2.0992748737335205 -8,2.0057637691497803 -9,1.934615969657898 -10,1.883561611175537 -11,1.8463146686553955 -12,1.81405770778656 -13,1.7849390506744385 -14,1.758191704750061 -15,1.7332559823989868 -16,1.7097091674804688 -17,1.6872828006744385 -18,1.6659213304519653 -19,1.6455514430999756 -20,1.626145362854004 -21,1.607662320137024 -22,1.5902446508407593 -23,1.5741549730300903 -24,1.5593137741088867 -25,1.5461254119873047 -26,1.5345849990844727 -27,1.5247251987457275 -28,1.5164955854415894 -29,1.5096299648284912 -30,1.503425121307373 -31,1.4975810050964355 -32,1.4919557571411133 -33,1.4867321252822876 -34,1.4820878505706787 -35,1.4780898094177246 -36,1.4750394821166992 -37,1.4721918106079102 -38,1.4697966575622559
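Reader's note: the deleted config above calls a free-standing weight_fn that is not shown in this patch. The following is a minimal, self-contained sketch of how that weighted loss plausibly fits together; the name mirrored_power_weights, the power parameter, and the [batch_size, t_points, state_dim] layout are illustrative assumptions, not the repo's actual API. With power=1 it reproduces the linear_mirrored schedule recorded in the config above.

    import torch

    def mirrored_power_weights(t_span, power=1.0, t_max=None, min_val=0.01):
        # Hypothetical helper: w(t) = min_val + (1 - min_val) * ((t_max - t) / t_max) ** power,
        # so weights start at 1.0 at t = 0 and decay to min_val at t = t_max.
        t_max = float(t_span[-1]) if t_max is None else t_max
        return min_val + (1 - min_val) * ((t_max - t_span) / t_max) ** power

    def weighted_theta_loss(state_traj, t_span):
        # state_traj assumed [batch_size, t_points, state_dim]; channel 0 is theta
        # and channel 3 is desired_theta, as in the deleted loss_fn.
        theta = state_traj[:, :, 0]
        desired_theta = state_traj[:, :, 3]
        weights = mirrored_power_weights(t_span, power=1.0).view(1, -1)  # broadcast over batch
        return torch.mean(weights * (theta - desired_theta) ** 2)

    # Toy usage matching "Time Span: 0 to 10, Points: 1000" and the 22 training cases:
    t_span = torch.linspace(0.0, 10.0, 1000)
    state_traj = torch.zeros(22, 1000, 4)
    state_traj[:, :, 0] = 0.1 * torch.randn(22, 1000)  # synthetic theta trajectories
    print(weighted_theta_loss(state_traj, t_span).item())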
-39,1.468460202217102 -40,1.4686576128005981 -41,1.4696967601776123 -42,1.4705936908721924 -43,1.4711523056030273 -44,1.4706157445907593 -45,1.4684170484542847 -46,1.4657840728759766 -47,1.4635727405548096 -48,1.462889313697815 -49,1.46292245388031 -50,1.463050365447998 -51,1.4630948305130005 -52,1.4630299806594849 -53,1.4628430604934692 -54,1.4625276327133179 -55,1.4620944261550903 -56,1.4615834951400757 -57,1.4610315561294556 -58,1.460465908050537 -59,1.4599283933639526 -60,1.4594683647155762 -61,1.4591727256774902 -62,1.4591158628463745 -63,1.4592617750167847 -64,1.4594451189041138 -65,1.4594213962554932 -66,1.459152340888977 -67,1.458740234375 -68,1.4583886861801147 -69,1.4581819772720337 -70,1.4580849409103394 -71,1.458052396774292 -72,1.4580340385437012 -73,1.4579944610595703 -74,1.4579219818115234 -75,1.4578224420547485 -76,1.4576959609985352 -77,1.4575525522232056 -78,1.4574264287948608 -79,1.4573242664337158 -80,1.457261085510254 -81,1.4572272300720215 -82,1.4572017192840576 -83,1.457165241241455 -84,1.4570980072021484 -85,1.4570094347000122 -86,1.4569177627563477 -87,1.4568333625793457 -88,1.4567649364471436 -89,1.4567055702209473 -90,1.4566526412963867 -91,1.4565953016281128 -92,1.4565376043319702 -93,1.4564793109893799 -94,1.4564173221588135 -95,1.4563580751419067 -96,1.4562979936599731 -97,1.456236720085144 -98,1.4561759233474731 -99,1.4561187028884888 -100,1.4560667276382446 -101,1.456021785736084 -102,1.455978512763977 -103,1.4559314250946045 -104,1.4558817148208618 -105,1.4558295011520386 -106,1.4557772874832153 -107,1.4557253122329712 -108,1.455674171447754 -109,1.4556254148483276 -110,1.4555777311325073 -111,1.455535888671875 -112,1.4554957151412964 -113,1.4554545879364014 -114,1.4554144144058228 -115,1.4553756713867188 -116,1.4553381204605103 -117,1.4553016424179077 -118,1.4552663564682007 -119,1.45523202419281 -120,1.4551974534988403 -121,1.4551639556884766 -122,1.4551304578781128 -123,1.455096960067749 -124,1.4550644159317017 -125,1.4550328254699707 -126,1.4550023078918457 -127,1.454972505569458 -128,1.4549434185028076 -129,1.4549140930175781 -130,1.4548856019973755 -131,1.4548553228378296 -132,1.4548248052597046 -133,1.4547955989837646 -134,1.454765796661377 -135,1.4547370672225952 -136,1.4547077417373657 -137,1.454679012298584 -138,1.454649567604065 -139,1.4546194076538086 -140,1.4545890092849731 -141,1.454558253288269 -142,1.454527497291565 -143,1.4544967412948608 -144,1.4544658660888672 -145,1.454434871673584 -146,1.454403281211853 -147,1.4543712139129639 -148,1.4543381929397583 -149,1.4543055295944214 -150,1.4542725086212158 -151,1.454238772392273 -152,1.4542040824890137 -153,1.45417058467865 -154,1.4541336297988892 -155,1.4540971517562866 -156,1.4540594816207886 -157,1.4540246725082397 -158,1.4539916515350342 -159,1.4539604187011719 -160,1.4539296627044678 -161,1.4539003372192383 -162,1.4538722038269043 -163,1.4538437128067017 -164,1.4538145065307617 -165,1.4537855386734009 -166,1.4537564516067505 -167,1.4537285566329956 -168,1.4536993503570557 -169,1.453671932220459 -170,1.4536445140838623 -171,1.453616976737976 -172,1.4535841941833496 -173,1.4535560607910156 -174,1.4535255432128906 -175,1.4534934759140015 -176,1.4534677267074585 -177,1.453437089920044 -178,1.4534021615982056 -179,1.4533671140670776 -180,1.4533369541168213 -181,1.4533113241195679 -182,1.4532865285873413 -183,1.453257441520691 -184,1.453224778175354 -185,1.4531896114349365 -186,1.4531638622283936 -187,1.453128695487976 -188,1.4531031847000122 -189,1.4530744552612305 -190,1.4530432224273682 
-191,1.4530168771743774 -192,1.4529834985733032 -193,1.4529565572738647 -194,1.4529290199279785 -195,1.4528982639312744 -196,1.4528717994689941 -197,1.4528461694717407 -198,1.4528205394744873 -199,1.4527846574783325 -200,1.4527567625045776 -201,1.4527137279510498 -202,1.4526816606521606 -203,1.4526501893997192 -204,1.4526153802871704 -205,1.4525834321975708 -206,1.45255446434021 -207,1.4525283575057983 -208,1.4524991512298584 -209,1.4524554014205933 -210,1.4524434804916382 -211,1.4523922204971313 -212,1.4523698091506958 -213,1.452330470085144 -214,1.452279806137085 -215,1.452267050743103 -216,1.4522134065628052 -217,1.452190637588501 -218,1.4521509408950806 -219,1.4521063566207886 -220,1.452101469039917 -221,1.4520525932312012 -222,1.4520301818847656 -223,1.4519765377044678 -224,1.4519262313842773 -225,1.4518927335739136 -226,1.451857566833496 -227,1.4518290758132935 -228,1.4517874717712402 -229,1.4517459869384766 -230,1.451704978942871 -231,1.4516689777374268 -232,1.4516292810440063 -233,1.451591968536377 -234,1.4515637159347534 -235,1.4515427350997925 -236,1.4514944553375244 -237,1.4514544010162354 -238,1.4514164924621582 -239,1.4513945579528809 -240,1.451347827911377 -241,1.4513238668441772 -242,1.4512909650802612 -243,1.4512434005737305 -244,1.4512031078338623 -245,1.4511500597000122 -246,1.451121211051941 -247,1.451073169708252 -248,1.4510469436645508 -249,1.4510304927825928 -250,1.4509868621826172 -251,1.450934886932373 -252,1.4509084224700928 -253,1.450853943824768 -254,1.4508298635482788 -255,1.4507977962493896 -256,1.4507654905319214 -257,1.4507129192352295 -258,1.4506826400756836 -259,1.45063054561615 -260,1.4505902528762817 -261,1.4505398273468018 -262,1.4505088329315186 -263,1.4504942893981934 -264,1.4504408836364746 -265,1.4504197835922241 -266,1.4504114389419556 -267,1.450353980064392 -268,1.4502958059310913 -269,1.4502314329147339 -270,1.4501808881759644 -271,1.4501354694366455 -272,1.4501005411148071 -273,1.4500560760498047 -274,1.4500294923782349 -275,1.4499951601028442 -276,1.4499590396881104 -277,1.4499232769012451 -278,1.4498639106750488 -279,1.4498158693313599 -280,1.4497673511505127 -281,1.44971764087677 -282,1.44967520236969 -283,1.4496394395828247 -284,1.4495973587036133 -285,1.449577808380127 -286,1.449534296989441 -287,1.4495083093643188 -288,1.4494504928588867 -289,1.4494366645812988 -290,1.4493745565414429 -291,1.4493274688720703 -292,1.4492815732955933 -293,1.4492498636245728 -294,1.4492517709732056 -295,1.449196457862854 -296,1.4491969347000122 -297,1.4491177797317505 -298,1.4490923881530762 -299,1.4490666389465332 -300,1.449007511138916 -301,1.4489843845367432 -302,1.448907732963562 -303,1.4488457441329956 -304,1.448794960975647 -305,1.4487814903259277 -306,1.4487900733947754 -307,1.4487576484680176 -308,1.4487578868865967 -309,1.4487279653549194 -310,1.4486958980560303 -311,1.448691964149475 -312,1.4486384391784668 -313,1.4486087560653687 -314,1.4485034942626953 -315,1.4484483003616333 -316,1.448441982269287 -317,1.4483991861343384 -318,1.448349118232727 -319,1.448347806930542 -320,1.4483389854431152 -321,1.4483270645141602 -322,1.4482600688934326 -323,1.4482581615447998 -324,1.4482742547988892 -325,1.4481804370880127 -326,1.4480667114257812 -327,1.4481005668640137 -328,1.4480305910110474 -329,1.4479703903198242 -330,1.4480087757110596 -331,1.4479832649230957 -332,1.4478893280029297 -333,1.4479337930679321 -334,1.4478830099105835 -335,1.4477853775024414 -336,1.4478031396865845 -337,1.4477304220199585 -338,1.4476571083068848 -339,1.4476827383041382 
-340,1.4476256370544434 -341,1.4475449323654175 -342,1.4475959539413452 -343,1.4475243091583252 -344,1.4474658966064453 -345,1.4475104808807373 -346,1.4474427700042725 -347,1.4473707675933838 -348,1.4474226236343384 -349,1.4473496675491333 -350,1.4472600221633911 -351,1.4473185539245605 -352,1.4472342729568481 -353,1.4471544027328491 -354,1.4472637176513672 -355,1.4471443891525269 -356,1.4470868110656738 -357,1.4471896886825562 -358,1.4470144510269165 -359,1.4470093250274658 -360,1.4470218420028687 -361,1.4469081163406372 -362,1.4469096660614014 -363,1.4468622207641602 -364,1.4468154907226562 -365,1.446796178817749 -366,1.4467428922653198 -367,1.4467312097549438 -368,1.4466910362243652 -369,1.4466536045074463 -370,1.4466387033462524 -371,1.4466192722320557 -372,1.4465582370758057 -373,1.4465278387069702 -374,1.446495532989502 -375,1.4464625120162964 -376,1.446401596069336 -377,1.4463649988174438 -378,1.4463192224502563 -379,1.4462828636169434 -380,1.4462565183639526 -381,1.446225643157959 -382,1.4462084770202637 -383,1.446176528930664 -384,1.4461326599121094 -385,1.4460771083831787 -386,1.4460488557815552 -387,1.4460086822509766 -388,1.445995807647705 -389,1.445973515510559 -390,1.4459397792816162 -391,1.4458967447280884 -392,1.4458494186401367 -393,1.4458131790161133 -394,1.4457900524139404 -395,1.445807933807373 -396,1.4458190202713013 -397,1.445692777633667 -398,1.4458776712417603 -399,1.446077585220337 -400,1.4457234144210815 -401,1.446207880973816 -402,1.4457403421401978 -403,1.4458494186401367 -404,1.4455432891845703 -405,1.4459949731826782 -406,1.4455777406692505 -407,1.445844054222107 -408,1.4454045295715332 -409,1.4459044933319092 -410,1.4453513622283936 -411,1.4456124305725098 -412,1.445371150970459 -413,1.4454556703567505 -414,1.4453719854354858 -415,1.4453151226043701 -416,1.4453656673431396 -417,1.4452128410339355 -418,1.4453860521316528 -419,1.4451667070388794 -420,1.4452569484710693 -421,1.4451342821121216 -422,1.4452195167541504 -423,1.4450815916061401 -424,1.4451016187667847 -425,1.4450334310531616 -426,1.445056438446045 -427,1.4449679851531982 -428,1.4449632167816162 -429,1.4449210166931152 -430,1.4449379444122314 -431,1.4448813199996948 -432,1.4448703527450562 -433,1.4448415040969849 -434,1.444838285446167 -435,1.4448198080062866 -436,1.44478178024292 -437,1.4448094367980957 -438,1.4447587728500366 -439,1.44473397731781 -440,1.4447121620178223 -441,1.4446643590927124 -442,1.44465970993042 -443,1.4446280002593994 -444,1.4446595907211304 -445,1.4446722269058228 -446,1.4446206092834473 -447,1.4446476697921753 -448,1.4445688724517822 -449,1.444548487663269 -450,1.4445420503616333 -451,1.444472074508667 -452,1.4444693326950073 -453,1.4444445371627808 -454,1.444473147392273 -455,1.44449782371521 -456,1.4444389343261719 -457,1.4444876909255981 -458,1.4444040060043335 -459,1.4443913698196411 -460,1.4443395137786865 -461,1.444347620010376 -462,1.4443795680999756 -463,1.4443129301071167 -464,1.4443423748016357 -465,1.444243311882019 -466,1.4442627429962158 -467,1.4442112445831299 -468,1.4443023204803467 -469,1.4442793130874634 -470,1.4442390203475952 -471,1.4442020654678345 -472,1.4441972970962524 -473,1.4442259073257446 -474,1.4441524744033813 -475,1.4441802501678467 -476,1.4440621137619019 -477,1.4440999031066895 -478,1.444036841392517 -479,1.444093942642212 -480,1.444032907485962 -481,1.4440200328826904 -482,1.4439789056777954 -483,1.4439992904663086 -484,1.4439939260482788 -485,1.4439671039581299 -486,1.4439470767974854 -487,1.4438936710357666 -488,1.443915843963623 
-489,1.443866491317749 -490,1.4439303874969482 -491,1.443882942199707 -492,1.4438705444335938 -493,1.4438674449920654 -494,1.4437973499298096 -495,1.4437915086746216 -496,1.4437549114227295 -497,1.4437710046768188 -498,1.4437936544418335 -499,1.443730115890503 -500,1.4437915086746216 -501,1.4437063932418823 -502,1.4437129497528076 -503,1.4437062740325928 -504,1.4436513185501099 -505,1.4436496496200562 -506,1.443616271018982 -507,1.443640947341919 -508,1.4436509609222412 -509,1.4435983896255493 -510,1.443639874458313 -511,1.4435557126998901 -512,1.4435664415359497 -513,1.443532109260559 -514,1.4435174465179443 -515,1.4435065984725952 -516,1.4434814453125 -517,1.4435181617736816 -518,1.4434973001480103 -519,1.4434701204299927 -520,1.443446397781372 -521,1.4434022903442383 -522,1.4434089660644531 -523,1.4433786869049072 -524,1.4434117078781128 -525,1.4433883428573608 -526,1.4433772563934326 -527,1.4434043169021606 -528,1.443395733833313 -529,1.4433512687683105 -530,1.4433737993240356 -531,1.4433256387710571 -532,1.4433077573776245 -533,1.4433088302612305 -534,1.443292260169983 -535,1.4433423280715942 -536,1.4432686567306519 -537,1.4433716535568237 -538,1.4433001279830933 -539,1.4432944059371948 -540,1.4432437419891357 -541,1.4432111978530884 -542,1.4432169198989868 -543,1.4431846141815186 -544,1.4432129859924316 -545,1.4431724548339844 -546,1.4431885480880737 -547,1.4431655406951904 -548,1.4432170391082764 -549,1.4432214498519897 -550,1.4431681632995605 -551,1.443198800086975 -552,1.4431180953979492 -553,1.4431321620941162 -554,1.4431129693984985 -555,1.4431146383285522 -556,1.4431452751159668 -557,1.4431138038635254 -558,1.4431692361831665 -559,1.443115234375 -560,1.443103313446045 -561,1.4431202411651611 -562,1.4430736303329468 -563,1.4430770874023438 -564,1.4430500268936157 -565,1.4430960416793823 -566,1.4431545734405518 -567,1.443093180656433 -568,1.4431508779525757 -569,1.443053960800171 -570,1.4430890083312988 -571,1.443058729171753 -572,1.443086862564087 -573,1.4430519342422485 -574,1.4430510997772217 -575,1.4430476427078247 -576,1.4430116415023804 -577,1.4430265426635742 -578,1.4429972171783447 -579,1.4430437088012695 -580,1.4430747032165527 -581,1.4430333375930786 -582,1.4430729150772095 -583,1.4430491924285889 -584,1.4430110454559326 -585,1.4430383443832397 -586,1.4429689645767212 -587,1.4430081844329834 -588,1.4429762363433838 -589,1.443036437034607 -590,1.4430510997772217 -591,1.4430073499679565 -592,1.4430478811264038 -593,1.4429755210876465 -594,1.4429794549942017 -595,1.442960262298584 -596,1.442946195602417 -597,1.4429607391357422 -598,1.44294273853302 -599,1.4429852962493896 -600,1.4429513216018677 -601,1.442956805229187 -602,1.4429634809494019 -603,1.4429353475570679 -604,1.4429285526275635 -605,1.4429117441177368 -606,1.4429205656051636 -607,1.442945122718811 -608,1.442923665046692 -609,1.4429700374603271 -610,1.4429998397827148 -611,1.4429361820220947 -612,1.443043828010559 -613,1.4430712461471558 -614,1.4429774284362793 -615,1.443047285079956 -616,1.4429008960723877 -617,1.4429622888565063 -618,1.4429007768630981 -619,1.4429914951324463 -620,1.4430358409881592 -621,1.4429731369018555 -622,1.4430099725723267 -623,1.4429265260696411 -624,1.4430197477340698 -625,1.4428813457489014 -626,1.4429999589920044 -627,1.4428678750991821 -628,1.442956805229187 -629,1.4428569078445435 -630,1.4429575204849243 -631,1.44288170337677 -632,1.4429161548614502 -633,1.4429000616073608 -634,1.4428775310516357 -635,1.44288170337677 -636,1.4428637027740479 -637,1.442946195602417 
-638,1.4430291652679443 -639,1.4430036544799805 -640,1.4429255723953247 -641,1.443008303642273 -642,1.4433385133743286 -643,1.4430478811264038 -644,1.4432246685028076 -645,1.44297456741333 -646,1.4431780576705933 -647,1.4429247379302979 -648,1.4432002305984497 -649,1.4429044723510742 -650,1.4431177377700806 -651,1.4428993463516235 -652,1.4430837631225586 -653,1.4430255889892578 -654,1.443069338798523 -655,1.4428718090057373 -656,1.4431657791137695 -657,1.442946434020996 -658,1.4431217908859253 -659,1.4428938627243042 -660,1.443097472190857 -661,1.4429864883422852 -662,1.443023920059204 -663,1.442909836769104 -664,1.4430713653564453 -665,1.4428352117538452 -666,1.4430562257766724 -667,1.4428173303604126 -668,1.44301176071167 -669,1.4428396224975586 -670,1.4429491758346558 -671,1.4428373575210571 -672,1.4429843425750732 -673,1.442872405052185 -674,1.4429867267608643 -675,1.4428176879882812 -676,1.442971110343933 -677,1.4428188800811768 -678,1.4428973197937012 -679,1.4428362846374512 -680,1.4429032802581787 -681,1.4428555965423584 -682,1.4429036378860474 -683,1.4428364038467407 -684,1.4429080486297607 -685,1.44282865524292 -686,1.4428588151931763 -687,1.4428131580352783 -688,1.4428410530090332 -689,1.4428104162216187 -690,1.4428205490112305 -691,1.4427930116653442 -692,1.4428400993347168 -693,1.4428913593292236 -694,1.4428622722625732 -695,1.4428457021713257 -696,1.442804217338562 -697,1.4428887367248535 -698,1.4427851438522339 -699,1.442886233329773 -700,1.4427945613861084 -701,1.442852258682251 -702,1.4427775144577026 -703,1.4429008960723877 -704,1.4430204629898071 -705,1.4429495334625244 -706,1.442867398262024 -707,1.4429935216903687 -708,1.4431596994400024 -709,1.4430278539657593 -710,1.4429506063461304 -711,1.4430575370788574 -712,1.4430084228515625 -713,1.4430185556411743 -714,1.4428666830062866 -715,1.443079948425293 -716,1.4427647590637207 -717,1.443029761314392 -718,1.4427355527877808 -719,1.4429712295532227 -720,1.4427645206451416 -721,1.442869782447815 -722,1.4427423477172852 -723,1.4428493976593018 -724,1.4426971673965454 -725,1.4428156614303589 -726,1.4426850080490112 -727,1.442765474319458 -728,1.44267737865448 -729,1.4427242279052734 -730,1.4426815509796143 -731,1.4427293539047241 -732,1.4426838159561157 -733,1.4427231550216675 -734,1.4426594972610474 -735,1.4426897764205933 -736,1.442652940750122 -737,1.4426651000976562 -738,1.442633867263794 -739,1.4426476955413818 -740,1.4426393508911133 -741,1.4426276683807373 -742,1.4426218271255493 -743,1.4426041841506958 -744,1.442623257637024 -745,1.4425889253616333 -746,1.4426347017288208 -747,1.4426250457763672 -748,1.4426015615463257 -749,1.4426087141036987 -750,1.4425725936889648 -751,1.4425729513168335 -752,1.442568063735962 -753,1.4425557851791382 -754,1.442575216293335 -755,1.4425398111343384 -756,1.442603349685669 -757,1.4426313638687134 -758,1.4425899982452393 -759,1.442595362663269 -760,1.4425649642944336 -761,1.4426666498184204 -762,1.442546010017395 -763,1.442685842514038 -764,1.4425952434539795 -765,1.4426262378692627 -766,1.4425443410873413 -767,1.4425667524337769 -768,1.4426189661026 -769,1.4425376653671265 -770,1.4425920248031616 -771,1.4425008296966553 -772,1.442568063735962 -773,1.442467451095581 -774,1.4425723552703857 -775,1.442514419555664 -776,1.4425297975540161 -777,1.4424973726272583 -778,1.442528247833252 -779,1.4425137042999268 -780,1.442494511604309 -781,1.4425358772277832 -782,1.4424333572387695 -783,1.4424846172332764 -784,1.4424331188201904 -785,1.442465901374817 -786,1.4424434900283813 
-787,1.442462682723999 -788,1.4424715042114258 -789,1.4424532651901245 -790,1.442456841468811 -791,1.4424335956573486 -792,1.4424575567245483 -793,1.4424715042114258 -794,1.4424285888671875 -795,1.4424705505371094 -796,1.4424275159835815 -797,1.4424504041671753 -798,1.4424593448638916 -799,1.4424593448638916 -800,1.4424439668655396 -801,1.4424608945846558 -802,1.4424395561218262 -803,1.442416787147522 -804,1.4424283504486084 -805,1.4424186944961548 -806,1.4424331188201904 -807,1.4424494504928589 -808,1.4424484968185425 -809,1.442436695098877 -810,1.4424207210540771 -811,1.442427158355713 -812,1.4423975944519043 -813,1.4424139261245728 -814,1.4424306154251099 -815,1.4424089193344116 -816,1.4425010681152344 -817,1.4428218603134155 -818,1.4425157308578491 -819,1.4427313804626465 -820,1.4424560070037842 -821,1.4425795078277588 -822,1.4424781799316406 -823,1.4425196647644043 -824,1.442457675933838 -825,1.4424372911453247 -826,1.4424865245819092 -827,1.4424049854278564 -828,1.4424678087234497 -829,1.4423779249191284 -830,1.4424567222595215 -831,1.4424397945404053 -832,1.4424108266830444 -833,1.442497730255127 -834,1.4423940181732178 -835,1.4424129724502563 -836,1.4423683881759644 -837,1.4423527717590332 -838,1.442362904548645 -839,1.4423484802246094 -840,1.4423861503601074 -841,1.4424031972885132 -842,1.4423694610595703 -843,1.442426085472107 -844,1.4424152374267578 -845,1.4423660039901733 -846,1.4424875974655151 -847,1.4426401853561401 -848,1.4425172805786133 -849,1.4425426721572876 -850,1.442449927330017 -851,1.4426155090332031 -852,1.442409634590149 -853,1.4426285028457642 -854,1.4424104690551758 -855,1.442527174949646 -856,1.4424333572387695 -857,1.4423903226852417 -858,1.4424235820770264 -859,1.442352056503296 -860,1.4424264430999756 -861,1.4423331022262573 -862,1.442382574081421 -863,1.442322850227356 -864,1.4423670768737793 -865,1.4424151182174683 -866,1.4423203468322754 -867,1.4424442052841187 -868,1.4423327445983887 -869,1.442391037940979 -870,1.4423454999923706 -871,1.4423071146011353 -872,1.4423224925994873 -873,1.442312479019165 -874,1.4423271417617798 -875,1.442338228225708 -876,1.4423128366470337 -877,1.4423723220825195 -878,1.4424108266830444 -879,1.4423454999923706 -880,1.4423978328704834 -881,1.4423195123672485 -882,1.4423450231552124 -883,1.442371129989624 -884,1.442348837852478 -885,1.442369818687439 -886,1.4423558712005615 -887,1.4423317909240723 -888,1.4423693418502808 -889,1.4423182010650635 -890,1.4423446655273438 -891,1.4423757791519165 -892,1.4423189163208008 -893,1.4423716068267822 -894,1.4423669576644897 -895,1.4423201084136963 -896,1.4424786567687988 -897,1.4428082704544067 -898,1.4426273107528687 -899,1.442598581314087 -900,1.4424206018447876 -901,1.4427263736724854 -902,1.4422968626022339 -903,1.4427276849746704 -904,1.4424009323120117 -905,1.4426079988479614 -906,1.4424142837524414 -907,1.442331314086914 -908,1.442423939704895 -909,1.4423280954360962 -910,1.4423410892486572 -911,1.4423160552978516 -912,1.442346453666687 -913,1.4423041343688965 -914,1.4423964023590088 -915,1.4423121213912964 -916,1.4422849416732788 -917,1.442358374595642 -918,1.4423105716705322 -919,1.4423677921295166 -920,1.4422887563705444 -921,1.4424266815185547 -922,1.4429043531417847 -923,1.4424676895141602 -924,1.4427978992462158 -925,1.4423097372055054 -926,1.4427242279052734 -927,1.4423617124557495 -928,1.4424679279327393 -929,1.4423916339874268 -930,1.4424601793289185 -931,1.442369818687439 -932,1.4423398971557617 -933,1.442384123802185 -934,1.4423052072525024 -935,1.4423433542251587 
-936,1.442299723625183 -937,1.442344069480896 -938,1.4422518014907837 -939,1.4424118995666504 -940,1.4423158168792725 -941,1.4423255920410156 -942,1.4424083232879639 -943,1.4423643350601196 -944,1.4423863887786865 -945,1.4423115253448486 -946,1.4424190521240234 -947,1.4429888725280762 -948,1.4425209760665894 -949,1.4428664445877075 -950,1.4422972202301025 -951,1.442788004875183 -952,1.4423762559890747 -953,1.4424782991409302 -954,1.4424227476119995 -955,1.4424560070037842 -956,1.4423816204071045 -957,1.4423447847366333 -958,1.4423576593399048 -959,1.4423202276229858 -960,1.442307472229004 -961,1.442270278930664 -962,1.4423437118530273 -963,1.4422200918197632 -964,1.4423853158950806 -965,1.4422358274459839 -966,1.442285180091858 -967,1.4423247575759888 -968,1.4423452615737915 -969,1.442361831665039 -970,1.442278504371643 -971,1.4423866271972656 -972,1.4428250789642334 -973,1.4424176216125488 -974,1.4427496194839478 -975,1.442266821861267 -976,1.442660927772522 -977,1.4424195289611816 -978,1.4423588514328003 -979,1.4424703121185303 -980,1.4423388242721558 -981,1.4423835277557373 -982,1.442314624786377 -983,1.442328691482544 -984,1.442311406135559 -985,1.4422818422317505 -986,1.4422831535339355 -987,1.4422988891601562 -988,1.442203164100647 -989,1.4423043727874756 -990,1.442204236984253 -991,1.4422093629837036 -992,1.4422718286514282 -993,1.4422576427459717 -994,1.4422603845596313 -995,1.4422510862350464 -996,1.4422141313552856 -997,1.4422004222869873 -998,1.442224383354187 -999,1.4422478675842285 -1000,1.4422311782836914
diff --git a/training/time_weighting/inverse_cubed_mirrored/training_config.txt b/training/time_weighting/inverse_cubed_mirrored/training_config.txt
deleted file mode 100644
index 64a9ff7..0000000
--- a/training/time_weighting/inverse_cubed_mirrored/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting/inverse_cubed_mirrored/training_log.csv b/training/time_weighting/inverse_cubed_mirrored/training_log.csv
deleted file mode 100644
index 70d0aa8..0000000
--- a/training/time_weighting/inverse_cubed_mirrored/training_log.csv
+++ /dev/null
@@ -1,1002 +0,0 @@
-Epoch,Loss -0,339.7125244140625 -1,91.70775604248047 -2,132.47540283203125 -3,96.54109191894531 -4,16.579389572143555 -5,5.6937761306762695 -6,3.325625419616699 -7,2.153303861618042 -8,0.9939378499984741 -9,1.207592248916626 -10,0.8454753160476685 -11,0.6507678627967834 -12,0.6557604074478149 -13,0.691925585269928 -14,0.6673983931541443 -15,0.6164923906326294 -16,0.6198005676269531 -17,0.590954601764679 -18,0.5800044536590576 -19,0.5760387182235718 -20,0.5677080154418945 -21,0.5418129563331604 -22,0.5416712760925293 -23,0.5356659889221191 -24,0.5270473957061768 -25,0.5167315602302551 -26,0.5050947070121765 -27,0.5005035996437073 -28,0.49261003732681274 -29,0.4691109359264374 -30,0.44664472341537476 -31,0.440733939409256 -32,0.4348633289337158 -33,0.4281279444694519 -34,0.42107531428337097 -35,0.4148262143135071 -36,0.4098849296569824 -37,0.40489840507507324 -38,0.39858734607696533 -39,0.3921489119529724 -40,0.3863367438316345 -41,0.38111910223960876 -42,0.3760354816913605 -43,0.3708359897136688 -44,0.3655281662940979 -45,0.36027294397354126 -46,0.35529419779777527 -47,0.3507170081138611 -48,0.34642866253852844 -49,0.3422001600265503 -50,0.3378933370113373 -51,0.33353570103645325 -52,0.32922306656837463 -53,0.3250134587287903 -54,0.3208349645137787 -55,0.31658488512039185 -56,0.31218862533569336 -57,0.3076311945915222 -58,0.30292460322380066 -59,0.29808518290519714 -60,0.29310569167137146 -61,0.28795677423477173 -62,0.28264665603637695 -63,0.2771700620651245 -64,0.2715000808238983 -65,0.2655951678752899 -66,0.25905683636665344 -67,0.25094711780548096 -68,0.24150662124156952 -69,0.23620840907096863 -70,0.2338184416294098 -71,0.23147274553775787 -72,0.22942547500133514 -73,0.2255697101354599 -74,0.219697043299675 -75,0.21265114843845367 -76,0.20634916424751282 -77,0.20319564640522003 -78,0.20151495933532715 -79,0.19917026162147522 -80,0.19535218179225922 -81,0.19041603803634644 -82,0.18556100130081177 -83,0.18192130327224731 -84,0.17942148447036743 -85,0.17720812559127808 -86,0.17461217939853668 -87,0.1714218705892563 -88,0.1678064614534378 -89,0.16413457691669464 -90,0.16077597439289093 -91,0.15789207816123962 -92,0.15521769225597382 -93,0.15233203768730164 -94,0.14898163080215454 -95,0.14526134729385376 -96,0.14140407741069794 -97,0.13753193616867065 -98,0.13366073369979858 -99,0.1297452300786972 -100,0.12577012181282043 -101,0.12183299660682678 -102,0.11806365102529526 -103,0.11466114223003387 -104,0.11154243350028992 -105,0.10863042622804642 -106,0.1059129610657692 -107,0.10332107543945312 -108,0.10077380388975143 -109,0.09828454256057739 -110,0.09584414958953857 -111,0.09348355233669281 -112,0.0911698192358017 -113,0.08900140225887299 -114,0.08699633926153183 -115,0.08517446368932724 -116,0.08351464569568634
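Reader's note: the two deleted configs in this commit record weight functions that differ only in the exponent applied to the mirrored ramp (note the apparent mismatch between the directory names, inverse_cubed and inverse_cubed_mirrored, and the recorded function names, linear_mirrored and square_root_mirrored). A quick comparison of the two schedules, using the same hypothetical mirrored_power_weights helper as in the earlier sketch:

    import torch

    def mirrored_power_weights(t_span, power, t_max=None, min_val=0.01):
        # Same hypothetical helper as above: decays from 1.0 at t = 0 to min_val at t = t_max.
        t_max = float(t_span[-1]) if t_max is None else t_max
        return min_val + (1 - min_val) * ((t_max - t_span) / t_max) ** power

    t_span = torch.linspace(0.0, 10.0, 1000)
    linear = mirrored_power_weights(t_span, power=1.0)  # linear_mirrored form
    sqrt = mirrored_power_weights(t_span, power=0.5)    # square_root_mirrored form
    # Midpoint check: the square-root variant decays more slowly, so mid- and
    # late-trajectory errors retain more weight than under the linear schedule.
    print(linear[500].item())  # ~0.50
    print(sqrt[500].item())    # ~0.71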
-117,0.08201524615287781 -118,0.08068650215864182 -119,0.07952951639890671 -120,0.07853210717439651 -121,0.07767597585916519 -122,0.07694100588560104 -123,0.07631133496761322 -124,0.07577033340930939 -125,0.07530304789543152 -126,0.0748976469039917 -127,0.07454615086317062 -128,0.07424286752939224 -129,0.07398136705160141 -130,0.0737559050321579 -131,0.0735606923699379 -132,0.07338880747556686 -133,0.07323528826236725 -134,0.0730961486697197 -135,0.07296829670667648 -136,0.07284912467002869 -137,0.07273559272289276 -138,0.07262596487998962 -139,0.07251908630132675 -140,0.07241398096084595 -141,0.07230941206216812 -142,0.07220432162284851 -143,0.07209786027669907 -144,0.0719895288348198 -145,0.07187894731760025 -146,0.07176623493432999 -147,0.07165145874023438 -148,0.07153495401144028 -149,0.07141707837581635 -150,0.07129817456007004 -151,0.07117866724729538 -152,0.07105869054794312 -153,0.07093848288059235 -154,0.07081834971904755 -155,0.07069841772317886 -156,0.07057904452085495 -157,0.07046034932136536 -158,0.07034268230199814 -159,0.07022611051797867 -160,0.0701107308268547 -161,0.06999661028385162 -162,0.06988368928432465 -163,0.06977186352014542 -164,0.06966113299131393 -165,0.06955146044492722 -166,0.06944266706705093 -167,0.06933478266000748 -168,0.0692276880145073 -169,0.06912116706371307 -170,0.06901536881923676 -171,0.06891030818223953 -172,0.06880591809749603 -173,0.06870221346616745 -174,0.06859913468360901 -175,0.0684966966509819 -176,0.06839483976364136 -177,0.06829363107681274 -178,0.06819307059049606 -179,0.06809302419424057 -180,0.06799352169036865 -181,0.06789455562829971 -182,0.06779614835977554 -183,0.06769828498363495 -184,0.06760095059871674 -185,0.06750413775444031 -186,0.06740815937519073 -187,0.06731322407722473 -188,0.06721939146518707 -189,0.06712658703327179 -190,0.06703449040651321 -191,0.06694291532039642 -192,0.06685202568769455 -193,0.06676172465085983 -194,0.06667204946279526 -195,0.06658300012350082 -196,0.06649453192949295 -197,0.06640677154064178 -198,0.06631956994533539 -199,0.06623291969299316 -200,0.06614679098129272 -201,0.06606121361255646 -202,0.06597614288330078 -203,0.06589161604642868 -204,0.06580766290426254 -205,0.06572414189577103 -206,0.06564117223024368 -207,0.06555861979722977 -208,0.06547650694847107 -209,0.06539493054151535 -210,0.06531377881765366 -211,0.06523305177688599 -212,0.0651528537273407 -213,0.06507305800914764 -214,0.06499375402927399 -215,0.06491482257843018 -216,0.06483632326126099 -217,0.06475820392370224 -218,0.06468047201633453 -219,0.06460315734148026 -220,0.0645262598991394 -221,0.06444971263408661 -222,0.06437356024980545 -223,0.06429781019687653 -224,0.06422246992588043 -225,0.0641474798321724 -226,0.06407283991575241 -227,0.06399859488010406 -228,0.06392466276884079 -229,0.06385102868080139 -230,0.06377782672643661 -231,0.06370493024587631 -232,0.06363240629434586 -233,0.06356018781661987 -234,0.06348829716444016 -235,0.0634167343378067 -236,0.06334550678730011 -237,0.06327453255653381 -238,0.06320396810770035 -239,0.06313370913267136 -240,0.06306374818086624 -241,0.06299415975809097 -242,0.06292488425970078 -243,0.06285601109266281 -244,0.06278742849826813 -245,0.06271912902593613 -246,0.06265103071928024 -247,0.06258321553468704 -248,0.06251555681228638 -249,0.0624481700360775 -250,0.062380947172641754 -251,0.06231393292546272 -252,0.062247149646282196 -253,0.062180571258068085 -254,0.0621141642332077 -255,0.06204792857170105 -256,0.06198186054825783 -257,0.061915960162878036 -258,0.06185034289956093 
-259,0.06178486347198486 -260,0.06171957775950432 -261,0.061654407531023026 -262,0.06158945709466934 -263,0.061524730175733566 -264,0.061460137367248535 -265,0.06139576435089111 -266,0.06133151054382324 -267,0.06126749515533447 -268,0.06120363622903824 -269,0.06113995239138603 -270,0.06107642874121666 -271,0.06101308390498161 -272,0.06094992905855179 -273,0.06088694557547569 -274,0.06082410365343094 -275,0.0607614740729332 -276,0.06069900095462799 -277,0.06063666194677353 -278,0.060574501752853394 -279,0.0605124905705452 -280,0.060450684279203415 -281,0.060389015823602676 -282,0.06032754108309746 -283,0.060266222804784775 -284,0.06020507961511612 -285,0.060144081711769104 -286,0.060083258897066116 -287,0.060022611171007156 -288,0.05996205657720566 -289,0.05990171059966087 -290,0.05984148010611534 -291,0.05978146195411682 -292,0.05972161144018173 -293,0.05966194346547127 -294,0.05960242077708244 -295,0.05954309180378914 -296,0.059483908116817474 -297,0.05942487716674805 -298,0.05936600640416145 -299,0.05930725112557411 -300,0.0592486672103405 -301,0.05919020622968674 -302,0.059131987392902374 -303,0.059073902666568756 -304,0.05901600047945976 -305,0.05895819514989853 -306,0.058900561183691025 -307,0.05884307995438576 -308,0.05878569930791855 -309,0.05872853845357895 -310,0.05867147445678711 -311,0.05861460044980049 -312,0.058557912707328796 -313,0.05850140005350113 -314,0.05844501405954361 -315,0.05838875100016594 -316,0.058332569897174835 -317,0.05827653780579567 -318,0.05822054296731949 -319,0.05816468223929405 -320,0.058108869940042496 -321,0.058053143322467804 -322,0.05799761414527893 -323,0.05794224888086319 -324,0.057887014001607895 -325,0.05783193185925484 -326,0.05777696147561073 -327,0.05772216618061066 -328,0.05766753479838371 -329,0.057613108307123184 -330,0.05755884200334549 -331,0.05750470608472824 -332,0.057450711727142334 -333,0.057396888732910156 -334,0.05734319984912872 -335,0.05728965997695923 -336,0.05723622813820839 -337,0.057182956486940384 -338,0.05712980031967163 -339,0.05707675963640213 -340,0.05702386051416397 -341,0.05697110295295715 -342,0.05691844969987869 -343,0.05686596781015396 -344,0.05681360512971878 -345,0.05676134675741196 -346,0.056709252297878265 -347,0.05665728077292442 -348,0.05660543590784073 -349,0.056553684175014496 -350,0.0565020851790905 -351,0.056450627744197845 -352,0.056399259716272354 -353,0.056348010897636414 -354,0.05629691854119301 -355,0.05624596029520035 -356,0.056195106357336044 -357,0.056144386529922485 -358,0.05609378218650818 -359,0.05604327842593193 -360,0.05599294230341911 -361,0.05594272166490555 -362,0.05589258298277855 -363,0.055842578411102295 -364,0.05579272285103798 -365,0.05574294924736023 -366,0.05569329112768173 -367,0.05564374476671219 -368,0.055594317615032196 -369,0.05554499477148056 -370,0.055495817214250565 -371,0.05544675141572952 -372,0.05539781600236893 -373,0.05534899979829788 -374,0.05530036985874176 -375,0.055251847952604294 -376,0.05520349740982056 -377,0.055155251175165176 -378,0.055107131600379944 -379,0.0550592765212059 -380,0.05501154810190201 -381,0.05496394634246826 -382,0.05491643026471138 -383,0.05486902967095375 -384,0.054821744561195374 -385,0.054774537682533264 -386,0.05472741648554802 -387,0.05468035489320755 -388,0.05463339015841484 -389,0.0545865036547184 -390,0.05453968793153763 -391,0.05449295416474342 -392,0.05444628745317459 -393,0.05439969152212143 -394,0.05435315519571304 -395,0.05430668964982033 -396,0.0542602576315403 -397,0.054213907569646835 -398,0.05416763201355934 
-399,0.05412141978740692 -400,0.05407526716589928 -401,0.054029181599617004 -402,0.05398315563797951 -403,0.05393723398447037 -404,0.053891342133283615 -405,0.05384554713964462 -406,0.053799837827682495 -407,0.05375421792268753 -408,0.05370869114995003 -409,0.053663220256567 -410,0.053617823868989944 -411,0.05357252061367035 -412,0.05352730676531792 -413,0.053482167422771454 -414,0.05343712121248245 -415,0.05339217185974121 -416,0.053347352892160416 -417,0.05330267176032066 -418,0.05325808376073837 -419,0.05321355164051056 -420,0.053169094026088715 -421,0.05312471091747284 -422,0.05308038368821144 -423,0.0530361533164978 -424,0.05299200490117073 -425,0.05294794961810112 -426,0.052903953939676285 -427,0.05286003649234772 -428,0.05281618610024452 -429,0.05277237668633461 -430,0.05272866412997246 -431,0.052685048431158066 -432,0.05264149606227875 -433,0.052598051726818085 -434,0.052554693073034286 -435,0.052511487156152725 -436,0.052468426525592804 -437,0.05242550000548363 -438,0.05238265171647072 -439,0.05233994498848915 -440,0.05229734256863594 -441,0.052254870533943176 -442,0.05221256986260414 -443,0.05217043682932854 -444,0.05212856084108353 -445,0.052086830139160156 -446,0.05204525962471962 -447,0.052003778517246246 -448,0.05196240916848183 -449,0.051921144127845764 -450,0.05187998339533806 -451,0.051838938146829605 -452,0.05179798603057861 -453,0.051757119596004486 -454,0.05171635374426842 -455,0.05167565867304802 -456,0.051635053008794785 -457,0.05159449949860573 -458,0.051554031670093536 -459,0.051513638347387314 -460,0.05147333815693855 -461,0.051433105021715164 -462,0.05139295384287834 -463,0.051352884620428085 -464,0.05131286010146141 -465,0.05127289518713951 -466,0.051232971251010895 -467,0.051193103194236755 -468,0.0511532761156559 -469,0.051113516092300415 -470,0.05107380822300911 -471,0.051034167408943176 -472,0.050994597375392914 -473,0.05095507949590683 -474,0.05091560631990433 -475,0.0508762001991272 -476,0.050836868584156036 -477,0.050797607749700546 -478,0.05075840651988983 -479,0.0507192425429821 -480,0.05068014934659004 -481,0.05064111202955246 -482,0.05060216039419174 -483,0.050563231110572815 -484,0.05052436888217926 -485,0.050485555082559586 -486,0.05044679343700409 -487,0.05040811002254486 -488,0.050369445234537125 -489,0.05033087730407715 -490,0.05029234290122986 -491,0.05025384575128555 -492,0.050215426832437515 -493,0.05017703399062157 -494,0.05013870447874069 -495,0.0501004122197628 -496,0.05006219074130058 -497,0.050024013966321945 -498,0.04998597130179405 -499,0.04994804412126541 -500,0.04991018772125244 -501,0.04987242445349693 -502,0.04983474314212799 -503,0.04979715123772621 -504,0.049759604036808014 -505,0.04972214996814728 -506,0.04968472942709923 -507,0.04964739456772804 -508,0.049610111862421036 -509,0.04957288131117821 -510,0.04953567683696747 -511,0.049498528242111206 -512,0.04946143552660942 -513,0.04942437633872032 -514,0.04938740283250809 -515,0.04935045167803764 -516,0.04931359365582466 -517,0.04927676543593407 -518,0.04924001172184944 -519,0.0492032915353775 -520,0.04916661977767944 -521,0.04912998154759407 -522,0.049093376845121384 -523,0.04905688017606735 -524,0.04902037978172302 -525,0.04898400232195854 -526,0.04894766956567764 -527,0.048911381512880325 -528,0.048875145614147186 -529,0.04883893206715584 -530,0.04880281165242195 -531,0.048766762018203735 -532,0.048730768263339996 -533,0.04869484528899193 -534,0.04865901917219162 -535,0.04862326383590698 -536,0.04858756810426712 -537,0.04855195805430412 -538,0.04851651191711426 
-539,0.04848119616508484 -540,0.04844614863395691 -541,0.0484112873673439 -542,0.04837654158473015 -543,0.048341937363147736 -544,0.04830743372440338 -545,0.04827306419610977 -546,0.04823889210820198 -547,0.0482049398124218 -548,0.048171114176511765 -549,0.048137396574020386 -550,0.04810381680727005 -551,0.04807032272219658 -552,0.04803694784641266 -553,0.0480036661028862 -554,0.047970470041036606 -555,0.04793735593557358 -556,0.04790430888533592 -557,0.04787135496735573 -558,0.0478384904563427 -559,0.04780571535229683 -560,0.047772977501153946 -561,0.04774029552936554 -562,0.047707654535770416 -563,0.04767504334449768 -564,0.04764246568083763 -565,0.04760991409420967 -566,0.047577425837516785 -567,0.04754498973488808 -568,0.04751259461045265 -569,0.047480251640081406 -570,0.04744795337319374 -571,0.047415681183338165 -572,0.047383468598127365 -573,0.04735127463936806 -574,0.047319136559963226 -575,0.04728706553578377 -576,0.04725506156682968 -577,0.047223083674907684 -578,0.047191157937049866 -579,0.04715929552912712 -580,0.047127462923526764 -581,0.047095682471990585 -582,0.04706394672393799 -583,0.04703226685523987 -584,0.04700063169002533 -585,0.046969059854745865 -586,0.046937521547079086 -587,0.04690602794289589 -588,0.04687459394335747 -589,0.04684320464730263 -590,0.04681184142827988 -591,0.046780530363321304 -592,0.04674927890300751 -593,0.04671808332204819 -594,0.04668693244457245 -595,0.04665585234761238 -596,0.046624839305877686 -597,0.046593889594078064 -598,0.04656298831105232 -599,0.04653216153383255 -600,0.04650138318538666 -601,0.046470679342746735 -602,0.04644002392888069 -603,0.04640943557024002 -604,0.046378910541534424 -605,0.04634847119450569 -606,0.04631812497973442 -607,0.046287912875413895 -608,0.0462578646838665 -609,0.04622787609696388 -610,0.04619794711470604 -611,0.046168092638254166 -612,0.04613829031586647 -613,0.046108558773994446 -614,0.04607890173792839 -615,0.04604931175708771 -616,0.04601976275444031 -617,0.04599028080701828 -618,0.04596084728837013 -619,0.045931484550237656 -620,0.04590217024087906 -621,0.04587289318442345 -622,0.04584367200732231 -623,0.04581451416015625 -624,0.045785415917634964 -625,0.04575635492801666 -626,0.045727357268333435 -627,0.04569840431213379 -628,0.045669496059417725 -629,0.04564062878489494 -630,0.04561181738972664 -631,0.045583050698041916 -632,0.04555436596274376 -633,0.04552571102976799 -634,0.0454971008002758 -635,0.045468542724847794 -636,0.04544003680348396 -637,0.04541158676147461 -638,0.04538321495056152 -639,0.04535491392016411 -640,0.04532666131854057 -641,0.04529844969511032 -642,0.045270275324583054 -643,0.04524213448166847 -644,0.04521401226520538 -645,0.045185912400484085 -646,0.04515784978866577 -647,0.04512983188033104 -648,0.045101843774318695 -649,0.04507390409708023 -650,0.04504599794745445 -651,0.04501810669898987 -652,0.04499023035168648 -653,0.04496236518025398 -654,0.044934529811143875 -655,0.044906724244356155 -656,0.044878944754600525 -657,0.04485119879245758 -658,0.04482347145676613 -659,0.04479578882455826 -660,0.04476812854409218 -661,0.0447404682636261 -662,0.04471283406019211 -663,0.044685203582048416 -664,0.04465761035680771 -665,0.04463004320859909 -666,0.04460247606039047 -667,0.044574953615665436 -668,0.044547438621520996 -669,0.04451994225382805 -670,0.044492438435554504 -671,0.044464968144893646 -672,0.044437531381845474 -673,0.04441012814640999 -674,0.04438274726271629 -675,0.04435539245605469 -676,0.044328078627586365 -677,0.04430077597498894 -678,0.044273491948843 
-679,0.044246233999729156 -680,0.0442189984023571 -681,0.044191811233758926 -682,0.04416464641690254 -683,0.044137511402368546 -684,0.04411040246486664 -685,0.04408334195613861 -686,0.04405629262328148 -687,0.044029273092746735 -688,0.04400229454040527 -689,0.04397532716393471 -690,0.04394841939210892 -691,0.043921519070863724 -692,0.04389466345310211 -693,0.04386783391237259 -694,0.04384103789925575 -695,0.043814241886138916 -696,0.043787501752376556 -697,0.04376080632209778 -698,0.04373416304588318 -699,0.04370759800076485 -700,0.0436810627579689 -701,0.04365459457039833 -702,0.04362814128398895 -703,0.04360174387693405 -704,0.04357537999749184 -705,0.04354904964566231 -706,0.04352274909615517 -707,0.04349648579955101 -708,0.04347025230526924 -709,0.04344404861330986 -710,0.04341788962483406 -711,0.04339177533984184 -712,0.04336569085717201 -713,0.04333964362740517 -714,0.043313611298799515 -715,0.04328760504722595 -716,0.04326162859797478 -717,0.04323568567633629 -718,0.043209776282310486 -719,0.04318390041589737 -720,0.04315805807709694 -721,0.043132245540618896 -722,0.04310646653175354 -723,0.04308072105050087 -724,0.043055009096860886 -725,0.043029431253671646 -726,0.04300396889448166 -727,0.04297858104109764 -728,0.042953260242938995 -729,0.04292799159884453 -730,0.042902760207653046 -731,0.04287753626704216 -732,0.04285229369997978 -733,0.04282710328698158 -734,0.04280192032456398 -735,0.04277675971388817 -736,0.042751628905534744 -737,0.04272649809718132 -738,0.04270140826702118 -739,0.04267639294266701 -740,0.04265144094824791 -741,0.04262654110789299 -742,0.04260174185037613 -743,0.04257703199982643 -744,0.04255241900682449 -745,0.042527880519628525 -746,0.042503438889980316 -747,0.04247907176613808 -748,0.04245471581816673 -749,0.042430415749549866 -750,0.042406149208545685 -751,0.042381953448057175 -752,0.04235781729221344 -753,0.04233372211456299 -754,0.04230967164039612 -755,0.04228563979268074 -756,0.04226166754961014 -757,0.042237788438797 -758,0.042213983833789825 -759,0.04219024255871773 -760,0.042166564613580704 -761,0.04214290529489517 -762,0.04211926832795143 -763,0.04209566116333008 -764,0.04207205772399902 -765,0.04204848036170006 -766,0.04202492535114288 -767,0.0420013926923275 -768,0.04197792336344719 -769,0.04195448383688927 -770,0.04193108528852463 -771,0.041907697916030884 -772,0.04188434034585953 -773,0.04186101630330086 -774,0.041837744414806366 -775,0.041814520955085754 -776,0.04179135337471962 -777,0.04176824912428856 -778,0.04174515977501869 -779,0.04172210395336151 -780,0.041699059307575226 -781,0.04167604446411133 -782,0.041653022170066833 -783,0.041630037128925323 -784,0.0416070818901062 -785,0.04158419370651245 -786,0.041561346501111984 -787,0.041538529098033905 -788,0.04151574894785881 -789,0.041493009775877 -790,0.04147028177976608 -791,0.04144758731126785 -792,0.04142491891980171 -793,0.04140228405594826 -794,0.04137970134615898 -795,0.04135715588927269 -796,0.04133462905883789 -797,0.041312120854854584 -798,0.04128964617848396 -799,0.04126718267798424 -800,0.0412447527050972 -801,0.04122234880924225 -802,0.04119999706745148 -803,0.04117768630385399 -804,0.041155412793159485 -805,0.04113315790891647 -806,0.041110921651124954 -807,0.04108874127268791 -808,0.04106659069657326 -809,0.041044484823942184 -810,0.04102245718240738 -811,0.041000522673130035 -812,0.04097863659262657 -813,0.040956802666187286 -814,0.040934979915618896 -815,0.040913213044404984 -816,0.040891483426094055 -817,0.04086979106068611 -818,0.04084810987114906 -819,0.04082649201154709 
-820,0.0408049151301384 -821,0.0407833568751812 -822,0.04076182842254639 -823,0.04074034094810486 -824,0.04071887210011482 -825,0.04069743677973747 -826,0.040676016360521317 -827,0.04065464809536934 -828,0.04063332825899124 -829,0.040612056851387024 -830,0.0405908077955246 -831,0.04056960344314575 -832,0.04054848104715347 -833,0.040527425706386566 -834,0.040506426244974136 -835,0.04048551991581917 -836,0.04046466946601868 -837,0.04044385999441147 -838,0.04042309895157814 -839,0.04040238633751869 -840,0.04038171470165253 -841,0.040361057966947556 -842,0.04034043848514557 -843,0.040319833904504776 -844,0.04029925912618637 -845,0.040278736501932144 -846,0.0402582548558712 -847,0.04023786634206772 -848,0.0402175597846508 -849,0.04019737243652344 -850,0.04017725959420204 -851,0.04015716537833214 -852,0.0401371493935585 -853,0.04011717811226845 -854,0.04009726643562317 -855,0.04007740691304207 -856,0.040057599544525146 -857,0.040037814527750015 -858,0.040018048137426376 -859,0.03999830037355423 -860,0.03997858613729477 -861,0.039958883076906204 -862,0.03993922099471092 -863,0.03991961106657982 -864,0.039900042116642 -865,0.039880502969026566 -866,0.03986098989844322 -867,0.03984152153134346 -868,0.039822086691856384 -869,0.0398026742041111 -870,0.0397832877933979 -871,0.039763979613780975 -872,0.03974469378590584 -873,0.039725445210933685 -874,0.03970622643828392 -875,0.03968698903918266 -876,0.03966779261827469 -877,0.039645783603191376 -878,0.03973241150379181 -879,0.03968963772058487 -880,0.039650339633226395 -881,0.03958110883831978 -882,0.039632003754377365 -883,0.03955258056521416 -884,0.039544727653265 -885,0.03955238685011864 -886,0.03948259353637695 -887,0.039491381496191025 -888,0.03948799893260002 -889,0.03943959251046181 -890,0.039433613419532776 -891,0.039397258311510086 -892,0.039378564804792404 -893,0.03937361016869545 -894,0.03933332487940788 -895,0.03932797908782959 -896,0.03930700570344925 -897,0.03927753120660782 -898,0.039271678775548935 -899,0.03924255818128586 -900,0.039220791310071945 -901,0.03919999673962593 -902,0.03920179232954979 -903,0.03919162228703499 -904,0.039140570908784866 -905,0.03911828249692917 -906,0.03912259265780449 -907,0.03907400369644165 -908,0.03907184675335884 -909,0.03905872255563736 -910,0.039019979536533356 -911,0.03902728855609894 -912,0.03900528326630592 -913,0.03897452726960182 -914,0.0389668233692646 -915,0.03893303498625755 -916,0.03890686482191086 -917,0.0388907790184021 -918,0.038864269852638245 -919,0.0388430617749691 -920,0.03882625326514244 -921,0.038803018629550934 -922,0.038781218230724335 -923,0.03876917064189911 -924,0.038751620799303055 -925,0.03872935101389885 -926,0.038712769746780396 -927,0.03869329392910004 -928,0.03867648169398308 -929,0.0386890284717083 -930,0.03864319249987602 -931,0.03861081600189209 -932,0.03861761838197708 -933,0.03859986364841461 -934,0.03856077045202255 -935,0.038550324738025665 -936,0.0385485514998436 -937,0.038522906601428986 -938,0.038495589047670364 -939,0.038485750555992126 -940,0.03847029432654381 -941,0.03843986615538597 -942,0.038420408964157104 -943,0.03840893134474754 -944,0.03838567063212395 -945,0.03837120532989502 -946,0.03835880756378174 -947,0.038329873234033585 -948,0.03831639885902405 -949,0.03829837217926979 -950,0.03828243538737297 -951,0.03826700523495674 -952,0.038248877972364426 -953,0.03823808580636978 -954,0.03822055459022522 -955,0.03819706663489342 -956,0.03818054869771004 -957,0.03816196694970131 -958,0.038146618753671646 -959,0.03812936320900917 -960,0.03811031952500343 
-961,0.038093987852334976 -962,0.03807695582509041 -963,0.03806168586015701 -964,0.038046132773160934 -965,0.038029368966817856 -966,0.038011375814676285 -967,0.037996042519807816 -968,0.03797714039683342 -969,0.03795954957604408 -970,0.037943799048662186 -971,0.037928178906440735 -972,0.037912916392087936 -973,0.03789769485592842 -974,0.03788276016712189 -975,0.03786801919341087 -976,0.03785144165158272 -977,0.037837956100702286 -978,0.03782254830002785 -979,0.03780443221330643 -980,0.03779205307364464 -981,0.037775080651044846 -982,0.03775356337428093 -983,0.0377398282289505 -984,0.0377238504588604 -985,0.037707362323999405 -986,0.037693627178668976 -987,0.037678685039281845 -988,0.03766405209898949 -989,0.03764902055263519 -990,0.037633251398801804 -991,0.037616949528455734 -992,0.03760256618261337 -993,0.03758420795202255 -994,0.03757070377469063 -995,0.03755544126033783 -996,0.037537831813097 -997,0.03752824664115906 -998,0.037513941526412964 -999,0.03749345988035202 -1000,0.03748048096895218 diff --git a/training/time_weighting/inverse_mirrored/training_config.txt b/training/time_weighting/inverse_mirrored/training_config.txt deleted file mode 100644 index 6e86722..0000000 --- a/training/time_weighting/inverse_mirrored/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0.0001 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git 
a/training/time_weighting/inverse_mirrored/training_log.csv b/training/time_weighting/inverse_mirrored/training_log.csv deleted file mode 100644 index 272f88b..0000000 --- a/training/time_weighting/inverse_mirrored/training_log.csv +++ /dev/null @@ -1,1002 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,24.369340896606445 -2,461.9505920410156 -3,50.34978485107422 -4,18.918930053710938 -5,12.453255653381348 -6,8.493377685546875 -7,5.254426956176758 -8,4.255946159362793 -9,4.83012056350708 -10,4.859580039978027 -11,4.571417808532715 -12,5.239599227905273 -13,6.27605676651001 -14,5.092806339263916 -15,5.063053607940674 -16,6.2152605056762695 -17,12.248559951782227 -18,38.32664108276367 -19,4.288180351257324 -20,3.0220494270324707 -21,1.9917105436325073 -22,1.73623788356781 -23,1.6188771724700928 -24,1.3370015621185303 -25,1.2342694997787476 -26,1.1752384901046753 -27,1.156418800354004 -28,1.1132892370224 -29,1.0980710983276367 -30,1.0900038480758667 -31,1.0068719387054443 -32,0.9798465967178345 -33,1.0364035367965698 -34,1.0493671894073486 -35,1.0271708965301514 -36,0.9927746653556824 -37,0.9600158929824829 -38,0.9341144561767578 -39,0.9190126657485962 -40,0.9015825390815735 -41,0.8826314210891724 -42,0.8647568225860596 -43,0.8489412069320679 -44,0.8352264761924744 -45,0.8234767317771912 -46,0.8135035634040833 -47,0.8046491742134094 -48,0.7958680987358093 -49,0.7863254547119141 -50,0.7759387493133545 -51,0.7651107907295227 -52,0.7545374631881714 -53,0.7448630928993225 -54,0.7367032170295715 -55,0.7303656935691833 -56,0.7254881262779236 -57,0.7207314372062683 -58,0.7148568630218506 -59,0.7078380584716797 -60,0.7006659507751465 -61,0.6938673257827759 -62,0.6876028776168823 -63,0.6819575428962708 -64,0.6768419146537781 -65,0.6720624566078186 -66,0.6674783229827881 -67,0.6629692316055298 -68,0.6584796905517578 -69,0.653952419757843 -70,0.6493877172470093 -71,0.6448102593421936 -72,0.6402521133422852 -73,0.6357572078704834 -74,0.6313247680664062 -75,0.6267485618591309 -76,0.622037410736084 -77,0.6169754266738892 -78,0.6113805174827576 -79,0.6053227186203003 -80,0.5994185209274292 -81,0.5927627086639404 -82,0.5835195779800415 -83,0.5725672841072083 -84,0.5605225563049316 -85,0.5470303297042847 -86,0.529535174369812 -87,0.5091666579246521 -88,0.48960214853286743 -89,0.4726111590862274 -90,0.45581045746803284 -91,0.44332319498062134 -92,0.4250919222831726 -93,0.4093132019042969 -94,0.39954873919487 -95,0.3769957423210144 -96,0.3730767071247101 -97,0.40884947776794434 -98,0.36547908186912537 -99,0.3453086018562317 -100,0.35576704144477844 -101,0.4166208803653717 -102,0.4169461131095886 -103,0.4145467281341553 -104,0.41223713755607605 -105,0.4113996922969818 -106,0.405015766620636 -107,0.3883588910102844 -108,0.371525377035141 -109,0.35809871554374695 -110,0.3527248203754425 -111,0.346714586019516 -112,0.33898094296455383 -113,0.3317435681819916 -114,0.3364846408367157 -115,0.3281848430633545 -116,0.33166298270225525 -117,0.3282705545425415 -118,0.3258231580257416 -119,0.3258395195007324 -120,0.32547858357429504 -121,0.3231695592403412 -122,0.3225998878479004 -123,0.3220314383506775 -124,0.32041263580322266 -125,0.31904324889183044 -126,0.31860998272895813 -127,0.3170239329338074 -128,0.31563565135002136 -129,0.3148478865623474 -130,0.31359511613845825 -131,0.3120215833187103 -132,0.31099674105644226 -133,0.3098442256450653 -134,0.3082396984100342 -135,0.3069997727870941 -136,0.3057180345058441 -137,0.3040336072444916 -138,0.3025475740432739 -139,0.30103200674057007 -140,0.2990981340408325 
-141,0.2976607084274292 -142,0.2963371276855469 -143,0.2952558100223541 -144,0.2945505678653717 -145,0.2930665612220764 -146,0.2918602228164673 -147,0.29016250371932983 -148,0.2888782322406769 -149,0.2875387370586395 -150,0.2863922119140625 -151,0.2851865589618683 -152,0.28399068117141724 -153,0.2827247083187103 -154,0.2813965976238251 -155,0.2800789773464203 -156,0.2787705659866333 -157,0.27742546796798706 -158,0.2762049436569214 -159,0.2748551368713379 -160,0.2734857201576233 -161,0.27198344469070435 -162,0.27030235528945923 -163,0.26882484555244446 -164,0.2672373056411743 -165,0.2656394839286804 -166,0.26414185762405396 -167,0.2624441087245941 -168,0.26079699397087097 -169,0.2591596841812134 -170,0.25736793875694275 -171,0.25563302636146545 -172,0.25391268730163574 -173,0.25212377309799194 -174,0.2503654658794403 -175,0.24863918125629425 -176,0.24689261615276337 -177,0.24518190324306488 -178,0.24350206553936005 -179,0.24180713295936584 -180,0.24012722074985504 -181,0.23844440281391144 -182,0.236759215593338 -183,0.23507463932037354 -184,0.23339064419269562 -185,0.23170778155326843 -186,0.2300247848033905 -187,0.22832639515399933 -188,0.22661665081977844 -189,0.2248796671628952 -190,0.22311219573020935 -191,0.22130532562732697 -192,0.21945126354694366 -193,0.2175440490245819 -194,0.2155752182006836 -195,0.213544562458992 -196,0.21143874526023865 -197,0.20925480127334595 -198,0.20698535442352295 -199,0.20463089644908905 -200,0.20218625664710999 -201,0.19964902102947235 -202,0.1970219761133194 -203,0.19432270526885986 -204,0.19156695902347565 -205,0.1887795627117157 -206,0.18598559498786926 -207,0.18321338295936584 -208,0.18051227927207947 -209,0.17788246273994446 -210,0.17534738779067993 -211,0.1729157269001007 -212,0.17060169577598572 -213,0.16841952502727509 -214,0.166358083486557 -215,0.16443629562854767 -216,0.16264265775680542 -217,0.16094644367694855 -218,0.1593611091375351 -219,0.15787342190742493 -220,0.15642094612121582 -221,0.15504635870456696 -222,0.15375421941280365 -223,0.15252412855625153 -224,0.151374951004982 -225,0.1502920240163803 -226,0.14926403760910034 -227,0.14829698204994202 -228,0.1473805457353592 -229,0.14650148153305054 -230,0.14566762745380402 -231,0.14487051963806152 -232,0.14409281313419342 -233,0.14335103332996368 -234,0.14263592660427094 -235,0.1419309377670288 -236,0.14125023782253265 -237,0.1405867338180542 -238,0.13993053138256073 -239,0.1392943561077118 -240,0.13868166506290436 -241,0.13807924091815948 -242,0.13748610019683838 -243,0.13690854609012604 -244,0.13633601367473602 -245,0.1357705146074295 -246,0.13522115349769592 -247,0.13467323780059814 -248,0.13414159417152405 -249,0.13362762331962585 -250,0.13311786949634552 -251,0.1326243281364441 -252,0.13214711844921112 -253,0.131671741604805 -254,0.1312103420495987 -255,0.13076093792915344 -256,0.13031525909900665 -257,0.12988407909870148 -258,0.12945720553398132 -259,0.12904193997383118 -260,0.12863701581954956 -261,0.12823811173439026 -262,0.12784984707832336 -263,0.12746107578277588 -264,0.1270829737186432 -265,0.12670792639255524 -266,0.1263451874256134 -267,0.125981405377388 -268,0.12563206255435944 -269,0.12528544664382935 -270,0.1249486654996872 -271,0.12461227923631668 -272,0.12428358942270279 -273,0.12396246939897537 -274,0.12364266812801361 -275,0.12332846969366074 -276,0.12301716953516006 -277,0.12271120399236679 -278,0.12240690737962723 -279,0.12210989743471146 -280,0.12181589752435684 -281,0.12152474373579025 -282,0.12123788148164749 -283,0.12095391005277634 -284,0.12067551165819168 
-285,0.12039672583341599 -286,0.12012319266796112 -287,0.11985170841217041 -288,0.1195865273475647 -289,0.11932048946619034 -290,0.11905913800001144 -291,0.11880005151033401 -292,0.11854136735200882 -293,0.1182854026556015 -294,0.11803269386291504 -295,0.11778150498867035 -296,0.11753302812576294 -297,0.11728773266077042 -298,0.1170421838760376 -299,0.1167973205447197 -300,0.11655524373054504 -301,0.11631353199481964 -302,0.11607451736927032 -303,0.11583779007196426 -304,0.11560151726007462 -305,0.11536501348018646 -306,0.11513008177280426 -307,0.11489466577768326 -308,0.11466071009635925 -309,0.11442819982767105 -310,0.11419344693422318 -311,0.11386819928884506 -312,0.11347507685422897 -313,0.1130734384059906 -314,0.11265160888433456 -315,0.11205305904150009 -316,0.11145574599504471 -317,0.11091195046901703 -318,0.11040235310792923 -319,0.10989174246788025 -320,0.10932781547307968 -321,0.10876872390508652 -322,0.10823240131139755 -323,0.10772702097892761 -324,0.1072707325220108 -325,0.10685770213603973 -326,0.1064424142241478 -327,0.10602767765522003 -328,0.10562604665756226 -329,0.10523924976587296 -330,0.10487111657857895 -331,0.10451467335224152 -332,0.10416334122419357 -333,0.10382061451673508 -334,0.10348455607891083 -335,0.1031564399600029 -336,0.10282458364963531 -337,0.10250108689069748 -338,0.10218198597431183 -339,0.10186611115932465 -340,0.10157179832458496 -341,0.10126253217458725 -342,0.10093975067138672 -343,0.10064413398504257 -344,0.10034485906362534 -345,0.10003858804702759 -346,0.0997273176908493 -347,0.0994362160563469 -348,0.09913880378007889 -349,0.09882890433073044 -350,0.09855435788631439 -351,0.09827521443367004 -352,0.09799910336732864 -353,0.09780438989400864 -354,0.09782308340072632 -355,0.09847643971443176 -356,0.1056293249130249 -357,0.11505243182182312 -358,0.10640937834978104 -359,0.1021886095404625 -360,0.11200325936079025 -361,0.09878089278936386 -362,0.1100262776017189 -363,0.09693411737680435 -364,0.1053338497877121 -365,0.09865621477365494 -366,0.10026420652866364 -367,0.10205325484275818 -368,0.09679803252220154 -369,0.10253161936998367 -370,0.09587865322828293 -371,0.09996432065963745 -372,0.09590591490268707 -373,0.09558858722448349 -374,0.09718704223632812 -375,0.09456142038106918 -376,0.09499874711036682 -377,0.09517079591751099 -378,0.09322220087051392 -379,0.09354712069034576 -380,0.09343317151069641 -381,0.0920272022485733 -382,0.09254401177167892 -383,0.09184865653514862 -384,0.09118299931287766 -385,0.09153498709201813 -386,0.090630903840065 -387,0.09024547040462494 -388,0.09037966281175613 -389,0.08971808105707169 -390,0.0894712284207344 -391,0.0894567221403122 -392,0.08892960101366043 -393,0.08867910504341125 -394,0.08863229304552078 -395,0.08824539184570312 -396,0.08783677220344543 -397,0.08771409839391708 -398,0.08746860921382904 -399,0.08714945614337921 -400,0.08699287474155426 -401,0.08673449605703354 -402,0.08639395982027054 -403,0.08618626743555069 -404,0.0859818235039711 -405,0.08567088097333908 -406,0.08541896194219589 -407,0.0852086991071701 -408,0.08494356274604797 -409,0.08467201143503189 -410,0.08444078266620636 -411,0.08416704833507538 -412,0.08378222584724426 -413,0.08351682126522064 -414,0.08328680694103241 -415,0.08303683996200562 -416,0.08275503665208817 -417,0.08247821033000946 -418,0.08222608268260956 -419,0.08198237419128418 -420,0.0817299485206604 -421,0.08146525174379349 -422,0.08119648694992065 -423,0.08093801885843277 -424,0.08069407194852829 -425,0.08045679330825806 -426,0.08021438866853714 -427,0.07997167855501175 
-428,0.07973729074001312 -429,0.07951075583696365 -430,0.07928643375635147 -431,0.079060859978199 -432,0.07883626222610474 -433,0.07861625403165817 -434,0.0783989280462265 -435,0.07818058878183365 -436,0.07795927673578262 -437,0.07773738354444504 -438,0.07751461863517761 -439,0.07729187607765198 -440,0.07706984877586365 -441,0.07684812694787979 -442,0.07662354409694672 -443,0.07639548927545547 -444,0.07616666704416275 -445,0.07594054192304611 -446,0.0757138729095459 -447,0.07548559457063675 -448,0.07525750994682312 -449,0.07502851635217667 -450,0.07479733973741531 -451,0.07456525415182114 -452,0.074330173432827 -453,0.07409750670194626 -454,0.07386494427919388 -455,0.07362892478704453 -456,0.07339099049568176 -457,0.07315222918987274 -458,0.07291189581155777 -459,0.0726703405380249 -460,0.07242754101753235 -461,0.07218515127897263 -462,0.07193969935178757 -463,0.07169722020626068 -464,0.0714578926563263 -465,0.07122492790222168 -466,0.07099409401416779 -467,0.07076359540224075 -468,0.07053706794977188 -469,0.07030791789293289 -470,0.07007899135351181 -471,0.06984671950340271 -472,0.06961609423160553 -473,0.06938271224498749 -474,0.06917078793048859 -475,0.06894270330667496 -476,0.0687343031167984 -477,0.06852459162473679 -478,0.06831394135951996 -479,0.0681147351861 -480,0.06790097057819366 -481,0.06770362704992294 -482,0.06750714033842087 -483,0.06731196492910385 -484,0.06712102890014648 -485,0.06695622950792313 -486,0.06675362586975098 -487,0.066578209400177 -488,0.06639880686998367 -489,0.06621917337179184 -490,0.06603549420833588 -491,0.0658557191491127 -492,0.06567537039518356 -493,0.06550787389278412 -494,0.06532400101423264 -495,0.0651567354798317 -496,0.06498395651578903 -497,0.06481389701366425 -498,0.06466677784919739 -499,0.06448972225189209 -500,0.06433647125959396 -501,0.06417415291070938 -502,0.06403109431266785 -503,0.06385841220617294 -504,0.06370634585618973 -505,0.06354527920484543 -506,0.06339747458696365 -507,0.06324315071105957 -508,0.06309844553470612 -509,0.06294821202754974 -510,0.06281164288520813 -511,0.06266988068819046 -512,0.06253587454557419 -513,0.0624069944024086 -514,0.062268394976854324 -515,0.062131885439157486 -516,0.06200748682022095 -517,0.06187240779399872 -518,0.061744511127471924 -519,0.06160819157958031 -520,0.06149197742342949 -521,0.061355456709861755 -522,0.061240289360284805 -523,0.06110193952918053 -524,0.060973893851041794 -525,0.06086275354027748 -526,0.06071890518069267 -527,0.060596708208322525 -528,0.060476191341876984 -529,0.06034807115793228 -530,0.0602169893682003 -531,0.060105886310338974 -532,0.059969719499349594 -533,0.059846989810466766 -534,0.059720948338508606 -535,0.05960794910788536 -536,0.059482526034116745 -537,0.059351131319999695 -538,0.059226784855127335 -539,0.05910122022032738 -540,0.05898528918623924 -541,0.05884978547692299 -542,0.058728765696287155 -543,0.05862128362059593 -544,0.05848594009876251 -545,0.05836868658661842 -546,0.05825089290738106 -547,0.05812731757760048 -548,0.05800085514783859 -549,0.05788673087954521 -550,0.05776481330394745 -551,0.05764942616224289 -552,0.05750855430960655 -553,0.05738573148846626 -554,0.05726412683725357 -555,0.05713735148310661 -556,0.05701103061437607 -557,0.05691200867295265 -558,0.056786466389894485 -559,0.05665066838264465 -560,0.05652833729982376 -561,0.0564066544175148 -562,0.05628528445959091 -563,0.056150708347558975 -564,0.056030746549367905 -565,0.05591334030032158 -566,0.05578182637691498 -567,0.05564816668629646 -568,0.05552239343523979 -569,0.055392466485500336 
-570,0.05527609959244728 -571,0.05513827130198479 -572,0.05502357706427574 -573,0.054882895201444626 -574,0.05474912375211716 -575,0.05462072789669037 -576,0.05448393523693085 -577,0.054356690496206284 -578,0.05422304570674896 -579,0.05408026650547981 -580,0.05396875739097595 -581,0.05383899435400963 -582,0.053689032793045044 -583,0.05355677008628845 -584,0.05341767519712448 -585,0.05328409746289253 -586,0.05315057188272476 -587,0.053018178790807724 -588,0.05287063866853714 -589,0.0527559332549572 -590,0.052605707198381424 -591,0.05246114358305931 -592,0.0523342601954937 -593,0.05220416933298111 -594,0.052074357867240906 -595,0.051946379244327545 -596,0.05182287096977234 -597,0.051698509603738785 -598,0.05158260837197304 -599,0.05146037042140961 -600,0.05134071037173271 -601,0.05123370513319969 -602,0.05111226066946983 -603,0.05101848393678665 -604,0.050905052572488785 -605,0.05078878998756409 -606,0.05071059986948967 -607,0.05059848353266716 -608,0.050478145480155945 -609,0.0503883957862854 -610,0.05027563497424126 -611,0.050173062831163406 -612,0.050078608095645905 -613,0.049979958683252335 -614,0.0498879998922348 -615,0.04979162663221359 -616,0.049702271819114685 -617,0.04961073026061058 -618,0.049519069492816925 -619,0.049427639693021774 -620,0.04933454096317291 -621,0.049250271171331406 -622,0.04915637895464897 -623,0.049072857946157455 -624,0.04899394512176514 -625,0.048909563571214676 -626,0.048828814178705215 -627,0.04874727502465248 -628,0.048669129610061646 -629,0.04859035462141037 -630,0.04851929098367691 -631,0.04843743517994881 -632,0.04836305230855942 -633,0.04828884080052376 -634,0.04821138083934784 -635,0.048138950020074844 -636,0.04806818068027496 -637,0.048000965267419815 -638,0.04793200269341469 -639,0.047860950231552124 -640,0.047791969031095505 -641,0.047723155468702316 -642,0.04765358194708824 -643,0.04758884012699127 -644,0.04751787707209587 -645,0.04745214432477951 -646,0.04738302156329155 -647,0.047318074852228165 -648,0.04725397378206253 -649,0.04718957096338272 -650,0.04712262004613876 -651,0.04706168919801712 -652,0.04699520021677017 -653,0.04693502560257912 -654,0.046887949109077454 -655,0.046815428882837296 -656,0.04674660041928291 -657,0.046690672636032104 -658,0.04665425792336464 -659,0.0465933158993721 -660,0.04671859368681908 -661,0.04681093990802765 -662,0.04678025841712952 -663,0.046704135835170746 -664,0.0466618612408638 -665,0.04663052782416344 -666,0.0465521514415741 -667,0.04639860987663269 -668,0.04622175917029381 -669,0.04606863111257553 -670,0.04646015539765358 -671,0.05585019290447235 -672,0.04926440492272377 -673,0.0526503250002861 -674,0.055081941187381744 -675,0.05303095653653145 -676,0.05391188710927963 -677,0.05359986424446106 -678,0.05329771712422371 -679,0.05401681736111641 -680,0.05421125516295433 -681,0.05350282043218613 -682,0.052273593842983246 -683,0.051298923790454865 -684,0.05130595713853836 -685,0.051533546298742294 -686,0.05095672234892845 -687,0.050272028893232346 -688,0.049609217792749405 -689,0.04882005602121353 -690,0.04820878058671951 -691,0.047926370054483414 -692,0.04774542897939682 -693,0.04741239920258522 -694,0.04697604477405548 -695,0.046592336148023605 -696,0.046422045677900314 -697,0.0463043749332428 -698,0.046116724610328674 -699,0.04589196667075157 -700,0.04569622129201889 -701,0.045549023896455765 -702,0.04540782794356346 -703,0.045238081365823746 -704,0.045096080750226974 -705,0.04505108296871185 -706,0.04507457837462425 -707,0.045004311949014664 -708,0.04482525214552879 -709,0.04465813189744949 
-710,0.0445234440267086 -711,0.044421736150979996 -712,0.0443461537361145 -713,0.04428767040371895 -714,0.04423549026250839 -715,0.044174667447805405 -716,0.04409448057413101 -717,0.04399801790714264 -718,0.04390699416399002 -719,0.043832533061504364 -720,0.043770212680101395 -721,0.04370071738958359 -722,0.04361598193645477 -723,0.043527789413928986 -724,0.043449632823467255 -725,0.04337634891271591 -726,0.04330534115433693 -727,0.04324546828866005 -728,0.04319378733634949 -729,0.043141722679138184 -730,0.04308753088116646 -731,0.0430351085960865 -732,0.04298413544893265 -733,0.042929090559482574 -734,0.04286811873316765 -735,0.04280740022659302 -736,0.04275260120630264 -737,0.04270390793681145 -738,0.04265739396214485 -739,0.04261336848139763 -740,0.04257155954837799 -741,0.04253014177083969 -742,0.042490459978580475 -743,0.04245481640100479 -744,0.04242141544818878 -745,0.0423869751393795 -746,0.04234927520155907 -747,0.04230770841240883 -748,0.04226262867450714 -749,0.04221523925662041 -750,0.04216734692454338 -751,0.04212070629000664 -752,0.04207589477300644 -753,0.042033009231090546 -754,0.041991930454969406 -755,0.04195258766412735 -756,0.04191465303301811 -757,0.04187694936990738 -758,0.0418386310338974 -759,0.04180016368627548 -760,0.04176199436187744 -761,0.041724834591150284 -762,0.041687656193971634 -763,0.0416504368185997 -764,0.0416126511991024 -765,0.041574254631996155 -766,0.04153520613908768 -767,0.04149511083960533 -768,0.04145316779613495 -769,0.04140932857990265 -770,0.0413639210164547 -771,0.04131730645895004 -772,0.04126955196261406 -773,0.04122079908847809 -774,0.04117174074053764 -775,0.041123080998659134 -776,0.04107503592967987 -777,0.04102640599012375 -778,0.04097723588347435 -779,0.040925249457359314 -780,0.04086943343281746 -781,0.04081057012081146 -782,0.040749795734882355 -783,0.04069072753190994 -784,0.040637340396642685 -785,0.04059167578816414 -786,0.04054589197039604 -787,0.040493398904800415 -788,0.0404391810297966 -789,0.040388014167547226 -790,0.04033682122826576 -791,0.04028410091996193 -792,0.040228694677352905 -793,0.04017321765422821 -794,0.04011817276477814 -795,0.0400657020509243 -796,0.04001792520284653 -797,0.039975252002477646 -798,0.039935048669576645 -799,0.03989516198635101 -800,0.039854928851127625 -801,0.03981366381049156 -802,0.03976987302303314 -803,0.039725109934806824 -804,0.03968273475766182 -805,0.03964226320385933 -806,0.03960220888257027 -807,0.0395628847181797 -808,0.03952477499842644 -809,0.039485543966293335 -810,0.0394449308514595 -811,0.03940441086888313 -812,0.03936489298939705 -813,0.03932599350810051 -814,0.039287593215703964 -815,0.03924994543194771 -816,0.03921262174844742 -817,0.039175260812044144 -818,0.03913797438144684 -819,0.039101291447877884 -820,0.03906523436307907 -821,0.0390300452709198 -822,0.03899548947811127 -823,0.03896094486117363 -824,0.03892632946372032 -825,0.03889211639761925 -826,0.038858022540807724 -827,0.038823895156383514 -828,0.038789622485637665 -829,0.038755618035793304 -830,0.038721758872270584 -831,0.03868837282061577 -832,0.038655299693346024 -833,0.03862248733639717 -834,0.038589488714933395 -835,0.03855666518211365 -836,0.03852367773652077 -837,0.03849070891737938 -838,0.03845783695578575 -839,0.038425203412771225 -840,0.038392867892980576 -841,0.03836055099964142 -842,0.03832770138978958 -843,0.038294751197099686 -844,0.03826173022389412 -845,0.03822850435972214 -846,0.03819591924548149 -847,0.03816382586956024 -848,0.038133468478918076 -849,0.03810131922364235 -850,0.03807027265429497 
-851,0.038038164377212524 -852,0.03800741210579872 -853,0.03797541931271553 -854,0.03794510290026665 -855,0.03791266307234764 -856,0.03788205608725548 -857,0.037853606045246124 -858,0.03782101720571518 -859,0.03779379278421402 -860,0.037762366235256195 -861,0.03773275762796402 -862,0.03769911825656891 -863,0.03766960650682449 -864,0.03763691335916519 -865,0.03760446980595589 -866,0.03757816553115845 -867,0.03754333406686783 -868,0.03751282021403313 -869,0.03747962415218353 -870,0.03744790330529213 -871,0.037413280457258224 -872,0.03738410398364067 -873,0.03735453262925148 -874,0.037322577089071274 -875,0.03729360178112984 -876,0.037265945225954056 -877,0.03723886236548424 -878,0.03720979765057564 -879,0.037181515246629715 -880,0.03715084120631218 -881,0.03711822256445885 -882,0.03708641603589058 -883,0.0370568111538887 -884,0.03702471777796745 -885,0.03699734807014465 -886,0.036971043795347214 -887,0.03694045916199684 -888,0.03691176697611809 -889,0.03688088804483414 -890,0.03684918209910393 -891,0.03681601956486702 -892,0.036791738122701645 -893,0.03675706684589386 -894,0.03672393411397934 -895,0.03669808804988861 -896,0.03666038066148758 -897,0.036629535257816315 -898,0.03660079836845398 -899,0.0365685410797596 -900,0.03652559220790863 -901,0.03650059923529625 -902,0.0364731028676033 -903,0.036436960101127625 -904,0.03640742227435112 -905,0.03637754172086716 -906,0.036340974271297455 -907,0.036318715661764145 -908,0.036291513592004776 -909,0.03626544773578644 -910,0.03623862937092781 -911,0.03621362894773483 -912,0.036188773810863495 -913,0.036162253469228745 -914,0.03613375127315521 -915,0.036104753613471985 -916,0.03607689216732979 -917,0.036049168556928635 -918,0.036021869629621506 -919,0.035997241735458374 -920,0.03597201406955719 -921,0.035945575684309006 -922,0.0359201654791832 -923,0.035895250737667084 -924,0.035869304090738297 -925,0.03584425896406174 -926,0.03581981733441353 -927,0.03579478710889816 -928,0.03576808422803879 -929,0.03574250265955925 -930,0.03571736440062523 -931,0.03569147363305092 -932,0.035664740949869156 -933,0.035638995468616486 -934,0.03561295196413994 -935,0.03558703139424324 -936,0.035562533885240555 -937,0.03553593158721924 -938,0.03551003336906433 -939,0.03548308089375496 -940,0.0354578010737896 -941,0.03543710708618164 -942,0.03540678322315216 -943,0.035387638956308365 -944,0.035371508449316025 -945,0.03534694015979767 -946,0.0353502482175827 -947,0.0352921187877655 -948,0.03526119515299797 -949,0.03522704169154167 -950,0.03520023822784424 -951,0.035171907395124435 -952,0.03517032042145729 -953,0.03512062504887581 -954,0.035094644874334335 -955,0.0350741446018219 -956,0.03508203104138374 -957,0.035151977092027664 -958,0.03501451015472412 -959,0.034980617463588715 -960,0.03507988154888153 -961,0.03506078943610191 -962,0.03492956981062889 -963,0.04024307057261467 -964,0.06197436898946762 -965,0.06592713296413422 -966,0.06663034111261368 -967,0.05920100584626198 -968,0.05805770680308342 -969,0.050290077924728394 -970,0.04081105813384056 -971,0.03564972057938576 -972,0.034189071506261826 -973,0.033124905079603195 -974,0.0340019166469574 -975,0.03565043956041336 -976,0.0366227962076664 -977,0.03649352863430977 -978,0.03577255457639694 -979,0.035188931971788406 -980,0.034849632531404495 -981,0.034683793783187866 -982,0.034548673778772354 -983,0.034333594143390656 -984,0.03400572016835213 -985,0.033569492399692535 -986,0.033043328672647476 -987,0.0324997678399086 -988,0.03202296420931816 -989,0.031666964292526245 -990,0.031407829374074936 
-991,0.03119562566280365 -992,0.030938241630792618 -993,0.030581947416067123 -994,0.03015807643532753 -995,0.02973311021924019 -996,0.029406055808067322 -997,0.029187273234128952 -998,0.02903948351740837 -999,0.02892293967306614 -1000,0.02880704030394554 diff --git a/training/time_weighting/inverse_squared/training_config.txt b/training/time_weighting/inverse_squared/training_config.txt deleted file mode 100644 index 51ccad7..0000000 --- a/training/time_weighting/inverse_squared/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0.0001 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting/inverse_squared/training_log.csv b/training/time_weighting/inverse_squared/training_log.csv deleted file mode 100644 index 22e8490..0000000 --- a/training/time_weighting/inverse_squared/training_log.csv +++ /dev/null @@ -1,1002 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,12.731148719787598 -2,6.7973246574401855 -3,4.645661354064941 -4,3.0446767807006836 -5,2.352785587310791 -6,2.082245349884033 -7,1.9553306102752686 -8,1.8541113138198853 -9,1.7611404657363892 -10,1.6823941469192505 -11,1.624497890472412 -12,1.5689671039581299 -13,1.5183813571929932 -14,1.4705162048339844 -15,1.4309831857681274 -16,1.397046685218811 -17,1.3773638010025024 -18,1.3585766553878784 -19,1.3408907651901245 -20,1.3240480422973633 -21,1.3080179691314697 -22,1.2929450273513794 -23,1.2788549661636353 
-24,1.2657957077026367 -25,1.2538055181503296 -26,1.2428832054138184 -27,1.2331387996673584 -28,1.225016474723816 -29,1.2184417247772217 -30,1.213083028793335 -31,1.2094147205352783 -32,1.2046443223953247 -33,1.2072964906692505 -34,1.2077635526657104 -35,1.2020316123962402 -36,1.1968995332717896 -37,1.1964377164840698 -38,1.1964952945709229 -39,1.196899652481079 -40,1.1972837448120117 -41,1.197258472442627 -42,1.1966689825057983 -43,1.1957367658615112 -44,1.1947888135910034 -45,1.1939033269882202 -46,1.19310462474823 -47,1.1924728155136108 -48,1.1921300888061523 -49,1.1920268535614014 -50,1.1918525695800781 -51,1.1914008855819702 -52,1.1909064054489136 -53,1.1905262470245361 -54,1.1902412176132202 -55,1.1900098323822021 -56,1.189842700958252 -57,1.189703345298767 -58,1.18955659866333 -59,1.189378023147583 -60,1.1891506910324097 -61,1.1888829469680786 -62,1.1886165142059326 -63,1.1884263753890991 -64,1.1882954835891724 -65,1.1881365776062012 -66,1.1879377365112305 -67,1.1877293586730957 -68,1.1875718832015991 -69,1.1874847412109375 -70,1.1874232292175293 -71,1.1873157024383545 -72,1.187163233757019 -73,1.1870129108428955 -74,1.1868915557861328 -75,1.186808705329895 -76,1.1867482662200928 -77,1.1866766214370728 -78,1.1865801811218262 -79,1.18648362159729 -80,1.1864091157913208 -81,1.186334490776062 -82,1.1862585544586182 -83,1.186172366142273 -84,1.186081886291504 -85,1.1860065460205078 -86,1.185936450958252 -87,1.185871958732605 -88,1.185821533203125 -89,1.1857693195343018 -90,1.1857136487960815 -91,1.1856518983840942 -92,1.1855831146240234 -93,1.185532569885254 -94,1.185483694076538 -95,1.1854243278503418 -96,1.1853581666946411 -97,1.1853116750717163 -98,1.1852569580078125 -99,1.1851909160614014 -100,1.1851246356964111 -101,1.1850709915161133 -102,1.1850121021270752 -103,1.1849490404129028 -104,1.184897541999817 -105,1.184840202331543 -106,1.1847801208496094 -107,1.1847294569015503 -108,1.184678316116333 -109,1.1846286058425903 -110,1.184582233428955 -111,1.1845331192016602 -112,1.184483289718628 -113,1.1844333410263062 -114,1.1843854188919067 -115,1.18433678150177 -116,1.1842888593673706 -117,1.1842421293258667 -118,1.1841976642608643 -119,1.1841524839401245 -120,1.1841095685958862 -121,1.1840652227401733 -122,1.1840230226516724 -123,1.1839812994003296 -124,1.1839417219161987 -125,1.1839029788970947 -126,1.183863639831543 -127,1.1838252544403076 -128,1.1837877035140991 -129,1.1837499141693115 -130,1.1837124824523926 -131,1.1836761236190796 -132,1.1836411952972412 -133,1.1836071014404297 -134,1.183573603630066 -135,1.1835414171218872 -136,1.1835105419158936 -137,1.1834802627563477 -138,1.1834508180618286 -139,1.1834224462509155 -140,1.1833938360214233 -141,1.183365821838379 -142,1.1833380460739136 -143,1.1833102703094482 -144,1.1832826137542725 -145,1.1832555532455444 -146,1.183228611946106 -147,1.183201789855957 -148,1.1831752061843872 -149,1.183148741722107 -150,1.1831222772598267 -151,1.1830955743789673 -152,1.1830695867538452 -153,1.183043122291565 -154,1.1830171346664429 -155,1.182989478111267 -156,1.1829614639282227 -157,1.1829334497451782 -158,1.1829055547714233 -159,1.1828781366348267 -160,1.1828505992889404 -161,1.1828230619430542 -162,1.1827954053878784 -163,1.182767629623413 -164,1.1827396154403687 -165,1.1827105283737183 -166,1.182681918144226 -167,1.1826539039611816 -168,1.182625651359558 -169,1.1825947761535645 -170,1.182569146156311 -171,1.1825374364852905 -172,1.1825095415115356 -173,1.182474970817566 -174,1.182445764541626 -175,1.1824114322662354 -176,1.1823821067810059 
-177,1.1823476552963257 -178,1.1823186874389648 -179,1.1822844743728638 -180,1.1822547912597656 -181,1.1822195053100586 -182,1.1821885108947754 -183,1.1821552515029907 -184,1.1821248531341553 -185,1.1820893287658691 -186,1.1820604801177979 -187,1.18202543258667 -188,1.1820158958435059 -189,1.1819791793823242 -190,1.181923270225525 -191,1.1819180250167847 -192,1.1818877458572388 -193,1.1818240880966187 -194,1.1817960739135742 -195,1.181767463684082 -196,1.181726336479187 -197,1.1816937923431396 -198,1.1816792488098145 -199,1.1816295385360718 -200,1.1815844774246216 -201,1.1815592050552368 -202,1.1815177202224731 -203,1.1814849376678467 -204,1.1814608573913574 -205,1.1814393997192383 -206,1.1813908815383911 -207,1.1813677549362183 -208,1.181335210800171 -209,1.1813119649887085 -210,1.1812819242477417 -211,1.181258201599121 -212,1.1812204122543335 -213,1.1811976432800293 -214,1.1811726093292236 -215,1.1811316013336182 -216,1.1811184883117676 -217,1.18108332157135 -218,1.1810673475265503 -219,1.1810252666473389 -220,1.180986762046814 -221,1.1809684038162231 -222,1.1809320449829102 -223,1.1809170246124268 -224,1.1808730363845825 -225,1.1808522939682007 -226,1.1808120012283325 -227,1.180790662765503 -228,1.1807615756988525 -229,1.1807334423065186 -230,1.18069589138031 -231,1.1806716918945312 -232,1.1806422472000122 -233,1.1806117296218872 -234,1.1805845499038696 -235,1.180554986000061 -236,1.1805237531661987 -237,1.1804893016815186 -238,1.1804615259170532 -239,1.1804330348968506 -240,1.180401086807251 -241,1.180366039276123 -242,1.180338740348816 -243,1.1803154945373535 -244,1.1802889108657837 -245,1.1802600622177124 -246,1.1802312135696411 -247,1.1802068948745728 -248,1.1801784038543701 -249,1.1801515817642212 -250,1.1801284551620483 -251,1.1801007986068726 -252,1.1800777912139893 -253,1.1800507307052612 -254,1.1800227165222168 -255,1.1799947023391724 -256,1.1799709796905518 -257,1.1799496412277222 -258,1.1799222230911255 -259,1.1798999309539795 -260,1.179877519607544 -261,1.1798553466796875 -262,1.179825782775879 -263,1.1798053979873657 -264,1.179787278175354 -265,1.1797603368759155 -266,1.1797324419021606 -267,1.1797099113464355 -268,1.1796942949295044 -269,1.1796631813049316 -270,1.1796462535858154 -271,1.1796258687973022 -272,1.1796051263809204 -273,1.179574728012085 -274,1.1795543432235718 -275,1.1795412302017212 -276,1.1795125007629395 -277,1.1794911623001099 -278,1.1794699430465698 -279,1.179457426071167 -280,1.1794317960739136 -281,1.1794120073318481 -282,1.179395079612732 -283,1.1793830394744873 -284,1.1793628931045532 -285,1.1793444156646729 -286,1.1793279647827148 -287,1.1793124675750732 -288,1.1792925596237183 -289,1.1792709827423096 -290,1.1792529821395874 -291,1.179240107536316 -292,1.1792179346084595 -293,1.1791973114013672 -294,1.1791791915893555 -295,1.1791625022888184 -296,1.1791445016860962 -297,1.1791237592697144 -298,1.17910635471344 -299,1.179092526435852 -300,1.1790779829025269 -301,1.1790621280670166 -302,1.17904531955719 -303,1.1790329217910767 -304,1.1790204048156738 -305,1.1790032386779785 -306,1.1789889335632324 -307,1.1789779663085938 -308,1.1789649724960327 -309,1.178948998451233 -310,1.1789357662200928 -311,1.1789227724075317 -312,1.1789095401763916 -313,1.1788934469223022 -314,1.1788791418075562 -315,1.1788679361343384 -316,1.1788489818572998 -317,1.1788328886032104 -318,1.1788228750228882 -319,1.1788023710250854 -320,1.1787933111190796 -321,1.1787775754928589 -322,1.1787573099136353 -323,1.1787434816360474 -324,1.178734540939331 -325,1.1787109375 
-326,1.178696632385254 -327,1.1786901950836182 -328,1.1786680221557617 -329,1.1786545515060425 -330,1.1786469221115112 -331,1.1786255836486816 -332,1.178619623184204 -333,1.1786139011383057 -334,1.1785892248153687 -335,1.1785820722579956 -336,1.178580403327942 -337,1.1785510778427124 -338,1.1785398721694946 -339,1.1785379648208618 -340,1.178508996963501 -341,1.1785037517547607 -342,1.178501009941101 -343,1.1784718036651611 -344,1.1784672737121582 -345,1.1784601211547852 -346,1.1784287691116333 -347,1.1784225702285767 -348,1.17841637134552 -349,1.1783907413482666 -350,1.1783883571624756 -351,1.1783751249313354 -352,1.1783527135849 -353,1.1783503293991089 -354,1.1783348321914673 -355,1.1783132553100586 -356,1.178309440612793 -357,1.1782970428466797 -358,1.178271770477295 -359,1.178269863128662 -360,1.178259015083313 -361,1.1782320737838745 -362,1.1782302856445312 -363,1.1782182455062866 -364,1.1781954765319824 -365,1.1781922578811646 -366,1.1781822443008423 -367,1.1781619787216187 -368,1.1781606674194336 -369,1.1781498193740845 -370,1.1781266927719116 -371,1.1781258583068848 -372,1.178116798400879 -373,1.1780883073806763 -374,1.1780850887298584 -375,1.1780794858932495 -376,1.1780508756637573 -377,1.1780493259429932 -378,1.1780427694320679 -379,1.1780153512954712 -380,1.17801833152771 -381,1.1780108213424683 -382,1.1779817342758179 -383,1.1779829263687134 -384,1.1779755353927612 -385,1.1779484748840332 -386,1.1779491901397705 -387,1.177943229675293 -388,1.177916407585144 -389,1.1779189109802246 -390,1.1779234409332275 -391,1.1778837442398071 -392,1.1778790950775146 -393,1.1778826713562012 -394,1.177863359451294 -395,1.1778496503829956 -396,1.1778539419174194 -397,1.1778446435928345 -398,1.1778467893600464 -399,1.177834153175354 -400,1.177814245223999 -401,1.1778088808059692 -402,1.1778006553649902 -403,1.1777833700180054 -404,1.1777737140655518 -405,1.1777852773666382 -406,1.1777606010437012 -407,1.1777467727661133 -408,1.1777466535568237 -409,1.1777477264404297 -410,1.1777265071868896 -411,1.1777353286743164 -412,1.177730679512024 -413,1.177718162536621 -414,1.1777150630950928 -415,1.1776914596557617 -416,1.1776885986328125 -417,1.1776764392852783 -418,1.1776880025863647 -419,1.1776678562164307 -420,1.177666425704956 -421,1.1777077913284302 -422,1.1776589155197144 -423,1.1776533126831055 -424,1.1776443719863892 -425,1.1776416301727295 -426,1.1776235103607178 -427,1.1776151657104492 -428,1.1776270866394043 -429,1.1776080131530762 -430,1.177627682685852 -431,1.1776456832885742 -432,1.1776201725006104 -433,1.1775903701782227 -434,1.1776143312454224 -435,1.1776577234268188 -436,1.1775845289230347 -437,1.177594780921936 -438,1.1775712966918945 -439,1.177573323249817 -440,1.1775739192962646 -441,1.1775562763214111 -442,1.1775574684143066 -443,1.1775479316711426 -444,1.1775404214859009 -445,1.177539587020874 -446,1.1775108575820923 -447,1.1775130033493042 -448,1.1774765253067017 -449,1.1774615049362183 -450,1.177445411682129 -451,1.1774219274520874 -452,1.17741060256958 -453,1.1774141788482666 -454,1.1774357557296753 -455,1.177396535873413 -456,1.1774122714996338 -457,1.177430272102356 -458,1.177372932434082 -459,1.1773535013198853 -460,1.1773446798324585 -461,1.1773515939712524 -462,1.1773220300674438 -463,1.1773405075073242 -464,1.1773557662963867 -465,1.1773794889450073 -466,1.1773433685302734 -467,1.1773678064346313 -468,1.1773017644882202 -469,1.1773842573165894 -470,1.1773029565811157 -471,1.1773650646209717 -472,1.177255630493164 -473,1.1773968935012817 -474,1.177280068397522 
-475,1.1773077249526978 -476,1.1772922277450562 -477,1.1772476434707642 -478,1.1773245334625244 -479,1.1772522926330566 -480,1.1772522926330566 -481,1.1772211790084839 -482,1.177233338356018 -483,1.1772264242172241 -484,1.1772019863128662 -485,1.1771924495697021 -486,1.1771821975708008 -487,1.1771750450134277 -488,1.17719566822052 -489,1.17717707157135 -490,1.1771814823150635 -491,1.1771621704101562 -492,1.1771793365478516 -493,1.1771819591522217 -494,1.1771624088287354 -495,1.17717707157135 -496,1.1771589517593384 -497,1.1771620512008667 -498,1.1771585941314697 -499,1.1771360635757446 -500,1.177125096321106 -501,1.1771072149276733 -502,1.177101969718933 -503,1.1770985126495361 -504,1.1770967245101929 -505,1.1770857572555542 -506,1.1770765781402588 -507,1.1770957708358765 -508,1.1771032810211182 -509,1.1770762205123901 -510,1.1770678758621216 -511,1.1770693063735962 -512,1.177098274230957 -513,1.177079677581787 -514,1.1770448684692383 -515,1.1770293712615967 -516,1.177050232887268 -517,1.177037239074707 -518,1.1770187616348267 -519,1.1770414113998413 -520,1.1770284175872803 -521,1.1770095825195312 -522,1.1770216226577759 -523,1.1769968271255493 -524,1.1769837141036987 -525,1.1769829988479614 -526,1.1769726276397705 -527,1.1769671440124512 -528,1.1769658327102661 -529,1.1769570112228394 -530,1.1769522428512573 -531,1.176938533782959 -532,1.1769345998764038 -533,1.1769156455993652 -534,1.176905632019043 -535,1.176910638809204 -536,1.1768995523452759 -537,1.1768943071365356 -538,1.1769189834594727 -539,1.176881194114685 -540,1.1768746376037598 -541,1.1768851280212402 -542,1.1768608093261719 -543,1.176880121231079 -544,1.176944613456726 -545,1.176867127418518 -546,1.1768838167190552 -547,1.1769148111343384 -548,1.176884412765503 -549,1.1768611669540405 -550,1.1768696308135986 -551,1.1768372058868408 -552,1.176924705505371 -553,1.1769100427627563 -554,1.1768964529037476 -555,1.176857352256775 -556,1.176896095275879 -557,1.1768302917480469 -558,1.1769038438796997 -559,1.1768776178359985 -560,1.1768748760223389 -561,1.1768176555633545 -562,1.1768794059753418 -563,1.1768196821212769 -564,1.1768852472305298 -565,1.1768722534179688 -566,1.1768689155578613 -567,1.1768461465835571 -568,1.1769332885742188 -569,1.1767977476119995 -570,1.1768654584884644 -571,1.1768348217010498 -572,1.17680823802948 -573,1.1768039464950562 -574,1.1768020391464233 -575,1.176773190498352 -576,1.1767934560775757 -577,1.1767747402191162 -578,1.1767953634262085 -579,1.176835060119629 -580,1.1768077611923218 -581,1.1768215894699097 -582,1.1767733097076416 -583,1.1768077611923218 -584,1.1768336296081543 -585,1.1767481565475464 -586,1.1767610311508179 -587,1.1767513751983643 -588,1.1767780780792236 -589,1.1767321825027466 -590,1.17673659324646 -591,1.176746129989624 -592,1.1767228841781616 -593,1.1767685413360596 -594,1.1767544746398926 -595,1.1767234802246094 -596,1.1767022609710693 -597,1.1767276525497437 -598,1.1767024993896484 -599,1.1766847372055054 -600,1.176714539527893 -601,1.1766815185546875 -602,1.176694393157959 -603,1.1767091751098633 -604,1.1766679286956787 -605,1.1766750812530518 -606,1.1766839027404785 -607,1.1766752004623413 -608,1.176657795906067 -609,1.1766610145568848 -610,1.1766523122787476 -611,1.1766752004623413 -612,1.1766648292541504 -613,1.1766473054885864 -614,1.1766446828842163 -615,1.1766265630722046 -616,1.17665433883667 -617,1.1766434907913208 -618,1.176627516746521 -619,1.1766231060028076 -620,1.1766254901885986 -621,1.1766471862792969 -622,1.1766334772109985 -623,1.1766334772109985 
-624,1.1766366958618164 -625,1.1766372919082642 -626,1.1766213178634644 -627,1.1766173839569092 -628,1.176596999168396 -629,1.176607370376587 -630,1.1766031980514526 -631,1.1766009330749512 -632,1.1765893697738647 -633,1.17660391330719 -634,1.1766175031661987 -635,1.1765844821929932 -636,1.176576852798462 -637,1.176601767539978 -638,1.1765962839126587 -639,1.1765860319137573 -640,1.1765732765197754 -641,1.1765625476837158 -642,1.1765820980072021 -643,1.1765700578689575 -644,1.1765639781951904 -645,1.1765656471252441 -646,1.176579475402832 -647,1.176572322845459 -648,1.17658269405365 -649,1.1765631437301636 -650,1.176567792892456 -651,1.1765680313110352 -652,1.176548957824707 -653,1.1765515804290771 -654,1.1765562295913696 -655,1.1765706539154053 -656,1.176573395729065 -657,1.1765576601028442 -658,1.1765464544296265 -659,1.176572561264038 -660,1.1765389442443848 -661,1.1765365600585938 -662,1.1765552759170532 -663,1.1765637397766113 -664,1.1765215396881104 -665,1.1765491962432861 -666,1.1765574216842651 -667,1.1765427589416504 -668,1.1765997409820557 -669,1.176563024520874 -670,1.1765848398208618 -671,1.1765323877334595 -672,1.1765625476837158 -673,1.1765780448913574 -674,1.1765820980072021 -675,1.1765191555023193 -676,1.176604986190796 -677,1.176621437072754 -678,1.176600456237793 -679,1.1766139268875122 -680,1.1765562295913696 -681,1.1765527725219727 -682,1.1765433549880981 -683,1.1765910387039185 -684,1.1765159368515015 -685,1.1765410900115967 -686,1.176561713218689 -687,1.176500678062439 -688,1.1765751838684082 -689,1.1765326261520386 -690,1.1765656471252441 -691,1.1765373945236206 -692,1.176530361175537 -693,1.176543951034546 -694,1.176506519317627 -695,1.1765140295028687 -696,1.1765084266662598 -697,1.1764870882034302 -698,1.1764968633651733 -699,1.1765080690383911 -700,1.1764707565307617 -701,1.1764883995056152 -702,1.1765023469924927 -703,1.1764811277389526 -704,1.1764658689498901 -705,1.1764887571334839 -706,1.1764799356460571 -707,1.1764789819717407 -708,1.1764788627624512 -709,1.176458477973938 -710,1.1764968633651733 -711,1.176497459411621 -712,1.1765024662017822 -713,1.1764672994613647 -714,1.1765320301055908 -715,1.176479697227478 -716,1.17650306224823 -717,1.1764463186264038 -718,1.176512598991394 -719,1.1764670610427856 -720,1.1765207052230835 -721,1.1764529943466187 -722,1.1764720678329468 -723,1.1765034198760986 -724,1.176438331604004 -725,1.1764676570892334 -726,1.1764609813690186 -727,1.1764817237854004 -728,1.176429033279419 -729,1.1764613389968872 -730,1.1764174699783325 -731,1.1764674186706543 -732,1.176448106765747 -733,1.1764386892318726 -734,1.1764609813690186 -735,1.1764421463012695 -736,1.1764719486236572 -737,1.1764872074127197 -738,1.1764791011810303 -739,1.1764476299285889 -740,1.176481008529663 -741,1.1764250993728638 -742,1.1764392852783203 -743,1.1764280796051025 -744,1.1764315366744995 -745,1.1764109134674072 -746,1.1764174699783325 -747,1.1764085292816162 -748,1.1764097213745117 -749,1.1764304637908936 -750,1.1764724254608154 -751,1.1764391660690308 -752,1.17643141746521 -753,1.1764616966247559 -754,1.1764148473739624 -755,1.176427960395813 -756,1.1764216423034668 -757,1.1764484643936157 -758,1.176450252532959 -759,1.1764551401138306 -760,1.1764332056045532 -761,1.1764470338821411 -762,1.1764616966247559 -763,1.1765278577804565 -764,1.176495909690857 -765,1.1764382123947144 -766,1.1765916347503662 -767,1.1766451597213745 -768,1.1767231225967407 -769,1.17648446559906 -770,1.1766459941864014 -771,1.1764971017837524 -772,1.1764787435531616 
-773,1.1765156984329224 -774,1.176389455795288 -775,1.1765453815460205 -776,1.1764066219329834 -777,1.1764707565307617 -778,1.1764575242996216 -779,1.17641019821167 -780,1.176548719406128 -781,1.176435112953186 -782,1.176509976387024 -783,1.176476240158081 -784,1.1765198707580566 -785,1.1764932870864868 -786,1.1764519214630127 -787,1.1764330863952637 -788,1.1764237880706787 -789,1.176459550857544 -790,1.1763958930969238 -791,1.1763920783996582 -792,1.1763720512390137 -793,1.1764037609100342 -794,1.176381230354309 -795,1.1763824224472046 -796,1.1763790845870972 -797,1.1763945817947388 -798,1.1764957904815674 -799,1.1763972043991089 -800,1.1764012575149536 -801,1.1763722896575928 -802,1.1763885021209717 -803,1.1764109134674072 -804,1.176344871520996 -805,1.1763371229171753 -806,1.1764029264450073 -807,1.1763548851013184 -808,1.1763324737548828 -809,1.176344394683838 -810,1.1763595342636108 -811,1.1764473915100098 -812,1.1763617992401123 -813,1.1763509511947632 -814,1.1764239072799683 -815,1.176407814025879 -816,1.1763521432876587 -817,1.176356315612793 -818,1.1763916015625 -819,1.1764023303985596 -820,1.1763538122177124 -821,1.1763571500778198 -822,1.1763904094696045 -823,1.17641282081604 -824,1.1764098405838013 -825,1.1763660907745361 -826,1.1763722896575928 -827,1.1763767004013062 -828,1.1763691902160645 -829,1.1763705015182495 -830,1.1763501167297363 -831,1.176396131515503 -832,1.1763336658477783 -833,1.1763874292373657 -834,1.176337480545044 -835,1.1763604879379272 -836,1.1763789653778076 -837,1.1763514280319214 -838,1.176355004310608 -839,1.1763969659805298 -840,1.176357626914978 -841,1.1763625144958496 -842,1.176337718963623 -843,1.1763249635696411 -844,1.1763192415237427 -845,1.1763191223144531 -846,1.1763067245483398 -847,1.1763081550598145 -848,1.1763063669204712 -849,1.17633056640625 -850,1.1764363050460815 -851,1.176334023475647 -852,1.1763792037963867 -853,1.1763741970062256 -854,1.176383376121521 -855,1.1763439178466797 -856,1.1763492822647095 -857,1.1763688325881958 -858,1.1763397455215454 -859,1.1763343811035156 -860,1.1763005256652832 -861,1.1764156818389893 -862,1.1763620376586914 -863,1.176485538482666 -864,1.176329493522644 -865,1.1764229536056519 -866,1.17633056640625 -867,1.1763765811920166 -868,1.176292061805725 -869,1.1763278245925903 -870,1.176349401473999 -871,1.1763051748275757 -872,1.176331639289856 -873,1.1763010025024414 -874,1.176369547843933 -875,1.1763139963150024 -876,1.1763194799423218 -877,1.1763479709625244 -878,1.1764179468154907 -879,1.1764273643493652 -880,1.1763389110565186 -881,1.1763006448745728 -882,1.1763687133789062 -883,1.1763243675231934 -884,1.1763136386871338 -885,1.1763064861297607 -886,1.1763018369674683 -887,1.1762762069702148 -888,1.1762933731079102 -889,1.1762782335281372 -890,1.1763153076171875 -891,1.1763014793395996 -892,1.176313877105713 -893,1.1762654781341553 -894,1.1763283014297485 -895,1.1762906312942505 -896,1.1762993335723877 -897,1.176263689994812 -898,1.176287293434143 -899,1.1763075590133667 -900,1.1763155460357666 -901,1.1762794256210327 -902,1.176307201385498 -903,1.1762999296188354 -904,1.1762938499450684 -905,1.1763006448745728 -906,1.1763176918029785 -907,1.1763503551483154 -908,1.1763898134231567 -909,1.1762877702713013 -910,1.1763319969177246 -911,1.176331639289856 -912,1.176313877105713 -913,1.1762712001800537 -914,1.176283597946167 -915,1.1763218641281128 -916,1.176374912261963 -917,1.1762945652008057 -918,1.1763091087341309 -919,1.1763452291488647 -920,1.1762821674346924 -921,1.1763641834259033 
-922,1.1762958765029907 -923,1.1763898134231567 -924,1.1763499975204468 -925,1.176422119140625 -926,1.1762447357177734 -927,1.1764105558395386 -928,1.176247477531433 -929,1.176329493522644 -930,1.1762460470199585 -931,1.176285743713379 -932,1.1762441396713257 -933,1.1763675212860107 -934,1.1762603521347046 -935,1.176275610923767 -936,1.1762688159942627 -937,1.1762408018112183 -938,1.1762932538986206 -939,1.1762275695800781 -940,1.17622709274292 -941,1.1762248277664185 -942,1.1762101650238037 -943,1.1762335300445557 -944,1.1762093305587769 -945,1.1762406826019287 -946,1.1762187480926514 -947,1.1762516498565674 -948,1.176220417022705 -949,1.1762586832046509 -950,1.176255226135254 -951,1.1763333082199097 -952,1.1762582063674927 -953,1.1762710809707642 -954,1.17623770236969 -955,1.1762627363204956 -956,1.1762237548828125 -957,1.1762230396270752 -958,1.1762279272079468 -959,1.1762083768844604 -960,1.1762230396270752 -961,1.1762001514434814 -962,1.1762930154800415 -963,1.176255226135254 -964,1.1762012243270874 -965,1.1762460470199585 -966,1.1762282848358154 -967,1.1761929988861084 -968,1.176210880279541 -969,1.1762021780014038 -970,1.176207423210144 -971,1.1762053966522217 -972,1.1761963367462158 -973,1.1763392686843872 -974,1.1762292385101318 -975,1.1762381792068481 -976,1.1763156652450562 -977,1.1762586832046509 -978,1.1761906147003174 -979,1.1762728691101074 -980,1.17621648311615 -981,1.17622709274292 -982,1.176188349723816 -983,1.176283597946167 -984,1.1762441396713257 -985,1.1762584447860718 -986,1.176232099533081 -987,1.1762648820877075 -988,1.1762281656265259 -989,1.176284670829773 -990,1.1762144565582275 -991,1.1762175559997559 -992,1.1762233972549438 -993,1.176224946975708 -994,1.176167368888855 -995,1.1762092113494873 -996,1.1761949062347412 -997,1.1761858463287354 -998,1.1761645078659058 -999,1.176212191581726 -1000,1.1762003898620605 diff --git a/training/time_weighting/inverse_squared_mirrored/training_config.txt b/training/time_weighting/inverse_squared_mirrored/training_config.txt deleted file mode 100644 index db56605..0000000 --- a/training/time_weighting/inverse_squared_mirrored/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0.0001 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] 
-[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting/inverse_squared_mirrored/training_log.csv b/training/time_weighting/inverse_squared_mirrored/training_log.csv deleted file mode 100644 index c30425d..0000000 --- a/training/time_weighting/inverse_squared_mirrored/training_log.csv +++ /dev/null @@ -1,1002 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,54.768924713134766 -2,65.995849609375 -3,28.8320255279541 -4,12.531310081481934 -5,8.920807838439941 -6,5.447623252868652 -7,5.7000861167907715 -8,5.494168281555176 -9,5.348268508911133 -10,5.2486958503723145 -11,5.121695518493652 -12,4.957922458648682 -13,4.7362189292907715 -14,4.399832248687744 -15,4.304696083068848 -16,4.210378170013428 -17,4.1199493408203125 -18,4.039299488067627 -19,3.964306116104126 -20,3.892474412918091 -21,3.8232898712158203 -22,3.7562763690948486 -23,3.6899638175964355 -24,3.6242237091064453 -25,3.558915138244629 -26,3.4935922622680664 -27,3.42710018157959 -28,3.3577613830566406 -29,3.284367084503174 -30,3.2027666568756104 -31,3.106908082962036 -32,2.973788022994995 -33,2.79266619682312 -34,2.5641098022460938 -35,2.400299549102783 -36,2.293978691101074 -37,2.2130889892578125 -38,2.141448974609375 -39,2.0754470825195312 -40,2.014449119567871 -41,1.9570987224578857 -42,1.902653455734253 -43,1.8500221967697144 -44,1.7983932495117188 -45,1.7468254566192627 -46,1.693312168121338 -47,1.6345188617706299 -48,1.5665782690048218 -49,1.4915540218353271 -50,1.4199403524398804 -51,1.3588968515396118 -52,1.3067717552185059 -53,1.258798599243164 -54,1.2133901119232178 -55,1.1699862480163574 -56,1.128941297531128 -57,1.0900667905807495 -58,1.052966833114624 -59,1.0175807476043701 -60,0.9838554859161377 -61,0.9516232013702393 -62,0.9206209182739258 -63,0.8903974890708923 -64,0.8602375984191895 -65,0.8295010924339294 -66,0.7984203696250916 -67,0.7690806984901428 -68,0.7447999715805054 -69,0.7267684936523438 -70,0.7137652039527893 -71,0.7040850520133972 -72,0.6961154341697693 -73,0.6889176368713379 -74,0.6819804906845093 -75,0.675029993057251 -76,0.6679157614707947 -77,0.660566508769989 -78,0.6530569791793823 -79,0.645258903503418 -80,0.637169599533081 -81,0.6288663148880005 -82,0.6203665733337402 -83,0.6116905212402344 -84,0.6028931736946106 -85,0.5940271019935608 -86,0.5851433873176575 -87,0.5762900710105896 -88,0.5675404071807861 -89,0.558955729007721 -90,0.550589382648468 -91,0.5424956679344177 -92,0.5347349047660828 -93,0.527384877204895 -94,0.5204676985740662 -95,0.5139321088790894 -96,0.5077510476112366 -97,0.501858651638031 -98,0.49619531631469727 -99,0.49073681235313416 -100,0.4854750633239746 -101,0.48027724027633667 -102,0.47506552934646606 -103,0.4698019325733185 -104,0.46446430683135986 -105,0.4590415358543396 -106,0.4535357654094696 
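
The config above records the time-weighted trajectory loss used for this run: each squared theta-tracking error is scaled by a weight that decays from 1 at t = 0 to min_val at t = t_max. (Note the recorded weight function is the cubic mirrored variant, even though the run directory is named inverse_squared_mirrored.) Below is a minimal self-contained sketch of that computation, assuming a time-major state layout of [t_points, batch, 4] with theta at index 0 and desired_theta at index 3 -- the layout implied by the weights.view(-1, 1) reshape, even though the recorded comments label the axes the other way around.

import torch

def cubic_mirrored(t_span, t_max=None, min_val=0.01):
    # Weights fall from 1 at t = 0 to min_val at t = t_max (cubic, mirrored in time).
    t_max = t_max if t_max is not None else float(t_span[-1])
    return min_val + ((1 - min_val) / t_max**3) * (t_max - t_span) ** 3

def weighted_loss(state_traj, t_span):
    theta = state_traj[:, :, 0]          # [t_points, batch]
    desired_theta = state_traj[:, :, 3]  # [t_points, batch]
    weights = cubic_mirrored(t_span).view(-1, 1)  # [t_points, 1]
    return torch.mean(weights * (theta - desired_theta) ** 2)

# Toy check: 1000 time points (matching "Time Span: 0 to 10, Points: 1000"),
# a batch of 2 trajectories, 4 state variables.
t_span = torch.linspace(0, 10, 1000)
state_traj = torch.randn(1000, 2, 4)
print(weighted_loss(state_traj, t_span))

With this layout the [t_points, 1] weight column broadcasts across the batch dimension, so every trajectory in the batch shares the same time weighting.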
-107,0.4479563236236572 -108,0.4423157274723053 -109,0.43662863969802856 -110,0.43090829253196716 -111,0.42516419291496277 -112,0.4194009602069855 -113,0.41361719369888306 -114,0.40780532360076904 -115,0.401951402425766 -116,0.3960355222225189 -117,0.39003294706344604 -118,0.38391855359077454 -119,0.37767529487609863 -120,0.37125295400619507 -121,0.3646126985549927 -122,0.357717365026474 -123,0.3505338728427887 -124,0.3430420458316803 -125,0.3352371156215668 -126,0.32720017433166504 -127,0.31895509362220764 -128,0.3104632794857025 -129,0.30182594060897827 -130,0.29316821694374084 -131,0.28440508246421814 -132,0.27560240030288696 -133,0.26684367656707764 -134,0.2582748234272003 -135,0.24993011355400085 -136,0.2418672740459442 -137,0.23422281444072723 -138,0.22696326673030853 -139,0.2200208604335785 -140,0.21340079605579376 -141,0.20708239078521729 -142,0.20105065405368805 -143,0.19531400501728058 -144,0.1898767352104187 -145,0.18472114205360413 -146,0.17983515560626984 -147,0.17521017789840698 -148,0.17084810137748718 -149,0.16670264303684235 -150,0.16275887191295624 -151,0.15900513529777527 -152,0.15543270111083984 -153,0.15203221142292023 -154,0.148795947432518 -155,0.14572471380233765 -156,0.14283601939678192 -157,0.14008454978466034 -158,0.13744767010211945 -159,0.13491521775722504 -160,0.13247685134410858 -161,0.130131796002388 -162,0.127873495221138 -163,0.12569621205329895 -164,0.12359528988599777 -165,0.12156598269939423 -166,0.11960380524396896 -167,0.11770454794168472 -168,0.11586391180753708 -169,0.11407672613859177 -170,0.11233917623758316 -171,0.11064785718917847 -172,0.10900295525789261 -173,0.1074036955833435 -174,0.10584903508424759 -175,0.10433660447597504 -176,0.10286439210176468 -177,0.10142447054386139 -178,0.1000145897269249 -179,0.09863249957561493 -180,0.09727621078491211 -181,0.09594368934631348 -182,0.09463401138782501 -183,0.09334587305784225 -184,0.09207828342914581 -185,0.09083009511232376 -186,0.08960061520338058 -187,0.0883897989988327 -188,0.08719664067029953 -189,0.08602023124694824 -190,0.0848601907491684 -191,0.0837160274386406 -192,0.08258774876594543 -193,0.08147498220205307 -194,0.08037804812192917 -195,0.07929722964763641 -196,0.07823451608419418 -197,0.07718950510025024 -198,0.07615695148706436 -199,0.07513745874166489 -200,0.07413055002689362 -201,0.07313606142997742 -202,0.07215459644794464 -203,0.07118997722864151 -204,0.07024132460355759 -205,0.06930658221244812 -206,0.06838470697402954 -207,0.06748006492853165 -208,0.06659229844808578 -209,0.06572448462247849 -210,0.06487981230020523 -211,0.06406238675117493 -212,0.06327062100172043 -213,0.06250077486038208 -214,0.06175638735294342 -215,0.061042867600917816 -216,0.0603654608130455 -217,0.059713419526815414 -218,0.05908214673399925 -219,0.058472566306591034 -220,0.05788562446832657 -221,0.05732221156358719 -222,0.056783098727464676 -223,0.05626906454563141 -224,0.055780183523893356 -225,0.055316828191280365 -226,0.054879628121852875 -227,0.05446803942322731 -228,0.05408095568418503 -229,0.05371810123324394 -230,0.05337914079427719 -231,0.053063344210386276 -232,0.052769824862480164 -233,0.05249733850359917 -234,0.05224492400884628 -235,0.052010975778102875 -236,0.05179399624466896 -237,0.05159250646829605 -238,0.05140499770641327 -239,0.051229991018772125 -240,0.05106600001454353 -241,0.05091159790754318 -242,0.05076552927494049 -243,0.0506264828145504 -244,0.050493307411670685 -245,0.05036505311727524 -246,0.05024082213640213 -247,0.050119757652282715 -248,0.05000119283795357 
-249,0.04988466575741768 -250,0.04976969584822655 -251,0.04965587705373764 -252,0.04954303428530693 -253,0.049431025981903076 -254,0.04931970685720444 -255,0.04920900613069534 -256,0.04909896105527878 -257,0.04898962378501892 -258,0.0488809272646904 -259,0.04877301678061485 -260,0.048666056245565414 -261,0.04856007546186447 -262,0.0484551340341568 -263,0.04835130274295807 -264,0.04824875295162201 -265,0.048147451132535934 -266,0.048047568649053574 -267,0.04794927313923836 -268,0.04785254970192909 -269,0.04775729030370712 -270,0.04766349121928215 -271,0.04757111147046089 -272,0.04748012498021126 -273,0.04739048704504967 -274,0.04730212315917015 -275,0.04721502587199211 -276,0.04712913557887077 -277,0.047044433653354645 -278,0.04696083068847656 -279,0.0468783900141716 -280,0.04679698497056961 -281,0.04671657085418701 -282,0.046637095510959625 -283,0.046558599919080734 -284,0.04648097977042198 -285,0.0464041568338871 -286,0.046328119933605194 -287,0.04625287279486656 -288,0.04617837071418762 -289,0.046104613691568375 -290,0.04603153467178345 -291,0.045959148555994034 -292,0.04588742181658745 -293,0.04581631347537041 -294,0.045745812356472015 -295,0.04567601531744003 -296,0.045606788247823715 -297,0.04553816467523575 -298,0.04547012224793434 -299,0.0454026535153389 -300,0.045335736125707626 -301,0.045269377529621124 -302,0.045203547924757004 -303,0.04513828828930855 -304,0.045073527842760086 -305,0.0450093112885952 -306,0.0449456125497818 -307,0.04488251358270645 -308,0.044819872826337814 -309,0.04475775361061096 -310,0.0446961335837841 -311,0.04463506117463112 -312,0.0445745587348938 -313,0.04451461136341095 -314,0.04445521533489227 -315,0.04439636692404747 -316,0.044337980449199677 -317,0.04428006708621979 -318,0.044222716242074966 -319,0.04416583105921745 -320,0.044109441339969635 -321,0.04405353590846062 -322,0.04399813711643219 -323,0.043943170458078384 -324,0.043888598680496216 -325,0.04383448138833046 -326,0.04378078132867813 -327,0.04372749850153923 -328,0.043674591928720474 -329,0.043622106313705444 -330,0.04357004165649414 -331,0.04351836442947388 -332,0.04346701502799988 -333,0.04341605678200722 -334,0.043365463614463806 -335,0.04331521689891815 -336,0.04326533153653145 -337,0.04321577772498131 -338,0.043166618794202805 -339,0.043117791414260864 -340,0.04306924343109131 -341,0.04302102327346802 -342,0.04297315701842308 -343,0.04292559251189232 -344,0.04287832975387573 -345,0.04283136501908302 -346,0.04278472438454628 -347,0.042738381773233414 -348,0.04269230365753174 -349,0.04264652729034424 -350,0.042601048946380615 -351,0.04255584254860878 -352,0.04251089692115784 -353,0.042466312646865845 -354,0.04242204129695892 -355,0.042378019541502 -356,0.042334191501140594 -357,0.04229059815406799 -358,0.04224725812673569 -359,0.042204126715660095 -360,0.04216117039322853 -361,0.042118463665246964 -362,0.0420759953558445 -363,0.04203373193740845 -364,0.0419916957616806 -365,0.041949860751628876 -366,0.04190825670957565 -367,0.041866909712553024 -368,0.04182577133178711 -369,0.04178486764431 -370,0.041744206100702286 -371,0.04170379787683487 -372,0.04166361689567566 -373,0.04162362962961197 -374,0.04158388450741768 -375,0.04154437780380249 -376,0.041505131870508194 -377,0.041466109454631805 -378,0.04142731800675392 -379,0.041388776153326035 -380,0.04135045409202576 -381,0.04131235182285309 -382,0.04127447307109833 -383,0.04123680666089058 -384,0.04119938984513283 -385,0.041162192821502686 -386,0.04112519696354866 -387,0.04108840227127075 -388,0.04105184227228165 -389,0.04101545736193657 
-390,0.040979281067848206 -391,0.04094329848885536 -392,0.040907539427280426 -393,0.04087195545434952 -394,0.040836602449417114 -395,0.04080140218138695 -396,0.040766388177871704 -397,0.04073154181241989 -398,0.0406968854367733 -399,0.04066239669919014 -400,0.0406281054019928 -401,0.040593989193439484 -402,0.040560025721788406 -403,0.04052625223994255 -404,0.04049263149499893 -405,0.04045915976166725 -406,0.0404258593916893 -407,0.04039274528622627 -408,0.04035979509353638 -409,0.04032698646187782 -410,0.040294308215379715 -411,0.04026181250810623 -412,0.04022946581244469 -413,0.04019724577665329 -414,0.04016515240073204 -415,0.04013324901461601 -416,0.04010150209069252 -417,0.040069885551929474 -418,0.040038395673036575 -419,0.040007028728723526 -420,0.03997582942247391 -421,0.03994477540254593 -422,0.0399138517677784 -423,0.03988305479288101 -424,0.03985241800546646 -425,0.039821941405534744 -426,0.03979158774018288 -427,0.03976135700941086 -428,0.0397312194108963 -429,0.039701178669929504 -430,0.039671264588832855 -431,0.03964145854115486 -432,0.03961171954870224 -433,0.03958207741379738 -434,0.03955253213644028 -435,0.03952309489250183 -436,0.039493754506111145 -437,0.039464496076107025 -438,0.03943537175655365 -439,0.039406340569257736 -440,0.03937738761305809 -441,0.03934849426150322 -442,0.03931969776749611 -443,0.03929096832871437 -444,0.03926233574748039 -445,0.03923380747437477 -446,0.03920535370707512 -447,0.03917699679732323 -448,0.03914867714047432 -449,0.03912045806646347 -450,0.03909233212471008 -451,0.039064280688762665 -452,0.03903632611036301 -453,0.039008431136608124 -454,0.038980595767498016 -455,0.03895283862948418 -456,0.038925155997276306 -457,0.03889753669500351 -458,0.03886997327208519 -459,0.03884248435497284 -460,0.03881509229540825 -461,0.038787804543972015 -462,0.03876056522130966 -463,0.03873339667916298 -464,0.03870629519224167 -465,0.038679298013448715 -466,0.03865237534046173 -467,0.038625504821538925 -468,0.03859870880842209 -469,0.03857199475169182 -470,0.03854537010192871 -471,0.03851878270506859 -472,0.03849225863814354 -473,0.03846578299999237 -474,0.03843935579061508 -475,0.038412995636463165 -476,0.03838662430644035 -477,0.03836032375693321 -478,0.03833404928445816 -479,0.03830782696604729 -480,0.03828169405460358 -481,0.038255639374256134 -482,0.03822962939739227 -483,0.0382036454975605 -484,0.03817776218056679 -485,0.03815191239118576 -486,0.03812612220644951 -487,0.03810037672519684 -488,0.03807467967271805 -489,0.038049038499593735 -490,0.038023464381694794 -491,0.03799796849489212 -492,0.037972524762153625 -493,0.037947166711091995 -494,0.03792186453938484 -495,0.03789657726883888 -496,0.037871379405260086 -497,0.03784624859690666 -498,0.037821173667907715 -499,0.037796176970005035 -500,0.03777124732732773 -501,0.03774639591574669 -502,0.03772161155939102 -503,0.03769687935709953 -504,0.03767220303416252 -505,0.037647545337677 -506,0.03762296959757805 -507,0.03759848698973656 -508,0.037574056535959244 -509,0.03754967823624611 -510,0.03752535581588745 -511,0.037501055747270584 -512,0.03747684136033058 -513,0.03745267167687416 -514,0.03742855042219162 -515,0.03740447387099266 -516,0.03738042712211609 -517,0.0373564213514328 -518,0.037332478910684586 -519,0.03730855882167816 -520,0.037284668534994125 -521,0.03726082667708397 -522,0.037237055599689484 -523,0.03721340373158455 -524,0.03718980401754379 -525,0.03716626390814781 -526,0.03714277222752571 -527,0.037119343876838684 -528,0.03709600120782852 -529,0.03707277774810791 -530,0.037049680948257446 
-531,0.037026699632406235 -532,0.03700374439358711 -533,0.03698084130883217 -534,0.0369579903781414 -535,0.03693517670035362 -536,0.03691239282488823 -537,0.036889683455228806 -538,0.03686700016260147 -539,0.03684437274932861 -540,0.03682177886366844 -541,0.036799222230911255 -542,0.03677668422460556 -543,0.03675418347120285 -544,0.036731697618961334 -545,0.03670931234955788 -546,0.036686938256025314 -547,0.03666461259126663 -548,0.03664229437708855 -549,0.036620013415813446 -550,0.036597803235054016 -551,0.036575693637132645 -552,0.03655368834733963 -553,0.03653179109096527 -554,0.036510009318590164 -555,0.0364883691072464 -556,0.03646678850054741 -557,0.0364452488720417 -558,0.03642374649643898 -559,0.03640229254961014 -560,0.03638090193271637 -561,0.03635961189866066 -562,0.03633834794163704 -563,0.03631709888577461 -564,0.03629589453339577 -565,0.03627471253275871 -566,0.03625358268618584 -567,0.03623250126838684 -568,0.03621143102645874 -569,0.036190375685691833 -570,0.03616933524608612 -571,0.036148346960544586 -572,0.03612738847732544 -573,0.03610646352171898 -574,0.03608554229140282 -575,0.03606466203927994 -576,0.03604383394122124 -577,0.03602306544780731 -578,0.03600229695439339 -579,0.03598153591156006 -580,0.03596079722046852 -581,0.03594009205698967 -582,0.0359194315969944 -583,0.035898804664611816 -584,0.035878222435712814 -585,0.035857658833265305 -586,0.03583711385726929 -587,0.035816632211208344 -588,0.035796184092760086 -589,0.035775765776634216 -590,0.035755354911088943 -591,0.03573499247431755 -592,0.03571467101573944 -593,0.03569437190890312 -594,0.03567409887909889 -595,0.035653840750455856 -596,0.03563361242413521 -597,0.035613421350717545 -598,0.035593245178461075 -599,0.03557309880852699 -600,0.0355529859662056 -601,0.035532910376787186 -602,0.03551286831498146 -603,0.035492874681949615 -604,0.035472892224788666 -605,0.03545292094349861 -606,0.035432953387498856 -607,0.0354129821062088 -608,0.03539304807782173 -609,0.03537312522530556 -610,0.03535319119691849 -611,0.035333260893821716 -612,0.03531332686543465 -613,0.03529340401291847 -614,0.035273477435112 -615,0.03525354713201523 -616,0.03523358702659607 -617,0.03521357476711273 -618,0.03519357740879059 -619,0.03517359122633934 -620,0.03515364229679108 -621,0.03513380512595177 -622,0.0351141132414341 -623,0.035094622522592545 -624,0.03507527336478233 -625,0.03505608066916466 -626,0.03503706678748131 -627,0.03501825034618378 -628,0.034999676048755646 -629,0.034981343895196915 -630,0.03496323898434639 -631,0.03494536131620407 -632,0.03492770716547966 -633,0.03491028770804405 -634,0.03489311784505844 -635,0.03487619012594223 -636,0.03484734892845154 -637,0.03480249270796776 -638,0.03477396070957184 -639,0.03475764021277428 -640,0.03475135192275047 -641,0.03473857417702675 -642,0.034728653728961945 -643,0.034714166074991226 -644,0.03469573333859444 -645,0.034673016518354416 -646,0.03464997187256813 -647,0.03463062644004822 -648,0.03461674228310585 -649,0.034603871405124664 -650,0.03458847850561142 -651,0.03471466898918152 -652,0.03459521383047104 -653,0.034607332199811935 -654,0.034546393901109695 -655,0.03449816629290581 -656,0.034521620720624924 -657,0.03453857824206352 -658,0.034498993307352066 -659,0.034469906240701675 -660,0.03447915241122246 -661,0.03447379916906357 -662,0.03443870320916176 -663,0.03441855311393738 -664,0.03442596271634102 -665,0.034418899565935135 -666,0.03438827767968178 -667,0.0343683622777462 -668,0.03436288237571716 -669,0.034347109496593475 -670,0.0343211330473423 -671,0.034306466579437256 
-672,0.034302935004234314 -673,0.03429128974676132 -674,0.03427194803953171 -675,0.03426108509302139 -676,0.03426438942551613 -677,0.0342557393014431 -678,0.034223511815071106 -679,0.034205056726932526 -680,0.034208472818136215 -681,0.03420048952102661 -682,0.03417593240737915 -683,0.034162089228630066 -684,0.034161411225795746 -685,0.03415306657552719 -686,0.0341353639960289 -687,0.03412427008152008 -688,0.034120090305805206 -689,0.03410870209336281 -690,0.03409048914909363 -691,0.034077271819114685 -692,0.03406897559762001 -693,0.03405695781111717 -694,0.03404150903224945 -695,0.034029267728328705 -696,0.034019585698843 -697,0.03400691971182823 -698,0.03399226441979408 -699,0.03398049250245094 -700,0.03397126495838165 -701,0.03396017476916313 -702,0.03394593298435211 -703,0.033938050270080566 -704,0.0339217372238636 -705,0.033908579498529434 -706,0.033896517008543015 -707,0.03388538584113121 -708,0.03387536481022835 -709,0.03386598825454712 -710,0.0338563434779644 -711,0.03384619578719139 -712,0.03383597359061241 -713,0.03382587805390358 -714,0.03381567820906639 -715,0.03380531072616577 -716,0.033794790506362915 -717,0.033783961087465286 -718,0.03377245366573334 -719,0.033760275691747665 -720,0.03374787047505379 -721,0.03373560309410095 -722,0.03372366726398468 -723,0.03371218591928482 -724,0.033701155334711075 -725,0.033690325915813446 -726,0.03367944434285164 -727,0.03366846218705177 -728,0.03365742042660713 -729,0.033645570278167725 -730,0.03363447263836861 -731,0.03362186625599861 -732,0.033610500395298004 -733,0.033600565046072006 -734,0.03359422832727432 -735,0.0335799939930439 -736,0.03356511518359184 -737,0.0335574597120285 -738,0.033550605177879333 -739,0.03353768214583397 -740,0.0335233248770237 -741,0.03351107984781265 -742,0.033501945436000824 -743,0.03349487483501434 -744,0.03349066898226738 -745,0.03347710892558098 -746,0.03345910459756851 -747,0.03344083949923515 -748,0.03343009576201439 -749,0.03342593088746071 -750,0.03341137617826462 -751,0.033399082720279694 -752,0.03338860347867012 -753,0.033381387591362 -754,0.03337286412715912 -755,0.03336244076490402 -756,0.03335336223244667 -757,0.03334582969546318 -758,0.0333370566368103 -759,0.03332630544900894 -760,0.03331590071320534 -761,0.033306367695331573 -762,0.03329581022262573 -763,0.033284131437540054 -764,0.03327244892716408 -765,0.033261124044656754 -766,0.03324954956769943 -767,0.033238064497709274 -768,0.03322772681713104 -769,0.03321809321641922 -770,0.03320789709687233 -771,0.033197056502103806 -772,0.03318638354539871 -773,0.03317621722817421 -774,0.03316612169146538 -775,0.03315505012869835 -776,0.03315851837396622 -777,0.033135898411273956 -778,0.03312448784708977 -779,0.03310970962047577 -780,0.03309645876288414 -781,0.03308788686990738 -782,0.03308045491576195 -783,0.033070895820856094 -784,0.03306051343679428 -785,0.03305157274007797 -786,0.03304282948374748 -787,0.033032696694135666 -788,0.03302234783768654 -789,0.03301328048110008 -790,0.033004723489284515 -791,0.0329948328435421 -792,0.032982829958200455 -793,0.03297017142176628 -794,0.03295809403061867 -795,0.03294657915830612 -796,0.03293531760573387 -797,0.03292457014322281 -798,0.03291447460651398 -799,0.03290437534451485 -800,0.0328938253223896 -801,0.03288329392671585 -802,0.032872628420591354 -803,0.0328606441617012 -804,0.03285321965813637 -805,0.03284315764904022 -806,0.03283292427659035 -807,0.032819416373968124 -808,0.0328080840408802 -809,0.032800231128931046 -810,0.032792698591947556 -811,0.032782554626464844 -812,0.03277100622653961 
-813,0.0327608585357666 -814,0.03275233879685402 -815,0.03274353966116905 -816,0.032733313739299774 -817,0.03272296115756035 -818,0.03271372988820076 -819,0.032704710960388184 -820,0.03269467502832413 -821,0.032683730125427246 -822,0.03267301246523857 -823,0.03266239911317825 -824,0.032651595771312714 -825,0.03264079615473747 -826,0.032630883157253265 -827,0.03262215480208397 -828,0.03261339291930199 -829,0.03260364383459091 -830,0.03259319067001343 -831,0.03258275240659714 -832,0.03257255256175995 -833,0.032563768327236176 -834,0.03255341574549675 -835,0.03254375979304314 -836,0.03253183141350746 -837,0.03252118453383446 -838,0.032513007521629333 -839,0.032505329698324203 -840,0.03249604254961014 -841,0.0324857197701931 -842,0.03247595950961113 -843,0.03246700391173363 -844,0.03245783969759941 -845,0.032447945326566696 -846,0.032438166439533234 -847,0.03242889419198036 -848,0.032419539988040924 -849,0.032409291714429855 -850,0.03239841014146805 -851,0.03238736093044281 -852,0.03237617015838623 -853,0.03236496448516846 -854,0.03235447779297829 -855,0.032344650477170944 -856,0.03233484551310539 -857,0.03232460841536522 -858,0.03231398016214371 -859,0.032303109765052795 -860,0.032292310148477554 -861,0.03228142857551575 -862,0.03226814791560173 -863,0.03226889297366142 -864,0.03224952518939972 -865,0.03223782405257225 -866,0.03222200646996498 -867,0.03220820799469948 -868,0.03219984099268913 -869,0.032192762941122055 -870,0.03218180313706398 -871,0.03216852992773056 -872,0.03215745836496353 -873,0.03214871883392334 -874,0.03213914483785629 -875,0.03212719038128853 -876,0.032115187495946884 -877,0.03210481256246567 -878,0.03209436312317848 -879,0.03208230808377266 -880,0.03206973150372505 -881,0.032057907432317734 -882,0.03204569220542908 -883,0.032032549381256104 -884,0.03202004358172417 -885,0.032009005546569824 -886,0.031998779624700546 -887,0.03198796883225441 -888,0.031975872814655304 -889,0.031963784247636795 -890,0.03195055201649666 -891,0.03195510432124138 -892,0.03193669021129608 -893,0.03192626312375069 -894,0.031907275319099426 -895,0.03189118951559067 -896,0.031884875148534775 -897,0.03187934681773186 -898,0.03186677396297455 -899,0.03185297176241875 -900,0.03184455633163452 -901,0.03183775395154953 -902,0.03182683512568474 -903,0.03181387484073639 -904,0.03180419281125069 -905,0.03179660066962242 -906,0.03178654983639717 -907,0.031773846596479416 -908,0.03176230937242508 -909,0.03175274282693863 -910,0.03174208104610443 -911,0.03172982111573219 -912,0.03171843662858009 -913,0.031709153205156326 -914,0.0316997654736042 -915,0.031689222902059555 -916,0.0316784642636776 -917,0.031668342649936676 -918,0.03165695071220398 -919,0.03167619928717613 -920,0.03164936974644661 -921,0.031641412526369095 -922,0.03162086755037308 -923,0.03160417079925537 -924,0.03160129487514496 -925,0.03159830719232559 -926,0.031585242599248886 -927,0.03157265856862068 -928,0.031568340957164764 -929,0.03156405687332153 -930,0.03155336529016495 -931,0.031541500240564346 -932,0.03153511881828308 -933,0.03153005242347717 -934,0.03152001276612282 -935,0.031507838517427444 -936,0.031498733907938004 -937,0.03149108961224556 -938,0.031481366604566574 -939,0.03147071227431297 -940,0.03146211802959442 -941,0.03145472705364227 -942,0.03144577518105507 -943,0.031435903161764145 -944,0.03142741695046425 -945,0.03141987323760986 -946,0.03140697255730629 -947,0.031435538083314896 -948,0.03140702843666077 -949,0.03140122443437576 -950,0.03138037770986557 -951,0.031365033239126205 -952,0.03136594220995903 
-953,0.03136446699500084 -954,0.03135058283805847 -955,0.0313398614525795 -956,0.0313391350209713 -957,0.03133665770292282 -958,0.031326472759246826 -959,0.031316839158535004 -960,0.03131355345249176 -961,0.03130956366658211 -962,0.031299613416194916 -963,0.031289927661418915 -964,0.031284067779779434 -965,0.031277891248464584 -966,0.03126860037446022 -967,0.03125974163413048 -968,0.0312538780272007 -969,0.03124794363975525 -970,0.031239435076713562 -971,0.031230928376317024 -972,0.031224679201841354 -973,0.031219035387039185 -974,0.031207578256726265 -975,0.031245771795511246 -976,0.031209534034132957 -977,0.031207572668790817 -978,0.031188039109110832 -979,0.031172534450888634 -980,0.031175317242741585 -981,0.031176719814538956 -982,0.031164009124040604 -983,0.031154857948422432 -984,0.031156472861766815 -985,0.031155455857515335 -986,0.031146103516221046 -987,0.031138360500335693 -988,0.031137656420469284 -989,0.031135087832808495 -990,0.031125975772738457 -991,0.031118130311369896 -992,0.03111453540623188 -993,0.031109625473618507 -994,0.031101522967219353 -995,0.031094787642359734 -996,0.03109094314277172 -997,0.031085677444934845 -998,0.031077750027179718 -999,0.031071150675415993 -1000,0.031066859140992165 diff --git a/training/time_weighting/linear/training_config.txt b/training/time_weighting/linear/training_config.txt deleted file mode 100644 index a70c6e1..0000000 --- a/training/time_weighting/linear/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0.0001 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, 
-3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting/linear/training_log.csv b/training/time_weighting/linear/training_log.csv deleted file mode 100644 index 3335d09..0000000 --- a/training/time_weighting/linear/training_log.csv +++ /dev/null @@ -1,1002 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,331.6314697265625 -2,229.41986083984375 -3,66.90505981445312 -4,15.73501968383789 -5,10.245162010192871 -6,6.034902095794678 -7,6.718071937561035 -8,5.9916486740112305 -9,5.246847629547119 -10,4.830845832824707 -11,4.461063861846924 -12,4.149963855743408 -13,3.8985042572021484 -14,3.652695417404175 -15,3.4070072174072266 -16,3.0902457237243652 -17,2.6531362533569336 -18,2.2523725032806396 -19,2.0155649185180664 -20,1.864186406135559 -21,1.7501277923583984 -22,1.6531012058258057 -23,1.5662291049957275 -24,1.4868136644363403 -25,1.412714958190918 -26,1.343291163444519 -27,1.2769590616226196 -28,1.2128911018371582 -29,1.1505085229873657 -30,1.0896309614181519 -31,1.030298113822937 -32,0.9733155369758606 -33,0.9197597503662109 -34,0.8724044561386108 -35,0.8346154093742371 -36,0.809155285358429 -37,0.7957659959793091 -38,0.7907580733299255 -39,0.790008008480072 -40,0.7902307510375977 -41,0.789132833480835 -42,0.7854013442993164 -43,0.7784242033958435 -44,0.7681233286857605 -45,0.7546961903572083 -46,0.7385615110397339 -47,0.7202516198158264 -48,0.7003911733627319 -49,0.6796826124191284 -50,0.6588981747627258 -51,0.6388257741928101 -52,0.6202259659767151 -53,0.6037768721580505 -54,0.5900211334228516 -55,0.5792988538742065 -56,0.5716701149940491 -57,0.5668969750404358 -58,0.5645080804824829 -59,0.5638712048530579 -60,0.5642659664154053 -61,0.5650161504745483 -62,0.5655606985092163 -63,0.5654870271682739 -64,0.564556360244751 -65,0.5627163052558899 -66,0.5600520968437195 -67,0.5567556619644165 -68,0.5530799031257629 -69,0.5492938160896301 -70,0.5456423759460449 -71,0.5423165559768677 -72,0.5394356846809387 -73,0.5370345115661621 -74,0.5350825786590576 -75,0.5334925055503845 -76,0.5321435332298279 -77,0.5309106707572937 -78,0.529678463935852 -79,0.5283538699150085 -80,0.5268754363059998 -81,0.5252244472503662 -82,0.5234056115150452 -83,0.5214517116546631 -84,0.5194135904312134 -85,0.5173507332801819 -86,0.5153215527534485 -87,0.5133740901947021 -88,0.5115427374839783 -89,0.509846031665802 -90,0.5082811713218689 -91,0.5068297982215881 -92,0.5054628252983093 -93,0.5041466355323792 -94,0.5028454661369324 -95,0.5015294551849365 -96,0.5001783967018127 -97,0.49878230690956116 -98,0.4973395764827728 -99,0.495857298374176 -100,0.4943472146987915 -101,0.492825984954834 -102,0.4913085401058197 -103,0.48980575799942017 -104,0.488327294588089 -105,0.4868771433830261 -106,0.4854540526866913 -107,0.48405665159225464 -108,0.4826776385307312 -109,0.4813104569911957 -110,0.4799485504627228 -111,0.47858762741088867 -112,0.47722548246383667 -113,0.47585830092430115 -114,0.4744883179664612 -115,0.4731177091598511 -116,0.47174951434135437 -117,0.47038590908050537 -118,0.4690309762954712 -119,0.4676840901374817 -120,0.46634548902511597 -121,0.46501532196998596 -122,0.4636923372745514 -123,0.4623762369155884 -124,0.4610663950443268 -125,0.4597606956958771 -126,0.4584578275680542 -127,0.4571576714515686 -128,0.4558611512184143 -129,0.4545673727989197 -130,0.45327675342559814 -131,0.4519897997379303 -132,0.450708270072937 -133,0.44943270087242126 -134,0.44816282391548157 -135,0.4469008445739746 -136,0.44564568996429443 
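
Taken together, the weight functions in these configs -- linear here, square_root and the mirrored cubic elsewhere in this diff -- are all instances of one power-law template: w(t) = min_val + (1 - min_val) * (t / t_max)**p, with the mirrored variants substituting t_max - t for t. A hedged sketch of that template; the name power_weight and the mirrored flag are illustrative and not identifiers from the repository:

import torch

def power_weight(t_span, p, mirrored=False, t_max=None, min_val=0.01):
    # min_val + (1 - min_val) * (t / t_max)**p, optionally mirrored in time.
    t_max = t_max if t_max is not None else float(t_span[-1])
    t = (t_max - t_span) if mirrored else t_span
    return min_val + (1 - min_val) * (t / t_max) ** p

t = torch.linspace(0, 10, 5)
print(power_weight(t, p=1))                 # linear (this config)
print(power_weight(t, p=0.5))               # square_root
print(power_weight(t, p=3, mirrored=True))  # cubic mirrored

Here p > 1 concentrates the loss on late-time tracking, p < 1 flattens the emphasis across the horizon, and mirroring flips the emphasis toward the initial transient.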
-137,0.44439709186553955 -138,0.44315552711486816 -139,0.4419204592704773 -140,0.4406907856464386 -141,0.43946799635887146 -142,0.43825140595436096 -143,0.43703991174697876 -144,0.4358339011669159 -145,0.43463414907455444 -146,0.4334406852722168 -147,0.4322536289691925 -148,0.43107306957244873 -149,0.42989882826805115 -150,0.4287306070327759 -151,0.4275677502155304 -152,0.42641016840934753 -153,0.42525869607925415 -154,0.42411327362060547 -155,0.42297279834747314 -156,0.4218376576900482 -157,0.42070701718330383 -158,0.4195820391178131 -159,0.41846147179603577 -160,0.41734614968299866 -161,0.41623589396476746 -162,0.4151304066181183 -163,0.41402921080589294 -164,0.41293269395828247 -165,0.4118400514125824 -166,0.4107521176338196 -167,0.4096696674823761 -168,0.40859082341194153 -169,0.4075160622596741 -170,0.4064469039440155 -171,0.4053814709186554 -172,0.40431979298591614 -173,0.40326184034347534 -174,0.40220868587493896 -175,0.4011598825454712 -176,0.4001144468784332 -177,0.39907315373420715 -178,0.3980376422405243 -179,0.39700600504875183 -180,0.39597848057746887 -181,0.39495620131492615 -182,0.39393851161003113 -183,0.39292529225349426 -184,0.3919166028499603 -185,0.39091238379478455 -186,0.389914333820343 -187,0.38892215490341187 -188,0.387935072183609 -189,0.3869536221027374 -190,0.38597753643989563 -191,0.3850064277648926 -192,0.38404080271720886 -193,0.3830806612968445 -194,0.3821260631084442 -195,0.3811761736869812 -196,0.38023072481155396 -197,0.379291296005249 -198,0.37835583090782166 -199,0.37742510437965393 -200,0.37650009989738464 -201,0.3755800724029541 -202,0.3746646046638489 -203,0.3737538754940033 -204,0.3728483319282532 -205,0.3719477355480194 -206,0.3710518777370453 -207,0.3701605200767517 -208,0.3692747950553894 -209,0.36839330196380615 -210,0.3675168752670288 -211,0.3666451871395111 -212,0.3657785952091217 -213,0.36491674184799194 -214,0.3640589416027069 -215,0.3632056713104248 -216,0.36235764622688293 -217,0.3615137040615082 -218,0.36067360639572144 -219,0.35983842611312866 -220,0.35900744795799255 -221,0.3581801652908325 -222,0.3573567569255829 -223,0.3565376102924347 -224,0.35572248697280884 -225,0.3549107015132904 -226,0.3541039526462555 -227,0.3533007502555847 -228,0.3525010347366333 -229,0.35170507431030273 -230,0.35091277956962585 -231,0.3501236140727997 -232,0.3493383228778839 -233,0.34855687618255615 -234,0.3477790355682373 -235,0.3470051884651184 -236,0.3462353050708771 -237,0.34546953439712524 -238,0.3447069525718689 -239,0.3439481258392334 -240,0.34319353103637695 -241,0.34244224429130554 -242,0.3416946232318878 -243,0.34095117449760437 -244,0.34021154046058655 -245,0.3394756615161896 -246,0.3387431800365448 -247,0.3380149006843567 -248,0.33729052543640137 -249,0.33656954765319824 -250,0.33585232496261597 -251,0.33513906598091125 -252,0.3344293534755707 -253,0.33372288942337036 -254,0.33302053809165955 -255,0.332321435213089 -256,0.3316251337528229 -257,0.3309336006641388 -258,0.3302452564239502 -259,0.3295597434043884 -260,0.3288777768611908 -261,0.3281990885734558 -262,0.32752320170402527 -263,0.3268505334854126 -264,0.3261820077896118 -265,0.3255167007446289 -266,0.32485419511795044 -267,0.3241952061653137 -268,0.323539674282074 -269,0.3228869140148163 -270,0.3222373127937317 -271,0.321590781211853 -272,0.320946604013443 -273,0.3203054368495941 -274,0.3196679651737213 -275,0.31903254985809326 -276,0.31839972734451294 -277,0.31777018308639526 -278,0.31714367866516113 -279,0.31651970744132996 -280,0.3158985376358032 -281,0.31528034806251526 
-282,0.3146648705005646 -283,0.31405210494995117 -284,0.31344303488731384 -285,0.31283655762672424 -286,0.3122326731681824 -287,0.311632364988327 -288,0.31103453040122986 -289,0.31043919920921326 -290,0.3098469376564026 -291,0.30925750732421875 -292,0.3086712658405304 -293,0.30808860063552856 -294,0.30750927329063416 -295,0.30693289637565613 -296,0.30635952949523926 -297,0.3057895302772522 -298,0.3052222430706024 -299,0.30465763807296753 -300,0.304095983505249 -301,0.303536981344223 -302,0.3029804825782776 -303,0.30242690443992615 -304,0.30187639594078064 -305,0.30132853984832764 -306,0.3007834553718567 -307,0.30024129152297974 -308,0.2997016906738281 -309,0.2991647720336914 -310,0.29863062500953674 -311,0.29809898138046265 -312,0.29756981134414673 -313,0.29704368114471436 -314,0.29651978611946106 -315,0.295998752117157 -316,0.29548028111457825 -317,0.29496416449546814 -318,0.2944505512714386 -319,0.2939389944076538 -320,0.2934296429157257 -321,0.2929227650165558 -322,0.29241809248924255 -323,0.29191625118255615 -324,0.29141664505004883 -325,0.2909190058708191 -326,0.2904239892959595 -327,0.289930522441864 -328,0.2894383370876312 -329,0.2889478802680969 -330,0.2884588837623596 -331,0.28797104954719543 -332,0.28748470544815063 -333,0.2870001792907715 -334,0.28651681542396545 -335,0.28603431582450867 -336,0.28555405139923096 -337,0.285074919462204 -338,0.2845964729785919 -339,0.284119576215744 -340,0.2836441397666931 -341,0.28316909074783325 -342,0.2826952040195465 -343,0.2822231352329254 -344,0.2817516028881073 -345,0.2812809646129608 -346,0.2808120548725128 -347,0.2803434729576111 -348,0.27987587451934814 -349,0.2794094681739807 -350,0.27894365787506104 -351,0.2784787714481354 -352,0.27801501750946045 -353,0.27755165100097656 -354,0.2770889699459076 -355,0.27662742137908936 -356,0.27616623044013977 -357,0.27570587396621704 -358,0.27524644136428833 -359,0.2747875452041626 -360,0.27432939410209656 -361,0.273872435092926 -362,0.27341604232788086 -363,0.2729608118534088 -364,0.2725066542625427 -365,0.272053062915802 -366,0.2716005742549896 -367,0.2711487114429474 -368,0.2706972658634186 -369,0.2702479958534241 -370,0.26979899406433105 -371,0.26935070753097534 -372,0.2689037621021271 -373,0.26845720410346985 -374,0.26801127195358276 -375,0.2675662636756897 -376,0.2671217918395996 -377,0.26667889952659607 -378,0.2662368714809418 -379,0.2657955288887024 -380,0.2653561532497406 -381,0.26491761207580566 -382,0.2644798159599304 -383,0.26404380798339844 -384,0.2636088728904724 -385,0.26317495107650757 -386,0.26274344325065613 -387,0.2623128890991211 -388,0.26188334822654724 -389,0.2614569067955017 -390,0.2610318064689636 -391,0.26060810685157776 -392,0.2601865231990814 -393,0.25976645946502686 -394,0.2593482732772827 -395,0.2589321434497833 -396,0.2585175037384033 -397,0.2581053376197815 -398,0.2576952278614044 -399,0.25728684663772583 -400,0.2568807899951935 -401,0.25647634267807007 -402,0.256073921918869 -403,0.25567471981048584 -404,0.25527721643447876 -405,0.2548816204071045 -406,0.2544887959957123 -407,0.2540978789329529 -408,0.25370919704437256 -409,0.25332239270210266 -410,0.25293710827827454 -411,0.25255507230758667 -412,0.25217464566230774 -413,0.2517966628074646 -414,0.2514212429523468 -415,0.25104713439941406 -416,0.25067541003227234 -417,0.25030583143234253 -418,0.24993768334388733 -419,0.2495719939470291 -420,0.24920840561389923 -421,0.2488464117050171 -422,0.2484874427318573 -423,0.2481289952993393 -424,0.24777206778526306 -425,0.24741733074188232 -426,0.24706360697746277 
-427,0.24671167135238647 -428,0.24636173248291016 -429,0.24601291120052338 -430,0.24566665291786194 -431,0.2453218251466751 -432,0.24497824907302856 -433,0.24463683366775513 -434,0.24429675936698914 -435,0.24395832419395447 -436,0.2436216026544571 -437,0.24328607320785522 -438,0.24295297265052795 -439,0.24262118339538574 -440,0.24229039251804352 -441,0.2419617623090744 -442,0.2416342794895172 -443,0.24130812287330627 -444,0.2409847229719162 -445,0.2406623810529709 -446,0.24034184217453003 -447,0.24002312123775482 -448,0.23970577120780945 -449,0.23939038813114166 -450,0.23907607793807983 -451,0.23876304924488068 -452,0.23845219612121582 -453,0.2381432056427002 -454,0.2378344088792801 -455,0.2375280112028122 -456,0.23722274601459503 -457,0.23691748082637787 -458,0.23661357164382935 -459,0.23631036281585693 -460,0.23600822687149048 -461,0.2357071191072464 -462,0.2354063093662262 -463,0.23510614037513733 -464,0.23480692505836487 -465,0.2345082312822342 -466,0.23421066999435425 -467,0.23391464352607727 -468,0.2336188107728958 -469,0.23332516849040985 -470,0.23303325474262238 -471,0.2327425479888916 -472,0.23245516419410706 -473,0.2321709245443344 -474,0.2318894863128662 -475,0.23161181807518005 -476,0.23133789002895355 -477,0.23106759786605835 -478,0.23080135881900787 -479,0.23053906857967377 -480,0.23028098046779633 -481,0.23002709448337555 -482,0.22977691888809204 -483,0.22953058779239655 -484,0.22928839921951294 -485,0.22905026376247406 -486,0.22881530225276947 -487,0.22858387231826782 -488,0.22835604846477509 -489,0.22813186049461365 -490,0.22791039943695068 -491,0.22769193351268768 -492,0.22747649252414703 -493,0.22726403176784515 -494,0.22705404460430145 -495,0.22684648633003235 -496,0.22664140164852142 -497,0.2264382541179657 -498,0.22623714804649353 -499,0.22603808343410492 -500,0.22584083676338196 -501,0.2256452441215515 -502,0.22545120120048523 -503,0.22525876760482788 -504,0.2250678986310959 -505,0.22487826645374298 -506,0.22468967735767365 -507,0.22450244426727295 -508,0.2243165373802185 -509,0.22413143515586853 -510,0.22394733130931854 -511,0.2237643152475357 -512,0.22358198463916779 -513,0.22340042889118195 -514,0.22321943938732147 -515,0.22303934395313263 -516,0.2228601723909378 -517,0.22268174588680267 -518,0.22250372171401978 -519,0.22232671082019806 -520,0.22215032577514648 -521,0.22197452187538147 -522,0.2217990905046463 -523,0.22162441909313202 -524,0.22145015001296997 -525,0.22127628326416016 -526,0.2211027443408966 -527,0.22092990577220917 -528,0.22075767815113068 -529,0.22058576345443726 -530,0.22041401267051697 -531,0.2202431708574295 -532,0.22007253766059875 -533,0.2199021875858307 -534,0.2197321504354477 -535,0.21956245601177216 -536,0.21939301490783691 -537,0.21922381222248077 -538,0.2190551608800888 -539,0.21888689696788788 -540,0.21871884167194366 -541,0.2185509204864502 -542,0.21838365495204926 -543,0.21821677684783936 -544,0.21804998815059662 -545,0.21788334846496582 -546,0.21771728992462158 -547,0.2175513207912445 -548,0.21738550066947937 -549,0.21721994876861572 -550,0.21705494821071625 -551,0.21689002215862274 -552,0.2167252153158188 -553,0.21656079590320587 -554,0.21639679372310638 -555,0.2162328064441681 -556,0.21606875956058502 -557,0.21590524911880493 -558,0.21574176847934723 -559,0.21557806432247162 -560,0.21541441977024078 -561,0.21525141596794128 -562,0.21508826315402985 -563,0.2149248868227005 -564,0.21476203203201294 -565,0.21459932625293732 -566,0.21443656086921692 -567,0.21427375078201294 -568,0.21411128342151642 -569,0.21394871175289154 
-570,0.21378616988658905 -571,0.21362382173538208 -572,0.2134617418050766 -573,0.21329949796199799 -574,0.21313658356666565 -575,0.21296928822994232 -576,0.21279947459697723 -577,0.21262872219085693 -578,0.21245822310447693 -579,0.21228817105293274 -580,0.21211843192577362 -581,0.2119496464729309 -582,0.21178273856639862 -583,0.21161682903766632 -584,0.21145209670066833 -585,0.2112892121076584 -586,0.21112817525863647 -587,0.2109689712524414 -588,0.2108117938041687 -589,0.21065708994865417 -590,0.2105047106742859 -591,0.21035417914390564 -592,0.21020661294460297 -593,0.21006132662296295 -594,0.20991834998130798 -595,0.2097783386707306 -596,0.20964108407497406 -597,0.20950616896152496 -598,0.20937377214431763 -599,0.20924420654773712 -600,0.20911692082881927 -601,0.20899218320846558 -602,0.2088703215122223 -603,0.20875146985054016 -604,0.20863540470600128 -605,0.2085220366716385 -606,0.208411306142807 -607,0.20830336213111877 -608,0.20819807052612305 -609,0.20809532701969147 -610,0.20799556374549866 -611,0.2078985720872879 -612,0.20780415832996368 -613,0.20771244168281555 -614,0.20762336254119873 -615,0.20753726363182068 -616,0.20745381712913513 -617,0.20737291872501373 -618,0.20729447901248932 -619,0.20721879601478577 -620,0.20714551210403442 -621,0.20707447826862335 -622,0.20700570940971375 -623,0.20693910121917725 -624,0.20687463879585266 -625,0.20681218802928925 -626,0.2067517787218094 -627,0.20669323205947876 -628,0.2066364288330078 -629,0.20658136904239655 -630,0.2065279632806778 -631,0.20647603273391724 -632,0.20642556250095367 -633,0.20637662708759308 -634,0.2063291221857071 -635,0.20628269016742706 -636,0.20623748004436493 -637,0.2061934471130371 -638,0.20615039765834808 -639,0.20610827207565308 -640,0.2060670256614685 -641,0.20602665841579437 -642,0.20598717033863068 -643,0.20594844222068787 -644,0.20591038465499878 -645,0.20587289333343506 -646,0.2058362513780594 -647,0.2058001607656479 -648,0.20576462149620056 -649,0.2057296186685562 -650,0.2056950479745865 -651,0.20566104352474213 -652,0.20562757551670074 -653,0.20559455454349518 -654,0.2055620402097702 -655,0.20552989840507507 -656,0.20549817383289337 -657,0.20546679198741913 -658,0.20543567836284637 -659,0.20540493726730347 -660,0.20537444949150085 -661,0.20534424483776093 -662,0.20531423389911652 -663,0.20528444647789001 -664,0.20525488257408142 -665,0.20522554218769073 -666,0.20519642531871796 -667,0.20516736805438995 -668,0.20513875782489777 -669,0.2051103264093399 -670,0.20508210361003876 -671,0.20505405962467194 -672,0.2050262838602066 -673,0.20499855279922485 -674,0.20497114956378937 -675,0.20494386553764343 -676,0.20491677522659302 -677,0.20488981902599335 -678,0.20486319065093994 -679,0.20483671128749847 -680,0.20481035113334656 -681,0.20478418469429016 -682,0.2047581970691681 -683,0.2047322690486908 -684,0.2047065645456314 -685,0.20468094944953918 -686,0.20465554296970367 -687,0.20463010668754578 -688,0.20460481941699982 -689,0.2045796811580658 -690,0.20455457270145416 -691,0.20452962815761566 -692,0.2045048624277115 -693,0.20448020100593567 -694,0.20445562899112701 -695,0.20443114638328552 -696,0.20440673828125 -697,0.20438246428966522 -698,0.20435813069343567 -699,0.20433403551578522 -700,0.20430994033813477 -701,0.20428597927093506 -702,0.20426207780838013 -703,0.20423832535743713 -704,0.20421460270881653 -705,0.20419101417064667 -706,0.204167440533638 -707,0.20414404571056366 -708,0.20412075519561768 -709,0.20409761369228363 -710,0.20407459139823914 -711,0.204051673412323 -712,0.20402882993221283 
-713,0.20400603115558624 -714,0.203983336687088 -715,0.20396076142787933 -716,0.20393826067447662 -717,0.2039157897233963 -718,0.20389343798160553 -719,0.20387117564678192 -720,0.2038489431142807 -721,0.20382685959339142 -722,0.20380482077598572 -723,0.2037828266620636 -724,0.20376093685626984 -725,0.20373915135860443 -726,0.20371763408184052 -727,0.20369626581668854 -728,0.2036750316619873 -729,0.2036539614200592 -730,0.20363326370716095 -731,0.20361287891864777 -732,0.20359259843826294 -733,0.20357225835323334 -734,0.2035520076751709 -735,0.203531876206398 -736,0.20351171493530273 -737,0.20349165797233582 -738,0.2034716159105301 -739,0.20345176756381989 -740,0.20343194901943207 -741,0.2034122794866562 -742,0.20339316129684448 -743,0.20337426662445068 -744,0.20335550606250763 -745,0.20333682000637054 -746,0.2033182680606842 -747,0.20329976081848145 -748,0.20328126847743988 -749,0.20326289534568787 -750,0.20324458181858063 -751,0.2032262235879898 -752,0.20320802927017212 -753,0.20318980515003204 -754,0.20317167043685913 -755,0.2031535655260086 -756,0.20313553512096405 -757,0.20311763882637024 -758,0.20309965312480927 -759,0.20308178663253784 -760,0.2030639797449112 -761,0.20304623246192932 -762,0.20302854478359222 -763,0.20301082730293274 -764,0.20299313962459564 -765,0.2029755413532257 -766,0.20295795798301697 -767,0.20294040441513062 -768,0.20292289555072784 -769,0.20290540158748627 -770,0.20288801193237305 -771,0.20287060737609863 -772,0.2028532326221466 -773,0.20283593237400055 -774,0.2028186321258545 -775,0.2028014063835144 -776,0.20278413593769073 -777,0.2027669996023178 -778,0.2027498185634613 -779,0.20273272693157196 -780,0.202715665102005 -781,0.20269860327243805 -782,0.2026815563440323 -783,0.2026645988225937 -784,0.20264768600463867 -785,0.20263083279132843 -786,0.20261400938034058 -787,0.20259727537631989 -788,0.20258058607578278 -789,0.20256389677524567 -790,0.20254723727703094 -791,0.2025306522846222 -792,0.20251406729221344 -793,0.20249755680561066 -794,0.20248113572597504 -795,0.2024647295475006 -796,0.20244836807250977 -797,0.2024320363998413 -798,0.20241579413414001 -799,0.20239968597888947 -800,0.2023836076259613 -801,0.20236758887767792 -802,0.20235173404216766 -803,0.20233629643917084 -804,0.20232120156288147 -805,0.20230615139007568 -806,0.20229119062423706 -807,0.20227627456188202 -808,0.20226140320301056 -809,0.2022465616464615 -810,0.2022317349910736 -811,0.2022169977426529 -812,0.20220236480236053 -813,0.20218776166439056 -814,0.20217324793338776 -815,0.20215871930122375 -816,0.20214422047138214 -817,0.2021298110485077 -818,0.20211546123027802 -819,0.20210108160972595 -820,0.20208676159381866 -821,0.2020723968744278 -822,0.20205815136432648 -823,0.202043816447258 -824,0.2020295411348343 -825,0.20201529562473297 -826,0.20200110971927643 -827,0.2019868791103363 -828,0.20197272300720215 -829,0.20195861160755157 -830,0.2019445151090622 -831,0.2019304633140564 -832,0.20191645622253418 -833,0.20190247893333435 -834,0.2018885761499405 -835,0.20187464356422424 -836,0.20186077058315277 -837,0.20184685289859772 -838,0.2018330693244934 -839,0.20181922614574432 -840,0.20180544257164001 -841,0.2017917037010193 -842,0.2017780989408493 -843,0.20176446437835693 -844,0.20175093412399292 -845,0.2017373889684677 -846,0.20172391831874847 -847,0.20171049237251282 -848,0.20169697701931 -849,0.20168370008468628 -850,0.2016705870628357 -851,0.20165756344795227 -852,0.20164461433887482 -853,0.20163173973560333 -854,0.20161893963813782 -855,0.20160619914531708 -856,0.20159348845481873 
-857,0.20158079266548157 -858,0.201568141579628 -859,0.20155566930770874 -860,0.20154336094856262 -861,0.20153115689754486 -862,0.20151899755001068 -863,0.20150689780712128 -864,0.20149482786655426 -865,0.20148281753063202 -866,0.20147085189819336 -867,0.2014588713645935 -868,0.20144693553447723 -869,0.20143502950668335 -870,0.20142316818237305 -871,0.20141124725341797 -872,0.20139941573143005 -873,0.20138753950595856 -874,0.20137570798397064 -875,0.2013639211654663 -876,0.2013520449399948 -877,0.20134028792381287 -878,0.20132853090763092 -879,0.20131678879261017 -880,0.201305091381073 -881,0.20129334926605225 -882,0.20128169655799866 -883,0.20127005875110626 -884,0.20125849545001984 -885,0.20124709606170654 -886,0.20123586058616638 -887,0.2012246549129486 -888,0.201213538646698 -889,0.20120243728160858 -890,0.20119155943393707 -891,0.20118090510368347 -892,0.20117028057575226 -893,0.2011597752571106 -894,0.20114929974079132 -895,0.20113889873027802 -896,0.20112846791744232 -897,0.20111814141273499 -898,0.20110780000686646 -899,0.20109745860099792 -900,0.20108722150325775 -901,0.20107696950435638 -902,0.2010667324066162 -903,0.20105674862861633 -904,0.20104709267616272 -905,0.20103766024112701 -906,0.20102831721305847 -907,0.2010190188884735 -908,0.20100976526737213 -909,0.20100052654743195 -910,0.20099130272865295 -911,0.20098206400871277 -912,0.20097289979457855 -913,0.2009638398885727 -914,0.2009548395872116 -915,0.2009458690881729 -916,0.20093688368797302 -917,0.2009280025959015 -918,0.20091907680034637 -919,0.20091019570827484 -920,0.20090137422084808 -921,0.2008925825357437 -922,0.20088379085063934 -923,0.20087498426437378 -924,0.20086628198623657 -925,0.20085753500461578 -926,0.20084889233112335 -927,0.20084024965763092 -928,0.20083169639110565 -929,0.20082315802574158 -930,0.2008146494626999 -931,0.2008061558008194 -932,0.20079778134822845 -933,0.20078937709331512 -934,0.2007809430360794 -935,0.20077264308929443 -936,0.20076431334018707 -937,0.20075605809688568 -938,0.20074781775474548 -939,0.2007395625114441 -940,0.20073144137859344 -941,0.20072323083877563 -942,0.20071515440940857 -943,0.2007070779800415 -944,0.20069901645183563 -945,0.20069099962711334 -946,0.20068296790122986 -947,0.2006748914718628 -948,0.2006669044494629 -949,0.20065900683403015 -950,0.20065103471279144 -951,0.20064310729503632 -952,0.20063525438308716 -953,0.200627401471138 -954,0.20061953365802765 -955,0.20061177015304565 -956,0.2006039321422577 -957,0.2005961686372757 -958,0.2005884200334549 -959,0.20058071613311768 -960,0.20057299733161926 -961,0.20056529343128204 -962,0.2005576342344284 -963,0.20055000483989716 -964,0.2005424201488495 -965,0.20053480565547943 -966,0.20052728056907654 -967,0.20051980018615723 -968,0.20051220059394836 -969,0.20050472021102905 -970,0.20049722492694855 -971,0.20048978924751282 -972,0.2004822939634323 -973,0.20047487318515778 -974,0.20046743750572205 -975,0.2004600316286087 -976,0.20045270025730133 -977,0.2004452645778656 -978,0.20043790340423584 -979,0.20043055713176727 -980,0.2004232257604599 -981,0.20041592419147491 -982,0.20040853321552277 -983,0.20040123164653778 -984,0.20039387047290802 -985,0.20038658380508423 -986,0.2003794014453888 -987,0.2003720998764038 -988,0.2003648728132248 -989,0.20035766065120697 -990,0.20035043358802795 -991,0.20034323632717133 -992,0.2003360539674759 -993,0.20032885670661926 -994,0.2003217339515686 -995,0.2003144919872284 -996,0.20030736923217773 -997,0.20030023157596588 -998,0.2002931535243988 -999,0.20028600096702576 
-1000,0.20027892291545868 diff --git a/training/time_weighting/linear_mirrored/training_config.txt b/training/time_weighting/linear_mirrored/training_config.txt deleted file mode 100644 index c58ff01..0000000 --- a/training/time_weighting/linear_mirrored/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0.0001 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting/linear_mirrored/training_log.csv b/training/time_weighting/linear_mirrored/training_log.csv deleted file mode 100644 index 28c0daf..0000000 --- a/training/time_weighting/linear_mirrored/training_log.csv +++ /dev/null @@ -1,1002 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,142.1143341064453 -2,62.141761779785156 -3,22.319393157958984 -4,11.238194465637207 -5,7.8776960372924805 -6,6.502230167388916 -7,5.834667205810547 -8,5.270786762237549 -9,4.75474739074707 -10,4.276384353637695 -11,3.9199726581573486 -12,3.6407549381256104 -13,3.492306709289551 -14,3.440305471420288 -15,3.408323049545288 -16,3.38447904586792 -17,3.3639886379241943 -18,3.3439502716064453 -19,3.322413682937622 -20,3.298474073410034 -21,3.2719016075134277 -22,3.2426583766937256 -23,3.2110393047332764 -24,3.1775176525115967 -25,3.1427016258239746 -26,3.1071629524230957 -27,3.0714950561523438 -28,3.03623366355896 -29,3.0019521713256836 -30,2.969174385070801 -31,2.938415765762329 -32,2.9101920127868652 -33,2.884929895401001 -34,2.862809896469116 
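
Each training_log.csv deleted in this diff follows the same two-column Epoch,Loss layout, logging epochs 0 through 1000. A short sketch for overlaying such logs on a log scale, which makes both the early transient and the slow tail visible; the paths refer to the files this diff removes, so point them at local copies:

import pandas as pd
import matplotlib.pyplot as plt

runs = {
    "inverse_squared_mirrored": "training/time_weighting/inverse_squared_mirrored/training_log.csv",
    "linear": "training/time_weighting/linear/training_log.csv",
    "linear_mirrored": "training/time_weighting/linear_mirrored/training_log.csv",
}
for label, path in runs.items():
    log = pd.read_csv(path)  # columns: Epoch, Loss
    plt.semilogy(log["Epoch"], log["Loss"], label=label)
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()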
-35,2.843726873397827 -36,2.8273420333862305 -37,2.812971830368042 -38,2.7997140884399414 -39,2.786623477935791 -40,2.7731006145477295 -41,2.758796215057373 -42,2.7435755729675293 -43,2.72745943069458 -44,2.7106399536132812 -45,2.6933884620666504 -46,2.6760177612304688 -47,2.6588542461395264 -48,2.642108678817749 -49,2.625850200653076 -50,2.6102638244628906 -51,2.5953495502471924 -52,2.5811238288879395 -53,2.5679352283477783 -54,2.5558316707611084 -55,2.5446524620056152 -56,2.5341880321502686 -57,2.5242950916290283 -58,2.5149128437042236 -59,2.5060648918151855 -60,2.4978249073028564 -61,2.4901485443115234 -62,2.482853412628174 -63,2.475785970687866 -64,2.4687836170196533 -65,2.46179461479187 -66,2.454899787902832 -67,2.4482343196868896 -68,2.4420177936553955 -69,2.4364333152770996 -70,2.4316158294677734 -71,2.427623748779297 -72,2.4244585037231445 -73,2.4220457077026367 -74,2.42034649848938 -75,2.4192795753479004 -76,2.418673038482666 -77,2.4183595180511475 -78,2.4181480407714844 -79,2.4178614616394043 -80,2.4174399375915527 -81,2.416886806488037 -82,2.416243076324463 -83,2.4156501293182373 -84,2.4151535034179688 -85,2.4147605895996094 -86,2.4144647121429443 -87,2.414243221282959 -88,2.4140279293060303 -89,2.413788080215454 -90,2.4134902954101562 -91,2.4131226539611816 -92,2.412693977355957 -93,2.412219524383545 -94,2.411757707595825 -95,2.411318063735962 -96,2.4109082221984863 -97,2.410576820373535 -98,2.410263776779175 -99,2.409986972808838 -100,2.40973162651062 -101,2.4094576835632324 -102,2.4091904163360596 -103,2.4089102745056152 -104,2.4086203575134277 -105,2.4083287715911865 -106,2.4080309867858887 -107,2.4077372550964355 -108,2.40744948387146 -109,2.407172203063965 -110,2.4069180488586426 -111,2.4066789150238037 -112,2.40645170211792 -113,2.4062306880950928 -114,2.406019449234009 -115,2.4058139324188232 -116,2.4056100845336914 -117,2.405407428741455 -118,2.405207872390747 -119,2.405013084411621 -120,2.4048235416412354 -121,2.40463924407959 -122,2.4044628143310547 -123,2.404291868209839 -124,2.4041261672973633 -125,2.4039621353149414 -126,2.4037985801696777 -127,2.4036340713500977 -128,2.4034676551818848 -129,2.4032983779907227 -130,2.403130054473877 -131,2.402961015701294 -132,2.4027907848358154 -133,2.4026198387145996 -134,2.402447462081909 -135,2.402275323867798 -136,2.40210223197937 -137,2.4019289016723633 -138,2.4017562866210938 -139,2.4015848636627197 -140,2.4014155864715576 -141,2.4012489318847656 -142,2.4010865688323975 -143,2.400925874710083 -144,2.4007673263549805 -145,2.4006118774414062 -146,2.400455951690674 -147,2.4003000259399414 -148,2.4001495838165283 -149,2.4000065326690674 -150,2.399876117706299 -151,2.399751663208008 -152,2.399627208709717 -153,2.399503231048584 -154,2.3993778228759766 -155,2.39925217628479 -156,2.399127244949341 -157,2.3990042209625244 -158,2.398879289627075 -159,2.3987538814544678 -160,2.3986284732818604 -161,2.398502826690674 -162,2.3983771800994873 -163,2.398252248764038 -164,2.3981289863586426 -165,2.3980062007904053 -166,2.3978841304779053 -167,2.3977630138397217 -168,2.3976433277130127 -169,2.39752459526062 -170,2.397407054901123 -171,2.3972902297973633 -172,2.3971736431121826 -173,2.397059679031372 -174,2.3969502449035645 -175,2.396841526031494 -176,2.3967342376708984 -177,2.3966269493103027 -178,2.3965208530426025 -179,2.3964157104492188 -180,2.396310329437256 -181,2.3962056636810303 -182,2.396101474761963 -183,2.395998239517212 -184,2.395895481109619 -185,2.395794153213501 -186,2.3956940174102783 -187,2.3955957889556885 
-188,2.3954989910125732 -189,2.395402431488037 -190,2.3953053951263428 -191,2.3952083587646484 -192,2.3951122760772705 -193,2.395016670227051 -194,2.3949222564697266 -195,2.3948280811309814 -196,2.3947348594665527 -197,2.3946423530578613 -198,2.394550323486328 -199,2.394458055496216 -200,2.394367218017578 -201,2.3942766189575195 -202,2.394186019897461 -203,2.394096612930298 -204,2.394007921218872 -205,2.3939201831817627 -206,2.3938324451446533 -207,2.3937463760375977 -208,2.393660306930542 -209,2.3935744762420654 -210,2.393489360809326 -211,2.393404722213745 -212,2.393320083618164 -213,2.393235445022583 -214,2.3931503295898438 -215,2.3930654525756836 -216,2.3929800987243652 -217,2.392894744873047 -218,2.392810106277466 -219,2.3927249908447266 -220,2.3926405906677246 -221,2.392556667327881 -222,2.392472743988037 -223,2.3923892974853516 -224,2.3923051357269287 -225,2.3922219276428223 -226,2.392138957977295 -227,2.3920555114746094 -228,2.391972780227661 -229,2.391890525817871 -230,2.39180850982666 -231,2.3917276859283447 -232,2.3916468620300293 -233,2.391566038131714 -234,2.391486644744873 -235,2.391406774520874 -236,2.391327381134033 -237,2.3912487030029297 -238,2.3911705017089844 -239,2.3910927772521973 -240,2.3910155296325684 -241,2.3909389972686768 -242,2.3908627033233643 -243,2.390787363052368 -244,2.3907124996185303 -245,2.39064621925354 -246,2.3905460834503174 -247,2.390484094619751 -248,2.3904473781585693 -249,2.3902885913848877 -250,2.390082836151123 -251,2.3899340629577637 -252,2.3898673057556152 -253,2.3897695541381836 -254,2.389613628387451 -255,2.389516592025757 -256,2.3894190788269043 -257,2.3893332481384277 -258,2.3892581462860107 -259,2.3891725540161133 -260,2.38908314704895 -261,2.3890039920806885 -262,2.3889405727386475 -263,2.388890027999878 -264,2.3888373374938965 -265,2.3887486457824707 -266,2.3886401653289795 -267,2.3885488510131836 -268,2.388475179672241 -269,2.3884029388427734 -270,2.388324022293091 -271,2.3882429599761963 -272,2.3881704807281494 -273,2.388111114501953 -274,2.3880488872528076 -275,2.3879730701446533 -276,2.3878884315490723 -277,2.387808322906494 -278,2.3877365589141846 -279,2.3876707553863525 -280,2.3876070976257324 -281,2.3875434398651123 -282,2.387482166290283 -283,2.3874261379241943 -284,2.3873724937438965 -285,2.387317180633545 -286,2.3872578144073486 -287,2.3871965408325195 -288,2.3871371746063232 -289,2.387079954147339 -290,2.3870229721069336 -291,2.386967658996582 -292,2.386913776397705 -293,2.3868582248687744 -294,2.3868038654327393 -295,2.3867483139038086 -296,2.3866946697235107 -297,2.3866374492645264 -298,2.3865790367126465 -299,2.3865227699279785 -300,2.386469602584839 -301,2.386418104171753 -302,2.3863630294799805 -303,2.386312961578369 -304,2.386260509490967 -305,2.386202335357666 -306,2.3861424922943115 -307,2.38608455657959 -308,2.3860301971435547 -309,2.3859755992889404 -310,2.3859193325042725 -311,2.385861873626709 -312,2.3858044147491455 -313,2.385746717453003 -314,2.3856866359710693 -315,2.385622262954712 -316,2.3855578899383545 -317,2.3854942321777344 -318,2.3854310512542725 -319,2.3853676319122314 -320,2.3853044509887695 -321,2.3852412700653076 -322,2.385175943374634 -323,2.3851096630096436 -324,2.3850433826446533 -325,2.384977340698242 -326,2.3849129676818848 -327,2.384850263595581 -328,2.3847904205322266 -329,2.3847358226776123 -330,2.3846852779388428 -331,2.3846323490142822 -332,2.384578227996826 -333,2.384526014328003 -334,2.3844797611236572 -335,2.3844246864318848 -336,2.384378433227539 -337,2.3843295574188232 
-338,2.384275197982788 -339,2.3842175006866455 -340,2.3841605186462402 -341,2.3841066360473633 -342,2.3840584754943848 -343,2.384000778198242 -344,2.3839492797851562 -345,2.3838984966278076 -346,2.383843421936035 -347,2.383784770965576 -348,2.3837268352508545 -349,2.3836729526519775 -350,2.3836252689361572 -351,2.3835668563842773 -352,2.3835196495056152 -353,2.383470058441162 -354,2.383413791656494 -355,2.383354663848877 -356,2.3832993507385254 -357,2.3832521438598633 -358,2.3831944465637207 -359,2.383146286010742 -360,2.3830976486206055 -361,2.3830435276031494 -362,2.382986307144165 -363,2.382929563522339 -364,2.382880926132202 -365,2.3828232288360596 -366,2.3827738761901855 -367,2.382723331451416 -368,2.3826687335968018 -369,2.382611036300659 -370,2.382554054260254 -371,2.382509231567383 -372,2.3824462890625 -373,2.382397413253784 -374,2.3823461532592773 -375,2.3822898864746094 -376,2.3822319507598877 -377,2.3821771144866943 -378,2.382133960723877 -379,2.3820712566375732 -380,2.3820226192474365 -381,2.3819730281829834 -382,2.3819193840026855 -383,2.3818628787994385 -384,2.3818085193634033 -385,2.3817617893218994 -386,2.381704330444336 -387,2.3816540241241455 -388,2.381603717803955 -389,2.381549835205078 -390,2.38149356842041 -391,2.3814384937286377 -392,2.3813891410827637 -393,2.381333112716675 -394,2.3812825679779053 -395,2.3812313079833984 -396,2.3811771869659424 -397,2.3811209201812744 -398,2.3810667991638184 -399,2.3810172080993652 -400,2.3809611797332764 -401,2.380913734436035 -402,2.3808634281158447 -403,2.380807638168335 -404,2.380751132965088 -405,2.38070011138916 -406,2.380646228790283 -407,2.38059401512146 -408,2.380542516708374 -409,2.3804879188537598 -410,2.380434036254883 -411,2.380382537841797 -412,2.3803281784057617 -413,2.380276918411255 -414,2.380223274230957 -415,2.3801681995391846 -416,2.380113124847412 -417,2.3800628185272217 -418,2.38000750541687 -419,2.3799567222595215 -420,2.3799047470092773 -421,2.3798511028289795 -422,2.3797969818115234 -423,2.3797450065612793 -424,2.379695177078247 -425,2.3796448707580566 -426,2.3795931339263916 -427,2.379545211791992 -428,2.3794963359832764 -429,2.379444122314453 -430,2.3793914318084717 -431,2.379340648651123 -432,2.3792967796325684 -433,2.379241943359375 -434,2.379194736480713 -435,2.3791468143463135 -436,2.3790946006774902 -437,2.3790411949157715 -438,2.378990650177002 -439,2.378941535949707 -440,2.3788905143737793 -441,2.3788371086120605 -442,2.3787832260131836 -443,2.378730058670044 -444,2.3786771297454834 -445,2.3786234855651855 -446,2.3785715103149414 -447,2.3785226345062256 -448,2.378471851348877 -449,2.3784234523773193 -450,2.3783719539642334 -451,2.378319025039673 -452,2.3782663345336914 -453,2.3782243728637695 -454,2.3781683444976807 -455,2.3781235218048096 -456,2.3780758380889893 -457,2.378024101257324 -458,2.3779711723327637 -459,2.3779184818267822 -460,2.377868890762329 -461,2.3778188228607178 -462,2.377765655517578 -463,2.3777098655700684 -464,2.377654552459717 -465,2.377601146697998 -466,2.3775482177734375 -467,2.377495050430298 -468,2.3774425983428955 -469,2.3773953914642334 -470,2.3773443698883057 -471,2.377297878265381 -472,2.377249002456665 -473,2.377197504043579 -474,2.3771462440490723 -475,2.377096176147461 -476,2.3770501613616943 -477,2.3770012855529785 -478,2.3769521713256836 -479,2.3769006729125977 -480,2.3768470287323 -481,2.376793146133423 -482,2.3767385482788086 -483,2.37668514251709 -484,2.3766322135925293 -485,2.3765804767608643 -486,2.37652850151062 -487,2.376476764678955 -488,2.376427173614502 
-489,2.376375675201416 -490,2.37632417678833 -491,2.376274824142456 -492,2.3762240409851074 -493,2.3761749267578125 -494,2.376124143600464 -495,2.376072883605957 -496,2.3760199546813965 -497,2.375969171524048 -498,2.3759214878082275 -499,2.375871419906616 -500,2.3758225440979004 -501,2.3757739067077637 -502,2.3757216930389404 -503,2.375669002532959 -504,2.375617027282715 -505,2.37556529045105 -506,2.3755133152008057 -507,2.3754630088806152 -508,2.3754098415374756 -509,2.375354290008545 -510,2.375304937362671 -511,2.3752529621124268 -512,2.3752002716064453 -513,2.3751513957977295 -514,2.37510085105896 -515,2.3750476837158203 -516,2.374995470046997 -517,2.3749425411224365 -518,2.374886989593506 -519,2.37483286857605 -520,2.374781847000122 -521,2.374729871749878 -522,2.3746769428253174 -523,2.3746261596679688 -524,2.3745734691619873 -525,2.374525308609009 -526,2.3744754791259766 -527,2.374425172805786 -528,2.3743791580200195 -529,2.374325752258301 -530,2.3742761611938477 -531,2.37422251701355 -532,2.3741695880889893 -533,2.3741204738616943 -534,2.3740689754486084 -535,2.3740181922912598 -536,2.3739664554595947 -537,2.373913288116455 -538,2.37385892868042 -539,2.3738043308258057 -540,2.3737504482269287 -541,2.3736958503723145 -542,2.3736422061920166 -543,2.3735904693603516 -544,2.3735382556915283 -545,2.3734841346740723 -546,2.3734326362609863 -547,2.373382806777954 -548,2.3733325004577637 -549,2.373284339904785 -550,2.373234748840332 -551,2.373183488845825 -552,2.3731319904327393 -553,2.373079538345337 -554,2.3730273246765137 -555,2.3729748725891113 -556,2.3729231357574463 -557,2.372871160507202 -558,2.372819662094116 -559,2.372767448425293 -560,2.3727142810821533 -561,2.372661828994751 -562,2.3726093769073486 -563,2.3725576400756836 -564,2.3725054264068604 -565,2.3724541664123535 -566,2.372401714324951 -567,2.3723487854003906 -568,2.3722968101501465 -569,2.3722445964813232 -570,2.3721930980682373 -571,2.3721418380737305 -572,2.37209153175354 -573,2.372039794921875 -574,2.37198805809021 -575,2.371936082839966 -576,2.371884346008301 -577,2.371833086013794 -578,2.371782064437866 -579,2.3717308044433594 -580,2.3716790676116943 -581,2.3716249465942383 -582,2.3715734481811523 -583,2.3715219497680664 -584,2.3714704513549805 -585,2.37142014503479 -586,2.3713698387145996 -587,2.371317148208618 -588,2.371264696121216 -589,2.371213436126709 -590,2.3711624145507812 -591,2.371112108230591 -592,2.3710625171661377 -593,2.371011972427368 -594,2.3709616661071777 -595,2.370911121368408 -596,2.3708600997924805 -597,2.3708090782165527 -598,2.370758056640625 -599,2.3707072734832764 -600,2.3706562519073486 -601,2.370604991912842 -602,2.3705523014068604 -603,2.3705005645751953 -604,2.3704488277435303 -605,2.3703978061676025 -606,2.370347023010254 -607,2.370296001434326 -608,2.3702447414398193 -609,2.370192289352417 -610,2.370140314102173 -611,2.370089054107666 -612,2.3700382709503174 -613,2.3699874877929688 -614,2.369935989379883 -615,2.3698840141296387 -616,2.369830846786499 -617,2.3697783946990967 -618,2.3697259426116943 -619,2.3696742057800293 -620,2.3696236610412598 -621,2.3695735931396484 -622,2.3695242404937744 -623,2.3694753646850586 -624,2.369425058364868 -625,2.3693740367889404 -626,2.369323253631592 -627,2.369272470474243 -628,2.3692221641540527 -629,2.3691720962524414 -630,2.3691229820251465 -631,2.3690707683563232 -632,2.369018077850342 -633,2.3689663410186768 -634,2.368915319442749 -635,2.3688650131225586 -636,2.368814706802368 -637,2.3687644004821777 -638,2.36871075630188 -639,2.368656873703003 
-640,2.3686044216156006 -641,2.36855149269104 -642,2.368499279022217 -643,2.3684463500976562 -644,2.368393898010254 -645,2.3683395385742188 -646,2.3682868480682373 -647,2.3682339191436768 -648,2.3681812286376953 -649,2.368129253387451 -650,2.368077516555786 -651,2.368023157119751 -652,2.367969036102295 -653,2.3679161071777344 -654,2.367863893508911 -655,2.367811679840088 -656,2.3677592277526855 -657,2.3677046298980713 -658,2.367649793624878 -659,2.3675947189331055 -660,2.367539167404175 -661,2.367483615875244 -662,2.3674278259277344 -663,2.367372512817383 -664,2.367316722869873 -665,2.3672592639923096 -666,2.3672025203704834 -667,2.3671464920043945 -668,2.367091417312622 -669,2.367037534713745 -670,2.36698055267334 -671,2.36692476272583 -672,2.3668692111968994 -673,2.3668148517608643 -674,2.366760492324829 -675,2.366705894470215 -676,2.3666508197784424 -677,2.3665952682495117 -678,2.3665401935577393 -679,2.366485357284546 -680,2.3664309978485107 -681,2.3663744926452637 -682,2.366316556930542 -683,2.3662619590759277 -684,2.3662049770355225 -685,2.366147041320801 -686,2.3660919666290283 -687,2.3660311698913574 -688,2.3659751415252686 -689,2.3659164905548096 -690,2.3658621311187744 -691,2.3658039569854736 -692,2.365746259689331 -693,2.365689516067505 -694,2.3656299114227295 -695,2.3655731678009033 -696,2.3655166625976562 -697,2.3654592037200928 -698,2.3653998374938965 -699,2.3653368949890137 -700,2.3652820587158203 -701,2.365220785140991 -702,2.365164041519165 -703,2.3651063442230225 -704,2.365044355392456 -705,2.3649861812591553 -706,2.3649284839630127 -707,2.364872455596924 -708,2.364814519882202 -709,2.3647513389587402 -710,2.3646929264068604 -711,2.364633798599243 -712,2.3645753860473633 -713,2.364518880844116 -714,2.3644564151763916 -715,2.3643925189971924 -716,2.3643360137939453 -717,2.364271402359009 -718,2.3642122745513916 -719,2.3641538619995117 -720,2.364086151123047 -721,2.3640265464782715 -722,2.363966464996338 -723,2.363913059234619 -724,2.363861560821533 -725,2.363797903060913 -726,2.36374831199646 -727,2.363687515258789 -728,2.363630771636963 -729,2.3635759353637695 -730,2.3635103702545166 -731,2.3634531497955322 -732,2.363394260406494 -733,2.3633434772491455 -734,2.363283634185791 -735,2.3632254600524902 -736,2.3631746768951416 -737,2.3631129264831543 -738,2.36305832862854 -739,2.3630011081695557 -740,2.362943172454834 -741,2.3628859519958496 -742,2.3628273010253906 -743,2.3627712726593018 -744,2.3627097606658936 -745,2.3626482486724854 -746,2.362593650817871 -747,2.3625338077545166 -748,2.3624744415283203 -749,2.3624207973480225 -750,2.362359046936035 -751,2.3623011112213135 -752,2.362243175506592 -753,2.3621814250946045 -754,2.362123489379883 -755,2.3620660305023193 -756,2.362006664276123 -757,2.361950397491455 -758,2.36189603805542 -759,2.3618366718292236 -760,2.3617780208587646 -761,2.3617262840270996 -762,2.3616721630096436 -763,2.361612319946289 -764,2.361560106277466 -765,2.3615031242370605 -766,2.361448049545288 -767,2.361394166946411 -768,2.3613317012786865 -769,2.361272096633911 -770,2.3612194061279297 -771,2.3611605167388916 -772,2.361102342605591 -773,2.3610472679138184 -774,2.3609869480133057 -775,2.3609299659729004 -776,2.3608813285827637 -777,2.3608241081237793 -778,2.360767126083374 -779,2.3607163429260254 -780,2.3606619834899902 -781,2.3606064319610596 -782,2.3605570793151855 -783,2.36049485206604 -784,2.3604366779327393 -785,2.360383987426758 -786,2.3603274822235107 -787,2.36027193069458 -788,2.3602135181427 -789,2.360152006149292 -790,2.3600945472717285 
-791,2.3600425720214844 -792,2.3599843978881836 -793,2.359926223754883 -794,2.359870433807373 -795,2.3598155975341797 -796,2.3597617149353027 -797,2.3597002029418945 -798,2.3596441745758057 -799,2.359591007232666 -800,2.3595385551452637 -801,2.359487533569336 -802,2.359426259994507 -803,2.359365701675415 -804,2.3593108654022217 -805,2.359255790710449 -806,2.3592007160186768 -807,2.359142303466797 -808,2.3590846061706543 -809,2.3590288162231445 -810,2.358973979949951 -811,2.35890793800354 -812,2.35884952545166 -813,2.358795166015625 -814,2.3587374687194824 -815,2.3586790561676025 -816,2.358621597290039 -817,2.358564615249634 -818,2.3585104942321777 -819,2.3584494590759277 -820,2.3583953380584717 -821,2.3583366870880127 -822,2.3582797050476074 -823,2.3582258224487305 -824,2.358163356781006 -825,2.358105421066284 -826,2.3580498695373535 -827,2.3579964637756348 -828,2.3579392433166504 -829,2.3578810691833496 -830,2.357825756072998 -831,2.357769250869751 -832,2.3577115535736084 -833,2.357652425765991 -834,2.357592821121216 -835,2.3575329780578613 -836,2.3574748039245605 -837,2.357416868209839 -838,2.3573575019836426 -839,2.357297897338867 -840,2.357238531112671 -841,2.3571794033050537 -842,2.3571231365203857 -843,2.3570661544799805 -844,2.357006072998047 -845,2.3569459915161133 -846,2.356886148452759 -847,2.3568270206451416 -848,2.3567678928375244 -849,2.35670804977417 -850,2.356649398803711 -851,2.3565917015075684 -852,2.356534957885742 -853,2.3564748764038086 -854,2.356414794921875 -855,2.3563573360443115 -856,2.356304407119751 -857,2.356241464614868 -858,2.3561811447143555 -859,2.3561205863952637 -860,2.3560659885406494 -861,2.356008529663086 -862,2.3559436798095703 -863,2.3558828830718994 -864,2.355823516845703 -865,2.3557660579681396 -866,2.355698823928833 -867,2.355640172958374 -868,2.3555843830108643 -869,2.3555288314819336 -870,2.355465888977051 -871,2.3554062843322754 -872,2.3553524017333984 -873,2.3552887439727783 -874,2.3552281856536865 -875,2.3551695346832275 -876,2.355112314224243 -877,2.355048894882202 -878,2.3549892902374268 -879,2.354933261871338 -880,2.3548743724823 -881,2.3548102378845215 -882,2.354748487472534 -883,2.354696750640869 -884,2.354633092880249 -885,2.3545725345611572 -886,2.354513168334961 -887,2.3544564247131348 -888,2.3543972969055176 -889,2.3543381690979004 -890,2.354275941848755 -891,2.3542184829711914 -892,2.3541624546051025 -893,2.354099988937378 -894,2.3540499210357666 -895,2.35398530960083 -896,2.3539278507232666 -897,2.353864908218384 -898,2.3537988662719727 -899,2.353741407394409 -900,2.3536832332611084 -901,2.353614568710327 -902,2.353553295135498 -903,2.353489637374878 -904,2.3534231185913086 -905,2.3533577919006348 -906,2.353294610977173 -907,2.353236436843872 -908,2.3531689643859863 -909,2.353102922439575 -910,2.353044271469116 -911,2.3529937267303467 -912,2.3529388904571533 -913,2.3529083728790283 -914,2.3528354167938232 -915,2.352799654006958 -916,2.352825880050659 -917,2.352677345275879 -918,2.3526923656463623 -919,2.3527307510375977 -920,2.3524980545043945 -921,2.3525869846343994 -922,2.352404832839966 -923,2.3524205684661865 -924,2.352259874343872 -925,2.3522439002990723 -926,2.3521676063537598 -927,2.352079391479492 -928,2.3520076274871826 -929,2.3518905639648438 -930,2.351850748062134 -931,2.351750612258911 -932,2.351719379425049 -933,2.3515982627868652 -934,2.3515806198120117 -935,2.3514842987060547 -936,2.35146164894104 -937,2.3514130115509033 -938,2.351362943649292 -939,2.3513059616088867 -940,2.3512325286865234 -941,2.3511879444122314 
-942,2.351087808609009 -943,2.3510701656341553 -944,2.350959539413452 -945,2.3509278297424316 -946,2.3508448600769043 -947,2.3507864475250244 -948,2.3507239818573 -949,2.350644111633301 -950,2.3506112098693848 -951,2.350529432296753 -952,2.350477933883667 -953,2.3504223823547363 -954,2.350344657897949 -955,2.350282669067383 -956,2.3502037525177 -957,2.3501369953155518 -958,2.3500847816467285 -959,2.350020408630371 -960,2.3499536514282227 -961,2.3498871326446533 -962,2.349813461303711 -963,2.349750280380249 -964,2.349677085876465 -965,2.349611759185791 -966,2.349541664123535 -967,2.3494653701782227 -968,2.3493969440460205 -969,2.3493220806121826 -970,2.3492844104766846 -971,2.349200963973999 -972,2.349130392074585 -973,2.3490493297576904 -974,2.3489770889282227 -975,2.348931312561035 -976,2.348841905593872 -977,2.3487794399261475 -978,2.3487026691436768 -979,2.3486218452453613 -980,2.3485817909240723 -981,2.348536491394043 -982,2.3484487533569336 -983,2.348400115966797 -984,2.348339796066284 -985,2.3482720851898193 -986,2.3481650352478027 -987,2.348112106323242 -988,2.348062038421631 -989,2.3480706214904785 -990,2.34798526763916 -991,2.348048210144043 -992,2.347968339920044 -993,2.347984790802002 -994,2.3477776050567627 -995,2.3476219177246094 -996,2.347553253173828 -997,2.347459077835083 -998,2.3473622798919678 -999,2.347337245941162 -1000,2.3473587036132812
diff --git a/training/time_weighting/quadratic/training_config.txt b/training/time_weighting/quadratic/training_config.txt
deleted file mode 100644
index c258500..0000000
--- a/training/time_weighting/quadratic/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**2) * t_span**2
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting/quadratic/training_log.csv b/training/time_weighting/quadratic/training_log.csv
deleted file mode 100644
index c0e4f7f..0000000
--- a/training/time_weighting/quadratic/training_log.csv
+++ /dev/null
@@ -1,1002 +0,0 @@
-Epoch,Loss
-0,698.05126953125 -1,266.13165283203125 -2,180.9208221435547 -3,104.00546264648438 -4,54.60481643676758 -5,30.94932746887207 -6,22.51517677307129 -7,20.3201847076416 -8,24.417190551757812 -9,21.184478759765625 -10,19.285667419433594 -11,25.639116287231445 -12,12.64896297454834 -13,9.087910652160645 -14,7.8265533447265625 -15,6.038062572479248 -16,5.3417253494262695 -17,5.200528144836426 -18,4.129286766052246 -19,3.7515759468078613 -20,3.7843384742736816 -21,3.7928402423858643 -22,3.743119239807129 -23,3.6556856632232666 -24,3.6795759201049805 -25,3.6719915866851807 -26,3.6808054447174072 -27,3.6211740970611572 -28,3.647463083267212 -29,3.623835563659668 -30,3.5715689659118652 -31,3.5070998668670654 -32,3.4377329349517822 -33,3.3644187450408936 -34,3.2880139350891113 -35,3.220442771911621 -36,3.1718218326568604 -37,3.1350595951080322 -38,3.1415863037109375 -39,3.072453022003174 -40,3.0433096885681152 -41,3.0088694095611572 -42,2.970716714859009 -43,2.930523633956909 -44,2.8897573947906494 -45,2.849764823913574 -46,2.8118765354156494 -47,2.7770540714263916 -48,2.7452893257141113 -49,2.715641975402832 -50,2.6863250732421875 -51,2.655608892440796 -52,2.622565269470215 -53,2.5870108604431152 -54,2.549340009689331 -55,2.5102901458740234 -56,2.470669984817505 -57,2.430964946746826 -58,2.391386032104492 -59,2.351856231689453 -60,2.3121144771575928 -61,2.2717812061309814 -62,2.2323310375213623 -63,2.2125449180603027 -64,2.1339738368988037 -65,2.053997278213501 -66,1.8322830200195312 -67,1.4438642263412476 -68,1.3662164211273193 -69,1.307007074356079 -70,1.249576210975647 -71,1.1973059177398682 -72,1.1503056287765503 -73,1.1079151630401611 -74,1.0692811012268066 -75,1.0348864793777466 -76,1.005520224571228 -77,0.9784612655639648 -78,0.9504485130310059 -79,0.9214949607849121 -80,0.8923044204711914 -81,0.8641082048416138 -82,0.8372215032577515 -83,0.8100785613059998 -84,0.7787714004516602 -85,0.7355623245239258 -86,0.6562596559524536 -87,0.5937755107879639 -88,0.6388580203056335 -89,0.6594637036323547 -90,0.6716957688331604 -91,0.6784345507621765 -92,0.679638147354126 -93,0.6751080751419067 -94,0.6654568314552307 -95,0.6516889929771423 -96,0.6344155669212341 -97,0.6136282682418823 -98,0.5890140533447266 -99,0.561702311038971 -100,0.5449637174606323 -101,0.5710742473602295 -102,0.5824316740036011 -103,0.5653349757194519 -104,0.534417450428009 -105,0.5300542712211609 -106,0.5380578637123108 -107,0.5429190993309021 -108,0.5429460406303406 -109,0.5355566740036011 -110,0.5242801904678345 -111,0.5133663415908813 -112,0.5079405903816223 -113,0.5140036344528198 -114,0.5182985067367554 -115,0.5112472176551819 -116,0.5016788244247437 -117,0.5001031756401062 -118,0.5022461414337158 -119,0.5027704238891602 -120,0.5000149607658386 -121,0.49428611993789673 -122,0.4886637032032013 -123,0.48765891790390015 -124,0.4891665577888489 -125,0.48671847581863403 -126,0.4813331961631775 -127,0.4785066545009613 -128,0.4783931076526642 -129,0.4776848256587982 -130,0.4751109778881073
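A detail worth noting about the loss_fn shared by these configs: weights.view(-1, 1) yields shape [t_points, 1], which broadcasts against theta only if state_traj is time-major, i.e. [t_points, batch_size, state_dim]; the batch-first sizes written in the config comments do not match that reshape. A shape check under the time-major assumption, with the quadratic weight inlined (the random trajectory and dimensions are illustrative stand-ins; 22 matches the number of training cases listed above):

    import torch

    t_points, batch_size = 1000, 22                    # 22 training cases per config
    state_traj = torch.randn(t_points, batch_size, 4)  # assumed time-major layout
    t_span = torch.linspace(0, 10, t_points)

    theta = state_traj[:, :, 0]                        # [t_points, batch_size]
    desired_theta = state_traj[:, :, 3]                # [t_points, batch_size]
    weights = (0.01 + (0.99 / 10**2) * t_span**2).view(-1, 1)   # quadratic, [t_points, 1]
    loss = torch.mean(weights * (theta - desired_theta) ** 2)   # broadcasts over the batch dim
    print(loss)                                        # scalar tensor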
-131,0.471744179725647 -132,0.4694240987300873 -133,0.4686708152294159 -134,0.46760094165802 -135,0.46504661440849304 -136,0.46232256293296814 -137,0.4606946110725403 -138,0.4595760405063629 -139,0.4579043984413147 -140,0.45557427406311035 -141,0.4534185826778412 -142,0.4520779550075531 -143,0.45088160037994385 -144,0.4490382969379425 -145,0.4470601975917816 -146,0.44569313526153564 -147,0.4445764422416687 -148,0.4430529475212097 -149,0.44125959277153015 -150,0.4397781491279602 -151,0.43856915831565857 -152,0.4371195435523987 -153,0.43547549843788147 -154,0.4340726137161255 -155,0.43286848068237305 -156,0.43151819705963135 -157,0.43002185225486755 -158,0.42865967750549316 -159,0.4274561405181885 -160,0.4261654019355774 -161,0.4247716963291168 -162,0.4234817624092102 -163,0.4223022758960724 -164,0.42107290029525757 -165,0.41978538036346436 -166,0.4185687005519867 -167,0.4174247682094574 -168,0.41623935103416443 -169,0.4150209426879883 -170,0.4138658046722412 -171,0.4127582907676697 -172,0.41162756085395813 -173,0.41048893332481384 -174,0.40939727425575256 -175,0.4083368182182312 -176,0.4072612226009369 -177,0.4061849117279053 -178,0.40514257550239563 -179,0.40412014722824097 -180,0.40308812260627747 -181,0.40205907821655273 -182,0.40105411410331726 -183,0.4000617563724518 -184,0.39906391501426697 -185,0.3980712890625 -186,0.3970967233181 -187,0.396130234003067 -188,0.3951597213745117 -189,0.39419642090797424 -190,0.3932483196258545 -191,0.3923085629940033 -192,0.3913710415363312 -193,0.39044132828712463 -194,0.3895238935947418 -195,0.3886142671108246 -196,0.3877083957195282 -197,0.3868102729320526 -198,0.38592272996902466 -199,0.3850415349006653 -200,0.3841647803783417 -201,0.38329464197158813 -202,0.38243284821510315 -203,0.3815770149230957 -204,0.3807262182235718 -205,0.37988218665122986 -206,0.37904438376426697 -207,0.3782113790512085 -208,0.377384752035141 -209,0.37656667828559875 -210,0.37575674057006836 -211,0.374953955411911 -212,0.3741580843925476 -213,0.37336957454681396 -214,0.3725886046886444 -215,0.37181395292282104 -216,0.37104588747024536 -217,0.37028470635414124 -218,0.3695293366909027 -219,0.36878058314323425 -220,0.368037611246109 -221,0.36730074882507324 -222,0.3665703237056732 -223,0.3658466339111328 -224,0.36512866616249084 -225,0.3644179105758667 -226,0.36371350288391113 -227,0.3630155324935913 -228,0.3623233735561371 -229,0.3616369664669037 -230,0.3609567880630493 -231,0.36028289794921875 -232,0.35961657762527466 -233,0.3589547276496887 -234,0.3582969605922699 -235,0.35764244198799133 -236,0.35699132084846497 -237,0.35634323954582214 -238,0.3556976020336151 -239,0.3550547659397125 -240,0.35441529750823975 -241,0.35377833247184753 -242,0.35314467549324036 -243,0.3525139093399048 -244,0.3518862724304199 -245,0.35126176476478577 -246,0.3506399691104889 -247,0.3500215709209442 -248,0.3494051396846771 -249,0.3487917184829712 -250,0.3481806814670563 -251,0.34757155179977417 -252,0.3469642996788025 -253,0.3463596701622009 -254,0.3457571864128113 -255,0.3451571464538574 -256,0.344560444355011 -257,0.3439667820930481 -258,0.3433755934238434 -259,0.3427867889404297 -260,0.3422003388404846 -261,0.3416162431240082 -262,0.3410363793373108 -263,0.3404596149921417 -264,0.33988702297210693 -265,0.33931785821914673 -266,0.3387526571750641 -267,0.3381911814212799 -268,0.33763304352760315 -269,0.3370784819126129 -270,0.3365272581577301 -271,0.3359793722629547 -272,0.3354349732398987 -273,0.33489343523979187 -274,0.33435550332069397 -275,0.3338211476802826 -276,0.3332904577255249 
-277,0.332762748003006 -278,0.33223798871040344 -279,0.331716388463974 -280,0.3311971426010132 -281,0.33068084716796875 -282,0.33016711473464966 -283,0.3296561539173126 -284,0.3291468620300293 -285,0.3286401927471161 -286,0.3281356990337372 -287,0.327633261680603 -288,0.327132910490036 -289,0.32663455605506897 -290,0.32613804936408997 -291,0.3256432116031647 -292,0.3251502513885498 -293,0.32465842366218567 -294,0.3241685926914215 -295,0.32367998361587524 -296,0.3231925964355469 -297,0.3227066397666931 -298,0.32222163677215576 -299,0.3217380940914154 -300,0.32125571370124817 -301,0.3207806348800659 -302,0.32031115889549255 -303,0.3198462724685669 -304,0.31938493251800537 -305,0.31892699003219604 -306,0.31847092509269714 -307,0.3180164694786072 -308,0.3175623118877411 -309,0.3171084225177765 -310,0.3166542947292328 -311,0.31619954109191895 -312,0.3157444894313812 -313,0.3152894079685211 -314,0.3148341476917267 -315,0.31437942385673523 -316,0.31392550468444824 -317,0.3134722411632538 -318,0.31302040815353394 -319,0.3125694692134857 -320,0.31211966276168823 -321,0.3116708993911743 -322,0.31122273206710815 -323,0.31077489256858826 -324,0.3103272020816803 -325,0.30987927317619324 -326,0.30943167209625244 -327,0.3089834749698639 -328,0.3085356056690216 -329,0.30808719992637634 -330,0.30763816833496094 -331,0.30718815326690674 -332,0.3067372143268585 -333,0.30628493428230286 -334,0.305832177400589 -335,0.30537912249565125 -336,0.3049260079860687 -337,0.3044723868370056 -338,0.30401885509490967 -339,0.3035653531551361 -340,0.30311113595962524 -341,0.3026571273803711 -342,0.3022029995918274 -343,0.3017488420009613 -344,0.30129414796829224 -345,0.300839364528656 -346,0.300383985042572 -347,0.2999275028705597 -348,0.29947078227996826 -349,0.29901355504989624 -350,0.2985555827617645 -351,0.2980974018573761 -352,0.29763850569725037 -353,0.2971782386302948 -354,0.29671764373779297 -355,0.29625651240348816 -356,0.295795202255249 -357,0.29533329606056213 -358,0.29487091302871704 -359,0.29440805315971375 -360,0.29394373297691345 -361,0.2934795022010803 -362,0.29301488399505615 -363,0.2925495505332947 -364,0.29208365082740784 -365,0.2916165292263031 -366,0.29114875197410583 -367,0.2906799614429474 -368,0.2902097702026367 -369,0.2897385358810425 -370,0.2892654836177826 -371,0.2887907326221466 -372,0.2883133888244629 -373,0.28783419728279114 -374,0.28735217452049255 -375,0.2868673801422119 -376,0.28637996315956116 -377,0.28588974475860596 -378,0.2853964865207672 -379,0.2848993241786957 -380,0.2843986451625824 -381,0.28389468789100647 -382,0.2833864092826843 -383,0.28287553787231445 -384,0.2823609709739685 -385,0.28184419870376587 -386,0.2813228666782379 -387,0.28079575300216675 -388,0.28026285767555237 -389,0.27972424030303955 -390,0.27917951345443726 -391,0.27862831950187683 -392,0.2780705690383911 -393,0.27750611305236816 -394,0.2769375741481781 -395,0.2763649523258209 -396,0.27578699588775635 -397,0.27520304918289185 -398,0.27461278438568115 -399,0.27401554584503174 -400,0.27341166138648987 -401,0.2728021740913391 -402,0.2721861004829407 -403,0.2715640962123871 -404,0.27093636989593506 -405,0.27030327916145325 -406,0.26966509222984314 -407,0.26902294158935547 -408,0.26837676763534546 -409,0.2677288353443146 -410,0.26707953214645386 -411,0.26642879843711853 -412,0.2657768726348877 -413,0.26512381434440613 -414,0.2644692361354828 -415,0.263813853263855 -416,0.26315754652023315 -417,0.262501060962677 -418,0.261844277381897 -419,0.26118823885917664 -420,0.26053386926651 -421,0.2598806917667389 
-422,0.25922855734825134 -423,0.2585779130458832 -424,0.2579286992549896 -425,0.25728172063827515 -426,0.2566367983818054 -427,0.25599396228790283 -428,0.2553532123565674 -429,0.25471463799476624 -430,0.25407874584198 -431,0.25344496965408325 -432,0.2528137266635895 -433,0.2521856725215912 -434,0.25156170129776 -435,0.25094184279441833 -436,0.2503257393836975 -437,0.24971413612365723 -438,0.24910710752010345 -439,0.2485051304101944 -440,0.2479073405265808 -441,0.247313991189003 -442,0.2467256784439087 -443,0.24614177644252777 -444,0.2455621063709259 -445,0.24498721957206726 -446,0.24441641569137573 -447,0.24385032057762146 -448,0.24328841269016266 -449,0.24273064732551575 -450,0.2421777993440628 -451,0.2416279911994934 -452,0.2410825490951538 -453,0.24054105579853058 -454,0.24000206589698792 -455,0.23946595191955566 -456,0.23893049359321594 -457,0.23839765787124634 -458,0.23786768317222595 -459,0.23734140396118164 -460,0.23681935667991638 -461,0.2363017350435257 -462,0.2357882708311081 -463,0.23527872562408447 -464,0.23477290570735931 -465,0.23427070677280426 -466,0.23377281427383423 -467,0.2332792580127716 -468,0.23278993368148804 -469,0.23230469226837158 -470,0.23182444274425507 -471,0.23134782910346985 -472,0.23087628185749054 -473,0.2304089516401291 -474,0.2299463152885437 -475,0.22948837280273438 -476,0.22903428971767426 -477,0.22858458757400513 -478,0.2281394898891449 -479,0.22769881784915924 -480,0.22726169228553772 -481,0.22682899236679077 -482,0.22640074789524078 -483,0.2259759157896042 -484,0.22555482387542725 -485,0.2251373678445816 -486,0.22472414374351501 -487,0.22431352734565735 -488,0.22390732169151306 -489,0.22350461781024933 -490,0.22310522198677063 -491,0.22270822525024414 -492,0.22231505811214447 -493,0.22192597389221191 -494,0.2215423732995987 -495,0.221162348985672 -496,0.22078710794448853 -497,0.2204158455133438 -498,0.22004766762256622 -499,0.21968315541744232 -500,0.21932224929332733 -501,0.21896472573280334 -502,0.2186107039451599 -503,0.21825887262821198 -504,0.2179109752178192 -505,0.2175653576850891 -506,0.21722249686717987 -507,0.21688199043273926 -508,0.2165445238351822 -509,0.2162090241909027 -510,0.21587544679641724 -511,0.2155444175004959 -512,0.21521537005901337 -513,0.21488849818706512 -514,0.214563250541687 -515,0.21424010396003723 -516,0.2139180302619934 -517,0.2135978490114212 -518,0.21327906847000122 -519,0.2129610925912857 -520,0.21264390647411346 -521,0.21232762932777405 -522,0.2120119035243988 -523,0.21169693768024445 -524,0.21138307452201843 -525,0.2110694944858551 -526,0.21075694262981415 -527,0.2104446440935135 -528,0.21013256907463074 -529,0.20982122421264648 -530,0.20951060950756073 -531,0.20920003950595856 -532,0.20889067649841309 -533,0.20858198404312134 -534,0.20827335119247437 -535,0.20796515047550201 -536,0.20765843987464905 -537,0.20734989643096924 -538,0.20703744888305664 -539,0.20672288537025452 -540,0.20640692114830017 -541,0.20609036087989807 -542,0.20577454566955566 -543,0.2054593861103058 -544,0.20514661073684692 -545,0.20483814179897308 -546,0.2045341283082962 -547,0.20423491299152374 -548,0.2039414942264557 -549,0.20365376770496368 -550,0.20336683094501495 -551,0.20308056473731995 -552,0.20279429852962494 -553,0.20250898599624634 -554,0.20222344994544983 -555,0.2019389420747757 -556,0.20165427029132843 -557,0.20137038826942444 -558,0.2010868489742279 -559,0.20080377161502838 -560,0.20052124559879303 -561,0.20023943483829498 -562,0.19995847344398499 -563,0.19967809319496155 -564,0.19939783215522766 -565,0.19911834597587585 
-566,0.19883982837200165 -567,0.19856218993663788 -568,0.19828525185585022 -569,0.19800885021686554 -570,0.19773384928703308 -571,0.19745929539203644 -572,0.19718590378761292 -573,0.19691351056098938 -574,0.19664175808429718 -575,0.1963709443807602 -576,0.19610118865966797 -577,0.19583189487457275 -578,0.19556353986263275 -579,0.19529582560062408 -580,0.19502872228622437 -581,0.19476208090782166 -582,0.194496288895607 -583,0.19423171877861023 -584,0.19396749138832092 -585,0.19370482861995697 -586,0.19344313442707062 -587,0.19318225979804993 -588,0.1929226666688919 -589,0.19266383349895477 -590,0.19240517914295197 -591,0.1921473741531372 -592,0.1918899118900299 -593,0.19163298606872559 -594,0.19137705862522125 -595,0.1911212056875229 -596,0.1908656358718872 -597,0.19061101973056793 -598,0.1903567761182785 -599,0.19010315835475922 -600,0.18985049426555634 -601,0.18959854543209076 -602,0.1893472969532013 -603,0.1890968382358551 -604,0.1888473927974701 -605,0.18859808146953583 -606,0.1883499026298523 -607,0.18810246884822845 -608,0.1878555566072464 -609,0.18760842084884644 -610,0.18736250698566437 -611,0.18711663782596588 -612,0.18687132000923157 -613,0.18662668764591217 -614,0.18638232350349426 -615,0.18613898754119873 -616,0.18589574098587036 -617,0.18565335869789124 -618,0.1854115128517151 -619,0.18517029285430908 -620,0.18492963910102844 -621,0.1846892088651657 -622,0.18444979190826416 -623,0.18421076238155365 -624,0.18397238850593567 -625,0.18373441696166992 -626,0.18349730968475342 -627,0.1832602471113205 -628,0.18302419781684875 -629,0.18278856575489044 -630,0.1825537383556366 -631,0.18231940269470215 -632,0.1820862889289856 -633,0.18185420334339142 -634,0.18162260949611664 -635,0.18139170110225677 -636,0.18116191029548645 -637,0.18093295395374298 -638,0.18070438504219055 -639,0.18047693371772766 -640,0.18024997413158417 -641,0.18002384901046753 -642,0.17979851365089417 -643,0.17957383394241333 -644,0.1793493777513504 -645,0.17912429571151733 -646,0.17889821529388428 -647,0.17867015302181244 -648,0.17843875288963318 -649,0.17820411920547485 -650,0.17796514928340912 -651,0.17772319912910461 -652,0.17747913300991058 -653,0.177236407995224 -654,0.1769992858171463 -655,0.17677435278892517 -656,0.17656493186950684 -657,0.1763545721769333 -658,0.17614218592643738 -659,0.17592935264110565 -660,0.175716370344162 -661,0.17550386488437653 -662,0.17529095709323883 -663,0.17507785558700562 -664,0.1748644858598709 -665,0.17465169727802277 -666,0.1744394600391388 -667,0.17422805726528168 -668,0.17401732504367828 -669,0.17380689084529877 -670,0.17359653115272522 -671,0.17338673770427704 -672,0.17317713797092438 -673,0.17296800017356873 -674,0.1727590411901474 -675,0.1725499927997589 -676,0.17234067618846893 -677,0.17213068902492523 -678,0.1719246506690979 -679,0.17172203958034515 -680,0.1715203821659088 -681,0.17131797969341278 -682,0.17111442983150482 -683,0.1709088683128357 -684,0.1707022488117218 -685,0.17049555480480194 -686,0.1702893227338791 -687,0.17008478939533234 -688,0.16988149285316467 -689,0.1696784943342209 -690,0.16947516798973083 -691,0.16927096247673035 -692,0.16906540095806122 -693,0.1688593327999115 -694,0.1686534583568573 -695,0.16844849288463593 -696,0.1682441532611847 -697,0.16803976893424988 -698,0.1678358018398285 -699,0.16763092577457428 -700,0.1674257218837738 -701,0.16722147166728973 -702,0.1670173555612564 -703,0.1668139547109604 -704,0.1666111946105957 -705,0.16640907526016235 -706,0.16620726883411407 -707,0.1660061478614807 -708,0.16580505669116974 -709,0.16560503840446472 
-710,0.16540572047233582 -711,0.16520732641220093 -712,0.16500988602638245 -713,0.16481314599514008 -714,0.16461758315563202 -715,0.16442255675792694 -716,0.16422860324382782 -717,0.16403509676456451 -718,0.16384215652942657 -719,0.16365087032318115 -720,0.16346006095409393 -721,0.1632698029279709 -722,0.16308045387268066 -723,0.16289135813713074 -724,0.1627030372619629 -725,0.16251547634601593 -726,0.16232813894748688 -727,0.16214106976985931 -728,0.1619538813829422 -729,0.1617668718099594 -730,0.16157974302768707 -731,0.16139304637908936 -732,0.1612067073583603 -733,0.16102106869220734 -734,0.16083595156669617 -735,0.16065116226673126 -736,0.16046689450740814 -737,0.16028337180614471 -738,0.1600995659828186 -739,0.15991652011871338 -740,0.15973401069641113 -741,0.1595521867275238 -742,0.15937112271785736 -743,0.15919017791748047 -744,0.15900973975658417 -745,0.1588301807641983 -746,0.1586507260799408 -747,0.1584717184305191 -748,0.15829378366470337 -749,0.15811580419540405 -750,0.15793858468532562 -751,0.15776169300079346 -752,0.1575850248336792 -753,0.1574094444513321 -754,0.15723372995853424 -755,0.1570596545934677 -756,0.15688635408878326 -757,0.15671439468860626 -758,0.15654338896274567 -759,0.15637336671352386 -760,0.15620388090610504 -761,0.15603522956371307 -762,0.1558672934770584 -763,0.15570004284381866 -764,0.15553371608257294 -765,0.1553679257631302 -766,0.15520305931568146 -767,0.1550387293100357 -768,0.1548752635717392 -769,0.1547122299671173 -770,0.15455016493797302 -771,0.15438878536224365 -772,0.15422792732715607 -773,0.15406747162342072 -774,0.15390829741954803 -775,0.15374939143657684 -776,0.15359123051166534 -777,0.15343345701694489 -778,0.15327660739421844 -779,0.1531200408935547 -780,0.1529640108346939 -781,0.15280866622924805 -782,0.1526540368795395 -783,0.15249988436698914 -784,0.15234623849391937 -785,0.15219320356845856 -786,0.1520405411720276 -787,0.15188822150230408 -788,0.1517370343208313 -789,0.15158583223819733 -790,0.15143533051013947 -791,0.1512852907180786 -792,0.15113581717014313 -793,0.1509867012500763 -794,0.150838240981102 -795,0.15069034695625305 -796,0.150542750954628 -797,0.15039508044719696 -798,0.1502487063407898 -799,0.1501023918390274 -800,0.1499566286802292 -801,0.1498112678527832 -802,0.14966626465320587 -803,0.1495220959186554 -804,0.14937809109687805 -805,0.14923425018787384 -806,0.14909125864505768 -807,0.1489480435848236 -808,0.14880554378032684 -809,0.14866341650485992 -810,0.14852099120616913 -811,0.1483793556690216 -812,0.1482381969690323 -813,0.1480969935655594 -814,0.147955983877182 -815,0.14781531691551208 -816,0.14767536520957947 -817,0.14753499627113342 -818,0.14739499986171722 -819,0.14725551009178162 -820,0.1471157819032669 -821,0.1469755470752716 -822,0.14683590829372406 -823,0.14669618010520935 -824,0.1465570330619812 -825,0.1464177668094635 -826,0.1462792456150055 -827,0.14614123106002808 -828,0.14600294828414917 -829,0.1458653062582016 -830,0.14572837948799133 -831,0.14559142291545868 -832,0.1454547941684723 -833,0.1453184336423874 -834,0.14518259465694427 -835,0.14504697918891907 -836,0.14491210877895355 -837,0.1447771042585373 -838,0.14464274048805237 -839,0.14450915157794952 -840,0.1443752497434616 -841,0.14424170553684235 -842,0.14410927891731262 -843,0.14397667348384857 -844,0.14384452998638153 -845,0.1437133550643921 -846,0.14358186721801758 -847,0.14345042407512665 -848,0.14332035183906555 -849,0.1431902050971985 -850,0.14306038618087769 -851,0.14293140172958374 -852,0.14280207455158234 -853,0.1426738202571869 
-854,0.1425454169511795 -855,0.14241734147071838 -856,0.1422898918390274 -857,0.14216285943984985 -858,0.14203625917434692 -859,0.14190971851348877 -860,0.1417834758758545 -861,0.1416575312614441 -862,0.14153210818767548 -863,0.1414070427417755 -864,0.14128264784812927 -865,0.14115796983242035 -866,0.14103370904922485 -867,0.14091026782989502 -868,0.1407870054244995 -869,0.14066384732723236 -870,0.1405414640903473 -871,0.1404189169406891 -872,0.1402968466281891 -873,0.1401752382516861 -874,0.14005404710769653 -875,0.13993297517299652 -876,0.1398119479417801 -877,0.13969163596630096 -878,0.13957180082798004 -879,0.13945183157920837 -880,0.13933221995830536 -881,0.13921299576759338 -882,0.13909417390823364 -883,0.13897548615932465 -884,0.1388573944568634 -885,0.13873916864395142 -886,0.13862164318561554 -887,0.13850393891334534 -888,0.13838697969913483 -889,0.1382695734500885 -890,0.13815318048000336 -891,0.13803720474243164 -892,0.13792189955711365 -893,0.13780668377876282 -894,0.13769200444221497 -895,0.137578085064888 -896,0.13746415078639984 -897,0.1373504102230072 -898,0.13723696768283844 -899,0.13712380826473236 -900,0.1370106190443039 -901,0.13689792156219482 -902,0.13678519427776337 -903,0.13667277991771698 -904,0.13656066358089447 -905,0.13644859194755554 -906,0.13633692264556885 -907,0.136225163936615 -908,0.13611385226249695 -909,0.13600276410579681 -910,0.13589179515838623 -911,0.13578124344348907 -912,0.13567043840885162 -913,0.13555997610092163 -914,0.1354496330022812 -915,0.13533970713615417 -916,0.13522982597351074 -917,0.13512037694454193 -918,0.13501065969467163 -919,0.1349012553691864 -920,0.13479220867156982 -921,0.13468331098556519 -922,0.13457459211349487 -923,0.134465754032135 -924,0.13435673713684082 -925,0.13424889743328094 -926,0.13413991034030914 -927,0.13403171300888062 -928,0.1339234858751297 -929,0.13381582498550415 -930,0.1337079107761383 -931,0.13360004127025604 -932,0.13349178433418274 -933,0.13338416814804077 -934,0.13327628374099731 -935,0.13316918909549713 -936,0.1330614686012268 -937,0.13295389711856842 -938,0.13284669816493988 -939,0.13273945450782776 -940,0.13263162970542908 -941,0.13252468407154083 -942,0.13241752982139587 -943,0.13231058418750763 -944,0.1322033852338791 -945,0.1320960372686386 -946,0.13198906183242798 -947,0.1318816989660263 -948,0.1317749172449112 -949,0.13166770339012146 -950,0.1315610408782959 -951,0.13145387172698975 -952,0.1313469409942627 -953,0.13123922049999237 -954,0.13113227486610413 -955,0.1310247927904129 -956,0.13091696798801422 -957,0.1308092325925827 -958,0.13070130348205566 -959,0.13059204816818237 -960,0.13048236072063446 -961,0.1303720474243164 -962,0.13026201725006104 -963,0.13015098869800568 -964,0.13003933429718018 -965,0.1299273520708084 -966,0.12981519103050232 -967,0.12970192730426788 -968,0.12958773970603943 -969,0.12947314977645874 -970,0.12935738265514374 -971,0.12924127280712128 -972,0.12912432849407196 -973,0.1290062516927719 -974,0.1288876235485077 -975,0.12876853346824646 -976,0.12864789366722107 -977,0.12852728366851807 -978,0.12840542197227478 -979,0.12828350067138672 -980,0.12816016376018524 -981,0.12803767621517181 -982,0.1279132068157196 -983,0.12778988480567932 -984,0.12766505777835846 -985,0.12754029035568237 -986,0.1274155229330063 -987,0.12729044258594513 -988,0.12716543674468994 -989,0.12703847885131836 -990,0.1269114464521408 -991,0.12678386270999908 -992,0.12665647268295288 -993,0.1265288144350052 -994,0.1264013797044754 -995,0.12627410888671875 -996,0.12614770233631134 
-997,0.12602175772190094 -998,0.12589652836322784 -999,0.12577183544635773 -1000,0.12564821541309357
diff --git a/training/time_weighting/quadratic_mirrored/training_config.txt b/training/time_weighting/quadratic_mirrored/training_config.txt
deleted file mode 100644
index 8bf709c..0000000
--- a/training/time_weighting/quadratic_mirrored/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting/quadratic_mirrored/training_log.csv b/training/time_weighting/quadratic_mirrored/training_log.csv
deleted file mode 100644
index 45a3008..0000000
--- a/training/time_weighting/quadratic_mirrored/training_log.csv
+++ /dev/null
@@ -1,1002 +0,0 @@
-Epoch,Loss
-0,99.56229400634766 -1,58.530155181884766 -2,19.497692108154297 -3,8.940523147583008 -4,5.174559116363525 -5,4.326138973236084 -6,3.828807830810547 -7,3.4275901317596436 -8,3.155045747756958 -9,2.9778249263763428 -10,2.876185655593872 -11,2.801992177963257 -12,2.741415023803711 -13,2.6904094219207764 -14,2.6474769115448 -15,2.6114773750305176 -16,2.5812151432037354 -17,2.554877758026123 -18,2.530730724334717 -19,2.5075430870056152 -20,2.483900785446167 -21,2.4597108364105225 -22,2.435234546661377 -23,2.410686492919922 -24,2.3864715099334717 -25,2.363163948059082 -26,2.340921401977539 -27,2.3206348419189453 -28,2.302485466003418 -29,2.287459373474121 -30,2.276064157485962
-31,2.2677712440490723 -32,2.262098550796509 -33,2.258014440536499 -34,2.2540698051452637 -35,2.2519094944000244 -36,2.253103017807007 -37,2.2569127082824707 -38,2.259121894836426 -39,2.2572028636932373 -40,2.2539334297180176 -41,2.2528138160705566 -42,2.253464937210083 -43,2.2545166015625 -44,2.2552342414855957 -45,2.255408525466919 -46,2.2550806999206543 -47,2.254439353942871 -48,2.2536227703094482 -49,2.2526915073394775 -50,2.2517104148864746 -51,2.250643491744995 -52,2.249500274658203 -53,2.2483091354370117 -54,2.247131586074829 -55,2.2460265159606934 -56,2.2450807094573975 -57,2.2443153858184814 -58,2.2437515258789062 -59,2.2434253692626953 -60,2.2432680130004883 -61,2.2432148456573486 -62,2.2431983947753906 -63,2.2431557178497314 -64,2.243037223815918 -65,2.2428135871887207 -66,2.2424802780151367 -67,2.2420501708984375 -68,2.241551637649536 -69,2.2410268783569336 -70,2.240543842315674 -71,2.2401113510131836 -72,2.2397451400756836 -73,2.2394211292266846 -74,2.2391128540039062 -75,2.238826274871826 -76,2.238609790802002 -77,2.2384016513824463 -78,2.238213539123535 -79,2.238032579421997 -80,2.2378859519958496 -81,2.2377445697784424 -82,2.237602472305298 -83,2.2374532222747803 -84,2.2372848987579346 -85,2.237092971801758 -86,2.2368884086608887 -87,2.2366833686828613 -88,2.2364940643310547 -89,2.2363178730010986 -90,2.2361481189727783 -91,2.2359962463378906 -92,2.2358338832855225 -93,2.2356865406036377 -94,2.2355430126190186 -95,2.2353992462158203 -96,2.2352566719055176 -97,2.2351224422454834 -98,2.2349812984466553 -99,2.234849214553833 -100,2.2347187995910645 -101,2.234591007232666 -102,2.234459400177002 -103,2.234327793121338 -104,2.234199285507202 -105,2.2340738773345947 -106,2.2339518070220947 -107,2.2338356971740723 -108,2.2337145805358887 -109,2.2335970401763916 -110,2.233475923538208 -111,2.233353614807129 -112,2.2332346439361572 -113,2.2331252098083496 -114,2.2330098152160645 -115,2.2329041957855225 -116,2.2328133583068848 -117,2.2326760292053223 -118,2.2325713634490967 -119,2.232454299926758 -120,2.2323429584503174 -121,2.2322354316711426 -122,2.2321248054504395 -123,2.2320125102996826 -124,2.2319140434265137 -125,2.2317922115325928 -126,2.2316930294036865 -127,2.231581211090088 -128,2.2314810752868652 -129,2.2313780784606934 -130,2.231276035308838 -131,2.231175184249878 -132,2.2310729026794434 -133,2.230971336364746 -134,2.230876922607422 -135,2.2307722568511963 -136,2.230684280395508 -137,2.2305781841278076 -138,2.230478286743164 -139,2.2303786277770996 -140,2.2302751541137695 -141,2.230165481567383 -142,2.2300593852996826 -143,2.229954242706299 -144,2.229851245880127 -145,2.229748010635376 -146,2.229649066925049 -147,2.229527711868286 -148,2.229421615600586 -149,2.229320526123047 -150,2.2292163372039795 -151,2.229109764099121 -152,2.2290000915527344 -153,2.2288858890533447 -154,2.228771924972534 -155,2.228687047958374 -156,2.2285547256469727 -157,2.2284491062164307 -158,2.228344202041626 -159,2.228236436843872 -160,2.22813081741333 -161,2.228025197982788 -162,2.2279179096221924 -163,2.2278120517730713 -164,2.2277066707611084 -165,2.227600574493408 -166,2.2274973392486572 -167,2.2273948192596436 -168,2.2272913455963135 -169,2.2271878719329834 -170,2.227085828781128 -171,2.2269859313964844 -172,2.2268869876861572 -173,2.226790189743042 -174,2.2267069816589355 -175,2.2265963554382324 -176,2.2265050411224365 -177,2.2264113426208496 -178,2.226315498352051 -179,2.2262144088745117 -180,2.2261197566986084 -181,2.2260336875915527 -182,2.2259325981140137 -183,2.2258408069610596 
-184,2.2257449626922607 -185,2.225653648376465 -186,2.225560188293457 -187,2.2254650592803955 -188,2.225372791290283 -189,2.2252743244171143 -190,2.2251954078674316 -191,2.2250888347625732 -192,2.2249960899353027 -193,2.224912405014038 -194,2.2248146533966064 -195,2.2247259616851807 -196,2.2246358394622803 -197,2.2245497703552246 -198,2.2244632244110107 -199,2.224372625350952 -200,2.2242891788482666 -201,2.224203586578369 -202,2.2241158485412598 -203,2.2240328788757324 -204,2.223945379257202 -205,2.2238810062408447 -206,2.2237813472747803 -207,2.2237019538879395 -208,2.223623752593994 -209,2.223545551300049 -210,2.2234668731689453 -211,2.2233850955963135 -212,2.2233083248138428 -213,2.2232306003570557 -214,2.2231476306915283 -215,2.2230706214904785 -216,2.222994565963745 -217,2.2229158878326416 -218,2.2228376865386963 -219,2.2227587699890137 -220,2.2226791381835938 -221,2.2226035594940186 -222,2.2225284576416016 -223,2.2224457263946533 -224,2.222369909286499 -225,2.2222959995269775 -226,2.2222180366516113 -227,2.2221388816833496 -228,2.222060203552246 -229,2.2219932079315186 -230,2.2219088077545166 -231,2.2218377590179443 -232,2.2217607498168945 -233,2.221686840057373 -234,2.221609115600586 -235,2.2215356826782227 -236,2.221466541290283 -237,2.2213902473449707 -238,2.2213191986083984 -239,2.221248149871826 -240,2.221174955368042 -241,2.2211058139801025 -242,2.221036195755005 -243,2.220966339111328 -244,2.2208974361419678 -245,2.2208292484283447 -246,2.220759153366089 -247,2.2206945419311523 -248,2.2206242084503174 -249,2.220557689666748 -250,2.2204906940460205 -251,2.2204232215881348 -252,2.2203564643859863 -253,2.220290422439575 -254,2.2202231884002686 -255,2.220156192779541 -256,2.220090627670288 -257,2.220022439956665 -258,2.219956398010254 -259,2.2198903560638428 -260,2.219822645187378 -261,2.219758987426758 -262,2.219691276550293 -263,2.2196273803710938 -264,2.2195565700531006 -265,2.2194905281066895 -266,2.21941876411438 -267,2.219350576400757 -268,2.2192797660827637 -269,2.219214916229248 -270,2.2191414833068848 -271,2.2190723419189453 -272,2.218996524810791 -273,2.2189207077026367 -274,2.218846321105957 -275,2.2187724113464355 -276,2.2186970710754395 -277,2.2186200618743896 -278,2.2185442447662354 -279,2.2184677124023438 -280,2.218392848968506 -281,2.218318223953247 -282,2.2182445526123047 -283,2.2181687355041504 -284,2.2180960178375244 -285,2.2180237770080566 -286,2.2179501056671143 -287,2.2178781032562256 -288,2.217799663543701 -289,2.2177224159240723 -290,2.217644453048706 -291,2.217564344406128 -292,2.2174854278564453 -293,2.217399835586548 -294,2.2173099517822266 -295,2.217228412628174 -296,2.217129707336426 -297,2.217043876647949 -298,2.2169570922851562 -299,2.216870069503784 -300,2.2167840003967285 -301,2.21671462059021 -302,2.2166309356689453 -303,2.216567277908325 -304,2.216505289077759 -305,2.216439723968506 -306,2.216372013092041 -307,2.216315746307373 -308,2.2162442207336426 -309,2.2161831855773926 -310,2.216120719909668 -311,2.216055393218994 -312,2.215986967086792 -313,2.215935230255127 -314,2.215857744216919 -315,2.2157976627349854 -316,2.2157366275787354 -317,2.2156710624694824 -318,2.2156028747558594 -319,2.2155444622039795 -320,2.2154767513275146 -321,2.2154154777526855 -322,2.2153544425964355 -323,2.21529221534729 -324,2.215237855911255 -325,2.2151730060577393 -326,2.215113878250122 -327,2.2150518894195557 -328,2.2149882316589355 -329,2.214937210083008 -330,2.214867115020752 -331,2.2148079872131348 -332,2.2147481441497803 -333,2.214686632156372 
-334,2.2146310806274414 -335,2.214566230773926 -336,2.2145073413848877 -337,2.214446544647217 -338,2.2143867015838623 -339,2.2143266201019287 -340,2.2142670154571533 -341,2.2142069339752197 -342,2.21414852142334 -343,2.2140889167785645 -344,2.214029312133789 -345,2.2139697074890137 -346,2.2139103412628174 -347,2.213853597640991 -348,2.2137951850891113 -349,2.213738203048706 -350,2.2136788368225098 -351,2.2136178016662598 -352,2.213567018508911 -353,2.2134997844696045 -354,2.213442802429199 -355,2.2133841514587402 -356,2.2133235931396484 -357,2.213271379470825 -358,2.2132067680358887 -359,2.2131497859954834 -360,2.2130916118621826 -361,2.213033676147461 -362,2.2129759788513184 -363,2.212918519973755 -364,2.2128610610961914 -365,2.212804079055786 -366,2.2127463817596436 -367,2.2126986980438232 -368,2.2126383781433105 -369,2.2125909328460693 -370,2.2125401496887207 -371,2.212486982345581 -372,2.2124271392822266 -373,2.212385654449463 -374,2.2123210430145264 -375,2.2122693061828613 -376,2.21221661567688 -377,2.2121779918670654 -378,2.21211576461792 -379,2.212069272994995 -380,2.2120163440704346 -381,2.211963176727295 -382,2.211906671524048 -383,2.2118589878082275 -384,2.2118048667907715 -385,2.2117550373077393 -386,2.2117042541503906 -387,2.2116539478302 -388,2.2116036415100098 -389,2.2115635871887207 -390,2.2115085124969482 -391,2.2114574909210205 -392,2.211418867111206 -393,2.211359739303589 -394,2.2113142013549805 -395,2.211268901824951 -396,2.2112085819244385 -397,2.2111761569976807 -398,2.2111129760742188 -399,2.2110674381256104 -400,2.2110164165496826 -401,2.210965156555176 -402,2.210907459259033 -403,2.2108590602874756 -404,2.210811138153076 -405,2.2107625007629395 -406,2.2107090950012207 -407,2.210659980773926 -408,2.210603713989258 -409,2.210557699203491 -410,2.2105066776275635 -411,2.210468053817749 -412,2.21041202545166 -413,2.210373640060425 -414,2.2103185653686523 -415,2.2102603912353516 -416,2.2102174758911133 -417,2.210157632827759 -418,2.2101099491119385 -419,2.210061550140381 -420,2.210012197494507 -421,2.2099668979644775 -422,2.209923505783081 -423,2.209871292114258 -424,2.2098171710968018 -425,2.2097723484039307 -426,2.209721803665161 -427,2.2096810340881348 -428,2.209634304046631 -429,2.2095794677734375 -430,2.20955228805542 -431,2.2094907760620117 -432,2.2094497680664062 -433,2.2094006538391113 -434,2.2093429565429688 -435,2.2093169689178467 -436,2.20924711227417 -437,2.2092082500457764 -438,2.209160566329956 -439,2.2091023921966553 -440,2.209059238433838 -441,2.209015369415283 -442,2.2089643478393555 -443,2.2089147567749023 -444,2.208869695663452 -445,2.2088143825531006 -446,2.208780288696289 -447,2.208728551864624 -448,2.2086880207061768 -449,2.208632707595825 -450,2.208571434020996 -451,2.2085533142089844 -452,2.2084708213806152 -453,2.2084295749664307 -454,2.2083802223205566 -455,2.208324670791626 -456,2.2082762718200684 -457,2.2082417011260986 -458,2.2081868648529053 -459,2.208125591278076 -460,2.208080530166626 -461,2.2080185413360596 -462,2.207956552505493 -463,2.2079148292541504 -464,2.2078633308410645 -465,2.2077836990356445 -466,2.2077295780181885 -467,2.207658290863037 -468,2.207603693008423 -469,2.207524299621582 -470,2.2074573040008545 -471,2.2073779106140137 -472,2.207282781600952 -473,2.2072196006774902 -474,2.2071025371551514 -475,2.207035541534424 -476,2.2068891525268555 -477,2.2067811489105225 -478,2.2066588401794434 -479,2.2065210342407227 -480,2.206376552581787 -481,2.2061898708343506 -482,2.2059314250946045 -483,2.205826997756958 -484,2.20554518699646 
-485,2.2053825855255127 -486,2.2050771713256836 -487,2.204899787902832 -488,2.2046260833740234 -489,2.2043185234069824 -490,2.203943967819214 -491,2.2038917541503906 -492,2.2035021781921387 -493,2.2033002376556396 -494,2.202686071395874 -495,2.2027828693389893 -496,2.2020211219787598 -497,2.2017369270324707 -498,2.2012624740600586 -499,2.201280117034912 -500,2.2015798091888428 -501,2.200784206390381 -502,2.2003841400146484 -503,2.200171947479248 -504,2.2002971172332764 -505,2.1993396282196045 -506,2.1991260051727295 -507,2.1987977027893066 -508,2.198338031768799 -509,2.198012113571167 -510,2.1977312564849854 -511,2.197134017944336 -512,2.196824073791504 -513,2.1970064640045166 -514,2.1963632106781006 -515,2.1961607933044434 -516,2.1960294246673584 -517,2.1957976818084717 -518,2.1954526901245117 -519,2.1956539154052734 -520,2.1952085494995117 -521,2.1957008838653564 -522,2.195733070373535 -523,2.1955068111419678 -524,2.1956377029418945 -525,2.193953514099121 -526,2.1945033073425293 -527,2.1938719749450684 -528,2.194692850112915 -529,2.1935391426086426 -530,2.194279909133911 -531,2.1930508613586426 -532,2.193923234939575 -533,2.1942198276519775 -534,2.193162679672241 -535,2.192798614501953 -536,2.1931495666503906 -537,2.1922965049743652 -538,2.1924095153808594 -539,2.1929097175598145 -540,2.1921355724334717 -541,2.1924588680267334 -542,2.191859483718872 -543,2.192347526550293 -544,2.191565990447998 -545,2.191802740097046 -546,2.191709280014038 -547,2.191197156906128 -548,2.191645383834839 -549,2.191349744796753 -550,2.191107749938965 -551,2.1910111904144287 -552,2.190897226333618 -553,2.191291570663452 -554,2.190640687942505 -555,2.1906633377075195 -556,2.190519094467163 -557,2.1904964447021484 -558,2.1907806396484375 -559,2.1907753944396973 -560,2.1905593872070312 -561,2.190530776977539 -562,2.1905040740966797 -563,2.190399646759033 -564,2.1901419162750244 -565,2.190443277359009 -566,2.1905288696289062 -567,2.189913749694824 -568,2.1905601024627686 -569,2.1903884410858154 -570,2.189725875854492 -571,2.1905128955841064 -572,2.1899573802948 -573,2.1898679733276367 -574,2.1900558471679688 -575,2.189598321914673 -576,2.189673900604248 -577,2.1897003650665283 -578,2.1894116401672363 -579,2.1894776821136475 -580,2.189368724822998 -581,2.1892149448394775 -582,2.1893413066864014 -583,2.189148426055908 -584,2.189115285873413 -585,2.1891188621520996 -586,2.188998222351074 -587,2.1889898777008057 -588,2.189073085784912 -589,2.189018487930298 -590,2.188908338546753 -591,2.1889400482177734 -592,2.188875436782837 -593,2.189014196395874 -594,2.189020872116089 -595,2.1887664794921875 -596,2.1887385845184326 -597,2.189108371734619 -598,2.1889355182647705 -599,2.189082145690918 -600,2.18908953666687 -601,2.1887128353118896 -602,2.1887717247009277 -603,2.1885483264923096 -604,2.1886374950408936 -605,2.188746690750122 -606,2.18839168548584 -607,2.188291311264038 -608,2.18837308883667 -609,2.188368558883667 -610,2.188385009765625 -611,2.188169479370117 -612,2.1881582736968994 -613,2.1881792545318604 -614,2.1881420612335205 -615,2.1881096363067627 -616,2.188183069229126 -617,2.1881277561187744 -618,2.187894821166992 -619,2.1878206729888916 -620,2.1879518032073975 -621,2.187830686569214 -622,2.1878647804260254 -623,2.1877284049987793 -624,2.187822103500366 -625,2.187682628631592 -626,2.1875 -627,2.1875295639038086 -628,2.187615156173706 -629,2.187570333480835 -630,2.1876540184020996 -631,2.1874215602874756 -632,2.1874592304229736 -633,2.187753915786743 -634,2.1875598430633545 -635,2.1875503063201904 
-636,2.1875927448272705 -637,2.1875321865081787 -638,2.1872830390930176 -639,2.1872119903564453 -640,2.187408447265625 -641,2.1873421669006348 -642,2.1870834827423096 -643,2.1872851848602295 -644,2.187459945678711 -645,2.187194585800171 -646,2.187058210372925 -647,2.1871893405914307 -648,2.1870968341827393 -649,2.1870293617248535 -650,2.1873135566711426 -651,2.18790340423584 -652,2.1875240802764893 -653,2.1871511936187744 -654,2.1874840259552 -655,2.187208890914917 -656,2.1868951320648193 -657,2.18699049949646 -658,2.1870293617248535 -659,2.1868488788604736 -660,2.1869137287139893 -661,2.1871402263641357 -662,2.1872901916503906 -663,2.187443971633911 -664,2.1870622634887695 -665,2.1870391368865967 -666,2.1868510246276855 -667,2.18672776222229 -668,2.186833620071411 -669,2.1869957447052 -670,2.187063455581665 -671,2.186711549758911 -672,2.186701774597168 -673,2.1869332790374756 -674,2.1868114471435547 -675,2.186758279800415 -676,2.1869568824768066 -677,2.18688702583313 -678,2.1868884563446045 -679,2.1871657371520996 -680,2.18682861328125 -681,2.186572790145874 -682,2.186516046524048 -683,2.186607599258423 -684,2.1867332458496094 -685,2.1866614818573 -686,2.1865546703338623 -687,2.1864542961120605 -688,2.1864728927612305 -689,2.186493158340454 -690,2.1865944862365723 -691,2.1866958141326904 -692,2.1866064071655273 -693,2.186605215072632 -694,2.1864535808563232 -695,2.1863746643066406 -696,2.186401128768921 -697,2.1865453720092773 -698,2.1870110034942627 -699,2.1868653297424316 -700,2.1864633560180664 -701,2.186927080154419 -702,2.1866469383239746 -703,2.1864736080169678 -704,2.1867408752441406 -705,2.186774969100952 -706,2.186678409576416 -707,2.1866114139556885 -708,2.186459541320801 -709,2.1862897872924805 -710,2.186340808868408 -711,2.186338424682617 -712,2.1864285469055176 -713,2.186736583709717 -714,2.1866977214813232 -715,2.1866936683654785 -716,2.186300277709961 -717,2.186218023300171 -718,2.186530828475952 -719,2.187183141708374 -720,2.18721866607666 -721,2.186406373977661 -722,2.1866250038146973 -723,2.1867151260375977 -724,2.186685085296631 -725,2.1867716312408447 -726,2.1862940788269043 -727,2.1863930225372314 -728,2.186288833618164 -729,2.1863818168640137 -730,2.186523199081421 -731,2.186734437942505 -732,2.186506509780884 -733,2.186293840408325 -734,2.1863157749176025 -735,2.18634033203125 -736,2.1864371299743652 -737,2.186593770980835 -738,2.186340570449829 -739,2.186190605163574 -740,2.1861348152160645 -741,2.186246156692505 -742,2.186326026916504 -743,2.186946153640747 -744,2.187030076980591 -745,2.1863269805908203 -746,2.186399221420288 -747,2.186337471008301 -748,2.1865408420562744 -749,2.1870036125183105 -750,2.186300277709961 -751,2.186289072036743 -752,2.186992883682251 -753,2.186295747756958 -754,2.186096668243408 -755,2.1864688396453857 -756,2.1862385272979736 -757,2.1860885620117188 -758,2.1861133575439453 -759,2.1861283779144287 -760,2.186323404312134 -761,2.1861038208007812 -762,2.1859848499298096 -763,2.1861443519592285 -764,2.1867499351501465 -765,2.1868503093719482 -766,2.18635630607605 -767,2.186429023742676 -768,2.1861023902893066 -769,2.186448812484741 -770,2.186410903930664 -771,2.186251401901245 -772,2.1863019466400146 -773,2.1863248348236084 -774,2.186072826385498 -775,2.1862003803253174 -776,2.1862220764160156 -777,2.1860132217407227 -778,2.1863157749176025 -779,2.186197519302368 -780,2.1862385272979736 -781,2.186283826828003 -782,2.186197519302368 -783,2.1861698627471924 -784,2.186171770095825 -785,2.1859967708587646 -786,2.186072587966919 
-787,2.185976028442383 -788,2.1859090328216553 -789,2.186033010482788 -790,2.185950994491577 -791,2.186004877090454 -792,2.1861493587493896 -793,2.186018705368042 -794,2.185971975326538 -795,2.1858999729156494 -796,2.1858017444610596 -797,2.1859004497528076 -798,2.1863434314727783 -799,2.186187267303467 -800,2.186302423477173 -801,2.186676025390625 -802,2.186032772064209 -803,2.1862034797668457 -804,2.186474323272705 -805,2.185953140258789 -806,2.1861701011657715 -807,2.186018705368042 -808,2.1860809326171875 -809,2.186014175415039 -810,2.185957908630371 -811,2.1862010955810547 -812,2.1861062049865723 -813,2.186281681060791 -814,2.1863131523132324 -815,2.1859710216522217 -816,2.1859264373779297 -817,2.18587064743042 -818,2.1857411861419678 -819,2.1859359741210938 -820,2.186230182647705 -821,2.1859395503997803 -822,2.1860883235931396 -823,2.1860077381134033 -824,2.185920000076294 -825,2.18603515625 -826,2.185955286026001 -827,2.1861093044281006 -828,2.1861586570739746 -829,2.186161756515503 -830,2.1862127780914307 -831,2.1859750747680664 -832,2.185781478881836 -833,2.185822010040283 -834,2.185889482498169 -835,2.186105251312256 -836,2.1863481998443604 -837,2.1862874031066895 -838,2.185936212539673 -839,2.185900926589966 -840,2.1857845783233643 -841,2.1860125064849854 -842,2.1862740516662598 -843,2.1860060691833496 -844,2.18609881401062 -845,2.1862058639526367 -846,2.185889482498169 -847,2.1859161853790283 -848,2.185857057571411 -849,2.185797929763794 -850,2.1859030723571777 -851,2.1858561038970947 -852,2.1859939098358154 -853,2.1860952377319336 -854,2.1860997676849365 -855,2.1860744953155518 -856,2.1857945919036865 -857,2.1856560707092285 -858,2.1857316493988037 -859,2.1858949661254883 -860,2.186152458190918 -861,2.186060905456543 -862,2.1859230995178223 -863,2.1857197284698486 -864,2.1857056617736816 -865,2.1857385635375977 -866,2.185789108276367 -867,2.185940980911255 -868,2.1859078407287598 -869,2.1860671043395996 -870,2.1865830421447754 -871,2.1867456436157227 -872,2.185974597930908 -873,2.1862385272979736 -874,2.1859591007232666 -875,2.1860694885253906 -876,2.1866085529327393 -877,2.1858839988708496 -878,2.1858608722686768 -879,2.186911106109619 -880,2.186258316040039 -881,2.1861014366149902 -882,2.186685085296631 -883,2.185685873031616 -884,2.186363697052002 -885,2.186922311782837 -886,2.1857378482818604 -887,2.186361789703369 -888,2.1862120628356934 -889,2.185983419418335 -890,2.186328411102295 -891,2.1858415603637695 -892,2.186383008956909 -893,2.185831069946289 -894,2.1863627433776855 -895,2.186110258102417 -896,2.1862893104553223 -897,2.186170816421509 -898,2.185840606689453 -899,2.1859097480773926 -900,2.185633420944214 -901,2.1858303546905518 -902,2.1859214305877686 -903,2.1858277320861816 -904,2.1858363151550293 -905,2.1857638359069824 -906,2.185818672180176 -907,2.1857845783233643 -908,2.1857504844665527 -909,2.1857409477233887 -910,2.1855900287628174 -911,2.185617446899414 -912,2.185542345046997 -913,2.1855573654174805 -914,2.1855976581573486 -915,2.1859076023101807 -916,2.185798406600952 -917,2.1859865188598633 -918,2.186150312423706 -919,2.185899496078491 -920,2.185724973678589 -921,2.1857876777648926 -922,2.185718536376953 -923,2.185673713684082 -924,2.185667037963867 -925,2.1857662200927734 -926,2.185594320297241 -927,2.1856696605682373 -928,2.185755729675293 -929,2.185671806335449 -930,2.186086893081665 -931,2.1863770484924316 -932,2.1864519119262695 -933,2.1858158111572266 -934,2.1857614517211914 -935,2.185797691345215 -936,2.186022996902466 -937,2.1863155364990234 
-938,2.1857831478118896 -939,2.185633897781372 -940,2.185560703277588 -941,2.185760021209717 -942,2.1858983039855957 -943,2.1857457160949707 -944,2.1857197284698486 -945,2.1855084896087646 -946,2.185493230819702 -947,2.185650587081909 -948,2.1856398582458496 -949,2.1856915950775146 -950,2.1856353282928467 -951,2.185574531555176 -952,2.185515880584717 -953,2.185464382171631 -954,2.1854288578033447 -955,2.1855952739715576 -956,2.1863853931427 -957,2.1861491203308105 -958,2.186309576034546 -959,2.1871471405029297 -960,2.185701608657837 -961,2.1862008571624756 -962,2.1872189044952393 -963,2.1860222816467285 -964,2.185877561569214 -965,2.1865572929382324 -966,2.18595027923584 -967,2.185521364212036 -968,2.1861045360565186 -969,2.1858394145965576 -970,2.185678482055664 -971,2.185605764389038 -972,2.186321496963501 -973,2.1870884895324707 -974,2.1855685710906982 -975,2.1864027976989746 -976,2.1877615451812744 -977,2.185638189315796 -978,2.1893386840820312 -979,2.1905829906463623 -980,2.1899611949920654 -981,2.186995267868042 -982,2.19111967086792 -983,2.1878015995025635 -984,2.1910440921783447 -985,2.188703775405884 -986,2.187687397003174 -987,2.1900641918182373 -988,2.1870169639587402 -989,2.189117908477783 -990,2.1875367164611816 -991,2.1876845359802246 -992,2.187164306640625 -993,2.1868913173675537 -994,2.187607526779175 -995,2.1864306926727295 -996,2.1864840984344482 -997,2.1873550415039062 -998,2.1863746643066406 -999,2.187396764755249 -1000,2.186246871948242
diff --git a/training/time_weighting/square_root/training_config.txt b/training/time_weighting/square_root/training_config.txt
deleted file mode 100644
index c58ff01..0000000
--- a/training/time_weighting/square_root/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function:
- def loss_fn(state_traj, t_span):
-     theta = state_traj[:, :, 0]  # Size: [t_points, batch_size]
-     desired_theta = state_traj[:, :, 3]  # Size: [t_points, batch_size]
-
-     min_weight = 0.01  # Weights are in the range [min_weight, 1]
-     weights = weight_fn(t_span, min_val=min_weight)  # Initially Size: [t_points]
-     # Reshape or expand weights to match theta dimensions
-     weights = weights.view(-1, 1)  # Now Size: [t_points, 1], broadcasts across the batch dimension
-
-     # Calculate the weighted loss
-     return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
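The square_root weighting in the deleted config above ramps the per-sample weight from min_val at t = 0 up to 1 at t = t_max, so the loss is dominated by late tracking error (how well the controller settles) rather than the initial transient. A minimal standalone sketch of that ramp, assuming only torch and the time-first [t_points, batch_size] trajectory layout that the broadcast in loss_fn requires:

import torch

def square_root(t_span, t_max=None, min_val=0.01):
    # Ramp from min_val at t = 0 to 1.0 at t = t_max, growing as sqrt(t).
    t_max = t_max if t_max is not None else t_span[-1]
    return min_val + ((1 - min_val) / t_max**0.5) * t_span**0.5

t_span = torch.linspace(0.0, 10.0, 1000)      # matches "Time Span: 0 to 10, Points: 1000"
weights = square_root(t_span).view(-1, 1)     # [1000, 1]; broadcasts over a [t_points, batch] trajectory
print(weights[0].item(), weights[-1].item())  # -> 0.01 ... 1.0

Under this weighting the run below drives the loss from roughly 952 at epoch 0 to about 0.64 by epoch 1000.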
diff --git a/training/time_weighting/square_root/training_log.csv b/training/time_weighting/square_root/training_log.csv
deleted file mode 100644
index 12cc162..0000000
--- a/training/time_weighting/square_root/training_log.csv
+++ /dev/null
@@ -1,1002 +0,0 @@
-Epoch,Loss
-0,952.5866088867188 -1,386.0617370605469 -2,273.7415771484375 -3,868.5386352539062 -4,140.04168701171875 -5,39.245811462402344 -6,25.079883575439453 -7,20.710187911987305 -8,21.303237915039062 -9,24.3283748626709 -10,28.050525665283203 -11,33.28006362915039 -12,33.558929443359375 -13,33.54981994628906 -14,33.41416931152344 -15,33.187660217285156 -16,32.86654281616211 -17,32.45302963256836 -18,31.890411376953125 -19,30.863636016845703 -20,28.81007957458496 -21,27.425033569335938 -22,26.52981185913086 -23,25.750869750976562 -24,25.012826919555664 -25,24.280357360839844 -26,23.51789093017578 -27,22.676368713378906 -28,21.675588607788086 -29,20.41519546508789 -30,18.839738845825195 -31,17.01235008239746 -32,14.930663108825684 -33,13.544623374938965 -34,12.149258613586426 -35,9.877694129943848 -36,8.975503921508789 -37,8.304489135742188 -38,7.686733722686768 -39,7.064981460571289 -40,6.504978656768799 -41,6.047710418701172 -42,5.622644901275635 -43,4.8125786781311035 -44,4.010440826416016 -45,3.727144241333008 -46,3.479302167892456 -47,3.255537986755371 -48,3.0491843223571777 -49,2.854078531265259 -50,2.6671178340911865 -51,2.4917497634887695 -52,2.3407697677612305 -53,2.214313507080078 -54,2.09494948387146 -55,1.9812803268432617 -56,1.8791310787200928 -57,1.801325798034668 -58,1.8051507472991943 -59,1.8385875225067139 -60,1.8670263290405273 -61,1.8876523971557617 -62,1.9010323286056519 -63,1.9072588682174683 -64,1.9063305854797363 -65,1.8984962701797485 -66,1.884208083152771 -67,1.864050030708313 -68,1.8387365341186523 -69,1.8092477321624756 -70,1.7766779661178589 -71,1.7420848608016968 -72,1.7066315412521362 -73,1.6714426279067993 -74,1.6373891830444336 -75,1.6052318811416626 -76,1.5756756067276 -77,1.5493190288543701 -78,1.526640772819519 -79,1.5081292390823364 -80,1.4943021535873413 -81,1.485450267791748 -82,1.4812577962875366 -83,1.4802736043930054 -84,1.4800548553466797 -85,1.4781684875488281 -86,1.472988486289978 -87,1.4638690948486328 -88,1.4510529041290283 -89,1.4355002641677856 -90,1.4187359809875488 -91,1.4025137424468994 -92,1.388289213180542 -93,1.37664794921875 -94,1.3674904108047485 -95,1.360166311264038 -96,1.3538203239440918 -97,1.3476755619049072 -98,1.3411424160003662 -99,1.3338611125946045 -100,1.3257077932357788 -101,1.3167908191680908 -102,1.3074053525924683 -103,1.2980047464370728 -104,1.2890905141830444 -105,1.281062126159668 -106,1.2740854024887085 -107,1.2679626941680908 -108,1.2621707916259766 -109,1.2562057971954346 -110,1.2497165203094482 -111,1.242646336555481 -112,1.2352313995361328 -113,1.2279162406921387 -114,1.2210437059402466 -115,1.2147819995880127 -116,1.209092617034912 -117,1.2037813663482666 -118,1.198622226715088 -119,1.1934318542480469 -120,1.188112497329712 -121,1.1826688051223755 -122,1.1771883964538574 -123,1.1717880964279175 -124,1.1665760278701782 -125,1.1615924835205078 -126,1.1567877531051636
-127,1.1520837545394897 -128,1.147392749786377 -129,1.1426714658737183 -130,1.1379114389419556 -131,1.1331524848937988 -132,1.128448724746704 -133,1.1238428354263306 -134,1.1193476915359497 -135,1.1149500608444214 -136,1.1106098890304565 -137,1.106292486190796 -138,1.101981282234192 -139,1.097678780555725 -140,1.0933902263641357 -141,1.08914053440094 -142,1.0849401950836182 -143,1.0808062553405762 -144,1.076734185218811 -145,1.0727039575576782 -146,1.0687028169631958 -147,1.0647226572036743 -148,1.06075918674469 -149,1.0568186044692993 -150,1.052905797958374 -151,1.0490288734436035 -152,1.0451840162277222 -153,1.0413799285888672 -154,1.0376030206680298 -155,1.033852219581604 -156,1.0301202535629272 -157,1.0264097452163696 -158,1.0227222442626953 -159,1.0190589427947998 -160,1.0154221057891846 -161,1.0118114948272705 -162,1.0082308053970337 -163,1.004673719406128 -164,1.0011413097381592 -165,0.9976338148117065 -166,0.9941502213478088 -167,0.9906930923461914 -168,0.987259030342102 -169,0.9838529229164124 -170,0.9804714918136597 -171,0.9771128296852112 -172,0.9737769961357117 -173,0.9704601168632507 -174,0.9671651124954224 -175,0.9638897180557251 -176,0.9606326222419739 -177,0.9573953151702881 -178,0.9541759490966797 -179,0.9509792327880859 -180,0.9478039741516113 -181,0.944650411605835 -182,0.9415212273597717 -183,0.9384150505065918 -184,0.935332715511322 -185,0.9322758316993713 -186,0.9292389750480652 -187,0.9262270927429199 -188,0.9232397079467773 -189,0.9202757477760315 -190,0.9173371195793152 -191,0.9144227504730225 -192,0.9115323424339294 -193,0.9086688756942749 -194,0.9058282971382141 -195,0.9030114412307739 -196,0.9002201557159424 -197,0.8974522352218628 -198,0.8947097063064575 -199,0.8919917345046997 -200,0.8892976641654968 -201,0.886628270149231 -202,0.8839840888977051 -203,0.8813619613647461 -204,0.8787634968757629 -205,0.8761875033378601 -206,0.8736352920532227 -207,0.8711054921150208 -208,0.8685964345932007 -209,0.8661138415336609 -210,0.8636524081230164 -211,0.861212968826294 -212,0.858795166015625 -213,0.85639888048172 -214,0.8540253639221191 -215,0.8516727685928345 -216,0.8493380546569824 -217,0.8470243215560913 -218,0.8447296023368835 -219,0.8424569368362427 -220,0.8402023911476135 -221,0.837964653968811 -222,0.8357497453689575 -223,0.8335552215576172 -224,0.8313844799995422 -225,0.8292359709739685 -226,0.8271090388298035 -227,0.8250055313110352 -228,0.8229233026504517 -229,0.8208590149879456 -230,0.8188132047653198 -231,0.8167829513549805 -232,0.8147663474082947 -233,0.8127610087394714 -234,0.8107671141624451 -235,0.8087859749794006 -236,0.8068146109580994 -237,0.8048540353775024 -238,0.8029027581214905 -239,0.800966203212738 -240,0.7990398406982422 -241,0.7971227765083313 -242,0.7952157258987427 -243,0.7933170199394226 -244,0.7914304733276367 -245,0.7895553708076477 -246,0.787692129611969 -247,0.7858403921127319 -248,0.7839997410774231 -249,0.7821727395057678 -250,0.7803533673286438 -251,0.7785437703132629 -252,0.7767458558082581 -253,0.7749602198600769 -254,0.7731859683990479 -255,0.7714222073554993 -256,0.7696707844734192 -257,0.7679311037063599 -258,0.7662025094032288 -259,0.7644832134246826 -260,0.762776792049408 -261,0.761082112789154 -262,0.7593970894813538 -263,0.7577230334281921 -264,0.756057858467102 -265,0.7544046640396118 -266,0.752763032913208 -267,0.751132607460022 -268,0.7495139241218567 -269,0.7479073405265808 -270,0.7463135719299316 -271,0.7447317838668823 -272,0.7431620955467224 -273,0.7416052222251892 -274,0.7400609850883484 -275,0.7385292649269104 
-276,0.7370121479034424 -277,0.7355054020881653 -278,0.7340129613876343 -279,0.732533872127533 -280,0.7310673594474792 -281,0.7296128869056702 -282,0.7281719446182251 -283,0.7267438769340515 -284,0.7253291606903076 -285,0.7239254117012024 -286,0.7225340008735657 -287,0.7211577892303467 -288,0.7197957634925842 -289,0.7184482216835022 -290,0.7171139121055603 -291,0.7157949209213257 -292,0.7144902944564819 -293,0.7132039070129395 -294,0.7119311690330505 -295,0.7106760144233704 -296,0.7094424962997437 -297,0.7082332968711853 -298,0.7070544362068176 -299,0.7059040665626526 -300,0.7047833204269409 -301,0.7036905288696289 -302,0.702622652053833 -303,0.7015781402587891 -304,0.7005540728569031 -305,0.6995510458946228 -306,0.6985697150230408 -307,0.6976091861724854 -308,0.6966695785522461 -309,0.6957486867904663 -310,0.6948468685150146 -311,0.6939628720283508 -312,0.6930960416793823 -313,0.6922464966773987 -314,0.6914129853248596 -315,0.6905946135520935 -316,0.6897920370101929 -317,0.689003586769104 -318,0.6882290244102478 -319,0.687467098236084 -320,0.6867191195487976 -321,0.6859830021858215 -322,0.6852602958679199 -323,0.6845493316650391 -324,0.6838507056236267 -325,0.6831638813018799 -326,0.682489812374115 -327,0.6818274855613708 -328,0.6811767816543579 -329,0.680537760257721 -330,0.6799091100692749 -331,0.6792913675308228 -332,0.6786845922470093 -333,0.6780886650085449 -334,0.6775036454200745 -335,0.676928699016571 -336,0.6763644218444824 -337,0.6758096218109131 -338,0.6752645373344421 -339,0.6747283339500427 -340,0.6742022633552551 -341,0.6736850142478943 -342,0.6731767654418945 -343,0.672677755355835 -344,0.6721872091293335 -345,0.6717048287391663 -346,0.6712312698364258 -347,0.6707668900489807 -348,0.6703111529350281 -349,0.6698635816574097 -350,0.6694253087043762 -351,0.6689973473548889 -352,0.6685793399810791 -353,0.6681704521179199 -354,0.6677723526954651 -355,0.6673835515975952 -356,0.6670041680335999 -357,0.6666355729103088 -358,0.6662765741348267 -359,0.6659287214279175 -360,0.6655912399291992 -361,0.665265679359436 -362,0.664951503276825 -363,0.6646493673324585 -364,0.6643593907356262 -365,0.6640820503234863 -366,0.6638172268867493 -367,0.663565993309021 -368,0.6633281707763672 -369,0.6631026864051819 -370,0.6628885865211487 -371,0.662684977054596 -372,0.662493109703064 -373,0.6623119711875916 -374,0.6621403694152832 -375,0.6619783639907837 -376,0.661828339099884 -377,0.6616957783699036 -378,0.6615678668022156 -379,0.6614429950714111 -380,0.6613209247589111 -381,0.6612010598182678 -382,0.6610836386680603 -383,0.6609677076339722 -384,0.6608533263206482 -385,0.6607398986816406 -386,0.6606278419494629 -387,0.6605170369148254 -388,0.6604074239730835 -389,0.6602985858917236 -390,0.6601911187171936 -391,0.6600844264030457 -392,0.6599783301353455 -393,0.6598736047744751 -394,0.6597691774368286 -395,0.6596687436103821 -396,0.6595723628997803 -397,0.6594622731208801 -398,0.6593449115753174 -399,0.6592210531234741 -400,0.6590862274169922 -401,0.6589421033859253 -402,0.658794641494751 -403,0.6586452126502991 -404,0.6584946513175964 -405,0.6583433747291565 -406,0.6581935286521912 -407,0.6580453515052795 -408,0.6579000949859619 -409,0.6577584147453308 -410,0.6576203107833862 -411,0.6574864387512207 -412,0.6573562026023865 -413,0.657230019569397 -414,0.6571073532104492 -415,0.6569879055023193 -416,0.6568722724914551 -417,0.6567604541778564 -418,0.6566521525382996 -419,0.6565476059913635 -420,0.6564553380012512 -421,0.6563975214958191 -422,0.6563376188278198 -423,0.6562723517417908 
-424,0.6562026143074036 -425,0.6561295986175537 -426,0.6560548543930054 -427,0.6559790372848511 -428,0.6559025049209595 -429,0.655825138092041 -430,0.6557468175888062 -431,0.6556671857833862 -432,0.6555859446525574 -433,0.6555038094520569 -434,0.6554208993911743 -435,0.655338704586029 -436,0.655258297920227 -437,0.6551860570907593 -438,0.6551179885864258 -439,0.6550506949424744 -440,0.6549823880195618 -441,0.6549127697944641 -442,0.6548420786857605 -443,0.6547711491584778 -444,0.6547008752822876 -445,0.6546328067779541 -446,0.6545670032501221 -447,0.6545026302337646 -448,0.654437243938446 -449,0.6543706655502319 -450,0.6543042659759521 -451,0.6542391777038574 -452,0.6541749835014343 -453,0.6541106104850769 -454,0.6540456414222717 -455,0.6539803147315979 -456,0.653915286064148 -457,0.6538510918617249 -458,0.6537880897521973 -459,0.653725802898407 -460,0.6536634564399719 -461,0.6536005139350891 -462,0.6535377502441406 -463,0.6534755229949951 -464,0.6534132957458496 -465,0.6533514857292175 -466,0.653289794921875 -467,0.6532284021377563 -468,0.6531670093536377 -469,0.6531058549880981 -470,0.6530449390411377 -471,0.652984082698822 -472,0.6529234647750854 -473,0.6528631448745728 -474,0.6528027057647705 -475,0.6527425050735474 -476,0.6526827216148376 -477,0.652623176574707 -478,0.6525638699531555 -479,0.6525045037269592 -480,0.6524452567100525 -481,0.6523862481117249 -482,0.6523274183273315 -483,0.6522688865661621 -484,0.6522102952003479 -485,0.6521520614624023 -486,0.6520939469337463 -487,0.6520360708236694 -488,0.651978611946106 -489,0.6519212126731873 -490,0.651864230632782 -491,0.6518083214759827 -492,0.6517528891563416 -493,0.6516978740692139 -494,0.6516427993774414 -495,0.6515880823135376 -496,0.6515334248542786 -497,0.6514787077903748 -498,0.6514240503311157 -499,0.651369571685791 -500,0.6513150334358215 -501,0.6512606739997864 -502,0.6512062549591064 -503,0.6511521935462952 -504,0.6510984301567078 -505,0.6510447263717651 -506,0.6509913802146912 -507,0.6509382128715515 -508,0.6508853435516357 -509,0.6508326530456543 -510,0.6507802605628967 -511,0.6507278084754944 -512,0.6506757140159607 -513,0.6506235599517822 -514,0.6505715847015381 -515,0.6505197882652283 -516,0.6504679918289185 -517,0.6504161953926086 -518,0.6503646969795227 -519,0.6503134369850159 -520,0.6502622365951538 -521,0.6502110958099365 -522,0.6501601338386536 -523,0.6501092314720154 -524,0.6500585675239563 -525,0.6500079035758972 -526,0.6499573588371277 -527,0.6499068737030029 -528,0.649856686592102 -529,0.649806559085846 -530,0.6497564911842346 -531,0.6497065424919128 -532,0.6496567726135254 -533,0.6496071815490723 -534,0.6495575904846191 -535,0.6495081782341003 -536,0.6494588851928711 -537,0.649409830570221 -538,0.6493607759475708 -539,0.6493116617202759 -540,0.6492628455162048 -541,0.6492141485214233 -542,0.6491654515266418 -543,0.6491169929504395 -544,0.6490684747695923 -545,0.649020254611969 -546,0.6489718556404114 -547,0.6489237546920776 -548,0.6488755941390991 -549,0.6488275527954102 -550,0.6487796306610107 -551,0.6487318277359009 -552,0.6486841440200806 -553,0.6486363410949707 -554,0.6485888957977295 -555,0.6485415697097778 -556,0.6484944820404053 -557,0.6484472751617432 -558,0.6484003663063049 -559,0.6483535170555115 -560,0.6483068466186523 -561,0.6482601165771484 -562,0.6482134461402893 -563,0.6481668949127197 -564,0.6481205224990845 -565,0.648074209690094 -566,0.6480279564857483 -567,0.6479818820953369 -568,0.6479361057281494 -569,0.6478901505470276 -570,0.6478444337844849 -571,0.6477987170219421 
-572,0.6477531790733337 -573,0.6477075815200806 -574,0.6476622819900513 -575,0.6476168632507324 -576,0.6475715637207031 -577,0.6475263237953186 -578,0.6474812030792236 -579,0.6474359631538391 -580,0.6473909616470337 -581,0.6473458409309387 -582,0.6473007798194885 -583,0.6472557187080383 -584,0.6472102999687195 -585,0.6471652388572693 -586,0.6471201777458191 -587,0.6470756530761719 -588,0.6470316052436829 -589,0.6469875574111938 -590,0.6469436883926392 -591,0.6468998789787292 -592,0.6468561887741089 -593,0.6468129754066467 -594,0.6467700004577637 -595,0.646727442741394 -596,0.6466850638389587 -597,0.6466430425643921 -598,0.6466012001037598 -599,0.6465597152709961 -600,0.6465185284614563 -601,0.6464775800704956 -602,0.6464371681213379 -603,0.64639812707901 -604,0.6463596224784851 -605,0.6463213562965393 -606,0.6462834477424622 -607,0.6462458372116089 -608,0.6462082862854004 -609,0.6461707949638367 -610,0.6461334228515625 -611,0.6460960507392883 -612,0.646058976650238 -613,0.6460220217704773 -614,0.6459852457046509 -615,0.6459484100341797 -616,0.645911693572998 -617,0.6458748579025269 -618,0.6458381414413452 -619,0.6458014845848083 -620,0.6457648873329163 -621,0.6457282304763794 -622,0.6456916928291321 -623,0.6456553339958191 -624,0.6456190347671509 -625,0.6455829739570618 -626,0.6455467939376831 -627,0.645510733127594 -628,0.6454748511314392 -629,0.6454390287399292 -630,0.6454031467437744 -631,0.6453675627708435 -632,0.645331859588623 -633,0.6452962756156921 -634,0.6452609300613403 -635,0.6452257037162781 -636,0.6451902985572815 -637,0.6451550722122192 -638,0.6451197862625122 -639,0.645084798336029 -640,0.6450498104095459 -641,0.6450150012969971 -642,0.6449803113937378 -643,0.6449456810951233 -644,0.6449111104011536 -645,0.6448766589164734 -646,0.644842267036438 -647,0.6448079347610474 -648,0.6447739601135254 -649,0.6447397470474243 -650,0.6447056531906128 -651,0.6446716785430908 -652,0.6446377635002136 -653,0.6446038484573364 -654,0.6445700526237488 -655,0.6445363163948059 -656,0.6445024609565735 -657,0.6444690227508545 -658,0.644435465335846 -659,0.6444019675254822 -660,0.6443686485290527 -661,0.6443353295326233 -662,0.6443021297454834 -663,0.6442690491676331 -664,0.6442359089851379 -665,0.6442030072212219 -666,0.6441699862480164 -667,0.6441372036933899 -668,0.6441045999526978 -669,0.6440718770027161 -670,0.6440392732620239 -671,0.6440067291259766 -672,0.6439743638038635 -673,0.6439420580863953 -674,0.6439096927642822 -675,0.6438776254653931 -676,0.6438454985618591 -677,0.6438133716583252 -678,0.643781304359436 -679,0.6437494158744812 -680,0.6437176465988159 -681,0.6436858773231506 -682,0.6436542272567749 -683,0.6436226963996887 -684,0.6435909867286682 -685,0.6435596346855164 -686,0.643528163433075 -687,0.6434968113899231 -688,0.6434656381607056 -689,0.6434345841407776 -690,0.6434034705162048 -691,0.6433725357055664 -692,0.643341600894928 -693,0.6433107852935791 -694,0.6432801485061646 -695,0.6432496309280396 -696,0.643218994140625 -697,0.6431885361671448 -698,0.6431581377983093 -699,0.6431277394294739 -700,0.6430975198745728 -701,0.6430673599243164 -702,0.6430372595787048 -703,0.6430070996284485 -704,0.6429773569107056 -705,0.6429473757743835 -706,0.6429174542427063 -707,0.6428878903388977 -708,0.6428582072257996 -709,0.642828643321991 -710,0.6427990198135376 -711,0.6427696347236633 -712,0.6427403092384338 -713,0.6427109241485596 -714,0.6426816582679749 -715,0.6426526308059692 -716,0.6426236033439636 -717,0.642594575881958 -718,0.6425658464431763 -719,0.6425370573997498 
-720,0.642508327960968 -721,0.6424798965454102 -722,0.6424515247344971 -723,0.6424230933189392 -724,0.6423949003219604 -725,0.6423667669296265 -726,0.642338752746582 -727,0.6423109173774719 -728,0.6422829627990723 -729,0.6422552466392517 -730,0.6422275304794312 -731,0.6421998739242554 -732,0.6421724557876587 -733,0.642145037651062 -734,0.6421177983283997 -735,0.6420904397964478 -736,0.6420632004737854 -737,0.642035961151123 -738,0.642008900642395 -739,0.641981840133667 -740,0.6419548392295837 -741,0.64192795753479 -742,0.6419011354446411 -743,0.6418744921684265 -744,0.6418478488922119 -745,0.6418212056159973 -746,0.641794741153717 -747,0.6417683362960815 -748,0.6417418122291565 -749,0.6417152881622314 -750,0.6416887640953064 -751,0.6416621804237366 -752,0.6416356563568115 -753,0.6416089534759521 -754,0.6415822505950928 -755,0.6415553689002991 -756,0.6415284872055054 -757,0.6415016651153564 -758,0.6414747834205627 -759,0.6414477229118347 -760,0.6414207220077515 -761,0.6413938403129578 -762,0.6413669586181641 -763,0.6413400769233704 -764,0.6413130164146423 -765,0.6412863731384277 -766,0.6412594318389893 -767,0.6412328481674194 -768,0.6412062048912048 -769,0.6411795616149902 -770,0.64115309715271 -771,0.6411266922950745 -772,0.6411001682281494 -773,0.6410738825798035 -774,0.6410475373268127 -775,0.6410213708877563 -776,0.6409953236579895 -777,0.6409692168235779 -778,0.6409434676170349 -779,0.6409175395965576 -780,0.6408918499946594 -781,0.640866219997406 -782,0.6408408284187317 -783,0.6408153772354126 -784,0.6407901048660278 -785,0.6407647728919983 -786,0.6407396197319031 -787,0.6407144665718079 -788,0.6406895518302917 -789,0.6406645178794861 -790,0.6406398415565491 -791,0.6406149864196777 -792,0.6405903100967407 -793,0.640565812587738 -794,0.6405413150787354 -795,0.640516996383667 -796,0.6404927968978882 -797,0.6404687762260437 -798,0.6404446363449097 -799,0.6404207944869995 -800,0.6403971910476685 -801,0.6403734087944031 -802,0.6403500437736511 -803,0.6403266787528992 -804,0.640303373336792 -805,0.6402801275253296 -806,0.6402570009231567 -807,0.6402339935302734 -808,0.6402110457420349 -809,0.6401882767677307 -810,0.6401655673980713 -811,0.6401427984237671 -812,0.640120267868042 -813,0.6400976777076721 -814,0.6400752663612366 -815,0.6400533318519592 -816,0.6400314569473267 -817,0.6400097012519836 -818,0.6399880051612854 -819,0.6399664878845215 -820,0.6399451494216919 -821,0.6399238109588623 -822,0.6399024128913879 -823,0.6398813128471375 -824,0.6398600935935974 -825,0.6398391127586365 -826,0.6398180723190308 -827,0.6397971510887146 -828,0.6397762894630432 -829,0.6397554874420166 -830,0.6397346258163452 -831,0.6397140622138977 -832,0.6396934390068054 -833,0.6396729350090027 -834,0.6396525502204895 -835,0.6396321654319763 -836,0.6396119594573975 -837,0.639591634273529 -838,0.6395716667175293 -839,0.6395515203475952 -840,0.6395315527915955 -841,0.6395116448402405 -842,0.6394916772842407 -843,0.6394718289375305 -844,0.6394522190093994 -845,0.6394325494766235 -846,0.6394128799438477 -847,0.6393935680389404 -848,0.6393741369247437 -849,0.639354944229126 -850,0.6393359303474426 -851,0.6393168568611145 -852,0.639298141002655 -853,0.6392797231674194 -854,0.6392613053321838 -855,0.6392430067062378 -856,0.6392247676849365 -857,0.6392067074775696 -858,0.6391885876655579 -859,0.6391705274581909 -860,0.6391525864601135 -861,0.6391345858573914 -862,0.639116644859314 -863,0.6390988230705261 -864,0.6390810608863831 -865,0.6390633583068848 -866,0.6390457153320312 -867,0.6390281319618225 
-868,0.6390108466148376 -869,0.6389935612678528 -870,0.6389762759208679 -871,0.6389592289924622 -872,0.6389420628547668 -873,0.6389250755310059 -874,0.6389079689979553 -875,0.6388909816741943 -876,0.6388741731643677 -877,0.6388572454452515 -878,0.6388404369354248 -879,0.6388236284255981 -880,0.6388068795204163 -881,0.6387901902198792 -882,0.638773500919342 -883,0.6387569308280945 -884,0.6387403011322021 -885,0.6387237906455994 -886,0.6387072801589966 -887,0.6386908888816833 -888,0.6386744379997253 -889,0.6386582255363464 -890,0.638641893863678 -891,0.6386256217956543 -892,0.6386094689369202 -893,0.6385933756828308 -894,0.6385772228240967 -895,0.6385611891746521 -896,0.6385452151298523 -897,0.6385291814804077 -898,0.6385133862495422 -899,0.6384974122047424 -900,0.638481616973877 -901,0.6384658217430115 -902,0.6384500861167908 -903,0.6384344100952148 -904,0.6384186148643494 -905,0.6384029388427734 -906,0.6383872628211975 -907,0.6383716464042664 -908,0.63835608959198 -909,0.6383403539657593 -910,0.6383248567581177 -911,0.6383093595504761 -912,0.6382937431335449 -913,0.6382782459259033 -914,0.6382628083229065 -915,0.6382473707199097 -916,0.6382319331169128 -917,0.6382163763046265 -918,0.6382009983062744 -919,0.6381855010986328 -920,0.6381703019142151 -921,0.6381548047065735 -922,0.6381394863128662 -923,0.6381241083145142 -924,0.6381087899208069 -925,0.6380935311317444 -926,0.6380782127380371 -927,0.6380627751350403 -928,0.6380475759506226 -929,0.6380321979522705 -930,0.6380171179771423 -931,0.6380016803741455 -932,0.637986421585083 -933,0.6379712224006653 -934,0.6379559636116028 -935,0.6379407048225403 -936,0.6379255056381226 -937,0.6379103064537048 -938,0.6378952264785767 -939,0.6378800868988037 -940,0.6378650665283203 -941,0.6378499865531921 -942,0.6378348469734192 -943,0.6378198266029358 -944,0.6378046274185181 -945,0.6377896070480347 -946,0.637774646282196 -947,0.6377595663070679 -948,0.637744665145874 -949,0.6377297639846802 -950,0.6377148032188416 -951,0.6376998424530029 -952,0.6376848816871643 -953,0.6376699209213257 -954,0.6376550197601318 -955,0.6376401782035828 -956,0.6376253366470337 -957,0.6376103758811951 -958,0.6375956535339355 -959,0.6375808715820312 -960,0.6375659704208374 -961,0.6375511884689331 -962,0.6375362873077393 -963,0.6375216245651245 -964,0.637506902217865 -965,0.6374921798706055 -966,0.6374775767326355 -967,0.6374627947807312 -968,0.6374481320381165 -969,0.6374335885047913 -970,0.6374189257621765 -971,0.6374043226242065 -972,0.6373897194862366 -973,0.637374997138977 -974,0.6373605132102966 -975,0.6373459100723267 -976,0.6373314261436462 -977,0.6373168230056763 -978,0.6373023986816406 -979,0.6372877955436707 -980,0.6372733116149902 -981,0.6372588276863098 -982,0.6372444033622742 -983,0.6372299194335938 -984,0.6372154355049133 -985,0.637201189994812 -986,0.6371866464614868 -987,0.6371723413467407 -988,0.6371579170227051 -989,0.6371436715126038 -990,0.6371293663978577 -991,0.6371150016784668 -992,0.6371007561683655 -993,0.6370866298675537 -994,0.6370722055435181 -995,0.6370579600334167 -996,0.637043833732605 -997,0.6370297074317932 -998,0.6370154619216919 -999,0.6370012164115906 -1000,0.6369870901107788
diff --git a/training/time_weighting/square_root_mirrored/training_config.txt b/training/time_weighting/square_root_mirrored/training_config.txt
deleted file mode 100644
index 64a9ff7..0000000
--- a/training/time_weighting/square_root_mirrored/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.1
-Weight Decay: 0.0001
-
-Loss Function:
- def loss_fn(state_traj, t_span):
-     theta = state_traj[:, :, 0]  # Size: [t_points, batch_size]
-     desired_theta = state_traj[:, :, 3]  # Size: [t_points, batch_size]
-
-     min_weight = 0.01  # Weights are in the range [min_weight, 1]
-     weights = weight_fn(t_span, min_val=min_weight)  # Initially Size: [t_points]
-     # Reshape or expand weights to match theta dimensions
-     weights = weights.view(-1, 1)  # Now Size: [t_points, 1], broadcasts across the batch dimension
-
-     # Calculate the weighted loss
-     return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
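square_root_mirrored is the time reversal of square_root: the weight starts at 1 at t = 0 and decays to min_val at t = t_max, so the early transient dominates the loss instead of the settled behavior. A minimal sketch under the same assumptions (torch only, time-first trajectory layout):

import torch

def square_root_mirrored(t_span, t_max=None, min_val=0.01):
    # Decay from 1.0 at t = 0 to min_val at t = t_max, falling as sqrt(t_max - t).
    t_max = t_max if t_max is not None else t_span[-1]
    return min_val + ((1 - min_val) / t_max**0.5) * (t_max - t_span)**0.5

t_span = torch.linspace(0.0, 10.0, 1000)
w = square_root_mirrored(t_span)
print(w[0].item(), w[-1].item())  # -> 1.0 ... 0.01

Consistent with weighting the hard-to-remove initial transient most heavily, the run below plateaus near a loss of 2.44 rather than the roughly 0.64 reached under square_root.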
diff --git a/training/time_weighting/square_root_mirrored/training_log.csv b/training/time_weighting/square_root_mirrored/training_log.csv
deleted file mode 100644
index 0a88789..0000000
--- a/training/time_weighting/square_root_mirrored/training_log.csv
+++ /dev/null
@@ -1,1002 +0,0 @@
-Epoch,Loss
-0,472.03131103515625 -1,239.5032958984375 -2,128.45269775390625 -3,35.91033935546875 -4,19.557899475097656 -5,13.45746898651123 -6,10.717108726501465 -7,9.150884628295898 -8,8.341358184814453 -9,7.670628070831299 -10,6.810764312744141 -11,6.154086589813232 -12,5.808220386505127 -13,5.687150478363037 -14,5.576473236083984 -15,5.5100507736206055 -16,5.463283538818359 -17,5.4235005378723145 -18,5.372893333435059 -19,5.289089202880859 -20,5.1675801277160645 -21,5.016462802886963 -22,4.846563339233398 -23,4.6832756996154785 -24,4.53269100189209 -25,4.389254093170166 -26,4.251585006713867 -27,4.119555950164795 -28,3.994378089904785 -29,3.876568078994751 -30,3.7656593322753906 -31,3.663181781768799 -32,3.5724174976348877 -33,3.4978530406951904 -34,3.4442663192749023 -35,3.4104137420654297 -36,3.3891258239746094 -37,3.3722662925720215 -38,3.353893518447876 -39,3.331012487411499 -40,3.302680015563965 -41,3.269315242767334 -42,3.2322332859039307 -43,3.1932156085968018 -44,3.1541197299957275 -45,3.116694688796997 -46,3.0824837684631348 -47,3.052614450454712
-48,3.027493715286255 -49,3.006455898284912 -50,2.9883275032043457 -51,2.9718215465545654 -52,2.9558231830596924 -53,2.939500331878662 -54,2.9223644733428955 -55,2.9042556285858154 -56,2.885272741317749 -57,2.865755081176758 -58,2.8461599349975586 -59,2.8270130157470703 -60,2.8088154792785645 -61,2.7919423580169678 -62,2.776437759399414 -63,2.7620620727539062 -64,2.748548984527588 -65,2.735487222671509 -66,2.72247576713562 -67,2.709141254425049 -68,2.695672035217285 -69,2.68243145942688 -70,2.6697237491607666 -71,2.6578071117401123 -72,2.6468727588653564 -73,2.6370458602905273 -74,2.6283867359161377 -75,2.620910882949829 -76,2.614602565765381 -77,2.6093404293060303 -78,2.604623556137085 -79,2.599982500076294 -80,2.5951101779937744 -81,2.5898618698120117 -82,2.584347724914551 -83,2.5786805152893066 -84,2.5730063915252686 -85,2.567495822906494 -86,2.5622336864471436 -87,2.5573036670684814 -88,2.552706003189087 -89,2.548482894897461 -90,2.544666051864624 -91,2.5412039756774902 -92,2.5380442142486572 -93,2.535137414932251 -94,2.53248929977417 -95,2.5301413536071777 -96,2.528110980987549 -97,2.526392936706543 -98,2.525033473968506 -99,2.523940324783325 -100,2.5230486392974854 -101,2.522298812866211 -102,2.521671772003174 -103,2.5210938453674316 -104,2.520535469055176 -105,2.5199759006500244 -106,2.519390821456909 -107,2.5187785625457764 -108,2.518136501312256 -109,2.5174996852874756 -110,2.5168771743774414 -111,2.5162882804870605 -112,2.515734910964966 -113,2.5152130126953125 -114,2.5147175788879395 -115,2.514235734939575 -116,2.5137529373168945 -117,2.513258457183838 -118,2.5127463340759277 -119,2.512220621109009 -120,2.511690139770508 -121,2.511162281036377 -122,2.510647773742676 -123,2.510152578353882 -124,2.5096871852874756 -125,2.5092501640319824 -126,2.5088436603546143 -127,2.5084633827209473 -128,2.5081071853637695 -129,2.507769823074341 -130,2.5074462890625 -131,2.5071306228637695 -132,2.5068259239196777 -133,2.5065321922302246 -134,2.506244421005249 -135,2.505960702896118 -136,2.5056800842285156 -137,2.505402088165283 -138,2.505126476287842 -139,2.504852533340454 -140,2.50458025932312 -141,2.5043082237243652 -142,2.5040371417999268 -143,2.503767728805542 -144,2.503502130508423 -145,2.503239631652832 -146,2.5029821395874023 -147,2.5027294158935547 -148,2.5024797916412354 -149,2.502234697341919 -150,2.5019946098327637 -151,2.5017573833465576 -152,2.501523017883301 -153,2.5012903213500977 -154,2.5010604858398438 -155,2.5008342266082764 -156,2.5006136894226074 -157,2.500396251678467 -158,2.5001819133758545 -159,2.4999704360961914 -160,2.499762535095215 -161,2.4995579719543457 -162,2.4993553161621094 -163,2.499154806137085 -164,2.498955488204956 -165,2.4987573623657227 -166,2.498560905456543 -167,2.4983670711517334 -168,2.4981753826141357 -169,2.49798583984375 -170,2.497798442840576 -171,2.4976136684417725 -172,2.4974303245544434 -173,2.497248888015747 -174,2.4970688819885254 -175,2.4968903064727783 -176,2.496713161468506 -177,2.496537208557129 -178,2.4963622093200684 -179,2.4961905479431152 -180,2.496020555496216 -181,2.495851516723633 -182,2.495683193206787 -183,2.495516538619995 -184,2.4953503608703613 -185,2.495185375213623 -186,2.495020866394043 -187,2.4948580265045166 -188,2.494694948196411 -189,2.4945333003997803 -190,2.4943723678588867 -191,2.494213342666626 -192,2.4940543174743652 -193,2.4938955307006836 -194,2.493736743927002 -195,2.4935784339904785 -196,2.493420362472534 -197,2.493262529373169 -198,2.4931044578552246 -199,2.4929468631744385 -200,2.4927897453308105 
-201,2.492633581161499 -202,2.4924774169921875 -203,2.492323398590088 -204,2.4921696186065674 -205,2.492016077041626 -206,2.4918630123138428 -207,2.4917099475860596 -208,2.4915575981140137 -209,2.4914050102233887 -210,2.491253137588501 -211,2.4911019802093506 -212,2.490952253341675 -213,2.490804433822632 -214,2.4906582832336426 -215,2.4905128479003906 -216,2.4903690814971924 -217,2.490224838256836 -218,2.4900810718536377 -219,2.4899377822875977 -220,2.4897940158843994 -221,2.4896504878997803 -222,2.4895071983337402 -223,2.489363193511963 -224,2.4892191886901855 -225,2.489075183868408 -226,2.4889326095581055 -227,2.48879075050354 -228,2.4886505603790283 -229,2.4885103702545166 -230,2.4883711338043213 -231,2.4882314205169678 -232,2.488091230392456 -233,2.4879512786865234 -234,2.487811326980591 -235,2.4876716136932373 -236,2.4875314235687256 -237,2.487391710281372 -238,2.4872524738311768 -239,2.4871137142181396 -240,2.4869747161865234 -241,2.4868361949920654 -242,2.4867031574249268 -243,2.486572265625 -244,2.4864447116851807 -245,2.486318588256836 -246,2.486194133758545 -247,2.486072063446045 -248,2.4859514236450195 -249,2.485832452774048 -250,2.4857161045074463 -251,2.4856019020080566 -252,2.485490322113037 -253,2.485380172729492 -254,2.4852712154388428 -255,2.4851627349853516 -256,2.485053777694702 -257,2.4849448204040527 -258,2.4848365783691406 -259,2.4847283363342285 -260,2.4846208095550537 -261,2.4845144748687744 -262,2.484408140182495 -263,2.484301805496216 -264,2.4841959476470947 -265,2.4840900897979736 -266,2.4839842319488525 -267,2.4838790893554688 -268,2.483774423599243 -269,2.4836699962615967 -270,2.4835665225982666 -271,2.4834628105163574 -272,2.4833598136901855 -273,2.483257532119751 -274,2.4831559658050537 -275,2.4830548763275146 -276,2.482954263687134 -277,2.482853889465332 -278,2.4827539920806885 -279,2.482654571533203 -280,2.482555866241455 -281,2.4824578762054443 -282,2.4823596477508545 -283,2.4822616577148438 -284,2.482163429260254 -285,2.482065200805664 -286,2.481966972351074 -287,2.4818687438964844 -288,2.4817705154418945 -289,2.481673002243042 -290,2.481574535369873 -291,2.4814765453338623 -292,2.4813780784606934 -293,2.4812796115875244 -294,2.481180429458618 -295,2.481081485748291 -296,2.4809818267822266 -297,2.4808828830718994 -298,2.480783462524414 -299,2.4806838035583496 -300,2.480583429336548 -301,2.480482578277588 -302,2.480381727218628 -303,2.480280637741089 -304,2.4801785945892334 -305,2.480076789855957 -306,2.479973793029785 -307,2.479870319366455 -308,2.479767322540283 -309,2.479663610458374 -310,2.479559898376465 -311,2.4794559478759766 -312,2.4793527126312256 -313,2.479249954223633 -314,2.4791460037231445 -315,2.4790422916412354 -316,2.478938102722168 -317,2.4788334369659424 -318,2.4787285327911377 -319,2.478623867034912 -320,2.4785194396972656 -321,2.47841477394104 -322,2.4783108234405518 -323,2.4782073497772217 -324,2.4781036376953125 -325,2.4780001640319824 -326,2.4778966903686523 -327,2.477792978286743 -328,2.477689266204834 -329,2.477586269378662 -330,2.4774837493896484 -331,2.477382183074951 -332,2.477280855178833 -333,2.4771804809570312 -334,2.477081060409546 -335,2.476980447769165 -336,2.4768800735473633 -337,2.476780652999878 -338,2.476681709289551 -339,2.47658371925354 -340,2.47648549079895 -341,2.4763875007629395 -342,2.4762895107269287 -343,2.476191520690918 -344,2.4760947227478027 -345,2.475998640060425 -346,2.4759035110473633 -347,2.4758095741271973 -348,2.4757163524627686 -349,2.475623369216919 -350,2.475532054901123 -351,2.475440502166748 
-352,2.4753499031066895 -353,2.475259304046631 -354,2.4751696586608887 -355,2.475080728530884 -356,2.4749915599823 -357,2.474903106689453 -358,2.4748148918151855 -359,2.474726676940918 -360,2.4746389389038086 -361,2.474550724029541 -362,2.47446346282959 -363,2.4743762016296387 -364,2.4742894172668457 -365,2.474202871322632 -366,2.474116325378418 -367,2.4740309715270996 -368,2.473947048187256 -369,2.473865032196045 -370,2.4737844467163086 -371,2.4737045764923096 -372,2.473625898361206 -373,2.47354793548584 -374,2.473471164703369 -375,2.4733943939208984 -376,2.473318099975586 -377,2.4732418060302734 -378,2.47316575050354 -379,2.4730899333953857 -380,2.4730143547058105 -381,2.4729390144348145 -382,2.4728643894195557 -383,2.4727895259857178 -384,2.472714900970459 -385,2.4726407527923584 -386,2.472567081451416 -387,2.4724936485290527 -388,2.4724197387695312 -389,2.4723470211029053 -390,2.4722743034362793 -391,2.4722018241882324 -392,2.4721295833587646 -393,2.4720582962036133 -394,2.471986770629883 -395,2.4719152450561523 -396,2.471843719482422 -397,2.4717724323272705 -398,2.4717016220092773 -399,2.471630573272705 -400,2.471559762954712 -401,2.471489429473877 -402,2.471418857574463 -403,2.471348524093628 -404,2.471277952194214 -405,2.471208095550537 -406,2.4711382389068604 -407,2.4710681438446045 -408,2.470999240875244 -409,2.4709296226501465 -410,2.470860481262207 -411,2.470791816711426 -412,2.4707233905792236 -413,2.4706544876098633 -414,2.470585823059082 -415,2.470517635345459 -416,2.470449686050415 -417,2.470381498336792 -418,2.470313310623169 -419,2.470245838165283 -420,2.47017765045166 -421,2.4701101779937744 -422,2.4700427055358887 -423,2.469974994659424 -424,2.469907283782959 -425,2.469839572906494 -426,2.4697721004486084 -427,2.4697046279907227 -428,2.469637393951416 -429,2.4695701599121094 -430,2.4695029258728027 -431,2.4694361686706543 -432,2.469369649887085 -433,2.4693028926849365 -434,2.469236373901367 -435,2.469169855117798 -436,2.4691033363342285 -437,2.469036817550659 -438,2.468970775604248 -439,2.4689040184020996 -440,2.4688384532928467 -441,2.4687724113464355 -442,2.4687063694000244 -443,2.4686405658721924 -444,2.4685745239257812 -445,2.4685089588165283 -446,2.468442678451538 -447,2.468376874923706 -448,2.468310832977295 -449,2.468245029449463 -450,2.4681785106658936 -451,2.4681124687194824 -452,2.4680464267730713 -453,2.46798038482666 -454,2.467914581298828 -455,2.467848777770996 -456,2.467782735824585 -457,2.467716693878174 -458,2.4676506519317627 -459,2.4675846099853516 -460,2.4675185680389404 -461,2.46745228767395 -462,2.467386245727539 -463,2.467320203781128 -464,2.467254161834717 -465,2.467188835144043 -466,2.46712327003479 -467,2.467057228088379 -468,2.4669911861419678 -469,2.4669253826141357 -470,2.4668593406677246 -471,2.4667932987213135 -472,2.4667274951934814 -473,2.466661214828491 -474,2.46659517288208 -475,2.466529369354248 -476,2.466463804244995 -477,2.466398000717163 -478,2.466331958770752 -479,2.4662671089172363 -480,2.4662017822265625 -481,2.4661364555358887 -482,2.466071128845215 -483,2.4660067558288574 -484,2.4659414291381836 -485,2.465876817703247 -486,2.4658122062683105 -487,2.465747594833374 -488,2.4656829833984375 -489,2.465618371963501 -490,2.4655537605285645 -491,2.465488910675049 -492,2.4654242992401123 -493,2.4653594493865967 -494,2.4652953147888184 -495,2.4652304649353027 -496,2.465165853500366 -497,2.465101480484009 -498,2.465036392211914 -499,2.4649715423583984 -500,2.464906692504883 -501,2.4648420810699463 -502,2.464777708053589 
-503,2.4647133350372314 -504,2.464648723602295 -505,2.4645841121673584 -506,2.46451997756958 -507,2.464456081390381 -508,2.4643924236297607 -509,2.464329242706299 -510,2.464265823364258 -511,2.464202880859375 -512,2.464139461517334 -513,2.4640769958496094 -514,2.4640135765075684 -515,2.4639508724212646 -516,2.463887929916382 -517,2.463824987411499 -518,2.4637625217437744 -519,2.4636995792388916 -520,2.463636875152588 -521,2.463573932647705 -522,2.4635109901428223 -523,2.4634478092193604 -524,2.4633851051330566 -525,2.463322162628174 -526,2.463259696960449 -527,2.4631969928741455 -528,2.463134527206421 -529,2.463071346282959 -530,2.4630088806152344 -531,2.4629461765289307 -532,2.462883472442627 -533,2.4628207683563232 -534,2.4627580642700195 -535,2.4626951217651367 -536,2.462631940841675 -537,2.462569236755371 -538,2.462506055831909 -539,2.4624428749084473 -540,2.4623794555664062 -541,2.4623162746429443 -542,2.4622530937194824 -543,2.4621899127960205 -544,2.4621264934539795 -545,2.4620633125305176 -546,2.4620001316070557 -547,2.461937189102173 -548,2.4618735313415527 -549,2.461810827255249 -550,2.461747646331787 -551,2.4616847038269043 -552,2.4616219997406006 -553,2.461559534072876 -554,2.4614970684051514 -555,2.461435079574585 -556,2.4613730907440186 -557,2.4613113403320312 -558,2.461249351501465 -559,2.461188554763794 -560,2.461127281188965 -561,2.461066961288452 -562,2.4610061645507812 -563,2.4609460830688477 -564,2.4608864784240723 -565,2.460826873779297 -566,2.460767984390259 -567,2.4607083797454834 -568,2.4606499671936035 -569,2.4605915546417236 -570,2.460533618927002 -571,2.460475444793701 -572,2.4604177474975586 -573,2.460360527038574 -574,2.4603030681610107 -575,2.4602463245391846 -576,2.460188865661621 -577,2.4601316452026367 -578,2.4600753784179688 -579,2.4600186347961426 -580,2.4599618911743164 -581,2.4599051475524902 -582,2.459848642349243 -583,2.4597928524017334 -584,2.459735870361328 -585,2.459679365158081 -586,2.459623098373413 -587,2.459567070007324 -588,2.4595108032226562 -589,2.4594550132751465 -590,2.4593985080718994 -591,2.4593424797058105 -592,2.459286689758301 -593,2.459230899810791 -594,2.459174871444702 -595,2.4591190814971924 -596,2.4590632915496826 -597,2.459007501602173 -598,2.458951950073242 -599,2.4588959217071533 -600,2.45884108543396 -601,2.458785057067871 -602,2.4587295055389404 -603,2.458674430847168 -604,2.4586188793182373 -605,2.4585635662078857 -606,2.4585087299346924 -607,2.4584527015686035 -608,2.458397388458252 -609,2.4583420753479004 -610,2.4582860469818115 -611,2.4582302570343018 -612,2.458174705505371 -613,2.4581191539764404 -614,2.458063840866089 -615,2.4580078125 -616,2.457951784133911 -617,2.4578962326049805 -618,2.4578402042388916 -619,2.45778489112854 -620,2.4577295780181885 -621,2.4576735496520996 -622,2.457618236541748 -623,2.457561731338501 -624,2.457505941390991 -625,2.4574503898620605 -626,2.4573934078216553 -627,2.457336187362671 -628,2.457279920578003 -629,2.457221508026123 -630,2.457163095474243 -631,2.457103967666626 -632,2.457042694091797 -633,2.4569807052612305 -634,2.456918716430664 -635,2.4568564891815186 -636,2.456794023513794 -637,2.4567317962646484 -638,2.4566686153411865 -639,2.4566054344177246 -640,2.4565420150756836 -641,2.4564783573150635 -642,2.4564146995544434 -643,2.456350564956665 -644,2.4562861919403076 -645,2.456221580505371 -646,2.456157684326172 -647,2.4560930728912354 -648,2.456028699874878 -649,2.455963611602783 -650,2.4558987617492676 -651,2.4558334350585938 -652,2.4557688236236572 -653,2.455704927444458 
-654,2.4556407928466797 -655,2.455576181411743 -656,2.4555118083953857 -657,2.45544695854187 -658,2.4553816318511963 -659,2.455317258834839 -660,2.455252170562744 -661,2.4551877975463867 -662,2.4551243782043457 -663,2.4550604820251465 -664,2.454996109008789 -665,2.454932451248169 -666,2.4548676013946533 -667,2.454803228378296 -668,2.4547390937805176 -669,2.45467472076416 -670,2.45461106300354 -671,2.454547166824341 -672,2.4544835090637207 -673,2.4544200897216797 -674,2.4543569087982178 -675,2.454293966293335 -676,2.454232692718506 -677,2.4541728496551514 -678,2.4541168212890625 -679,2.4540622234344482 -680,2.454007863998413 -681,2.453953742980957 -682,2.453900098800659 -683,2.4538466930389404 -684,2.4537925720214844 -685,2.453740119934082 -686,2.453686475753784 -687,2.4536337852478027 -688,2.453582763671875 -689,2.4535293579101562 -690,2.453477382659912 -691,2.453425407409668 -692,2.4533724784851074 -693,2.453319549560547 -694,2.453268051147461 -695,2.4532148838043213 -696,2.45316219329834 -697,2.453110456466675 -698,2.453057289123535 -699,2.453005313873291 -700,2.4529547691345215 -701,2.4529025554656982 -702,2.452850818634033 -703,2.4527997970581055 -704,2.452746868133545 -705,2.452694892883301 -706,2.4526431560516357 -707,2.452589750289917 -708,2.4525365829467773 -709,2.452484130859375 -710,2.452430009841919 -711,2.4523768424987793 -712,2.452324151992798 -713,2.4522695541381836 -714,2.452216148376465 -715,2.452162742614746 -716,2.4521090984344482 -717,2.452054977416992 -718,2.4519999027252197 -719,2.4519448280334473 -720,2.451890468597412 -721,2.4518346786499023 -722,2.4517786502838135 -723,2.451723098754883 -724,2.451667308807373 -725,2.4516100883483887 -726,2.4515538215637207 -727,2.4514963626861572 -728,2.451439142227173 -729,2.451380968093872 -730,2.4513235092163086 -731,2.451265573501587 -732,2.451206684112549 -733,2.45114803314209 -734,2.451089382171631 -735,2.4510304927825928 -736,2.4509711265563965 -737,2.450911521911621 -738,2.4508519172668457 -739,2.450792074203491 -740,2.4507317543029785 -741,2.4506747722625732 -742,2.450622081756592 -743,2.4505693912506104 -744,2.4505155086517334 -745,2.4504590034484863 -746,2.45039963722229 -747,2.450338125228882 -748,2.450275182723999 -749,2.450212001800537 -750,2.4501490592956543 -751,2.450087547302246 -752,2.450028657913208 -753,2.44996976852417 -754,2.4499099254608154 -755,2.449849843978882 -756,2.4497885704040527 -757,2.4497275352478027 -758,2.4496655464172363 -759,2.4496045112609863 -760,2.449544906616211 -761,2.449486494064331 -762,2.4494283199310303 -763,2.4493706226348877 -764,2.4493114948272705 -765,2.4492528438568115 -766,2.44919490814209 -767,2.449136734008789 -768,2.449079990386963 -769,2.4490222930908203 -770,2.4489669799804688 -771,2.4489104747772217 -772,2.448856830596924 -773,2.4488017559051514 -774,2.448742628097534 -775,2.4486870765686035 -776,2.448632001876831 -777,2.4485743045806885 -778,2.448521375656128 -779,2.448465347290039 -780,2.4484078884124756 -781,2.4483518600463867 -782,2.4482979774475098 -783,2.4482414722442627 -784,2.4481875896453857 -785,2.448131561279297 -786,2.448078155517578 -787,2.448024034500122 -788,2.4479660987854004 -789,2.447911500930786 -790,2.4478564262390137 -791,2.4478001594543457 -792,2.447747230529785 -793,2.4476916790008545 -794,2.447638511657715 -795,2.4475834369659424 -796,2.4475338459014893 -797,2.447481632232666 -798,2.4474239349365234 -799,2.44736647605896 -800,2.4473114013671875 -801,2.4472570419311523 -802,2.447202682495117 -803,2.447147846221924 -804,2.4470934867858887 
-805,2.4470386505126953 -806,2.4469833374023438 -807,2.4469287395477295 -808,2.4468729496002197 -809,2.4468181133270264 -810,2.4467616081237793 -811,2.4467077255249023 -812,2.4466519355773926 -813,2.4465982913970947 -814,2.4465413093566895 -815,2.4464855194091797 -816,2.4464282989501953 -817,2.4463722705841064 -818,2.4463162422180176 -819,2.446261405944824 -820,2.4462053775787354 -821,2.446146011352539 -822,2.4460904598236084 -823,2.4460341930389404 -824,2.445977210998535 -825,2.445918560028076 -826,2.4458603858947754 -827,2.445805788040161 -828,2.4457476139068604 -829,2.4456872940063477 -830,2.4456307888031006 -831,2.445570945739746 -832,2.4455130100250244 -833,2.445453643798828 -834,2.445394515991211 -835,2.4453351497650146 -836,2.4452762603759766 -837,2.4452168941497803 -838,2.4451544284820557 -839,2.445096254348755 -840,2.445033311843872 -841,2.4449760913848877 -842,2.444917917251587 -843,2.444855213165283 -844,2.444788694381714 -845,2.4447238445281982 -846,2.4446611404418945 -847,2.444598913192749 -848,2.444537401199341 -849,2.4444727897644043 -850,2.4444050788879395 -851,2.4443414211273193 -852,2.4442756175994873 -853,2.4442131519317627 -854,2.444150686264038 -855,2.444084882736206 -856,2.4440155029296875 -857,2.4439449310302734 -858,2.443880081176758 -859,2.443812608718872 -860,2.4437475204467773 -861,2.44368314743042 -862,2.4436185359954834 -863,2.4435601234436035 -864,2.4434974193573 -865,2.443441152572632 -866,2.4433839321136475 -867,2.443323850631714 -868,2.4432625770568848 -869,2.4432055950164795 -870,2.4431443214416504 -871,2.4430911540985107 -872,2.443037748336792 -873,2.442979335784912 -874,2.4429173469543457 -875,2.442854642868042 -876,2.4427947998046875 -877,2.4427385330200195 -878,2.442683696746826 -879,2.442631483078003 -880,2.4425764083862305 -881,2.4425196647644043 -882,2.442460536956787 -883,2.4424078464508057 -884,2.442351818084717 -885,2.442298412322998 -886,2.4422473907470703 -887,2.442193031311035 -888,2.4421350955963135 -889,2.44207501411438 -890,2.4420223236083984 -891,2.4419662952423096 -892,2.441908836364746 -893,2.441857099533081 -894,2.441802501678467 -895,2.4417452812194824 -896,2.4416868686676025 -897,2.441636085510254 -898,2.441579818725586 -899,2.441526174545288 -900,2.4414753913879395 -901,2.4414219856262207 -902,2.4413647651672363 -903,2.4413061141967773 -904,2.4412548542022705 -905,2.4412002563476562 -906,2.4411447048187256 -907,2.441094398498535 -908,2.4410414695739746 -909,2.440985918045044 -910,2.4409286975860596 -911,2.4408748149871826 -912,2.4408199787139893 -913,2.4407689571380615 -914,2.4407191276550293 -915,2.4406657218933105 -916,2.4406087398529053 -917,2.4405508041381836 -918,2.4404983520507812 -919,2.440444231033325 -920,2.440389633178711 -921,2.4403390884399414 -922,2.4402859210968018 -923,2.440229654312134 -924,2.440171957015991 -925,2.440119981765747 -926,2.4400651454925537 -927,2.4400112628936768 -928,2.439960479736328 -929,2.439906597137451 -930,2.439850091934204 -931,2.4397921562194824 -932,2.439742088317871 -933,2.4396862983703613 -934,2.439633369445801 -935,2.4395833015441895 -936,2.4395313262939453 -937,2.4394750595092773 -938,2.4394192695617676 -939,2.4393622875213623 -940,2.4393088817596436 -941,2.4392547607421875 -942,2.439199686050415 -943,2.4391462802886963 -944,2.439096212387085 -945,2.439042329788208 -946,2.4389901161193848 -947,2.438934326171875 -948,2.4388816356658936 -949,2.4388275146484375 -950,2.4387776851654053 -951,2.438722610473633 -952,2.4386720657348633 -953,2.4386165142059326 -954,2.4385664463043213 
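The tail of this log decays almost linearly: between epochs 352 and 954 the loss falls from 2.4753499 to 2.4385664, an average improvement of roughly 6e-5 per epoch. A quick arithmetic check in plain Python, using only the two endpoint rows above:

l_352, l_954 = 2.4753499031066895, 2.4385664463043213
print((l_352 - l_954) / (954 - 352))  # ~6.11e-05 loss units per epoch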
-955,2.4385147094726562 -956,2.4384560585021973 -957,2.438404083251953 -958,2.438349723815918 -959,2.4383022785186768 -960,2.4382519721984863 -961,2.4381930828094482 -962,2.438143730163574 -963,2.438093662261963 -964,2.43803334236145 -965,2.4379899501800537 -966,2.4379427433013916 -967,2.43788480758667 -968,2.437821626663208 -969,2.4377758502960205 -970,2.4377293586730957 -971,2.4376659393310547 -972,2.4376165866851807 -973,2.437570571899414 -974,2.4375171661376953 -975,2.4374544620513916 -976,2.4374067783355713 -977,2.437359094619751 -978,2.4372975826263428 -979,2.4372496604919434 -980,2.437204122543335 -981,2.4371464252471924 -982,2.4370834827423096 -983,2.4370408058166504 -984,2.4369914531707764 -985,2.4369280338287354 -986,2.436885356903076 -987,2.436838150024414 -988,2.436781167984009 -989,2.436718225479126 -990,2.4366767406463623 -991,2.4366238117218018 -992,2.436563730239868 -993,2.436514377593994 -994,2.4364609718322754 -995,2.4364099502563477 -996,2.4363596439361572 -997,2.4363059997558594 -998,2.4362542629241943 -999,2.4361987113952637 -1000,2.4361495971679688 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.010/training_config.txt deleted file mode 100644 index 7d2a9b6..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.010/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git 
a/training/time_weighting_learning_rate_sweep/constant/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.010/training_log.csv deleted file mode 100644 index e8fdff4..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,929.5012817382812 -2,760.7491455078125 -3,719.2329711914062 -4,628.367919921875 -5,497.7966003417969 -6,438.876220703125 -7,345.08258056640625 -8,249.2259521484375 -9,232.88772583007812 -10,195.8839111328125 -11,153.7075653076172 -12,162.1884307861328 -13,156.3458709716797 -14,146.13671875 -15,139.6178741455078 -16,137.17164611816406 -17,127.34637451171875 -18,124.68274688720703 -19,120.20781707763672 -20,119.68919372558594 -21,118.0666732788086 -22,112.44929504394531 -23,111.71492004394531 -24,107.95944213867188 -25,103.55692291259766 -26,104.09497833251953 -27,104.35210418701172 -28,103.973388671875 -29,108.83365631103516 -30,106.27936553955078 -31,97.43461608886719 -32,91.98582458496094 -33,85.0150375366211 -34,81.576416015625 -35,78.11054229736328 -36,74.8123779296875 -37,71.9703369140625 -38,69.01492309570312 -39,66.16272735595703 -40,63.46235275268555 -41,69.29388427734375 -42,66.73934173583984 -43,65.28871154785156 -44,64.34028625488281 -45,63.76631164550781 -46,63.3863639831543 -47,63.1524772644043 -48,63.009090423583984 -49,62.83045959472656 -50,61.66761779785156 -51,62.12139129638672 -52,62.43665313720703 -53,61.677330017089844 -54,61.29457473754883 -55,57.714046478271484 -56,52.49813461303711 -57,43.35012435913086 -58,39.513614654541016 -59,38.22907257080078 -60,37.408077239990234 -61,36.67443084716797 -62,35.95701217651367 -63,35.44706726074219 -64,35.04279327392578 -65,34.68356704711914 -66,34.38911819458008 -67,34.61005401611328 -68,34.5826301574707 -69,34.29618453979492 -70,34.02177429199219 -71,33.777122497558594 -72,33.156009674072266 -73,32.17774200439453 -74,31.8585262298584 -75,31.68084144592285 -76,31.552824020385742 -77,31.268096923828125 -78,31.126066207885742 -79,30.796327590942383 -80,29.9853515625 -81,30.022335052490234 -82,30.004865646362305 -83,30.024503707885742 -84,30.350528717041016 -85,30.382726669311523 -86,30.38064193725586 -87,30.345109939575195 -88,30.2956600189209 -89,30.249067306518555 -90,30.427114486694336 -91,30.257610321044922 -92,30.033601760864258 -93,30.09646224975586 -94,30.13431167602539 -95,30.13173484802246 -96,30.09049415588379 -97,30.030664443969727 -98,29.978221893310547 -99,29.949363708496094 -100,29.94510841369629 -101,29.95542335510254 -102,29.963903427124023 -103,29.95401954650879 -104,29.91761589050293 -105,29.857656478881836 -106,29.783775329589844 -107,29.705184936523438 -108,29.62762451171875 -109,29.553142547607422 -110,29.481164932250977 -111,29.409881591796875 -112,29.336000442504883 -113,29.254844665527344 -114,29.159570693969727 -115,29.039579391479492 -116,28.878250122070312 -117,28.632080078125 -118,27.398883819580078 -119,27.31397247314453 -120,27.296682357788086 -121,27.295808792114258 -122,27.296131134033203 -123,27.287403106689453 -124,27.262155532836914 -125,27.215559005737305 -126,27.1453857421875 -127,27.056438446044922 -128,26.970943450927734 -129,26.889707565307617 -130,26.791837692260742 -131,26.66192626953125 -132,26.499778747558594 -133,26.349170684814453 -134,26.233783721923828 -135,26.13144874572754 -136,26.02330207824707 -137,25.897985458374023 -138,25.744298934936523 -139,25.593557357788086 -140,25.54541778564453 -141,25.556602478027344 
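Every training_log.csv deleted in this commit shares the same two-column Epoch,Loss layout, so one loader covers them all. A minimal sketch for inspecting a run, assuming pandas and matplotlib are installed and the file is read at its pre-deletion path:

import pandas as pd
import matplotlib.pyplot as plt

# Path as it existed before this commit removed the file.
log = pd.read_csv("training/time_weighting_learning_rate_sweep/constant/lr_0.010/training_log.csv")

# The losses span nearly two orders of magnitude (1087.9 at epoch 0 down to
# ~24.5 at epoch 200), so a log-scaled y-axis keeps the curve readable.
plt.semilogy(log["Epoch"], log["Loss"])
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Constant weighting, lr = 0.010")
plt.show()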
-142,25.545976638793945 -143,25.517065048217773 -144,25.476497650146484 -145,25.424301147460938 -146,25.359004974365234 -147,25.29531478881836 -148,25.28548240661621 -149,25.30689811706543 -150,25.302318572998047 -151,25.271909713745117 -152,25.219276428222656 -153,25.18211555480957 -154,25.183395385742188 -155,25.1825008392334 -156,25.167064666748047 -157,25.137479782104492 -158,25.101871490478516 -159,25.076181411743164 -160,25.07478904724121 -161,25.06527328491211 -162,25.037206649780273 -163,25.007558822631836 -164,24.996585845947266 -165,24.987852096557617 -166,24.972766876220703 -167,24.95099449157715 -168,24.92755126953125 -169,24.913040161132812 -170,24.904504776000977 -171,24.887758255004883 -172,24.86594581604004 -173,24.851192474365234 -174,24.84052276611328 -175,24.82664680480957 -176,24.80894660949707 -177,24.791629791259766 -178,24.779516220092773 -179,24.767210006713867 -180,24.750703811645508 -181,24.734315872192383 -182,24.721378326416016 -183,24.708040237426758 -184,24.691965103149414 -185,24.67508316040039 -186,24.660314559936523 -187,24.64565658569336 -188,24.628795623779297 -189,24.61121368408203 -190,24.594440460205078 -191,24.577133178710938 -192,24.558361053466797 -193,24.53976058959961 -194,24.52371597290039 -195,24.5109806060791 -196,24.50069236755371 -197,24.495590209960938 -198,24.492685317993164 -199,24.475553512573242 -200,24.458232879638672 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.020/training_config.txt deleted file mode 100644 index e00571d..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.020/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] 
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.020/training_log.csv deleted file mode 100644 index 7e3e31e..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,848.4541015625 -2,585.0303344726562 -3,357.997314453125 -4,222.19033813476562 -5,166.72950744628906 -6,168.06515502929688 -7,149.70050048828125 -8,133.18544006347656 -9,123.33529663085938 -10,112.84333038330078 -11,104.2765121459961 -12,90.75269317626953 -13,77.59008026123047 -14,62.69501876831055 -15,52.964908599853516 -16,47.52260208129883 -17,41.17879867553711 -18,34.73335647583008 -19,32.59225082397461 -20,26.943431854248047 -21,25.71811866760254 -22,24.074312210083008 -23,21.00414276123047 -24,20.200172424316406 -25,18.574867248535156 -26,16.756731033325195 -27,16.277568817138672 -28,16.236690521240234 -29,14.540406227111816 -30,13.96481990814209 -31,13.146289825439453 -32,12.308053970336914 -33,11.763128280639648 -34,11.230616569519043 -35,10.92441463470459 -36,11.060528755187988 -37,11.085006713867188 -38,11.011038780212402 -39,10.908602714538574 -40,10.785636901855469 -41,10.577847480773926 -42,10.587580680847168 -43,10.458529472351074 -44,10.573966026306152 -45,10.447955131530762 -46,10.454204559326172 -47,10.36115837097168 -48,10.225329399108887 -49,10.246866226196289 -50,10.265070915222168 -51,10.250393867492676 -52,10.218449592590332 -53,10.165895462036133 -54,10.004765510559082 -55,10.001520156860352 -56,9.96434497833252 -57,9.87012767791748 -58,9.775923728942871 -59,9.761712074279785 -60,9.766066551208496 -61,9.767330169677734 -62,9.749612808227539 -63,9.717607498168945 -64,9.671631813049316 -65,9.609047889709473 -66,9.521329879760742 -67,9.341747283935547 -68,9.420880317687988 -69,9.473664283752441 -70,9.477346420288086 -71,9.449597358703613 -72,9.353282928466797 -73,9.322884559631348 -74,9.15274715423584 -75,9.3987398147583 -76,9.465538024902344 -77,9.506175994873047 -78,9.523160934448242 -79,9.530499458312988 -80,9.530074119567871 -81,9.523048400878906 -82,9.510130882263184 -83,9.491634368896484 -84,9.467304229736328 -85,9.434998512268066 -86,9.37868595123291 -87,9.398971557617188 -88,9.382739067077637 -89,9.349221229553223 -90,9.310247421264648 -91,9.26604175567627 -92,9.10019302368164 -93,9.072464942932129 -94,9.008045196533203 -95,9.061010360717773 -96,9.136478424072266 -97,9.16582202911377 -98,9.173788070678711 -99,9.171384811401367 -100,9.162890434265137 -101,9.149582862854004 -102,9.127596855163574 -103,9.00387954711914 -104,9.084137916564941 -105,9.08071231842041 -106,9.068263053894043 -107,9.05156135559082 -108,9.031661987304688 -109,9.009000778198242 -110,8.983818054199219 -111,8.95622730255127 -112,8.92620849609375 -113,8.893411636352539 -114,8.856374740600586 -115,8.804402351379395 -116,8.795841217041016 -117,8.779950141906738 -118,8.751302719116211 -119,8.720248222351074 -120,8.68759822845459 -121,8.653236389160156 -122,8.616758346557617 -123,8.577445983886719 -124,8.534051895141602 -125,8.484238624572754 -126,8.42344856262207 -127,8.344918251037598 -128,8.217096328735352 -129,8.157134056091309 -130,8.455394744873047 
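All of the deleted training_config.txt files record the same weighted trajectory loss; only the Learning Rate line differs between directories. A self-contained PyTorch restatement of that loss follows, with two caveats: the weight_fn default argument is added here only so the sketch stands alone, and the original snippet's weights.view(-1, 1) (shape [t_points, 1]) would not broadcast against the [batch_size, t_points] theta its own comments describe, so the sketch reshapes to [1, t_points] instead:

from typing import List, Union
import torch

def constant(t_span: Union[torch.Tensor, List[float]],
             t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
    # Constant weighting: every time point counts equally; t_max and min_val
    # exist only to match the shared weight-function signature.
    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
    return torch.ones_like(t_span)

def loss_fn(state_traj: torch.Tensor, t_span: torch.Tensor,
            weight_fn=constant) -> torch.Tensor:
    theta = state_traj[:, :, 0]                # [batch_size, t_points]
    desired_theta = state_traj[:, :, 3]        # [batch_size, t_points]
    weights = weight_fn(t_span, min_val=0.01)  # [t_points]
    weights = weights.view(1, -1)              # [1, t_points]: broadcasts over the batch
    return torch.mean(weights * (theta - desired_theta) ** 2)

With the configs' settings, state_traj would be [22, 1000, 4]: the 22 training cases rolled out over 1000 points on the 0-to-10 time span.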
-131,8.602924346923828 -132,8.67727279663086 -133,8.704824447631836 -134,8.682988166809082 -135,8.496933937072754 -136,8.317636489868164 -137,8.348366737365723 -138,8.366308212280273 -139,8.372465133666992 -140,8.370376586914062 -141,8.362154006958008 -142,8.34916877746582 -143,8.332263946533203 -144,8.312002182006836 -145,8.288758277893066 -146,8.262754440307617 -147,8.234086036682129 -148,8.202699661254883 -149,8.168371200561523 -150,8.130666732788086 -151,8.08890151977539 -152,8.041812896728516 -153,7.9888153076171875 -154,7.932823181152344 -155,7.876833915710449 -156,7.779386520385742 -157,7.75201940536499 -158,7.717170238494873 -159,7.6829681396484375 -160,7.651601791381836 -161,7.515256881713867 -162,7.587852478027344 -163,7.577576160430908 -164,7.560608863830566 -165,7.539144039154053 -166,7.514283180236816 -167,7.486752033233643 -168,7.457034111022949 -169,7.42548942565918 -170,7.392409801483154 -171,7.358027935028076 -172,7.322559833526611 -173,7.286151885986328 -174,7.248970985412598 -175,7.211185455322266 -176,7.172971725463867 -177,7.13458251953125 -178,7.096227169036865 -179,7.058074474334717 -180,7.020253658294678 -181,6.982621669769287 -182,6.944700241088867 -183,6.905762195587158 -184,6.864636421203613 -185,6.819009780883789 -186,6.742674827575684 -187,6.766718864440918 -188,6.739691257476807 -189,6.681982040405273 -190,6.753336429595947 -191,6.8020100593566895 -192,6.815769195556641 -193,6.806506156921387 -194,6.787343978881836 -195,6.765802383422852 -196,6.742506504058838 -197,6.7116851806640625 -198,6.6643524169921875 -199,6.608691215515137 -200,6.475497245788574 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.040/training_config.txt deleted file mode 100644 index c431233..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.040/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 
3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.040/training_log.csv deleted file mode 100644 index 92e5b98..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,701.9725341796875 -2,264.0076904296875 -3,159.40110778808594 -4,126.43462371826172 -5,92.0433120727539 -6,74.90762329101562 -7,67.69001770019531 -8,60.541873931884766 -9,48.747215270996094 -10,45.35858917236328 -11,46.4842414855957 -12,40.74010467529297 -13,39.21887969970703 -14,39.1895751953125 -15,36.91448974609375 -16,29.01195526123047 -17,25.2808837890625 -18,19.1278076171875 -19,18.45102310180664 -20,17.770423889160156 -21,18.373388290405273 -22,16.749378204345703 -23,16.542858123779297 -24,16.642562866210938 -25,15.759243965148926 -26,15.941630363464355 -27,15.951122283935547 -28,15.904982566833496 -29,15.810111999511719 -30,15.635967254638672 -31,16.531970977783203 -32,16.44236946105957 -33,16.097810745239258 -34,16.66484832763672 -35,16.582366943359375 -36,17.218414306640625 -37,17.34698486328125 -38,17.035799026489258 -39,17.000234603881836 -40,17.03762435913086 -41,17.069860458374023 -42,17.03567123413086 -43,16.875198364257812 -44,16.454540252685547 -45,15.30571460723877 -46,14.484301567077637 -47,14.928971290588379 -48,15.227804183959961 -49,15.299188613891602 -50,15.255221366882324 -51,15.136775970458984 -52,14.960775375366211 -53,14.729593276977539 -54,14.436092376708984 -55,14.060531616210938 -56,13.573005676269531 -57,12.97873306274414 -58,12.422508239746094 -59,12.276296615600586 -60,12.742033958435059 -61,12.347969055175781 -62,12.179417610168457 -63,12.147784233093262 -64,12.13644027709961 -65,12.125748634338379 -66,12.105073928833008 -67,12.069154739379883 -68,12.01697826385498 -69,11.948393821716309 -70,11.864931106567383 -71,11.773849487304688 -72,11.701828002929688 -73,11.681047439575195 -74,11.67745590209961 -75,11.655662536621094 -76,11.62376880645752 -77,11.585184097290039 -78,11.536943435668945 -79,11.479323387145996 -80,11.413002967834473 -81,11.339367866516113 -82,11.263132095336914 -83,11.196410179138184 -84,11.154114723205566 -85,11.133700370788574 -86,11.122450828552246 -87,11.111432075500488 -88,11.09665298461914 -89,11.076613426208496 -90,11.050999641418457 -91,11.020368576049805 -92,10.986037254333496 -93,10.950230598449707 -94,10.916117668151855 -95,10.887368202209473 -96,10.866628646850586 -97,10.852522850036621 -98,10.84048843383789 -99,10.826284408569336 -100,10.808121681213379 -101,10.786215782165527 -102,10.762040138244629 -103,10.737741470336914 -104,10.715201377868652 -105,10.6950101852417 -106,10.676193237304688 -107,10.657072067260742 -108,10.636316299438477 -109,10.613516807556152 -110,10.589245796203613 -111,10.564495086669922 -112,10.540307998657227 -113,10.517278671264648 -114,10.495426177978516 -115,10.47428035736084 -116,10.45315170288086 -117,10.431408882141113 -118,10.4087553024292 
-119,10.385278701782227 -120,10.361358642578125 -121,10.337419509887695 -122,10.313730239868164 -123,10.290315628051758 -124,10.266986846923828 -125,10.243412971496582 -126,10.219423294067383 -127,10.194990158081055 -128,10.170079231262207 -129,10.144745826721191 -130,10.119159698486328 -131,10.093442916870117 -132,10.067682266235352 -133,10.04179573059082 -134,10.015642166137695 -135,9.989130973815918 -136,9.962286949157715 -137,9.935125350952148 -138,9.907644271850586 -139,9.879822731018066 -140,9.851569175720215 -141,9.822710037231445 -142,9.793022155761719 -143,9.76225757598877 -144,9.730083465576172 -145,9.696220397949219 -146,9.66089916229248 -147,9.625883102416992 -148,9.594341278076172 -149,9.564790725708008 -150,9.5355224609375 -151,9.505996704101562 -152,9.4760103225708 -153,9.44546127319336 -154,9.414304733276367 -155,9.382354736328125 -156,9.349360466003418 -157,9.315010070800781 -158,9.278667449951172 -159,9.23918342590332 -160,9.194244384765625 -161,9.137089729309082 -162,8.930317878723145 -163,8.911437034606934 -164,8.884065628051758 -165,8.855144500732422 -166,8.826417922973633 -167,8.798074722290039 -168,8.77006721496582 -169,8.742522239685059 -170,8.715385437011719 -171,8.688645362854004 -172,8.662213325500488 -173,8.63603687286377 -174,8.610076904296875 -175,8.584427833557129 -176,8.559142112731934 -177,8.534276008605957 -178,8.50981616973877 -179,8.48571491241455 -180,8.46186351776123 -181,8.43826961517334 -182,8.414827346801758 -183,8.391619682312012 -184,8.368552207946777 -185,8.345622062683105 -186,8.322759628295898 -187,8.299935340881348 -188,8.277108192443848 -189,8.2542142868042 -190,8.231208801269531 -191,8.208024024963379 -192,8.18459415435791 -193,8.160835266113281 -194,8.136693000793457 -195,8.11206340789795 -196,8.086776733398438 -197,8.0606107711792 -198,8.03337574005127 -199,8.004700660705566 -200,7.974150657653809 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.050/training_config.txt deleted file mode 100644 index 07b26fc..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.050/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, 
-6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.050/training_log.csv deleted file mode 100644 index be6faaf..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,644.8335571289062 -2,215.26113891601562 -3,128.4003448486328 -4,83.97894287109375 -5,46.74564743041992 -6,38.07755661010742 -7,29.783950805664062 -8,27.605365753173828 -9,24.226465225219727 -10,24.657800674438477 -11,25.81980323791504 -12,27.723743438720703 -13,28.50939178466797 -14,28.479114532470703 -15,27.975360870361328 -16,27.167585372924805 -17,26.238357543945312 -18,25.436201095581055 -19,24.742801666259766 -20,24.102548599243164 -21,23.43455696105957 -22,22.674776077270508 -23,20.805768966674805 -24,19.99936294555664 -25,19.256731033325195 -26,15.360869407653809 -27,14.673091888427734 -28,14.179522514343262 -29,13.689996719360352 -30,13.191728591918945 -31,12.630029678344727 -32,11.739797592163086 -33,10.139701843261719 -34,9.716131210327148 -35,9.348210334777832 -36,9.01505184173584 -37,8.707425117492676 -38,8.410775184631348 -39,8.118062973022461 -40,7.822728157043457 -41,7.514976501464844 -42,7.188068389892578 -43,6.83927059173584 -44,6.478740215301514 -45,6.136829376220703 -46,5.821681976318359 -47,5.541790008544922 -48,5.275959014892578 -49,5.048756122589111 -50,4.975151062011719 -51,4.896034240722656 -52,4.81827974319458 -53,4.744032382965088 -54,4.673309803009033 -55,4.605315208435059 -56,4.539967060089111 -57,4.476802349090576 -58,4.415337562561035 -59,4.355484962463379 -60,4.297171115875244 -61,4.23993444442749 -62,4.183554172515869 -63,4.127974987030029 -64,4.072920322418213 -65,4.018280506134033 -66,3.9640462398529053 -67,3.910109043121338 -68,3.856426954269409 -69,3.8028271198272705 -70,3.7491157054901123 -71,3.6950912475585938 -72,3.640612840652466 -73,3.584808349609375 -74,3.528102159500122 -75,3.4718668460845947 -76,3.417095184326172 -77,3.3649251461029053 -78,3.316835880279541 -79,3.273427963256836 -80,3.2342891693115234 -81,3.1955134868621826 -82,3.1419663429260254 -83,3.20237135887146 -84,3.249563455581665 -85,3.2764530181884766 -86,3.286076307296753 -87,3.2826080322265625 -88,3.2665212154388428 -89,3.2349460124969482 -90,3.1874094009399414 -91,3.1095824241638184 -92,3.137726306915283 -93,3.149625062942505 -94,3.1463003158569336 -95,3.13478684425354 -96,3.1171982288360596 -97,3.092816114425659 -98,3.055276870727539 -99,3.024710178375244 -100,3.0551295280456543 -101,3.0515856742858887 -102,3.0148844718933105 -103,3.02044677734375 -104,3.0339386463165283 -105,3.0258543491363525 -106,2.997575521469116 -107,2.981809377670288 
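The 22 training cases repeated in each config are [theta0, omega0, alpha0, desired_theta] rows mixing initial-angle offsets, initial spin rates, and nonzero setpoints. A sketch of assembling them into the batched initial state a rollout would consume — a representative subset is shown, with the constants written as pi/6, 2*pi, and so on to match the values above:

import math
import torch

training_cases = [
    [ math.pi / 6, 0.0, 0.0, 0.0],   # 30 deg offset, regulate to zero
    [-math.pi / 6, 0.0, 0.0, 0.0],
    [0.0, 2 * math.pi, 0.0, 0.0],    # pure initial spin of 2*pi rad/s
    [0.0, 0.0, 0.0, 2 * math.pi],    # pure setpoint step of one full turn
]
batch0 = torch.tensor(training_cases)  # [batch_size, 4]
# Column convention matches the loss above: index 0 is theta, index 3 is
# desired_theta.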
-108,2.9945714473724365 -109,2.960761785507202 -110,2.9637069702148438 -111,2.9638519287109375 -112,2.944010019302368 -113,2.9376676082611084 -114,2.941311836242676 -115,2.922370672225952 -116,2.9276344776153564 -117,2.9230570793151855 -118,2.910398483276367 -119,2.914470911026001 -120,2.9052014350891113 -121,2.899569511413574 -122,2.899691343307495 -123,2.8897809982299805 -124,2.8870503902435303 -125,2.883997917175293 -126,2.875964403152466 -127,2.8759713172912598 -128,2.869760036468506 -129,2.8663084506988525 -130,2.864819288253784 -131,2.8590943813323975 -132,2.858699321746826 -133,2.854114055633545 -134,2.8517744541168213 -135,2.8493831157684326 -136,2.845494270324707 -137,2.8441381454467773 -138,2.8398468494415283 -139,2.838564872741699 -140,2.834953546524048 -141,2.833138942718506 -142,2.8306643962860107 -143,2.8283536434173584 -144,2.8267295360565186 -145,2.824315071105957 -146,2.8230884075164795 -147,2.8207955360412598 -148,2.8198399543762207 -149,2.817699670791626 -150,2.8167099952697754 -151,2.814849853515625 -152,2.813981771469116 -153,2.8121609687805176 -154,2.8110880851745605 -155,2.8093085289001465 -156,2.8082404136657715 -157,2.8065898418426514 -158,2.805633306503296 -159,2.8042190074920654 -160,2.80330753326416 -161,2.801994562149048 -162,2.8009207248687744 -163,2.79971981048584 -164,2.7985475063323975 -165,2.7975475788116455 -166,2.7964365482330322 -167,2.795574426651001 -168,2.794424533843994 -169,2.7933738231658936 -170,2.7923145294189453 -171,2.7912774085998535 -172,2.7904186248779297 -173,2.7894043922424316 -174,2.788450241088867 -175,2.7874832153320312 -176,2.786539316177368 -177,2.785719394683838 -178,2.784792423248291 -179,2.7838704586029053 -180,2.7830042839050293 -181,2.7821247577667236 -182,2.7812938690185547 -183,2.7804317474365234 -184,2.7795510292053223 -185,2.778747081756592 -186,2.7779335975646973 -187,2.777086019515991 -188,2.7762763500213623 -189,2.775479316711426 -190,2.77467942237854 -191,2.7738873958587646 -192,2.773098945617676 -193,2.7723257541656494 -194,2.771559953689575 -195,2.770782947540283 -196,2.7700226306915283 -197,2.7692785263061523 -198,2.768521308898926 -199,2.767770528793335 -200,2.7670392990112305 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.080/training_config.txt deleted file mode 100644 index e691bf7..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.080/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, 
omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.080/training_log.csv deleted file mode 100644 index 3c89abf..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,481.686279296875 -2,181.51026916503906 -3,355.3207092285156 -4,122.40864562988281 -5,60.330623626708984 -6,50.91759490966797 -7,37.912994384765625 -8,33.112186431884766 -9,28.84535026550293 -10,26.272972106933594 -11,23.49559211730957 -12,21.08464813232422 -13,18.613008499145508 -14,17.03631591796875 -15,16.266324996948242 -16,15.668737411499023 -17,14.981435775756836 -18,13.404241561889648 -19,11.934287071228027 -20,11.188985824584961 -21,9.877406120300293 -22,8.8971529006958 -23,8.62330436706543 -24,8.392090797424316 -25,8.258062362670898 -26,8.22425651550293 -27,8.564380645751953 -28,8.833946228027344 -29,8.667107582092285 -30,8.438118934631348 -31,8.214349746704102 -32,7.960752010345459 -33,7.633885860443115 -34,7.222818851470947 -35,6.820206165313721 -36,6.43884801864624 -37,6.140181064605713 -38,5.897441387176514 -39,5.695476531982422 -40,5.519347667694092 -41,5.361562252044678 -42,5.223142147064209 -43,5.111656188964844 -44,5.02293586730957 -45,4.940958023071289 -46,4.857954978942871 -47,4.7718634605407715 -48,4.682060241699219 -49,4.588162422180176 -50,4.489864826202393 -51,4.389747142791748 -52,4.296416282653809 -53,4.223264694213867 -54,4.170172691345215 -55,4.12441873550415 -56,4.081700325012207 -57,4.041499137878418 -58,3.9991700649261475 -59,3.950439214706421 -60,3.8954970836639404 -61,3.836456298828125 -62,3.7764170169830322 -63,3.718632459640503 -64,3.6660656929016113 -65,3.618180513381958 -66,3.572890281677246 -67,3.530268430709839 -68,3.488551139831543 -69,3.4476544857025146 -70,3.4081103801727295 -71,3.3707573413848877 -72,3.3357975482940674 -73,3.3026270866394043 -74,3.2701234817504883 -75,3.2370574474334717 -76,3.202831745147705 -77,3.1679775714874268 -78,3.1346850395202637 -79,3.105441093444824 -80,3.0802769660949707 -81,3.0570485591888428 -82,3.0333969593048096 -83,3.0083813667297363 -84,2.98229718208313 -85,2.956451416015625 -86,2.932147741317749 -87,2.910200595855713 -88,2.8916757106781006 -89,2.876786470413208 -90,2.8648440837860107 -91,2.855344533920288 -92,2.847583532333374 -93,2.841017484664917 -94,2.8352744579315186 -95,2.830068826675415 
-96,2.82525897026062 -97,2.820791482925415 -98,2.8166685104370117 -99,2.812885284423828 -100,2.8094534873962402 -101,2.8064448833465576 -102,2.803892135620117 -103,2.8016836643218994 -104,2.7995994091033936 -105,2.797438621520996 -106,2.795107126235962 -107,2.7926275730133057 -108,2.7900969982147217 -109,2.7876229286193848 -110,2.7852766513824463 -111,2.7830710411071777 -112,2.7809808254241943 -113,2.7789688110351562 -114,2.777012586593628 -115,2.7751083374023438 -116,2.7732582092285156 -117,2.7714569568634033 -118,2.769692897796631 -119,2.7679555416107178 -120,2.766244411468506 -121,2.7645716667175293 -122,2.762967586517334 -123,2.761441707611084 -124,2.759993553161621 -125,2.7586171627044678 -126,2.7573001384735107 -127,2.7560253143310547 -128,2.7547781467437744 -129,2.753549814224243 -130,2.7523365020751953 -131,2.7511441707611084 -132,2.7499773502349854 -133,2.748842239379883 -134,2.74773907661438 -135,2.7466633319854736 -136,2.7456111907958984 -137,2.7445733547210693 -138,2.7435455322265625 -139,2.7425248622894287 -140,2.741511821746826 -141,2.740504741668701 -142,2.739506483078003 -143,2.7385175228118896 -144,2.7375380992889404 -145,2.7365682125091553 -146,2.735607147216797 -147,2.734656572341919 -148,2.7337160110473633 -149,2.7327868938446045 -150,2.731867551803589 -151,2.730954885482788 -152,2.730048894882202 -153,2.729149580001831 -154,2.728256940841675 -155,2.72737193107605 -156,2.7264957427978516 -157,2.7256274223327637 -158,2.72476863861084 -159,2.723917007446289 -160,2.7230725288391113 -161,2.722234010696411 -162,2.7214012145996094 -163,2.720574140548706 -164,2.719752550125122 -165,2.7189362049102783 -166,2.718125104904175 -167,2.7173190116882324 -168,2.7165169715881348 -169,2.7157201766967773 -170,2.714927911758423 -171,2.7141404151916504 -172,2.7133569717407227 -173,2.712578058242798 -174,2.711803913116455 -175,2.711033582687378 -176,2.710268020629883 -177,2.7095067501068115 -178,2.708749532699585 -179,2.7079970836639404 -180,2.7072479724884033 -181,2.706502676010132 -182,2.7057619094848633 -183,2.7050249576568604 -184,2.7042922973632812 -185,2.7035627365112305 -186,2.7028379440307617 -187,2.702116012573242 -188,2.701399087905884 -189,2.70068621635437 -190,2.699977159500122 -191,2.6992716789245605 -192,2.6985700130462646 -193,2.697871685028076 -194,2.6971774101257324 -195,2.6964871883392334 -196,2.6958022117614746 -197,2.6951215267181396 -198,2.6944448947906494 -199,2.6937716007232666 -200,2.6931018829345703 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.100/training_config.txt deleted file mode 100644 index 8ddca6b..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.100/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: 
-def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.100/training_log.csv deleted file mode 100644 index 4c0ec01..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,469.2649230957031 -2,326.39837646484375 -3,213.38052368164062 -4,50.33772277832031 -5,29.021835327148438 -6,25.483436584472656 -7,31.197837829589844 -8,31.746530532836914 -9,31.48188591003418 -10,31.22617530822754 -11,31.1026611328125 -12,30.768352508544922 -13,30.411008834838867 -14,29.993667602539062 -15,29.517818450927734 -16,28.948278427124023 -17,28.161788940429688 -18,26.979352951049805 -19,26.17275047302246 -20,25.484460830688477 -21,24.835721969604492 -22,24.191198348999023 -23,23.523496627807617 -24,22.81980323791504 -25,22.155441284179688 -26,21.573843002319336 -27,20.940412521362305 -28,20.22989845275879 -29,19.461017608642578 -30,18.64518928527832 -31,17.78531265258789 -32,16.921890258789062 -33,16.067153930664062 -34,15.233980178833008 -35,14.445463180541992 -36,13.704170227050781 -37,13.012430191040039 -38,12.367860794067383 -39,11.769373893737793 -40,11.210058212280273 -41,10.685431480407715 -42,10.193699836730957 -43,9.727736473083496 -44,9.281412124633789 -45,8.85403823852539 -46,8.441299438476562 -47,8.038119316101074 -48,7.6389007568359375 -49,7.241733551025391 -50,6.854685306549072 -51,6.486843109130859 -52,6.1454758644104 -53,5.838573455810547 -54,5.568541049957275 -55,5.331871509552002 -56,5.121151447296143 -57,4.925756454467773 -58,4.739625453948975 -59,4.560769081115723 -60,4.389829635620117 -61,4.229304790496826 -62,4.083106994628906 -63,3.9540810585021973 -64,3.8425064086914062 -65,3.7474048137664795 -66,3.668313503265381 -67,3.6057379245758057 -68,3.5602753162384033 -69,3.5308995246887207 -70,3.514744281768799 -71,3.5086121559143066 -72,3.5091707706451416 -73,3.5134897232055664 -74,3.519155263900757 -75,3.5244719982147217 -76,3.5282037258148193 -77,3.5291249752044678 -78,3.526078462600708 -79,3.517765522003174 -80,3.5036728382110596 -81,3.4844303131103516 
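Because every run starts from the same controller_base.pth (which is why each log opens at the identical epoch-0 loss of 1087.949), the final logged loss is the discriminating number when comparing the lr_* directories. A sketch that ranks the runs this commit deletes, assuming the pre-deletion tree and pandas (the glob pattern is an assumption):

import glob
import os
import pandas as pd

root = "training/time_weighting_learning_rate_sweep/constant"
final_losses = {}
for log_path in glob.glob(os.path.join(root, "lr_*", "training_log.csv")):
    lr = float(os.path.basename(os.path.dirname(log_path))[len("lr_"):])
    final_losses[lr] = pd.read_csv(log_path)["Loss"].iloc[-1]

# Print best (lowest final loss) first.
for lr, loss in sorted(final_losses.items(), key=lambda kv: kv[1]):
    print(f"lr = {lr:.3f}   final loss = {loss:.4f}")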
-82,3.461963415145874 -83,3.4379873275756836 -84,3.4133715629577637 -85,3.3891208171844482 -86,3.365788698196411 -87,3.3436439037323 -88,3.3228886127471924 -89,3.3037564754486084 -90,3.2864136695861816 -91,3.270982265472412 -92,3.2574620246887207 -93,3.2457728385925293 -94,3.235722064971924 -95,3.227010726928711 -96,3.2192482948303223 -97,3.211989402770996 -98,3.204843282699585 -99,3.197505235671997 -100,3.1897997856140137 -101,3.181614398956299 -102,3.172982692718506 -103,3.1640524864196777 -104,3.1548635959625244 -105,3.1454713344573975 -106,3.1358983516693115 -107,3.1261637210845947 -108,3.1163058280944824 -109,3.106369972229004 -110,3.096465587615967 -111,3.0867152214050293 -112,3.0772151947021484 -113,3.0680408477783203 -114,3.059187173843384 -115,3.0506298542022705 -116,3.042323589324951 -117,3.0342187881469727 -118,3.0262749195098877 -119,3.0184576511383057 -120,3.010756492614746 -121,3.0031373500823975 -122,2.995593309402466 -123,2.988088846206665 -124,2.9806129932403564 -125,2.973134994506836 -126,2.965670585632324 -127,2.9582111835479736 -128,2.9507784843444824 -129,2.943403720855713 -130,2.9360909461975098 -131,2.9288642406463623 -132,2.9217276573181152 -133,2.914686679840088 -134,2.907742738723755 -135,2.900902509689331 -136,2.8941538333892822 -137,2.88749623298645 -138,2.8809375762939453 -139,2.874469041824341 -140,2.868089199066162 -141,2.8617947101593018 -142,2.8556067943573 -143,2.849527597427368 -144,2.843579053878784 -145,2.8377885818481445 -146,2.8321847915649414 -147,2.826768398284912 -148,2.821537733078003 -149,2.816493034362793 -150,2.8116228580474854 -151,2.806945323944092 -152,2.802446126937866 -153,2.798109769821167 -154,2.7939236164093018 -155,2.7898664474487305 -156,2.785916805267334 -157,2.782067060470581 -158,2.7783143520355225 -159,2.7746522426605225 -160,2.771038770675659 -161,2.767500638961792 -162,2.7640469074249268 -163,2.760666608810425 -164,2.757354736328125 -165,2.7541048526763916 -166,2.7509143352508545 -167,2.7477827072143555 -168,2.7447049617767334 -169,2.7416677474975586 -170,2.738690137863159 -171,2.7357540130615234 -172,2.7328598499298096 -173,2.7300024032592773 -174,2.727181911468506 -175,2.7243926525115967 -176,2.721630334854126 -177,2.718885898590088 -178,2.71616268157959 -179,2.713449239730835 -180,2.710740327835083 -181,2.7080299854278564 -182,2.705319881439209 -183,2.7026026248931885 -184,2.699883460998535 -185,2.6971538066864014 -186,2.6944162845611572 -187,2.6916680335998535 -188,2.6889026165008545 -189,2.6861231327056885 -190,2.6833181381225586 -191,2.6804773807525635 -192,2.6775612831115723 -193,2.6745879650115967 -194,2.6716082096099854 -195,2.6686434745788574 -196,2.6657087802886963 -197,2.662813663482666 -198,2.659982442855835 -199,2.6572070121765137 -200,2.654489755630493 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.125/training_config.txt deleted file mode 100644 index 5dd4cdf..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.125/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - 
weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.125/training_log.csv deleted file mode 100644 index 087ce2b..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,481.4687194824219 -2,96.23600006103516 -3,46.30360412597656 -4,42.67131423950195 -5,22.463510513305664 -6,16.559965133666992 -7,14.444090843200684 -8,13.130633354187012 -9,11.771244049072266 -10,10.568934440612793 -11,9.973428726196289 -12,9.366987228393555 -13,8.804798126220703 -14,8.27409553527832 -15,7.755214691162109 -16,7.259061813354492 -17,6.791775703430176 -18,6.366294860839844 -19,5.982115745544434 -20,5.6443047523498535 -21,5.381455421447754 -22,5.193507671356201 -23,5.0417962074279785 -24,4.908753395080566 -25,4.789371967315674 -26,4.680208683013916 -27,4.579638957977295 -28,4.486494541168213 -29,4.399327754974365 -30,4.317553997039795 -31,4.240206241607666 -32,4.166420936584473 -33,4.095785617828369 -34,4.027955055236816 -35,3.962388515472412 -36,3.898862600326538 -37,3.836763858795166 -38,3.7760908603668213 -39,3.7168807983398438 -40,3.658808946609497 -41,3.6020450592041016 -42,3.546821117401123 -43,3.4929871559143066 -44,3.4405100345611572 -45,3.3894295692443848 -46,3.339735746383667 -47,3.291717767715454 -48,3.24536395072937 -49,3.2008402347564697 -50,3.1579949855804443 -51,3.117474317550659 -52,3.079694986343384 -53,3.0453310012817383 -54,3.0150771141052246 -55,2.989461898803711 -56,2.968668222427368 -57,2.9526264667510986 -58,2.940664529800415 -59,2.9317538738250732 -60,2.9254977703094482 -61,2.9206764698028564 -62,2.9161529541015625 -63,2.9112279415130615 -64,2.9060800075531006 -65,2.9006295204162598 -66,2.8947958946228027 -67,2.8885629177093506 -68,2.8819518089294434 
-69,2.8750643730163574 -70,2.8680176734924316 -71,2.860836982727051 -72,2.853696823120117 -73,2.847111463546753 -74,2.8412044048309326 -75,2.83565354347229 -76,2.8303771018981934 -77,2.825225353240967 -78,2.8200435638427734 -79,2.8147404193878174 -80,2.809227705001831 -81,2.8034520149230957 -82,2.7973809242248535 -83,2.791006565093994 -84,2.784371852874756 -85,2.777538776397705 -86,2.7705130577087402 -87,2.763420343399048 -88,2.7563579082489014 -89,2.749314546585083 -90,2.742288589477539 -91,2.73541522026062 -92,2.7287261486053467 -93,2.7222392559051514 -94,2.716036081314087 -95,2.7100298404693604 -96,2.7042503356933594 -97,2.6986300945281982 -98,2.6931324005126953 -99,2.687795877456665 -100,2.6827449798583984 -101,2.677870988845825 -102,2.673168659210205 -103,2.6686530113220215 -104,2.664296865463257 -105,2.660098075866699 -106,2.6560823917388916 -107,2.652400493621826 -108,2.64913272857666 -109,2.646261215209961 -110,2.6437904834747314 -111,2.641684055328369 -112,2.6399388313293457 -113,2.638502597808838 -114,2.6373291015625 -115,2.6363699436187744 -116,2.635528087615967 -117,2.6347572803497314 -118,2.6341447830200195 -119,2.633460760116577 -120,2.632854700088501 -121,2.632253646850586 -122,2.63169264793396 -123,2.6311864852905273 -124,2.6307148933410645 -125,2.630263090133667 -126,2.629851818084717 -127,2.629472494125366 -128,2.6291165351867676 -129,2.6287875175476074 -130,2.6284642219543457 -131,2.6281540393829346 -132,2.627856492996216 -133,2.627563953399658 -134,2.627284526824951 -135,2.6270041465759277 -136,2.6267237663269043 -137,2.6264452934265137 -138,2.6261661052703857 -139,2.625885009765625 -140,2.625603437423706 -141,2.6253230571746826 -142,2.6250407695770264 -143,2.6247570514678955 -144,2.6244759559631348 -145,2.624196767807007 -146,2.6239166259765625 -147,2.6236376762390137 -148,2.6233632564544678 -149,2.623093366622925 -150,2.622823715209961 -151,2.622556209564209 -152,2.622292995452881 -153,2.6220321655273438 -154,2.6217737197875977 -155,2.6215174198150635 -156,2.621264934539795 -157,2.621016263961792 -158,2.6207706928253174 -159,2.620528221130371 -160,2.620288372039795 -161,2.6200501918792725 -162,2.6198153495788574 -163,2.6195836067199707 -164,2.619354486465454 -165,2.619126319885254 -166,2.6189000606536865 -167,2.6186749935150146 -168,2.6184520721435547 -169,2.6182305812835693 -170,2.6180107593536377 -171,2.6177918910980225 -172,2.61757493019104 -173,2.617359161376953 -174,2.617144823074341 -175,2.6169333457946777 -176,2.616722822189331 -177,2.6165122985839844 -178,2.6163036823272705 -179,2.616096019744873 -180,2.615889310836792 -181,2.6156835556030273 -182,2.615478992462158 -183,2.6152756214141846 -184,2.6150732040405273 -185,2.6148719787597656 -186,2.6146717071533203 -187,2.614471673965454 -188,2.614272117614746 -189,2.614072561264038 -190,2.6138744354248047 -191,2.6136767864227295 -192,2.6134798526763916 -193,2.613284111022949 -194,2.6130881309509277 -195,2.6128928661346436 -196,2.6126976013183594 -197,2.6125032901763916 -198,2.6123099327087402 -199,2.612117052078247 -200,2.611924171447754 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.160/training_config.txt deleted file mode 100644 index 9fbcd48..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.160/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 
-Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.160/training_log.csv deleted file mode 100644 index d1b94bd..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,553.6963500976562 -2,49.08141326904297 -3,46.070884704589844 -4,46.076011657714844 -5,51.14850616455078 -6,44.729087829589844 -7,39.44825744628906 -8,35.0629768371582 -9,28.447023391723633 -10,23.73651885986328 -11,20.84491539001465 -12,18.04330062866211 -13,15.992259979248047 -14,14.384294509887695 -15,13.096992492675781 -16,12.36588191986084 -17,12.033136367797852 -18,11.882885932922363 -19,11.748250961303711 -20,11.487778663635254 -21,11.079614639282227 -22,10.560137748718262 -23,9.991604804992676 -24,9.441896438598633 -25,8.951383590698242 -26,8.533259391784668 -27,8.188826560974121 -28,7.9125518798828125 -29,7.69334077835083 -30,7.5136542320251465 -31,7.357945442199707 -32,7.215080738067627 -33,7.0778608322143555 -34,6.940879821777344 -35,6.800954341888428 -36,6.656413555145264 -37,6.506791114807129 -38,6.352499485015869 -39,6.194592475891113 -40,6.0344014167785645 -41,5.8734822273254395 -42,5.713569641113281 -43,5.556545257568359 -44,5.4044318199157715 -45,5.259422779083252 -46,5.122798919677734 -47,4.994835376739502 -48,4.875750541687012 -49,4.7653279304504395 -50,4.662687301635742 -51,4.566817283630371 -52,4.4768900871276855 -53,4.392213821411133 -54,4.3114728927612305 -55,4.233184337615967 
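The loss function recorded in these configs is a time-weighted mean-squared tracking error: the pointwise error (theta - desired_theta)**2 is scaled by a per-time-step weight before averaging. One subtlety worth flagging: the inline comments label theta as [batch_size, t_points], but weights.view(-1, 1) yields a [t_points, 1] tensor, which broadcasts correctly only against a time-major trajectory of shape [t_points, batch_size] (the layout produced by, for example, torchdiffeq's odeint). A minimal self-contained sketch of the computation under that time-major assumption:

import torch

def constant(t_span, t_max=None, min_val=0.01):
    # Uniform weighting: every time point contributes equally.
    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
    return torch.ones_like(t_span)

def loss_fn(state_traj, t_span, weight_fn=constant):
    # state_traj assumed time-major: [t_points, batch_size, state_dim].
    theta = state_traj[:, :, 0]          # [t_points, batch_size]
    desired_theta = state_traj[:, :, 3]  # [t_points, batch_size]
    weights = weight_fn(t_span, min_val=0.01)  # [t_points]
    weights = weights.view(-1, 1)        # [t_points, 1]; broadcasts across the batch
    return torch.mean(weights * (theta - desired_theta) ** 2)

# Shape check with dummy data: 1000 time points, 22 cases, 4 state variables.
t_span = torch.linspace(0, 10, 1000)
print(loss_fn(torch.randn(1000, 22, 4), t_span))  # scalar tensor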
-56,4.156534671783447 -57,4.081078052520752 -58,4.006781101226807 -59,3.9344382286071777 -60,3.8644073009490967 -61,3.7971014976501465 -62,3.7331008911132812 -63,3.6725046634674072 -64,3.6153955459594727 -65,3.5623679161071777 -66,3.5130515098571777 -67,3.4666190147399902 -68,3.422694206237793 -69,3.380918264389038 -70,3.34086012840271 -71,3.302433967590332 -72,3.2654566764831543 -73,3.229963541030884 -74,3.195887804031372 -75,3.1632513999938965 -76,3.132321834564209 -77,3.10307240486145 -78,3.0756163597106934 -79,3.049919843673706 -80,3.025923728942871 -81,3.0035178661346436 -82,2.9826807975769043 -83,2.963444232940674 -84,2.9456756114959717 -85,2.9293460845947266 -86,2.914433002471924 -87,2.9008066654205322 -88,2.8883070945739746 -89,2.876821994781494 -90,2.866337537765503 -91,2.8568198680877686 -92,2.8481810092926025 -93,2.840397357940674 -94,2.8333535194396973 -95,2.8269832134246826 -96,2.8211967945098877 -97,2.8158717155456543 -98,2.8109676837921143 -99,2.8064279556274414 -100,2.8022096157073975 -101,2.798267364501953 -102,2.794576406478882 -103,2.791100025177002 -104,2.7878060340881348 -105,2.7846758365631104 -106,2.7817060947418213 -107,2.778864622116089 -108,2.7761218547821045 -109,2.77345871925354 -110,2.770862579345703 -111,2.768308162689209 -112,2.7657721042633057 -113,2.7632362842559814 -114,2.7607133388519287 -115,2.758206367492676 -116,2.755723714828491 -117,2.753251314163208 -118,2.750777244567871 -119,2.7483010292053223 -120,2.745819330215454 -121,2.7433362007141113 -122,2.7408525943756104 -123,2.738375663757324 -124,2.735898733139038 -125,2.7334342002868652 -126,2.730978012084961 -127,2.7285351753234863 -128,2.726107597351074 -129,2.723715305328369 -130,2.721348285675049 -131,2.7189998626708984 -132,2.7166740894317627 -133,2.714364767074585 -134,2.7120742797851562 -135,2.70979642868042 -136,2.707547664642334 -137,2.7053351402282715 -138,2.703160524368286 -139,2.701014757156372 -140,2.6989171504974365 -141,2.6968579292297363 -142,2.6948509216308594 -143,2.692901372909546 -144,2.6910157203674316 -145,2.689157247543335 -146,2.687323808670044 -147,2.685511350631714 -148,2.6837263107299805 -149,2.6819632053375244 -150,2.680239200592041 -151,2.6785459518432617 -152,2.676875114440918 -153,2.6752192974090576 -154,2.673585891723633 -155,2.6719651222229004 -156,2.670372247695923 -157,2.6687915325164795 -158,2.6672275066375732 -159,2.665682792663574 -160,2.6641595363616943 -161,2.6626577377319336 -162,2.6611926555633545 -163,2.6597442626953125 -164,2.6583008766174316 -165,2.656860113143921 -166,2.6554226875305176 -167,2.6539745330810547 -168,2.652524709701538 -169,2.651076078414917 -170,2.6496360301971436 -171,2.6482040882110596 -172,2.6467795372009277 -173,2.6453614234924316 -174,2.6439476013183594 -175,2.6425416469573975 -176,2.6411399841308594 -177,2.639739751815796 -178,2.6383423805236816 -179,2.636948823928833 -180,2.6355528831481934 -181,2.634155035018921 -182,2.6327567100524902 -183,2.6313607692718506 -184,2.6299684047698975 -185,2.6285769939422607 -186,2.627183437347412 -187,2.625798225402832 -188,2.6244194507598877 -189,2.623044729232788 -190,2.62166690826416 -191,2.6202921867370605 -192,2.618919849395752 -193,2.617548942565918 -194,2.616185426712036 -195,2.614823579788208 -196,2.613467216491699 -197,2.612114667892456 -198,2.610764980316162 -199,2.609421968460083 -200,2.608083486557007 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.200/training_config.txt deleted file 
mode 100644 index 1da3229..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.200/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.200/training_log.csv deleted file mode 100644 index 011a912..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,638.138916015625 -2,126.95638275146484 -3,25.703227996826172 -4,17.30948257446289 -5,14.884600639343262 -6,10.920865058898926 -7,9.254437446594238 -8,7.943879127502441 -9,6.872048377990723 -10,6.092459678649902 -11,5.6081719398498535 -12,5.297410011291504 -13,5.066235065460205 -14,4.879238605499268 -15,4.723977565765381 -16,4.591020584106445 -17,4.4769816398620605 -18,4.377741813659668 -19,4.291906833648682 -20,4.218208312988281 -21,4.155269145965576 -22,4.10132360458374 -23,4.055699348449707 -24,4.017193794250488 -25,3.9850821495056152 -26,3.9585351943969727 -27,3.936767339706421 -28,3.9188969135284424 -29,3.9041504859924316 -30,3.8918404579162598 -31,3.881377935409546 -32,3.8722589015960693 -33,3.864015579223633 -34,3.8562610149383545 -35,3.8487372398376465 -36,3.841275453567505 -37,3.833747148513794 -38,3.826080322265625 -39,3.8182294368743896 -40,3.810185670852661 -41,3.8019659519195557 -42,3.793586015701294 
-43,3.7850866317749023 -44,3.7764933109283447 -45,3.767857551574707 -46,3.7592031955718994 -47,3.7505853176116943 -48,3.7419936656951904 -49,3.7334494590759277 -50,3.7249693870544434 -51,3.716557502746582 -52,3.7081964015960693 -53,3.6998841762542725 -54,3.69161319732666 -55,3.6833760738372803 -56,3.6751856803894043 -57,3.6670145988464355 -58,3.6588683128356934 -59,3.650744676589966 -60,3.6426455974578857 -61,3.634566307067871 -62,3.626511812210083 -63,3.6184847354888916 -64,3.610483407974243 -65,3.6025326251983643 -66,3.594625949859619 -67,3.5867767333984375 -68,3.578998565673828 -69,3.5712890625 -70,3.5636508464813232 -71,3.556091547012329 -72,3.548602342605591 -73,3.5411875247955322 -74,3.53383731842041 -75,3.526548385620117 -76,3.519317388534546 -77,3.512122869491577 -78,3.504964590072632 -79,3.497835874557495 -80,3.4907400608062744 -81,3.4836747646331787 -82,3.476620674133301 -83,3.4695959091186523 -84,3.462590217590332 -85,3.4555938243865967 -86,3.4486193656921387 -87,3.441661834716797 -88,3.434725761413574 -89,3.4278018474578857 -90,3.420893430709839 -91,3.4140071868896484 -92,3.407135009765625 -93,3.4002761840820312 -94,3.3934319019317627 -95,3.386613368988037 -96,3.3798131942749023 -97,3.3730344772338867 -98,3.3662712574005127 -99,3.359532594680786 -100,3.3528053760528564 -101,3.346094846725464 -102,3.3393967151641846 -103,3.3327183723449707 -104,3.326056718826294 -105,3.3194069862365723 -106,3.312777280807495 -107,3.3061654567718506 -108,3.299579620361328 -109,3.2930171489715576 -110,3.2864811420440674 -111,3.2799644470214844 -112,3.2734663486480713 -113,3.266991138458252 -114,3.260540723800659 -115,3.254108190536499 -116,3.2476890087127686 -117,3.2412848472595215 -118,3.234895706176758 -119,3.2285244464874268 -120,3.2221744060516357 -121,3.2158422470092773 -122,3.2095162868499756 -123,3.203117847442627 -124,3.1966679096221924 -125,3.1901097297668457 -126,3.1834967136383057 -127,3.176847219467163 -128,3.1701719760894775 -129,3.163466691970825 -130,3.1567556858062744 -131,3.1500422954559326 -132,3.143339157104492 -133,3.136652708053589 -134,3.1299829483032227 -135,3.1233255863189697 -136,3.116701602935791 -137,3.110090970993042 -138,3.103492259979248 -139,3.0969111919403076 -140,3.090346574783325 -141,3.083801031112671 -142,3.0772898197174072 -143,3.0708088874816895 -144,3.064385414123535 -145,3.0579960346221924 -146,3.051602363586426 -147,3.0452427864074707 -148,3.0389111042022705 -149,3.0326015949249268 -150,3.0263562202453613 -151,3.020143747329712 -152,3.0139853954315186 -153,3.007880687713623 -154,3.0018320083618164 -155,2.9958269596099854 -156,2.9898507595062256 -157,2.9839470386505127 -158,2.9780802726745605 -159,2.972240686416626 -160,2.966510057449341 -161,2.960875988006592 -162,2.9553921222686768 -163,2.9501094818115234 -164,2.945035934448242 -165,2.9401001930236816 -166,2.9353580474853516 -167,2.9308011531829834 -168,2.9264252185821533 -169,2.9222323894500732 -170,2.9182190895080566 -171,2.914371967315674 -172,2.910691499710083 -173,2.9071621894836426 -174,2.903806447982788 -175,2.9006123542785645 -176,2.8975651264190674 -177,2.8946545124053955 -178,2.8918910026550293 -179,2.889240264892578 -180,2.886709690093994 -181,2.884303569793701 -182,2.8820247650146484 -183,2.8798563480377197 -184,2.877760887145996 -185,2.8757522106170654 -186,2.873821973800659 -187,2.8719937801361084 -188,2.8702189922332764 -189,2.8685176372528076 -190,2.866842269897461 -191,2.8651974201202393 -192,2.8636062145233154 -193,2.8620219230651855 -194,2.860455274581909 -195,2.8589398860931396 
-196,2.8574321269989014 -197,2.8559417724609375 -198,2.8544905185699463 -199,2.8530449867248535 -200,2.8516249656677246 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.250/training_config.txt deleted file mode 100644 index 5638011..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.250/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.250/training_log.csv deleted file mode 100644 index 3dab0e9..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,1172.7674560546875 -2,100.25691986083984 -3,119402.2890625 -4,133.49362182617188 -5,59.150943756103516 -6,52.43195343017578 -7,54.74357223510742 -8,60.726932525634766 -9,63.98731231689453 -10,64.91996765136719 -11,63.94999313354492 -12,62.00520324707031 -13,58.6434440612793 -14,53.395198822021484 -15,46.27482223510742 -16,40.115535736083984 -17,36.4989013671875 -18,33.63146209716797 -19,31.102542877197266 -20,28.929868698120117 -21,26.843608856201172 -22,25.418996810913086 -23,24.649375915527344 -24,24.061758041381836 -25,23.3399658203125 -26,22.555253982543945 -27,21.701343536376953 -28,20.815927505493164 
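Every config in this sweep reuses the same 22 training cases, each a row [theta0, omega0, alpha0, desired_theta] covering large initial offsets (up to 2pi/3 rad), fast initial spin (up to 2pi rad/s), setpoint steps (up to +/-4pi), and combinations of the three. A sketch of how such a table can be packed into one batch so a single ODE rollout integrates every case in parallel (the variable names are illustrative, not taken from the repository):

import math
import torch

# Representative rows from the table above: [theta0, omega0, alpha0, desired_theta].
training_cases = [
    [math.pi / 6, 0.0, 0.0, 0.0],              # 30 deg offset, at rest
    [0.0, 2 * math.pi, 0.0, 0.0],              # one revolution per second of spin
    [0.0, 0.0, 0.0, 2 * math.pi],              # setpoint a full revolution away
    [math.pi / 4, math.pi, 0.0, 2 * math.pi],  # offset + spin + setpoint step
]

# [batch_size, 4]: one initial-state/target row per case.
batch = torch.tensor(training_cases, dtype=torch.float32)
print(batch.shape)  # torch.Size([4, 4])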
-29,19.92568588256836 -30,19.006975173950195 -31,18.06807518005371 -32,17.145551681518555 -33,16.292030334472656 -34,15.532393455505371 -35,14.863213539123535 -36,14.26880931854248 -37,13.735945701599121 -38,13.256166458129883 -39,12.821511268615723 -40,12.426076889038086 -41,12.06234359741211 -42,11.727265357971191 -43,11.416626930236816 -44,11.131233215332031 -45,10.866288185119629 -46,10.621492385864258 -47,10.391637802124023 -48,10.174287796020508 -49,9.965923309326172 -50,9.76606559753418 -51,9.573927879333496 -52,9.389328002929688 -53,9.210346221923828 -54,9.037010192871094 -55,8.868074417114258 -56,8.70367431640625 -57,8.543137550354004 -58,8.385336875915527 -59,8.230120658874512 -60,8.077096939086914 -61,7.925225257873535 -62,7.7738261222839355 -63,7.623720169067383 -64,7.475256443023682 -65,7.329106330871582 -66,7.182750701904297 -67,7.031722068786621 -68,6.880136966705322 -69,6.727870941162109 -70,6.576241493225098 -71,6.42543888092041 -72,6.277204513549805 -73,6.132631301879883 -74,5.991875171661377 -75,5.855222702026367 -76,5.723228931427002 -77,5.593400955200195 -78,5.467288017272949 -79,5.346905708312988 -80,5.2328925132751465 -81,5.125042915344238 -82,5.021970748901367 -83,4.923079013824463 -84,4.8285136222839355 -85,4.7386698722839355 -86,4.653127670288086 -87,4.571839809417725 -88,4.494254112243652 -89,4.4201788902282715 -90,4.349775314331055 -91,4.2830986976623535 -92,4.219934940338135 -93,4.160295009613037 -94,4.103320121765137 -95,4.048985958099365 -96,3.9969615936279297 -97,3.9467976093292236 -98,3.898562431335449 -99,3.8514668941497803 -100,3.806110382080078 -101,3.762852668762207 -102,3.7217857837677 -103,3.6830904483795166 -104,3.6466803550720215 -105,3.6124815940856934 -106,3.580472230911255 -107,3.550790548324585 -108,3.5235748291015625 -109,3.4986579418182373 -110,3.475843667984009 -111,3.4549434185028076 -112,3.43583345413208 -113,3.4183998107910156 -114,3.402480363845825 -115,3.387868642807007 -116,3.374448776245117 -117,3.362361431121826 -118,3.351301908493042 -119,3.3412811756134033 -120,3.3322646617889404 -121,3.324073076248169 -122,3.316577672958374 -123,3.309654712677002 -124,3.3031740188598633 -125,3.2971014976501465 -126,3.2913944721221924 -127,3.285975217819214 -128,3.2807655334472656 -129,3.2757186889648438 -130,3.270798921585083 -131,3.2659785747528076 -132,3.261248826980591 -133,3.2565717697143555 -134,3.2519161701202393 -135,3.2472610473632812 -136,3.2425618171691895 -137,3.2378664016723633 -138,3.2331740856170654 -139,3.2284865379333496 -140,3.223806858062744 -141,3.2191410064697266 -142,3.214459180831909 -143,3.2097601890563965 -144,3.20505690574646 -145,3.2003579139709473 -146,3.1956534385681152 -147,3.1909520626068115 -148,3.1862666606903076 -149,3.1815884113311768 -150,3.1769208908081055 -151,3.172271728515625 -152,3.1676464080810547 -153,3.163057804107666 -154,3.1585025787353516 -155,3.1539885997772217 -156,3.1495325565338135 -157,3.1451261043548584 -158,3.140794038772583 -159,3.1365439891815186 -160,3.132366418838501 -161,3.128267288208008 -162,3.124246835708618 -163,3.120305061340332 -164,3.1164445877075195 -165,3.1126551628112793 -166,3.1089229583740234 -167,3.105269193649292 -168,3.101688861846924 -169,3.0981662273406982 -170,3.094719171524048 -171,3.091362476348877 -172,3.088073253631592 -173,3.0848498344421387 -174,3.0816967487335205 -175,3.078617811203003 -176,3.0756008625030518 -177,3.0726370811462402 -178,3.0697410106658936 -179,3.0669026374816895 -180,3.0641438961029053 -181,3.0614750385284424 -182,3.0588855743408203 
-183,3.0564775466918945 -184,3.054225444793701 -185,3.0520174503326416 -186,3.0498387813568115 -187,3.04767107963562 -188,3.0455057621002197 -189,3.043341636657715 -190,3.041179656982422 -191,3.039034128189087 -192,3.0368995666503906 -193,3.034780502319336 -194,3.0326836109161377 -195,3.0306124687194824 -196,3.028563976287842 -197,3.0265390872955322 -198,3.024538993835449 -199,3.022556781768799 -200,3.020589590072632 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.300/training_config.txt deleted file mode 100644 index ff63435..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.300/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.300/training_log.csv deleted file mode 100644 index 26989be..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.300/training_log.csv +++ /dev/null @@ -1,42 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,2825.916259765625 -2,45.81127166748047 -3,167.4499053955078 -4,39.7562255859375 -5,31.285991668701172 -6,28.110328674316406 -7,26.23940086364746 -8,24.85036849975586 -9,23.69524383544922 -10,22.691848754882812 -11,21.820920944213867 -12,21.053966522216797 -13,20.368806838989258 -14,19.758020401000977 -15,19.222614288330078 
-16,18.762393951416016 -17,18.373872756958008 -18,18.048959732055664 -19,17.780969619750977 -20,17.5612735748291 -21,17.3798828125 -22,17.223751068115234 -23,17.082439422607422 -24,64.6371078491211 -25,12853.6376953125 -26,29927.26953125 -27,35657.75 -28,40116.0234375 -29,43236.8984375 -30,43042.046875 -31,45883.66796875 -32,44355.76171875 -33,105720.296875 -34,185412.984375 -35,196373.640625 -36,196373.640625 -37,196373.640625 -38,196373.640625 -39,196373.640625 -40,196373.640625 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.400/training_config.txt deleted file mode 100644 index 33a846e..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.400/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.400/training_log.csv deleted file mode 100644 index bf04308..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.400/training_log.csv +++ /dev/null @@ -1,44 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,36838.27734375 -2,44.573402404785156 -3,105924.5390625 -4,41.59011459350586 -5,36.11030578613281 -6,35.55046081542969 -7,37.81698989868164 -8,41.02517318725586 -9,42.91999053955078 -10,45.7131233215332 -11,41.85399627685547 -12,40.049232482910156 
-13,38.6837043762207 -14,37.556556701660156 -15,36.00605773925781 -16,33.6727409362793 -17,31.685684204101562 -18,37.23391342163086 -19,37.71848678588867 -20,33.69709014892578 -21,31.826297760009766 -22,31.2726993560791 -23,31.138790130615234 -24,31.04647445678711 -25,30.865480422973633 -26,30.721553802490234 -27,30.514291763305664 -28,30.227590560913086 -29,29.922489166259766 -30,29.53095245361328 -31,29.23976707458496 -32,28.990644454956055 -33,28.765079498291016 -34,28.542724609375 -35,28.22769546508789 -36,27.828588485717773 -37,24.716533660888672 -38,25.0787410736084 -39,nan -40,nan -41,nan -42,nan diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.500/training_config.txt deleted file mode 100644 index 6a1ed86..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.500/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.500/training_log.csv deleted file mode 100644 index ebd6c2e..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.500/training_log.csv +++ /dev/null @@ -1,17 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,97624.71875 -2,193010.671875 -3,193945.328125 -4,184485.71875 -5,139493.53125 -6,74732.3828125 -7,81682.4296875 -8,27178.181640625 
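Taken together, these logs show the sweep's three outcomes: learning rates 0.160-0.250 descend smoothly toward a loss near 2.6-3.0; lr 0.300 tracks the stable runs for roughly 23 epochs before blowing up to the ~196373.64 plateau; and lr 0.400 and above destabilize early, ending in nan or a saturated plateau (see the logs that follow). A hypothetical post-processing sketch that classifies each run from its training_log.csv and reports the best surviving learning rate (the directory layout follows the paths in this diff; the blow-up threshold is an assumption):

import csv
import glob
import math
import os

def classify_run(log_path, blowup_factor=10.0):
    # Parse the Epoch,Loss log written during training.
    with open(log_path) as f:
        losses = [float(row["Loss"]) for row in csv.DictReader(f)]
    if any(math.isnan(x) for x in losses):
        return "nan", losses[-1]
    # Diverged if the final loss dwarfs the initial one, e.g. the
    # ~196373 plateau of the lr >= 0.3 constant-weight runs.
    if losses[-1] > blowup_factor * losses[0]:
        return "diverged", losses[-1]
    return "converged", losses[-1]

def best_learning_rate(sweep_dir):
    best = None
    for log_path in sorted(glob.glob(os.path.join(sweep_dir, "lr_*", "training_log.csv"))):
        label = os.path.basename(os.path.dirname(log_path))
        status, final_loss = classify_run(log_path)
        print(f"{label}: {status}, final loss {final_loss}")
        if status == "converged" and (best is None or final_loss < best[1]):
            best = (label, final_loss)
    return best

# Example: best_learning_rate("training/time_weighting_learning_rate_sweep/constant")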
-9,9615.1865234375 -10,213.50404357910156 -11,27.556537628173828 -12,nan -13,nan -14,nan -15,nan diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.600/training_config.txt deleted file mode 100644 index 228cbb3..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.600/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.600/training_log.csv deleted file mode 100644 index 5a789d7..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.600/training_log.csv +++ /dev/null @@ -1,11 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,139986.984375 -2,195732.21875 -3,196348.1875 -4,196373.640625 -5,196373.640625 -6,196373.640625 -7,196373.640625 -8,196373.640625 -9,196373.640625 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.700/training_config.txt deleted file mode 100644 index 13501b1..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.700/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 
0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.700/training_log.csv deleted file mode 100644 index f8951a3..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,165115.203125 -2,196340.71875 -3,196373.640625 -4,196373.640625 -5,196373.640625 -6,196373.640625 -7,196373.640625 -8,196373.640625 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.800/training_config.txt deleted file mode 100644 index 17ae80e..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.800/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: 
Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.800/training_log.csv deleted file mode 100644 index da4087b..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.800/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,178371.703125 -2,196373.4375 -3,196373.640625 -4,196373.640625 -5,196373.640625 -6,196373.640625 -7,196373.640625 -8,196373.640625 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_0.900/training_config.txt deleted file mode 100644 index bf410f0..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.900/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 
6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_0.900/training_log.csv deleted file mode 100644 index fefd7df..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,188662.65625 -2,196373.640625 -3,196373.640625 -4,196373.640625 -5,196373.640625 -6,196373.640625 -7,196373.640625 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_1.000/training_config.txt deleted file mode 100644 index 3257127..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_1.000/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 
12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_1.000/training_log.csv deleted file mode 100644 index 9babc0a..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_1.000/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,192997.171875 -2,196373.40625 -3,196373.640625 -4,196373.640625 -5,196373.640625 -6,196373.640625 -7,196373.640625 -8,196373.640625 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_16.000/training_config.txt deleted file mode 100644 index 9744508..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_16.000/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_16.000/training_log.csv deleted file mode 100644 index 7973ad7..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,197062.375 -2,197062.375 -3,197062.375 -4,197062.375 -5,197062.375 -6,197062.375 diff --git 
a/training/time_weighting_learning_rate_sweep/constant/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_2.000/training_config.txt deleted file mode 100644 index 447f887..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_2.000/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_2.000/training_log.csv deleted file mode 100644 index 7973ad7..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,197062.375 -2,197062.375 -3,197062.375 -4,197062.375 -5,197062.375 -6,197062.375 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_4.000/training_config.txt deleted file mode 100644 index 0284766..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_4.000/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: 
[batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_4.000/training_log.csv deleted file mode 100644 index 7973ad7..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,197062.375 -2,197062.375 -3,197062.375 -4,197062.375 -5,197062.375 -6,197062.375 diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/constant/lr_8.000/training_config.txt deleted file mode 100644 index 2223be8..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_8.000/training_config.txt +++ /dev/null @@ -1,47 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def constant(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - return torch.ones_like(t_span) - -Training Cases: 
-[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/constant/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/constant/lr_8.000/training_log.csv deleted file mode 100644 index 7973ad7..0000000 --- a/training/time_weighting_learning_rate_sweep/constant/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,1087.9490966796875 -1,197062.375 -2,197062.375 -3,197062.375 -4,197062.375 -5,197062.375 -6,197062.375 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.010/training_config.txt deleted file mode 100644 index 176f7b5..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 
3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.010/training_log.csv deleted file mode 100644 index 979467f..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,490.7637634277344 -2,404.3140563964844 -3,336.8674011230469 -4,278.2140808105469 -5,203.0413360595703 -6,170.9028778076172 -7,103.69400787353516 -8,83.88016510009766 -9,80.34635162353516 -10,61.95636749267578 -11,62.38576889038086 -12,56.68960952758789 -13,56.0425910949707 -14,59.06610870361328 -15,56.95689392089844 -16,63.60873794555664 -17,58.646915435791016 -18,72.38348388671875 -19,71.20616912841797 -20,68.93463897705078 -21,60.734561920166016 -22,57.46406936645508 -23,57.10195541381836 -24,56.22803497314453 -25,54.68375015258789 -26,52.8687858581543 -27,53.0968132019043 -28,51.506874084472656 -29,50.42244338989258 -30,49.363521575927734 -31,49.66569137573242 -32,48.727783203125 -33,47.28483963012695 -34,45.32536315917969 -35,52.03187942504883 -36,50.42601776123047 -37,49.05934524536133 -38,56.712398529052734 -39,63.59385681152344 -40,61.93925094604492 -41,53.37807846069336 -42,55.29351806640625 -43,42.97301483154297 -44,44.0015869140625 -45,45.63426208496094 -46,44.1588134765625 -47,45.94529342651367 -48,43.62541580200195 -49,43.42586135864258 -50,43.451114654541016 -51,42.94729232788086 -52,41.867401123046875 -53,40.849735260009766 -54,40.22796630859375 -55,39.83695602416992 -56,39.873348236083984 -57,38.65355682373047 -58,38.486541748046875 -59,38.42221450805664 -60,38.226409912109375 -61,37.81587600708008 -62,37.14459228515625 -63,35.909461975097656 -64,36.57479476928711 -65,36.60603332519531 -66,36.659175872802734 -67,35.92082214355469 -68,35.16218185424805 -69,34.858734130859375 -70,34.83457946777344 -71,34.9055290222168 -72,34.884212493896484 -73,34.680572509765625 -74,34.359352111816406 -75,34.043983459472656 -76,33.80924606323242 -77,33.65767288208008 -78,33.54021072387695 -79,33.3841552734375 -80,33.153202056884766 -81,32.890865325927734 -82,32.66939926147461 -83,32.52092361450195 -84,32.431251525878906 -85,32.363094329833984 -86,32.27976989746094 -87,32.16466522216797 -88,32.02665328979492 -89,31.887271881103516 -90,31.763830184936523 -91,31.66136360168457 -92,31.57164764404297 -93,31.480548858642578 -94,31.379093170166016 -95,31.270193099975586 -96,31.164426803588867 -97,31.07044792175293 -98,30.990249633789062 -99,30.92011070251465 -100,30.853479385375977 -101,30.78464698791504 -102,30.711374282836914 -103,30.635597229003906 -104,30.56117820739746 -105,30.49178695678711 -106,30.428590774536133 -107,30.3701171875 -108,30.31301498413086 -109,30.254226684570312 -110,30.192655563354492 -111,30.129568099975586 -112,30.067092895507812 -113,30.0067138671875 -114,29.948457717895508 -115,29.891340255737305 -116,29.834571838378906 -117,29.777484893798828 -118,29.720340728759766 -119,29.663692474365234 -120,29.60805892944336 
-121,29.553354263305664 -122,29.49921226501465 -123,29.445056915283203 -124,29.390422821044922 -125,29.335241317749023 -126,29.279747009277344 -127,29.224512100219727 -128,29.169862747192383 -129,29.115821838378906 -130,29.062374114990234 -131,29.00925064086914 -132,28.956195831298828 -133,28.90306282043457 -134,28.849977493286133 -135,28.79707145690918 -136,28.744375228881836 -137,28.691871643066406 -138,28.639650344848633 -139,28.587482452392578 -140,28.53536033630371 -141,28.483203887939453 -142,28.43109703063965 -143,28.37923812866211 -144,28.327543258666992 -145,28.27614974975586 -146,28.22499656677246 -147,28.174062728881836 -148,28.123334884643555 -149,28.07280921936035 -150,28.022451400756836 -151,27.97236442565918 -152,27.922529220581055 -153,27.87289810180664 -154,27.82349967956543 -155,27.774330139160156 -156,27.725296020507812 -157,27.67645263671875 -158,27.627710342407227 -159,27.579124450683594 -160,27.530675888061523 -161,27.48228645324707 -162,27.434005737304688 -163,27.38589859008789 -164,27.33784294128418 -165,27.28998374938965 -166,27.242210388183594 -167,27.1944522857666 -168,27.146852493286133 -169,27.099300384521484 -170,27.05178451538086 -171,27.004281997680664 -172,26.956838607788086 -173,26.909412384033203 -174,26.8619327545166 -175,26.81447410583496 -176,26.767051696777344 -177,26.719614028930664 -178,26.6721248626709 -179,26.624650955200195 -180,26.57723045349121 -181,26.529733657836914 -182,26.48232078552246 -183,26.434823989868164 -184,26.387269973754883 -185,26.339710235595703 -186,26.292097091674805 -187,26.244426727294922 -188,26.19667625427246 -189,26.14883804321289 -190,26.100954055786133 -191,26.052953720092773 -192,26.004934310913086 -193,25.956789016723633 -194,25.908544540405273 -195,25.860240936279297 -196,25.8118896484375 -197,25.763513565063477 -198,25.715051651000977 -199,25.66643524169922 -200,25.61774444580078 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.020/training_config.txt deleted file mode 100644 index cf1d84e..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, 
-1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.020/training_log.csv deleted file mode 100644 index e977fbf..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,438.00213623046875 -2,282.4886474609375 -3,139.7744598388672 -4,82.15703582763672 -5,96.0716781616211 -6,88.7756576538086 -7,67.5429916381836 -8,66.53595733642578 -9,65.6704330444336 -10,65.94153594970703 -11,66.25276184082031 -12,60.02836990356445 -13,85.09168243408203 -14,80.47128295898438 -15,89.7029800415039 -16,78.59867858886719 -17,53.04693603515625 -18,56.89222717285156 -19,50.484710693359375 -20,49.823001861572266 -21,49.314300537109375 -22,47.83378219604492 -23,43.711544036865234 -24,40.819087982177734 -25,37.89718246459961 -26,35.04273223876953 -27,35.87839126586914 -28,36.12340545654297 -29,35.079654693603516 -30,33.312801361083984 -31,33.19147872924805 -32,33.351600646972656 -33,33.22645568847656 -34,32.55833435058594 -35,29.774471282958984 -36,29.26665687561035 -37,29.08632469177246 -38,28.90761375427246 -39,28.720355987548828 -40,28.33127212524414 -41,27.65358543395996 -42,26.85132598876953 -43,25.94232940673828 -44,26.000152587890625 -45,25.46835708618164 -46,25.080097198486328 -47,24.809398651123047 -48,26.65996551513672 -49,25.693775177001953 -50,25.284332275390625 -51,25.033227920532227 -52,24.76788902282715 -53,24.46537208557129 -54,24.194625854492188 -55,24.080869674682617 -56,23.961780548095703 -57,23.692533493041992 -58,23.43000030517578 -59,23.22692108154297 -60,23.09064483642578 -61,22.997583389282227 -62,22.913854598999023 -63,22.826766967773438 -64,22.734508514404297 -65,22.643033981323242 -66,22.58087921142578 -67,22.571218490600586 -68,22.509408950805664 -69,22.434104919433594 -70,22.400028228759766 -71,22.374528884887695 -72,22.344560623168945 -73,22.307775497436523 -74,22.263586044311523 -75,22.212636947631836 -76,22.158809661865234 -77,22.105573654174805 -78,22.057985305786133 -79,22.018577575683594 -80,21.9812068939209 -81,21.93882179260254 -82,21.895769119262695 -83,21.85749053955078 -84,21.822546005249023 -85,21.788400650024414 -86,21.75555992126465 -87,21.72543716430664 -88,21.696279525756836 -89,21.664962768554688 -90,21.630979537963867 -91,21.596052169799805 -92,21.56130599975586 -93,21.52669334411621 -94,21.492427825927734 -95,21.459012985229492 -96,21.425739288330078 -97,21.39209747314453 -98,21.3588809967041 -99,21.32619285583496 -100,21.293262481689453 -101,21.2603816986084 -102,21.228313446044922 -103,21.196744918823242 -104,21.16532325744629 
-105,21.133983612060547 -106,21.102096557617188 -107,21.069616317749023 -108,21.037553787231445 -109,21.005767822265625 -110,20.974088668823242 -111,20.942312240600586 -112,20.910232543945312 -113,20.878496170043945 -114,20.847286224365234 -115,20.816295623779297 -116,20.785511016845703 -117,20.75452995300293 -118,20.72329330444336 -119,20.69173812866211 -120,20.659942626953125 -121,20.628141403198242 -122,20.596267700195312 -123,20.56429100036621 -124,20.5321044921875 -125,20.499645233154297 -126,20.467222213745117 -127,20.434789657592773 -128,20.40253257751465 -129,20.37030601501465 -130,20.3381404876709 -131,20.305889129638672 -132,20.273630142211914 -133,20.241249084472656 -134,20.20890998840332 -135,20.176429748535156 -136,20.143896102905273 -137,20.111392974853516 -138,20.0789737701416 -139,20.04680633544922 -140,20.014854431152344 -141,19.98314094543457 -142,19.951406478881836 -143,19.91977882385254 -144,19.88818359375 -145,19.85654640197754 -146,19.824825286865234 -147,19.793350219726562 -148,19.76216697692871 -149,19.73135757446289 -150,19.70092010498047 -151,19.671100616455078 -152,19.64244842529297 -153,19.61446762084961 -154,19.58669662475586 -155,19.55915069580078 -156,19.53182029724121 -157,19.504558563232422 -158,19.477365493774414 -159,19.45026969909668 -160,19.423233032226562 -161,19.39634895324707 -162,19.369661331176758 -163,19.343442916870117 -164,19.319015502929688 -165,19.304553985595703 -166,19.335527420043945 -167,19.811107635498047 -168,20.21792984008789 -169,21.46573829650879 -170,20.13370132446289 -171,20.300931930541992 -172,24.525226593017578 -173,23.588481903076172 -174,28.013870239257812 -175,30.328161239624023 -176,47.557098388671875 -177,52.01130294799805 -178,34.808982849121094 -179,31.8913516998291 -180,26.322036743164062 -181,23.790590286254883 -182,19.950693130493164 -183,18.350902557373047 -184,17.82912826538086 -185,16.836641311645508 -186,16.837839126586914 -187,14.593155860900879 -188,12.962855339050293 -189,15.224420547485352 -190,15.916751861572266 -191,14.879631042480469 -192,14.013172149658203 -193,11.941842079162598 -194,11.925493240356445 -195,11.616101264953613 -196,16.155424118041992 -197,13.99347972869873 -198,13.108319282531738 -199,11.055944442749023 -200,10.895567893981934 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.040/training_config.txt deleted file mode 100644 index 387ac24..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) 
- t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.040/training_log.csv deleted file mode 100644 index ea732bf..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,347.9196472167969 -2,88.23384857177734 -3,64.95013427734375 -4,53.55635070800781 -5,34.245018005371094 -6,22.94109344482422 -7,20.48912811279297 -8,11.607606887817383 -9,11.25313663482666 -10,7.636106491088867 -11,7.438293933868408 -12,7.702119827270508 -13,7.468079566955566 -14,7.826272964477539 -15,8.214800834655762 -16,7.7163567543029785 -17,10.604812622070312 -18,10.156761169433594 -19,10.628146171569824 -20,10.741048812866211 -21,11.045429229736328 -22,9.281624794006348 -23,8.916332244873047 -24,9.179983139038086 -25,9.371625900268555 -26,8.816662788391113 -27,7.4074482917785645 -28,5.902316570281982 -29,5.047746658325195 -30,6.000243663787842 -31,5.900737762451172 -32,5.098971366882324 -33,5.031848907470703 -34,4.805078506469727 -35,5.145158290863037 -36,5.61767053604126 -37,4.955471038818359 -38,4.494084358215332 -39,3.938270330429077 -40,3.9001731872558594 -41,3.914665460586548 -42,4.011464595794678 -43,4.07597541809082 -44,3.7962493896484375 -45,3.826953172683716 -46,3.841982841491699 -47,3.768927812576294 -48,3.6213862895965576 -49,3.427665948867798 -50,3.256809711456299 -51,3.174323081970215 -52,3.156507730484009 -53,3.1532976627349854 -54,3.139244318008423 -55,3.1094532012939453 -56,3.070634603500366 -57,3.0298995971679688 -58,2.9862911701202393 -59,2.933492422103882 -60,2.846348285675049 -61,2.83799147605896 -62,2.834475517272949 -63,2.866649866104126 -64,2.7750229835510254 -65,2.718712329864502 -66,2.707850933074951 -67,2.705960273742676 -68,2.704658031463623 -69,2.7185721397399902 -70,2.7327749729156494 -71,2.7268550395965576 -72,2.7221155166625977 -73,2.7028708457946777 -74,2.8754658699035645 -75,2.6855967044830322 -76,2.8568830490112305 -77,2.9669885635375977 -78,2.592146396636963 -79,2.824007749557495 -80,3.028592586517334 -81,2.8785712718963623 -82,3.0997159481048584 -83,3.2841427326202393 -84,3.1605336666107178 -85,3.3449723720550537 -86,3.3174960613250732 -87,3.2145280838012695 -88,3.308927536010742 -89,3.2640137672424316 
-90,3.166367292404175 -91,3.2205052375793457 -92,3.2640304565429688 -93,3.1699697971343994 -94,3.1605913639068604 -95,3.190214157104492 -96,3.123408317565918 -97,3.067885637283325 -98,3.0827581882476807 -99,3.0170440673828125 -100,3.074411630630493 -101,3.0622897148132324 -102,3.0200371742248535 -103,2.9398140907287598 -104,2.850395679473877 -105,2.7177882194519043 -106,2.606072187423706 -107,2.5478498935699463 -108,2.516045570373535 -109,2.4794960021972656 -110,2.4319021701812744 -111,2.3822412490844727 -112,2.3620622158050537 -113,2.368638515472412 -114,2.3448920249938965 -115,2.387432813644409 -116,2.3786745071411133 -117,2.360952377319336 -118,2.336423397064209 -119,2.3006885051727295 -120,2.2525558471679688 -121,2.216169834136963 -122,2.22017765045166 -123,2.1661014556884766 -124,2.090989589691162 -125,2.1248133182525635 -126,2.159291982650757 -127,2.1713504791259766 -128,2.1827375888824463 -129,2.183199882507324 -130,2.1718719005584717 -131,2.1461479663848877 -132,2.1203582286834717 -133,2.1285719871520996 -134,2.110393762588501 -135,2.080596923828125 -136,2.072340726852417 -137,2.0463340282440186 -138,2.021232843399048 -139,1.9915915727615356 -140,2.0293431282043457 -141,1.9538954496383667 -142,1.950137734413147 -143,1.9077409505844116 -144,1.8879350423812866 -145,1.915911078453064 -146,1.9265291690826416 -147,1.9253325462341309 -148,1.9012724161148071 -149,1.8862746953964233 -150,1.872910499572754 -151,1.8315157890319824 -152,1.811267375946045 -153,1.8150216341018677 -154,1.7861570119857788 -155,1.7971006631851196 -156,1.7939419746398926 -157,1.784462571144104 -158,1.7662464380264282 -159,1.7590456008911133 -160,1.7430355548858643 -161,1.7395058870315552 -162,1.7259149551391602 -163,1.7151567935943604 -164,1.6999801397323608 -165,1.676554560661316 -166,1.6848647594451904 -167,1.6748522520065308 -168,1.6372034549713135 -169,1.649580955505371 -170,1.6221779584884644 -171,1.62427818775177 -172,1.615058183670044 -173,1.589563012123108 -174,1.5703481435775757 -175,1.546787977218628 -176,1.5309046506881714 -177,1.4970520734786987 -178,1.5074620246887207 -179,1.510524868965149 -180,1.5140422582626343 -181,1.4985867738723755 -182,1.4773385524749756 -183,1.4636558294296265 -184,1.461452841758728 -185,1.4537696838378906 -186,1.4407398700714111 -187,1.4245628118515015 -188,1.4040073156356812 -189,1.3882923126220703 -190,1.3727236986160278 -191,1.356567144393921 -192,1.3350642919540405 -193,1.310477614402771 -194,1.2867556810379028 -195,1.2945544719696045 -196,1.2694107294082642 -197,1.285437822341919 -198,1.3191161155700684 -199,1.332870364189148 -200,1.2997816801071167 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.050/training_config.txt deleted file mode 100644 index 20ebecb..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now 
Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.050/training_log.csv deleted file mode 100644 index 67e7ece..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,310.68701171875 -2,74.36722564697266 -3,41.85774230957031 -4,26.906742095947266 -5,17.45112419128418 -6,13.564167976379395 -7,10.549190521240234 -8,10.305512428283691 -9,10.323173522949219 -10,10.475152969360352 -11,9.865166664123535 -12,8.810970306396484 -13,8.77301025390625 -14,8.912811279296875 -15,8.966854095458984 -16,8.917834281921387 -17,8.823260307312012 -18,8.800261497497559 -19,8.70089054107666 -20,8.603302001953125 -21,8.513223648071289 -22,8.427711486816406 -23,8.343575477600098 -24,8.259710311889648 -25,8.17436695098877 -26,8.085980415344238 -27,7.992875099182129 -28,7.889718055725098 -29,7.770033359527588 -30,7.659265518188477 -31,7.13055419921875 -32,6.88877010345459 -33,6.758962154388428 -34,6.639039039611816 -35,6.523301124572754 -36,6.407827377319336 -37,6.289547443389893 -38,6.167047023773193 -39,6.038315296173096 -40,5.900675296783447 -41,5.751809597015381 -42,5.595601558685303 -43,5.421152591705322 -44,5.13905668258667 -45,4.934002876281738 -46,4.652440547943115 -47,4.319989204406738 -48,4.001376152038574 -49,3.7301974296569824 -50,3.5065970420837402 -51,3.316319704055786 -52,3.1571671962738037 -53,3.0318987369537354 -54,2.9269070625305176 -55,2.8342084884643555 -56,2.749218463897705 -57,2.6679162979125977 -58,2.5873706340789795 -59,2.5077829360961914 -60,2.4306256771087646 -61,2.353717088699341 -62,2.274707555770874 -63,2.1928389072418213 -64,2.1097404956817627 -65,2.028958320617676 -66,1.9540634155273438 -67,1.8883472681045532 -68,1.832905888557434 -69,1.7816218137741089 -70,1.7296537160873413 -71,1.6721429824829102 -72,1.606939435005188 -73,1.5469218492507935 
-74,1.491618037223816 -75,1.4388450384140015 -76,1.3870145082473755 -77,1.3338156938552856 -78,1.2796645164489746 -79,1.2239712476730347 -80,1.1660773754119873 -81,1.0966618061065674 -82,1.0130360126495361 -83,0.9716266989707947 -84,0.9232714772224426 -85,0.8714278936386108 -86,0.8258012533187866 -87,0.787456750869751 -88,0.7550979256629944 -89,0.727286159992218 -90,0.7027363777160645 -91,0.6807001233100891 -92,0.6607878804206848 -93,0.6426327228546143 -94,0.6259653568267822 -95,0.6105700731277466 -96,0.5962452292442322 -97,0.5827869772911072 -98,0.5700821280479431 -99,0.5580418109893799 -100,0.5466133952140808 -101,0.5357120633125305 -102,0.5252617001533508 -103,0.5152072310447693 -104,0.5054991841316223 -105,0.4960823953151703 -106,0.4869106411933899 -107,0.477949321269989 -108,0.4691668450832367 -109,0.46053028106689453 -110,0.4520063102245331 -111,0.4435393214225769 -112,0.43503499031066895 -113,0.4264664053916931 -114,0.41782018542289734 -115,0.4090583324432373 -116,0.40015140175819397 -117,0.39111292362213135 -118,0.38195762038230896 -119,0.37272846698760986 -120,0.36342954635620117 -121,0.35406729578971863 -122,0.3446541726589203 -123,0.33522793650627136 -124,0.325877845287323 -125,0.3166811168193817 -126,0.30793991684913635 -127,0.2998354732990265 -128,0.29226189851760864 -129,0.284767746925354 -130,0.2766685485839844 -131,0.2674793004989624 -132,0.25705018639564514 -133,0.24570976197719574 -134,0.23410628736019135 -135,0.22292684018611908 -136,0.21261747181415558 -137,0.20339393615722656 -138,0.19564956426620483 -139,0.18993273377418518 -140,0.18655304610729218 -141,0.18521302938461304 -142,0.18509641289710999 -143,0.1853080540895462 -144,0.18523244559764862 -145,0.18455365300178528 -146,0.18310414254665375 -147,0.1807176172733307 -148,0.1772965043783188 -149,0.17297525703907013 -150,0.1682005524635315 -151,0.16355083882808685 -152,0.1595045030117035 -153,0.15629857778549194 -154,0.1539258360862732 -155,0.15221503376960754 -156,0.15093298256397247 -157,0.14987242221832275 -158,0.14889615774154663 -159,0.14791297912597656 -160,0.1468341052532196 -161,0.1455526053905487 -162,0.14398689568042755 -163,0.14216198027133942 -164,0.14022359251976013 -165,0.1383601278066635 -166,0.13669894635677338 -167,0.13526761531829834 -168,0.1340295672416687 -169,0.1329432874917984 -170,0.13199307024478912 -171,0.1311747133731842 -172,0.13045667111873627 -173,0.12976306676864624 -174,0.1290120780467987 -175,0.12817682325839996 -176,0.127296581864357 -177,0.12642522156238556 -178,0.12558294832706451 -179,0.12476182729005814 -180,0.12396659702062607 -181,0.12322668731212616 -182,0.1225711926817894 -183,0.12199467420578003 -184,0.12146039307117462 -185,0.12093803286552429 -186,0.12042641639709473 -187,0.11993274837732315 -188,0.11944715678691864 -189,0.11894901096820831 -190,0.11843620985746384 -191,0.11792971938848495 -192,0.11744716763496399 -193,0.11698595434427261 -194,0.11653733253479004 -195,0.11610625684261322 -196,0.11570344865322113 -197,0.11532409489154816 -198,0.1149531826376915 -199,0.11458572745323181 -200,0.11422739177942276 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.080/training_config.txt deleted file mode 100644 index b1cafb5..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 
1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.080/training_log.csv deleted file mode 100644 index 2020a73..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,207.882568359375 -2,56.660125732421875 -3,63.86771774291992 -4,31.845294952392578 -5,31.81883430480957 -6,33.137107849121094 -7,29.22472381591797 -8,33.57838821411133 -9,193.4351348876953 -10,41.12259292602539 -11,39.15040969848633 -12,23.366411209106445 -13,17.962989807128906 -14,15.987190246582031 -15,12.505390167236328 -16,10.994537353515625 -17,10.035009384155273 -18,9.27875804901123 -19,8.670827865600586 -20,8.20725154876709 -21,7.849754333496094 -22,7.564206600189209 -23,7.333187580108643 -24,7.142928123474121 -25,6.978282451629639 -26,6.826180934906006 -27,6.680299758911133 -28,6.543830394744873 -29,6.412168979644775 -30,6.283604621887207 -31,6.157267093658447 -32,6.032319068908691 -33,5.90770149230957 -34,5.783026218414307 -35,5.658642292022705 -36,5.534681797027588 -37,5.412393569946289 -38,5.292916774749756 -39,5.176690578460693 -40,5.06292200088501 -41,4.949832916259766 -42,4.834560871124268 -43,4.712734699249268 -44,4.57841157913208 -45,4.425729274749756 -46,4.2506279945373535 -47,4.050756931304932 -48,3.828691005706787 -49,3.5907905101776123 -50,3.349576711654663 -51,3.124992847442627 -52,2.9399988651275635 
-53,2.7969462871551514 -54,2.68331241607666 -55,2.5846357345581055 -56,2.4944372177124023 -57,2.408752918243408 -58,2.3264200687408447 -59,2.246619939804077 -60,2.1679129600524902 -61,2.088878631591797 -62,2.007982015609741 -63,1.9238632917404175 -64,1.8365715742111206 -65,1.74830162525177 -66,1.6632894277572632 -67,1.5854623317718506 -68,1.5161513090133667 -69,1.4536395072937012 -70,1.3951045274734497 -71,1.3386489152908325 -72,1.2824501991271973 -73,1.226065993309021 -74,1.1702386140823364 -75,1.1155569553375244 -76,1.0624089241027832 -77,1.0115941762924194 -78,0.9632936120033264 -79,0.9180428981781006 -80,0.8755586743354797 -81,0.8350687026977539 -82,0.795688807964325 -83,0.7567717432975769 -84,0.7179138660430908 -85,0.6794101595878601 -86,0.6414872407913208 -87,0.604526162147522 -88,0.5686933398246765 -89,0.5340646505355835 -90,0.5007237195968628 -91,0.4688314199447632 -92,0.43879732489585876 -93,0.4109063744544983 -94,0.38539791107177734 -95,0.3624134063720703 -96,0.34173914790153503 -97,0.32325026392936707 -98,0.3067595064640045 -99,0.29200419783592224 -100,0.2785465717315674 -101,0.26602619886398315 -102,0.2542232275009155 -103,0.2430095225572586 -104,0.2324986755847931 -105,0.22247834503650665 -106,0.21288245916366577 -107,0.20380748808383942 -108,0.19515959918498993 -109,0.1868773102760315 -110,0.17906159162521362 -111,0.17190253734588623 -112,0.16551533341407776 -113,0.1599963754415512 -114,0.15542200207710266 -115,0.1517714112997055 -116,0.14902101457118988 -117,0.14699934422969818 -118,0.14551804959774017 -119,0.1443595141172409 -120,0.14332380890846252 -121,0.1423291563987732 -122,0.1412835717201233 -123,0.14013247191905975 -124,0.13884755969047546 -125,0.13743847608566284 -126,0.13593444228172302 -127,0.13437174260616302 -128,0.13279177248477936 -129,0.13123615086078644 -130,0.12973752617835999 -131,0.1283217966556549 -132,0.12700532376766205 -133,0.12579567730426788 -134,0.12468955665826797 -135,0.12367697060108185 -136,0.12274299561977386 -137,0.12186986953020096 -138,0.12103929370641708 -139,0.12023387849330902 -140,0.11943849921226501 -141,0.11864031106233597 -142,0.11783136427402496 -143,0.11700674891471863 -144,0.11616675555706024 -145,0.11531298607587814 -146,0.11444998532533646 -147,0.11358021199703217 -148,0.11270896345376968 -149,0.11184173822402954 -150,0.11098384112119675 -151,0.11013928055763245 -152,0.10931042581796646 -153,0.1084989532828331 -154,0.10770575702190399 -155,0.10693082213401794 -156,0.10617288202047348 -157,0.10543006658554077 -158,0.10470030456781387 -159,0.1039816364645958 -160,0.10327205806970596 -161,0.1025695726275444 -162,0.10187307745218277 -163,0.10118184238672256 -164,0.10049556940793991 -165,0.09981456398963928 -166,0.0991392731666565 -167,0.09847023338079453 -168,0.09780800342559814 -169,0.09715325385332108 -170,0.09650661051273346 -171,0.09586847573518753 -172,0.09523952752351761 -173,0.09461965411901474 -174,0.09400888532400131 -175,0.09340682625770569 -176,0.09281359612941742 -177,0.09222868829965591 -178,0.09165170788764954 -179,0.09108217805624008 -180,0.09052001684904099 -181,0.08996478468179703 -182,0.08941631019115448 -183,0.08887439221143723 -184,0.08833985775709152 -185,0.08781199157238007 -186,0.0872906893491745 -187,0.08677574247121811 -188,0.08626698702573776 -189,0.08576405793428421 -190,0.08526687324047089 -191,0.08477536588907242 -192,0.0842893198132515 -193,0.08380873501300812 -194,0.08333341032266617 -195,0.08286337554454803 -196,0.08239839226007462 -197,0.0819387435913086 -198,0.08148481696844101 
-199,0.08103611320257187 -200,0.08059266209602356 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.100/training_config.txt deleted file mode 100644 index 6e74157..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.100/training_log.csv deleted file mode 100644 index d9ee2a7..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,195.80422973632812 -2,235.43768310546875 -3,86.38312530517578 -4,19.939048767089844 -5,10.273727416992188 -6,8.131309509277344 -7,6.771768569946289 -8,10.231901168823242 -9,10.918525695800781 -10,11.30544376373291 -11,12.244677543640137 -12,11.832077026367188 -13,11.583242416381836 -14,11.302816390991211 -15,11.146215438842773 -16,9.654322624206543 -17,9.520241737365723 -18,9.251075744628906 -19,8.841695785522461 -20,8.139796257019043 -21,7.430532455444336 -22,7.081821918487549 -23,6.846663951873779 -24,6.636226654052734 -25,6.475137710571289 -26,6.320333957672119 -27,6.1486711502075195 -28,5.982146263122559 -29,5.824563026428223 
-30,5.673310279846191 -31,5.526076793670654 -32,5.3805623054504395 -33,5.231759071350098 -34,5.075941562652588 -35,4.906622886657715 -36,4.711850166320801 -37,4.482506275177002 -38,4.20401668548584 -39,3.8777108192443848 -40,3.5503594875335693 -41,3.2502124309539795 -42,3.015671491622925 -43,2.8359405994415283 -44,2.689755439758301 -45,2.560529947280884 -46,2.443039655685425 -47,2.335078477859497 -48,2.2353177070617676 -49,2.1430981159210205 -50,2.057521343231201 -51,1.9776370525360107 -52,1.9026676416397095 -53,1.8321096897125244 -54,1.7654259204864502 -55,1.7022137641906738 -56,1.6419838666915894 -57,1.584261178970337 -58,1.5281484127044678 -59,1.4725043773651123 -60,1.4161009788513184 -61,1.3576573133468628 -62,1.2959513664245605 -63,1.230086326599121 -64,1.160457968711853 -65,1.0891494750976562 -66,1.0188636779785156 -67,0.952327311038971 -68,0.8898755311965942 -69,0.8309217691421509 -70,0.7751232981681824 -71,0.722197413444519 -72,0.6721348762512207 -73,0.6250333189964294 -74,0.5810012817382812 -75,0.5402737259864807 -76,0.5031805038452148 -77,0.4701448082923889 -78,0.4414342939853668 -79,0.41692736744880676 -80,0.3963194191455841 -81,0.3791704475879669 -82,0.36490610241889954 -83,0.35292887687683105 -84,0.3426857590675354 -85,0.3337164521217346 -86,0.32566189765930176 -87,0.31823232769966125 -88,0.31121107935905457 -89,0.304435133934021 -90,0.2977982461452484 -91,0.29121068120002747 -92,0.2846047282218933 -93,0.27793973684310913 -94,0.2711838483810425 -95,0.26430898904800415 -96,0.25731906294822693 -97,0.25022000074386597 -98,0.243027925491333 -99,0.23575417697429657 -100,0.2284233570098877 -101,0.22104303538799286 -102,0.2136373221874237 -103,0.2062218189239502 -104,0.19880643486976624 -105,0.19141726195812225 -106,0.18408459424972534 -107,0.17685087025165558 -108,0.1698000580072403 -109,0.16303500533103943 -110,0.15662749111652374 -111,0.15066060423851013 -112,0.14520591497421265 -113,0.14025086164474487 -114,0.13580843806266785 -115,0.13190224766731262 -116,0.12850768864154816 -117,0.12561991810798645 -118,0.1231808215379715 -119,0.1211458072066307 -120,0.11946696043014526 -121,0.1180998757481575 -122,0.11699799448251724 -123,0.11611208319664001 -124,0.1154010146856308 -125,0.11481889337301254 -126,0.1143375113606453 -127,0.11392240971326828 -128,0.11355022341012955 -129,0.11320319026708603 -130,0.11286462098360062 -131,0.11252221465110779 -132,0.11216558516025543 -133,0.11178775131702423 -134,0.11138495057821274 -135,0.11095497012138367 -136,0.1104971244931221 -137,0.11001250892877579 -138,0.1095033809542656 -139,0.10897263139486313 -140,0.10842428356409073 -141,0.10786217451095581 -142,0.1072906032204628 -143,0.1067139208316803 -144,0.1061360090970993 -145,0.10556092113256454 -146,0.1049925833940506 -147,0.10443469136953354 -148,0.10388913750648499 -149,0.10335759818553925 -150,0.1028408333659172 -151,0.1023401990532875 -152,0.10185584425926208 -153,0.10138771682977676 -154,0.10093559324741364 -155,0.10049830377101898 -156,0.10007508099079132 -157,0.09966462105512619 -158,0.09926579147577286 -159,0.09887729585170746 -160,0.09849865734577179 -161,0.09812887012958527 -162,0.09776509553194046 -163,0.0974070131778717 -164,0.09705422073602676 -165,0.09670648723840714 -166,0.09636268019676208 -167,0.09602220356464386 -168,0.09568474441766739 -169,0.09534983336925507 -170,0.09501675516366959 -171,0.0946851372718811 -172,0.09435499459505081 -173,0.09402631223201752 -174,0.09369952976703644 -175,0.09337440878152847 -176,0.09305121749639511 -177,0.0927303358912468 -178,0.09241201728582382 
-179,0.09209634363651276 -180,0.09178372472524643 -181,0.09147423505783081 -182,0.09116792678833008 -183,0.0908648818731308 -184,0.09056516736745834 -185,0.09026893228292465 -186,0.08997604250907898 -187,0.08968625962734222 -188,0.08939956873655319 -189,0.08911578357219696 -190,0.088834747672081 -191,0.08855653554201126 -192,0.08828090876340866 -193,0.08800781518220901 -194,0.08773719519376755 -195,0.08746886253356934 -196,0.08720268309116364 -197,0.08693866431713104 -198,0.0866766944527626 -199,0.08641664683818817 -200,0.08615875244140625 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.125/training_config.txt deleted file mode 100644 index 912adbb..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.125/training_log.csv deleted file mode 100644 index 39628c0..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,176.11407470703125 -2,20.90680694580078 -3,10.343158721923828 -4,5.564237117767334 -5,299.4178466796875 -6,3.515578031539917 
-7,4.181884765625 -8,4.5520853996276855 -9,4.118020534515381 -10,3.1396420001983643 -11,3.016195774078369 -12,2.9610190391540527 -13,2.845594644546509 -14,2.5918993949890137 -15,2.5080912113189697 -16,2.4898431301116943 -17,2.4796271324157715 -18,2.467200756072998 -19,2.451497793197632 -20,2.4292054176330566 -21,2.401172161102295 -22,2.3676984310150146 -23,2.3297786712646484 -24,2.2873942852020264 -25,2.242206573486328 -26,2.1946403980255127 -27,2.145967483520508 -28,2.0980029106140137 -29,2.0508956909179688 -30,2.0051536560058594 -31,1.9616554975509644 -32,1.920804500579834 -33,1.8825252056121826 -34,1.8464211225509644 -35,1.8120287656784058 -36,1.778865933418274 -37,1.7465609312057495 -38,1.7148520946502686 -39,1.6835545301437378 -40,1.6525328159332275 -41,1.6216875314712524 -42,1.5908851623535156 -43,1.5600417852401733 -44,1.5290682315826416 -45,1.4979007244110107 -46,1.4664853811264038 -47,1.4347745180130005 -48,1.4027254581451416 -49,1.3703069686889648 -50,1.3375060558319092 -51,1.304364562034607 -52,1.2708966732025146 -53,1.2371288537979126 -54,1.2030768394470215 -55,1.1687201261520386 -56,1.1341091394424438 -57,1.099239468574524 -58,1.064109206199646 -59,1.028754472732544 -60,0.9933640956878662 -61,0.9582376480102539 -62,0.9236214756965637 -63,0.8896690607070923 -64,0.856543242931366 -65,0.8243265151977539 -66,0.7931535840034485 -67,0.7630778551101685 -68,0.7341688871383667 -69,0.7064357399940491 -70,0.6797983050346375 -71,0.6541531682014465 -72,0.6294309496879578 -73,0.6055945158004761 -74,0.5825569033622742 -75,0.560275137424469 -76,0.538723886013031 -77,0.517784833908081 -78,0.49744269251823425 -79,0.4776992201805115 -80,0.4585326611995697 -81,0.43992382287979126 -82,0.4218767285346985 -83,0.40428751707077026 -84,0.3872094750404358 -85,0.3706008195877075 -86,0.35440024733543396 -87,0.33866748213768005 -88,0.3233902156352997 -89,0.3085327744483948 -90,0.29402145743370056 -91,0.27984294295310974 -92,0.26604267954826355 -93,0.2526630461215973 -94,0.2397495061159134 -95,0.2273215800523758 -96,0.2153826355934143 -97,0.20396122336387634 -98,0.1930726170539856 -99,0.18276359140872955 -100,0.17305231094360352 -101,0.16392892599105835 -102,0.1553853452205658 -103,0.14743776619434357 -104,0.14011448621749878 -105,0.1334197074174881 -106,0.1273578554391861 -107,0.12193267047405243 -108,0.1171482726931572 -109,0.11299115419387817 -110,0.10944000631570816 -111,0.10648379474878311 -112,0.10409098863601685 -113,0.10222309082746506 -114,0.10082794725894928 -115,0.09984596073627472 -116,0.09920808672904968 -117,0.09883937984704971 -118,0.09867122024297714 -119,0.09863732010126114 -120,0.09867554903030396 -121,0.0987325981259346 -122,0.09876465797424316 -123,0.09873900562524796 -124,0.09863312542438507 -125,0.09843408316373825 -126,0.09813687205314636 -127,0.09774363785982132 -128,0.09726229310035706 -129,0.09670525789260864 -130,0.09608735889196396 -131,0.09542469680309296 -132,0.09473311901092529 -133,0.09402836114168167 -134,0.09332433342933655 -135,0.09263268858194351 -136,0.09196318686008453 -137,0.09132297337055206 -138,0.09071679413318634 -139,0.09014734625816345 -140,0.0896153524518013 -141,0.08912009745836258 -142,0.0886596292257309 -143,0.08823104947805405 -144,0.08783113211393356 -145,0.08745582401752472 -146,0.08710121363401413 -147,0.08676368743181229 -148,0.08643955737352371 -149,0.08612554520368576 -150,0.08581910282373428 -151,0.08551790565252304 -152,0.08522023260593414 -153,0.08492486923933029 -154,0.08463066071271896 -155,0.0843372493982315 -156,0.08404421806335449 
-157,0.08375150710344315 -158,0.08345936983823776 -159,0.08316804468631744 -160,0.08287810534238815 -161,0.08258999139070511 -162,0.08230426162481308 -163,0.08202173560857773 -164,0.08174274116754532 -165,0.08146766573190689 -166,0.08119703084230423 -167,0.08093128353357315 -168,0.08067053556442261 -169,0.08041492104530334 -170,0.08016468584537506 -171,0.07991964370012283 -172,0.07967975735664368 -173,0.0794447660446167 -174,0.0792144387960434 -175,0.0789887085556984 -176,0.07876714318990707 -177,0.07854937762022018 -178,0.07833503186702728 -179,0.0781240165233612 -180,0.07791601866483688 -181,0.07771079242229462 -182,0.07750820368528366 -183,0.07730801403522491 -184,0.07711024582386017 -185,0.0769149586558342 -186,0.07672189921140671 -187,0.07653118669986725 -188,0.07634278386831284 -189,0.07615669071674347 -190,0.0759730264544487 -191,0.07579157501459122 -192,0.0756126344203949 -193,0.07543593645095825 -194,0.07526160776615143 -195,0.07508973032236099 -196,0.074920155107975 -197,0.07475291192531586 -198,0.07458797097206116 -199,0.07442529499530792 -200,0.07426470518112183 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.160/training_config.txt deleted file mode 100644 index e8255bb..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 
3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.160/training_log.csv deleted file mode 100644 index e562976..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,209.6147003173828 -2,10.394539833068848 -3,12.15915298461914 -4,18.245380401611328 -5,18.363075256347656 -6,18.686405181884766 -7,17.032630920410156 -8,15.344280242919922 -9,12.701509475708008 -10,11.291228294372559 -11,10.301063537597656 -12,9.395920753479004 -13,8.557830810546875 -14,7.815526485443115 -15,7.173956871032715 -16,6.642879486083984 -17,6.194134712219238 -18,5.794510364532471 -19,5.427369594573975 -20,5.094376564025879 -21,4.790121078491211 -22,4.511061668395996 -23,4.254693031311035 -24,4.019528388977051 -25,3.7995753288269043 -26,3.5951123237609863 -27,3.4048452377319336 -28,3.2246041297912598 -29,3.0527799129486084 -30,2.8856282234191895 -31,2.7187092304229736 -32,2.5512964725494385 -33,2.384911298751831 -34,2.234035015106201 -35,2.096526622772217 -36,1.977124810218811 -37,1.8749876022338867 -38,1.7836053371429443 -39,1.70082688331604 -40,1.6239609718322754 -41,1.5528428554534912 -42,1.4875340461730957 -43,1.427006721496582 -44,1.3695487976074219 -45,1.314002275466919 -46,1.259489893913269 -47,1.205415964126587 -48,1.1514180898666382 -49,1.0973020792007446 -50,1.042923092842102 -51,0.9881466627120972 -52,0.9303571581840515 -53,0.8673008680343628 -54,0.8003977537155151 -55,0.7291027903556824 -56,0.6557955145835876 -57,0.5847890377044678 -58,0.5178061723709106 -59,0.45673081278800964 -60,0.4027137756347656 -61,0.35708025097846985 -62,0.3195110857486725 -63,0.28813832998275757 -64,0.2607227563858032 -65,0.23651504516601562 -66,0.21509037911891937 -67,0.1963215172290802 -68,0.18010593950748444 -69,0.16572555899620056 -70,0.1527070552110672 -71,0.14079225063323975 -72,0.12979590892791748 -73,0.11967569589614868 -74,0.11036543548107147 -75,0.10188645124435425 -76,0.0941431000828743 -77,0.08704350888729095 -78,0.08045554906129837 -79,0.07435093075037003 -80,0.06887155026197433 -81,0.0641758143901825 -82,0.06039080396294594 -83,0.05737820267677307 -84,0.055059947073459625 -85,0.05333847180008888 -86,0.05213599652051926 -87,0.051382824778556824 -88,0.05099806934595108 -89,0.05088731646537781 -90,0.050965044647455215 -91,0.051170334219932556 -92,0.05144746974110603 -93,0.05174466222524643 -94,0.052023548632860184 -95,0.052257511764764786 -96,0.05242965370416641 -97,0.05251995474100113 -98,0.05251540243625641 -99,0.05241824686527252 -100,0.05223216488957405 -101,0.05196288973093033 -102,0.05163854360580444 -103,0.05127963051199913 -104,0.05090521648526192 -105,0.05053160339593887 -106,0.050172045826911926 -107,0.0498366542160511 -108,0.04953135922551155 -109,0.04925842955708504 -110,0.04901707172393799 -111,0.04879637435078621 -112,0.048592351377010345 -113,0.04840803146362305 -114,0.04824129492044449 -115,0.048089154064655304 -116,0.04794863983988762 -117,0.04781683534383774 -118,0.047690119594335556 -119,0.0475657619535923 -120,0.04744197055697441 -121,0.04731685668230057 -122,0.04718971997499466 -123,0.0470610074698925 -124,0.04693147540092468 -125,0.046801939606666565 -126,0.04667351022362709 -127,0.046547889709472656 -128,0.04642535001039505 -129,0.046306028962135315 -130,0.04618983715772629 -131,0.046076323837041855 -132,0.0459650494158268 -133,0.045855652540922165 
-134,0.04574742168188095 -135,0.045639537274837494 -136,0.0455315038561821 -137,0.045424338430166245 -138,0.04531775042414665 -139,0.045211561024188995 -140,0.04510558396577835 -141,0.04499988630414009 -142,0.04489462450146675 -143,0.04479009285569191 -144,0.044686514884233475 -145,0.04458431899547577 -146,0.044483765959739685 -147,0.0443851463496685 -148,0.04428869113326073 -149,0.04419443756341934 -150,0.044102445244789124 -151,0.04401256889104843 -152,0.04392470791935921 -153,0.04383860155940056 -154,0.04375404492020607 -155,0.04367082566022873 -156,0.04358871653676033 -157,0.04350754991173744 -158,0.04342715069651604 -159,0.043347496539354324 -160,0.043268561363220215 -161,0.043190184980630875 -162,0.04311239719390869 -163,0.04303520545363426 -164,0.04295876994729042 -165,0.042883120477199554 -166,0.04280828684568405 -167,0.04273426532745361 -168,0.042661089450120926 -169,0.0425887405872345 -170,0.04251718521118164 -171,0.04244642332196236 -172,0.04237641394138336 -173,0.04230703040957451 -174,0.04223821312189102 -175,0.04217002913355827 -176,0.04210251197218895 -177,0.04203559830784798 -178,0.04196932166814804 -179,0.04190360754728317 -180,0.041838500648736954 -181,0.041773952543735504 -182,0.04171004891395569 -183,0.04164670780301094 -184,0.041584063321352005 -185,0.04152202233672142 -186,0.04146061837673187 -187,0.041399795562028885 -188,0.04133955016732216 -189,0.041279878467321396 -190,0.04122074320912361 -191,0.0411621555685997 -192,0.041104063391685486 -193,0.04104645177721977 -194,0.040989264845848083 -195,0.04093253239989281 -196,0.04087616130709648 -197,0.04082019254565239 -198,0.04076455533504486 -199,0.040709346532821655 -200,0.04065453261137009 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.200/training_config.txt deleted file mode 100644 index 55106ba..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 
1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.200/training_log.csv deleted file mode 100644 index b53d670..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,182.62867736816406 -2,5.271470546722412 -3,5.519219875335693 -4,4.898951530456543 -5,4.761758327484131 -6,4.577480792999268 -7,4.344464302062988 -8,4.073909759521484 -9,3.7657108306884766 -10,3.4058594703674316 -11,2.9714388847351074 -12,2.5796470642089844 -13,2.2804012298583984 -14,2.0382513999938965 -15,1.8301280736923218 -16,1.641526460647583 -17,1.467961072921753 -18,1.3012394905090332 -19,1.1423128843307495 -20,0.9867765307426453 -21,0.8326084017753601 -22,0.698262631893158 -23,0.5933031439781189 -24,0.5091028809547424 -25,0.4415326714515686 -26,0.387540340423584 -27,0.34626686573028564 -28,0.3167427182197571 -29,0.2970278263092041 -30,0.2851030230522156 -31,0.27879175543785095 -32,0.2752405107021332 -33,0.27208349108695984 -34,0.26676416397094727 -35,0.2576713562011719 -36,0.24443139135837555 -37,0.2278524488210678 -38,0.20937590301036835 -39,0.1906605362892151 -40,0.17305122315883636 -41,0.15733587741851807 -42,0.1438778191804886 -43,0.1329505294561386 -44,0.12430527806282043 -45,0.11752218008041382 -46,0.11220310628414154 -47,0.10801254957914352 -48,0.10465913265943527 -49,0.10191574692726135 -50,0.09960411489009857 -51,0.09755606204271317 -52,0.09556099027395248 -53,0.09352397918701172 -54,0.09141772240400314 -55,0.0892033502459526 -56,0.08687268197536469 -57,0.08444901555776596 -58,0.08200330287218094 -59,0.07957632839679718 -60,0.07721102982759476 -61,0.07495470345020294 -62,0.07284881919622421 -63,0.07092598080635071 -64,0.06921709328889847 -65,0.06772401928901672 -66,0.06644231826066971 -67,0.06536222994327545 -68,0.06446817517280579 -69,0.06373651325702667 -70,0.06313648074865341 -71,0.06263992935419083 -72,0.0622214674949646 -73,0.06185442581772804 -74,0.06151716783642769 -75,0.06119232252240181 -76,0.0608680360019207 -77,0.06053615361452103 -78,0.060192372649908066 -79,0.05983719602227211 -80,0.05947238951921463 -81,0.05910006910562515 -82,0.05872808396816254 -83,0.05836248770356178 -84,0.058007530868053436 -85,0.0576682947576046 -86,0.05734937638044357 -87,0.057052645832300186 -88,0.05677904561161995 -89,0.05652406066656113 -90,0.05628630891442299 -91,0.056063804775476456 -92,0.055854324251413345 -93,0.05565528944134712 -94,0.055463388562202454 -95,0.05527627840638161 -96,0.05509215593338013 -97,0.05490940809249878 -98,0.05472704395651817 -99,0.054544467478990555 -100,0.054361507296562195 -101,0.05417829006910324 -102,0.05399533733725548 -103,0.05381312593817711 -104,0.05363224819302559 -105,0.05345345288515091 -106,0.05327736586332321 -107,0.05310458689928055 
-108,0.05293545871973038 -109,0.05277065187692642 -110,0.052609845995903015 -111,0.05245232582092285 -112,0.0522979199886322 -113,0.052146539092063904 -114,0.05199792608618736 -115,0.05185176059603691 -116,0.05170781910419464 -117,0.05156608298420906 -118,0.05142625793814659 -119,0.0512884147465229 -120,0.05115228146314621 -121,0.05101789906620979 -122,0.05088507756590843 -123,0.05075398460030556 -124,0.050624605268239975 -125,0.0504969097673893 -126,0.050370953977108 -127,0.05024689435958862 -128,0.0501246452331543 -129,0.05000416189432144 -130,0.04988541081547737 -131,0.04976830631494522 -132,0.04965285584330559 -133,0.049539051949977875 -134,0.0494268424808979 -135,0.04931612312793732 -136,0.04920697584748268 -137,0.049099233001470566 -138,0.048992957919836044 -139,0.04888817295432091 -140,0.04878466948866844 -141,0.04868251085281372 -142,0.048581670969724655 -143,0.04848209023475647 -144,0.04838383197784424 -145,0.04828682169318199 -146,0.04819092899560928 -147,0.048096269369125366 -148,0.04800259694457054 -149,0.047910090535879135 -150,0.04781867936253548 -151,0.04772830381989479 -152,0.04763900861144066 -153,0.04755068197846413 -154,0.047463349997997284 -155,0.04737694188952446 -156,0.04729144275188446 -157,0.04720691964030266 -158,0.047123298048973083 -159,0.04704052209854126 -160,0.04695866256952286 -161,0.046877652406692505 -162,0.046797484159469604 -163,0.046718038618564606 -164,0.04663943871855736 -165,0.046561602503061295 -166,0.046484559774398804 -167,0.04640825465321541 -168,0.0463327132165432 -169,0.04625788331031799 -170,0.04618379473686218 -171,0.04611041769385338 -172,0.04603777080774307 -173,0.04596575349569321 -174,0.04589449614286423 -175,0.04582389444112778 -176,0.04575398564338684 -177,0.04568471387028694 -178,0.0456160306930542 -179,0.045547984540462494 -180,0.04548046737909317 -181,0.04541356489062309 -182,0.04534725844860077 -183,0.045281458646059036 -184,0.04521623253822327 -185,0.04515151306986809 -186,0.04508733004331589 -187,0.04502365365624428 -188,0.04496048390865326 -189,0.04489782452583313 -190,0.04483566805720329 -191,0.04477401077747345 -192,0.04471278935670853 -193,0.04465208947658539 -194,0.04459185153245926 -195,0.04453209787607193 -196,0.044472865760326385 -197,0.04441407695412636 -198,0.044355668127536774 -199,0.044297683984041214 -200,0.04424003139138222 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.250/training_config.txt deleted file mode 100644 index 5f6066a..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = 
t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.250/training_log.csv deleted file mode 100644 index 422388e..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,274.4598083496094 -2,15.490995407104492 -3,73849.8125 -4,45.42102813720703 -5,17.613985061645508 -6,17.97882652282715 -7,18.950223922729492 -8,23.565357208251953 -9,5502.84130859375 -10,11.614291191101074 -11,12.21872615814209 -12,12.204355239868164 -13,12.585772514343262 -14,12.828717231750488 -15,13.04027271270752 -16,13.220087051391602 -17,13.383810043334961 -18,13.53294563293457 -19,13.663265228271484 -20,13.771559715270996 -21,13.837691307067871 -22,13.862445831298828 -23,13.854166030883789 -24,13.827964782714844 -25,13.787884712219238 -26,13.735812187194824 -27,13.674245834350586 -28,13.595763206481934 -29,13.497817993164062 -30,13.39732551574707 -31,13.289875984191895 -32,13.168659210205078 -33,13.044819831848145 -34,12.921599388122559 -35,12.795453071594238 -36,12.666022300720215 -37,12.5336332321167 -38,12.397357940673828 -39,12.259156227111816 -40,12.121049880981445 -41,11.984146118164062 -42,11.848427772521973 -43,11.71273136138916 -44,11.57732105255127 -45,11.441656112670898 -46,11.305400848388672 -47,11.169473648071289 -48,11.03153133392334 -49,10.890631675720215 -50,10.74940299987793 -51,10.609684944152832 -52,10.470117568969727 -53,10.334979057312012 -54,10.205514907836914 -55,10.081390380859375 -56,9.962167739868164 -57,9.848044395446777 -58,9.738497734069824 -59,9.633280754089355 -60,9.53217887878418 -61,9.434867858886719 -62,9.34122085571289 -63,9.250811576843262 -64,9.16308879852295 -65,9.078051567077637 -66,8.995345115661621 -67,8.914347648620605 -68,8.834832191467285 -69,8.756637573242188 -70,8.679828643798828 -71,8.60414981842041 -72,8.529358863830566 -73,8.455126762390137 -74,8.381013870239258 -75,8.306768417358398 -76,8.231986999511719 -77,8.156332015991211 -78,8.07944393157959 -79,8.000763893127441 -80,7.920838832855225 -81,7.837929725646973 -82,7.747119426727295 -83,7.649228572845459 -84,7.5459160804748535 -85,7.440932750701904 
-86,7.339196681976318 -87,7.245208740234375 -88,7.163267612457275 -89,7.090829372406006 -90,7.023871421813965 -91,6.960822582244873 -92,6.90053129196167 -93,6.842490196228027 -94,6.786232948303223 -95,6.731329441070557 -96,6.677451133728027 -97,6.6245574951171875 -98,6.572465419769287 -99,6.520960807800293 -100,6.469996452331543 -101,6.419576644897461 -102,6.36970329284668 -103,6.321098804473877 -104,6.2740960121154785 -105,6.229166030883789 -106,6.186796188354492 -107,6.147137641906738 -108,6.110288143157959 -109,6.076019287109375 -110,6.0440449714660645 -111,6.014143943786621 -112,5.9860382080078125 -113,5.959293842315674 -114,5.933615207672119 -115,5.908776760101318 -116,5.8846282958984375 -117,5.8610076904296875 -118,5.83779239654541 -119,5.815075874328613 -120,5.792727470397949 -121,5.77067756652832 -122,5.748890399932861 -123,5.727352142333984 -124,5.706091403961182 -125,5.685227394104004 -126,5.664623737335205 -127,5.6438446044921875 -128,5.622891902923584 -129,5.601781845092773 -130,5.5805768966674805 -131,5.559476852416992 -132,5.538517951965332 -133,5.517658233642578 -134,5.49690580368042 -135,5.4762983322143555 -136,5.45583963394165 -137,5.435550212860107 -138,5.41543436050415 -139,5.395445823669434 -140,5.375572681427002 -141,5.355815887451172 -142,5.336178302764893 -143,5.316654682159424 -144,5.297757625579834 -145,5.280020713806152 -146,5.263798236846924 -147,5.248722553253174 -148,5.234397888183594 -149,5.220685958862305 -150,5.207486629486084 -151,5.1947526931762695 -152,5.182435035705566 -153,5.170506954193115 -154,5.158888339996338 -155,5.147578239440918 -156,5.136494159698486 -157,5.12558650970459 -158,5.114840984344482 -159,5.104239463806152 -160,5.0937700271606445 -161,5.0834150314331055 -162,5.0731635093688965 -163,5.0630035400390625 -164,5.052942752838135 -165,5.042968273162842 -166,5.033072471618652 -167,5.023250579833984 -168,5.013509273529053 -169,5.00383996963501 -170,4.99423885345459 -171,4.984702110290527 -172,4.975226402282715 -173,4.965811252593994 -174,4.956456184387207 -175,4.947165012359619 -176,4.93797492980957 -177,4.92897891998291 -178,4.920071601867676 -179,4.911218643188477 -180,4.902412414550781 -181,4.893649101257324 -182,4.88492488861084 -183,4.8762359619140625 -184,4.867578506469727 -185,4.858984470367432 -186,4.850448131561279 -187,4.841960430145264 -188,4.833515644073486 -189,4.825098514556885 -190,4.816729545593262 -191,4.808408260345459 -192,4.800127983093262 -193,4.791863918304443 -194,4.783620834350586 -195,4.775398254394531 -196,4.767197608947754 -197,4.759026050567627 -198,4.750886917114258 -199,4.742762565612793 -200,4.7346510887146 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.300/training_config.txt deleted file mode 100644 index 4de5531..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = 
weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.300/training_log.csv deleted file mode 100644 index 1fcd0eb..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.300/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,492.01617431640625 -2,13.401321411132812 -3,18.337509155273438 -4,19.71282958984375 -5,14.222005844116211 -6,11.554130554199219 -7,9.353883743286133 -8,7.840353488922119 -9,7.886081695556641 -10,7.613690376281738 -11,7.438124179840088 -12,6.879708766937256 -13,6.254703998565674 -14,6.051534175872803 -15,5.851674556732178 -16,5.621561050415039 -17,5.372321605682373 -18,5.142682075500488 -19,4.952249526977539 -20,4.793293476104736 -21,4.656720161437988 -22,4.540838241577148 -23,4.4414448738098145 -24,4.352227210998535 -25,4.267849445343018 -26,4.184732913970947 -27,4.101089000701904 -28,4.016798973083496 -29,3.932859420776367 -30,3.8504858016967773 -31,3.7712223529815674 -32,3.696451425552368 -33,3.626095771789551 -34,3.558987855911255 -35,3.4950528144836426 -36,3.4336917400360107 -37,3.374769926071167 -38,3.317997455596924 -39,3.2630529403686523 -40,3.209686040878296 -41,3.157665491104126 -42,3.1067864894866943 -43,2.8061985969543457 -44,2.546762466430664 -45,2.2779500484466553 -46,1.9750951528549194 -47,1.7009499073028564 -48,1.4608994722366333 -49,1.2371753454208374 -50,9.350263595581055 -51,10.236865997314453 -52,3.7429451942443848 -53,0.9673624038696289 -54,1.3075644969940186 -55,1.6370640993118286 -56,1.8709008693695068 -57,2.042738437652588 -58,2.1508586406707764 -59,2.1827809810638428 -60,2.1543679237365723 -61,2.074617624282837 -62,1.9538440704345703 -63,1.8033522367477417 -64,1.6314122676849365 -65,1.44855535030365 -66,1.2541018724441528 -67,1.0571335554122925 -68,0.8704979419708252 -69,0.7096309661865234 -70,0.5984827876091003 -71,0.5141292810440063 
-72,0.469163179397583 -73,0.43695828318595886 -74,0.41282495856285095 -75,0.3957599401473999 -76,0.38484126329421997 -77,0.3792080581188202 -78,0.3778347373008728 -79,0.3795868754386902 -80,0.38333532214164734 -81,0.38804158568382263 -82,0.3928051292896271 -83,0.39688023924827576 -84,0.3996945023536682 -85,0.4008459746837616 -86,0.40009376406669617 -87,0.3973468244075775 -88,0.39263996481895447 -89,0.3861142694950104 -90,0.377990186214447 -91,0.3685402572154999 -92,0.3580667972564697 -93,0.34687668085098267 -94,0.33526575565338135 -95,0.32350608706474304 -96,0.31183576583862305 -97,0.3004523813724518 -98,0.28951239585876465 -99,0.27912989258766174 -100,0.26937904953956604 -101,0.26029860973358154 -102,0.25189638137817383 -103,0.2441558688879013 -104,0.2370406538248062 -105,0.23049946129322052 -106,0.2244754135608673 -107,0.21890538930892944 -108,0.2137247920036316 -109,0.20887145400047302 -110,0.2042870819568634 -111,0.1999182552099228 -112,0.1957191526889801 -113,0.19164901971817017 -114,0.18767407536506653 -115,0.18377037346363068 -116,0.1799197494983673 -117,0.17610882222652435 -118,0.17232964932918549 -119,0.16857829689979553 -120,0.1648542284965515 -121,0.16115984320640564 -122,0.1574997901916504 -123,0.15388008952140808 -124,0.15030749142169952 -125,0.1467892825603485 -126,0.14333301782608032 -127,0.13994528353214264 -128,0.13663291931152344 -129,0.13340091705322266 -130,0.1302541047334671 -131,0.12719577550888062 -132,0.12422814965248108 -133,0.12135253846645355 -134,0.11856943368911743 -135,0.11587966233491898 -136,0.1132817417383194 -137,0.11077295243740082 -138,0.10835054516792297 -139,0.10601133108139038 -140,0.10375218838453293 -141,0.1015697494149208 -142,0.09946031868457794 -143,0.09742076694965363 -144,0.09544848650693893 -145,0.0935416892170906 -146,0.09170228987932205 -147,0.08992843329906464 -148,0.08821743726730347 -149,0.0865698903799057 -150,0.08497924357652664 -151,0.08344058692455292 -152,0.08195306360721588 -153,0.08051317930221558 -154,0.07911967486143112 -155,0.07777165621519089 -156,0.07646798342466354 -157,0.07520775496959686 -158,0.07399050146341324 -159,0.07281556725502014 -160,0.07168197631835938 -161,0.07058874517679214 -162,0.06953506916761398 -163,0.06852015852928162 -164,0.0675429105758667 -165,0.06660240888595581 -166,0.06569720059633255 -167,0.06482691317796707 -168,0.06399072706699371 -169,0.06318731606006622 -170,0.062415506690740585 -171,0.061673834919929504 -172,0.06096185743808746 -173,0.06027860939502716 -174,0.059623006731271744 -175,0.05899392440915108 -176,0.05839039757847786 -177,0.057811345905065536 -178,0.05725608766078949 -179,0.05672430247068405 -180,0.05621461197733879 -181,0.05572616681456566 -182,0.05525827407836914 -183,0.054809924215078354 -184,0.05438029766082764 -185,0.053969092667102814 -186,0.05357567220926285 -187,0.0531991645693779 -188,0.052838895469903946 -189,0.05249422788619995 -190,0.0521644689142704 -191,0.05184890329837799 -192,0.05154670774936676 -193,0.05125763639807701 -194,0.05098120495676994 -195,0.050716742873191833 -196,0.050463806837797165 -197,0.05022170767188072 -198,0.049989983439445496 -199,0.04976814240217209 -200,0.049555618315935135 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.400/training_config.txt deleted file mode 100644 index d552f10..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.400/training_log.csv deleted file mode 100644 index 443ad73..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.400/training_log.csv +++ /dev/null @@ -1,18 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,5422.8564453125 -2,13.61970043182373 -3,13.728039741516113 -4,15790.015625 -5,11.498286247253418 -6,13.05815315246582 -7,14.18200969696045 -8,15.17587947845459 -9,15.950647354125977 -10,16.586803436279297 -11,17.14067840576172 -12,17.609283447265625 -13,nan -14,nan -15,nan -16,nan diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.500/training_config.txt deleted file mode 100644 index b72987d..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - 
weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.500/training_log.csv deleted file mode 100644 index b9a162c..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.500/training_log.csv +++ /dev/null @@ -1,120 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,37032.5625 -2,126483.1328125 -3,127093.703125 -4,126022.6640625 -5,113224.515625 -6,93641.8359375 -7,61508.5390625 -8,24295.208984375 -9,2256.920654296875 -10,1242.16162109375 -11,459.1148681640625 -12,17876.73828125 -13,124746.359375 -14,122902.09375 -15,119095.1015625 -16,111570.1328125 -17,105717.546875 -18,101568.40625 -19,99957.6640625 -20,99447.6015625 -21,98663.125 -22,94503.671875 -23,56348.765625 -24,12100.052734375 -25,2060.07080078125 -26,717.1099853515625 -27,363.3665771484375 -28,239.5377960205078 -29,186.4116668701172 -30,119646.953125 -31,116184.609375 -32,118549.3203125 -33,116342.5625 -34,116116.4296875 -35,115921.9453125 -36,120556.953125 -37,120273.40625 -38,115138.0859375 -39,114859.3125 -40,114608.125 -41,116255.625 -42,115823.28125 -43,114175.6640625 -44,119139.6171875 -45,119145.109375 -46,119145.8828125 -47,119144.1015625 -48,119130.8203125 -49,119105.0 -50,119052.71875 -51,116121.7890625 -52,116175.4296875 -53,116192.328125 -54,116158.1484375 -55,116106.8359375 -56,116041.3984375 -57,115965.9296875 -58,115875.3515625 -59,115899.6328125 -60,115609.7421875 -61,115596.6953125 -62,115237.9609375 -63,114736.078125 -64,109480.9453125 -65,110353.7109375 -66,110123.7265625 -67,109138.8984375 -68,108718.9765625 -69,109165.859375 -70,113618.6875 -71,118504.03125 -72,119247.40625 -73,119696.5390625 -74,119820.15625 -75,119908.703125 -76,119979.296875 -77,120042.5625 -78,120103.625 -79,120154.21875 -80,120195.78125 
-81,120234.15625 -82,122437.0859375 -83,122462.3125 -84,122474.84375 -85,122486.09375 -86,122493.3359375 -87,122497.7890625 -88,122500.6640625 -89,122502.78125 -90,122504.578125 -91,122508.1171875 -92,122488.078125 -93,122448.125 -94,122437.4609375 -95,122452.0 -96,122423.0390625 -97,122383.1171875 -98,122308.296875 -99,120409.515625 -100,120406.84375 -101,120395.0234375 -102,120377.03125 -103,120350.2109375 -104,120313.984375 -105,120266.8828125 -106,120240.734375 -107,120230.6328125 -108,120214.296875 -109,120185.8828125 -110,120148.0859375 -111,120096.46875 -112,120019.1328125 -113,116824.8125 -114,116824.8125 -115,116824.8125 -116,116824.8125 -117,116824.8125 -118,116824.8125 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.600/training_config.txt deleted file mode 100644 index c2d4260..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.600/training_log.csv deleted file mode 100644 index 058ab45..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.600/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,593.6863403320312 
-1,79010.859375 -2,127744.5546875 -3,127875.796875 -4,127875.796875 -5,127875.796875 -6,127875.796875 -7,127875.796875 -8,127875.796875 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.700/training_config.txt deleted file mode 100644 index cb9c9a2..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.700/training_log.csv deleted file mode 100644 index 0f722c7..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,103505.5703125 -2,127865.8828125 -3,127875.796875 -4,127875.796875 -5,127875.796875 -6,127875.796875 -7,127875.796875 -8,127875.796875 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.800/training_config.txt deleted file mode 100644 index 82c3966..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.800/training_log.csv deleted file mode 100644 index 9116579..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,115165.7890625 -2,127875.796875 -3,127875.796875 -4,127875.796875 -5,127875.796875 -6,127875.796875 -7,127875.796875 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_0.900/training_config.txt deleted file mode 100644 index 3c0f4ed..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # 
Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_0.900/training_log.csv deleted file mode 100644 index 01eb320..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,122179.375 -2,127875.796875 -3,127875.796875 -4,127875.796875 -5,127875.796875 -6,127875.796875 -7,127875.796875 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_1.000/training_config.txt deleted file mode 100644 index ec2dd21..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] 
-[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_1.000/training_log.csv deleted file mode 100644 index 55d1e77..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,125197.984375 -2,127875.796875 -3,127875.796875 -4,127875.796875 -5,127875.796875 -6,127875.796875 -7,127875.796875 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_16.000/training_config.txt deleted file mode 100644 index 5f64f93..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, 
-3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_16.000/training_log.csv deleted file mode 100644 index 7633953..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,128242.46875 -2,128242.46875 -3,128242.46875 -4,128242.46875 -5,128242.46875 -6,128242.46875 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_2.000/training_config.txt deleted file mode 100644 index c0c380b..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_2.000/training_log.csv 
b/training/time_weighting_learning_rate_sweep/cubic/lr_2.000/training_log.csv deleted file mode 100644 index 7633953..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,128242.46875 -2,128242.46875 -3,128242.46875 -4,128242.46875 -5,128242.46875 -6,128242.46875 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_4.000/training_config.txt deleted file mode 100644 index 13f4af5..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_4.000/training_log.csv deleted file mode 100644 index 7633953..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,128242.46875 -2,128242.46875 -3,128242.46875 -4,128242.46875 -5,128242.46875 -6,128242.46875 diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic/lr_8.000/training_config.txt deleted file mode 100644 index d3c89ca..0000000 --- 
a/training/time_weighting_learning_rate_sweep/cubic/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * t_span**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic/lr_8.000/training_log.csv deleted file mode 100644 index 7633953..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,593.6863403320312 -1,128242.46875 -2,128242.46875 -3,128242.46875 -4,128242.46875 -5,128242.46875 -6,128242.46875 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.010/training_config.txt deleted file mode 100644 index a975792..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, 
min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.010/training_log.csv deleted file mode 100644 index 2e2ace2..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,49.196922302246094 -2,45.22605514526367 -3,42.04621505737305 -4,38.49067306518555 -5,35.33107376098633 -6,30.264371871948242 -7,27.637218475341797 -8,23.318727493286133 -9,21.193126678466797 -10,18.612873077392578 -11,16.15882110595703 -12,14.41136360168457 -13,13.260032653808594 -14,11.789100646972656 -15,10.655522346496582 -16,9.845915794372559 -17,8.871618270874023 -18,8.33676528930664 -19,7.724214553833008 -20,6.799914836883545 -21,6.45029878616333 -22,6.251034736633301 -23,5.802291393280029 -24,5.514447212219238 -25,5.15130090713501 -26,4.942483901977539 -27,4.774592399597168 -28,4.622729301452637 -29,4.495644569396973 -30,4.367247104644775 -31,4.255795955657959 -32,4.151123046875 -33,4.052879333496094 -34,3.9561822414398193 -35,3.851292371749878 -36,3.757176399230957 -37,3.6784656047821045 -38,3.5973317623138428 -39,3.4943668842315674 -40,3.4143216609954834 -41,3.347907066345215 -42,3.297858238220215 -43,3.2578554153442383 -44,3.2251787185668945 -45,3.1931846141815186 -46,3.1643717288970947 -47,3.136044502258301 -48,3.107806921005249 -49,3.0828440189361572 -50,3.0585145950317383 -51,3.034355878829956 -52,3.0103821754455566 -53,2.9865174293518066 -54,2.9628124237060547 -55,2.9394068717956543 -56,2.916208028793335 -57,2.8938918113708496 -58,2.8730051517486572 -59,2.853426456451416 -60,2.8344974517822266 -61,2.816237688064575 -62,2.7986156940460205 -63,2.7815418243408203 -64,2.764876365661621 
-65,2.748582363128662 -66,2.732512950897217 -67,2.7161691188812256 -68,2.6989858150482178 -69,2.690138578414917 -70,2.6806366443634033 -71,2.6707589626312256 -72,2.6625020503997803 -73,2.6586151123046875 -74,2.6539294719696045 -75,2.648391008377075 -76,2.642179012298584 -77,2.6354079246520996 -78,2.628166437149048 -79,2.6203699111938477 -80,2.611953020095825 -81,2.604595422744751 -82,2.5975539684295654 -83,2.5898168087005615 -84,2.582320213317871 -85,2.5759475231170654 -86,2.5695888996124268 -87,2.5631072521209717 -88,2.556474447250366 -89,2.5496976375579834 -90,2.5427982807159424 -91,2.5358028411865234 -92,2.52874493598938 -93,2.5216269493103027 -94,2.515127658843994 -95,2.5083608627319336 -96,2.501199960708618 -97,2.493743419647217 -98,2.48639178276062 -99,2.4792754650115967 -100,2.4720876216888428 -101,2.4648284912109375 -102,2.4576492309570312 -103,2.450507164001465 -104,2.443769931793213 -105,2.4375784397125244 -106,2.431763172149658 -107,2.4262733459472656 -108,2.420870065689087 -109,2.415492296218872 -110,2.4100759029388428 -111,2.4046013355255127 -112,2.3991246223449707 -113,2.393726110458374 -114,2.388538122177124 -115,2.383629560470581 -116,2.380382776260376 -117,2.376692533493042 -118,2.372931718826294 -119,2.3708274364471436 -120,2.3666486740112305 -121,2.363908529281616 -122,2.3610925674438477 -123,2.35758376121521 -124,2.3546414375305176 -125,2.352181911468506 -126,2.349501371383667 -127,2.3465235233306885 -128,2.3432745933532715 -129,2.3398239612579346 -130,2.3362832069396973 -131,2.3324053287506104 -132,2.329932451248169 -133,2.326641798019409 -134,2.3225719928741455 -135,2.322761297225952 -136,2.32112455368042 -137,2.3177490234375 -138,2.312580108642578 -139,2.309162139892578 -140,2.3061933517456055 -141,2.3042099475860596 -142,2.301410675048828 -143,2.2986395359039307 -144,2.2960994243621826 -145,2.2932770252227783 -146,2.2901902198791504 -147,2.2875490188598633 -148,2.2847962379455566 -149,2.2816781997680664 -150,2.2788569927215576 -151,2.276083469390869 -152,2.2730014324188232 -153,2.270106792449951 -154,2.267282009124756 -155,2.2641570568084717 -156,2.2614941596984863 -157,2.258505344390869 -158,2.255713939666748 -159,2.2530717849731445 -160,2.250607490539551 -161,2.248652458190918 -162,2.2458157539367676 -163,2.243135690689087 -164,2.240913152694702 -165,2.238556385040283 -166,2.2363548278808594 -167,2.2341389656066895 -168,2.2318825721740723 -169,2.229726552963257 -170,2.2274489402770996 -171,2.225296974182129 -172,2.2230870723724365 -173,2.2209455966949463 -174,2.218863010406494 -175,2.2167909145355225 -176,2.21480655670166 -177,2.212801456451416 -178,2.2109484672546387 -179,2.209136724472046 -180,2.2072763442993164 -181,2.205425500869751 -182,2.203690528869629 -183,2.2020163536071777 -184,2.2003798484802246 -185,2.198791265487671 -186,2.1972367763519287 -187,2.195720911026001 -188,2.1942386627197266 -189,2.192784547805786 -190,2.1913599967956543 -191,2.189957857131958 -192,2.1885762214660645 -193,2.18721342086792 -194,2.185864210128784 -195,2.184530258178711 -196,2.183215618133545 -197,2.1819212436676025 -198,2.180656671524048 -199,2.179417133331299 -200,2.1781928539276123 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.020/training_config.txt deleted file mode 100644 index 7f5c9c6..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.020/training_log.csv deleted file mode 100644 index 31c20f6..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,46.97041320800781 -2,39.18505859375 -3,31.82984733581543 -4,25.056865692138672 -5,18.760730743408203 -6,15.177528381347656 -7,11.045350074768066 -8,9.052659034729004 -9,7.208188056945801 -10,6.320859432220459 -11,5.774099826812744 -12,4.931272506713867 -13,4.53652286529541 -14,4.235463619232178 -15,3.960777997970581 -16,3.7318198680877686 -17,3.55281400680542 -18,3.4067375659942627 -19,3.2637648582458496 -20,3.130418062210083 -21,3.023850202560425 -22,2.923085927963257 -23,2.8333065509796143 -24,2.7590765953063965 -25,2.6984755992889404 -26,2.6437935829162598 -27,2.601924180984497 -28,2.562807083129883 -29,2.524601459503174 -30,2.484666109085083 -31,2.442453384399414 -32,2.4000799655914307 -33,2.3683695793151855 -34,2.3409416675567627 -35,2.3176157474517822 -36,2.301011085510254 -37,2.289968729019165 -38,2.2833590507507324 -39,2.2788689136505127 -40,2.2767763137817383 -41,2.274329900741577 -42,2.2678802013397217 -43,2.25899076461792 -44,2.2500741481781006 -45,2.2423861026763916 
-46,2.2357711791992188 -47,2.2296628952026367 -48,2.224259614944458 -49,2.2194864749908447 -50,2.215116024017334 -51,2.2109713554382324 -52,2.2069008350372314 -53,2.202803134918213 -54,2.198659896850586 -55,2.1945385932922363 -56,2.1905107498168945 -57,2.1866872310638428 -58,2.1831963062286377 -59,2.1801564693450928 -60,2.1776838302612305 -61,2.176024913787842 -62,2.1749045848846436 -63,2.173584222793579 -64,2.1721014976501465 -65,2.1704936027526855 -66,2.1687934398651123 -67,2.167020797729492 -68,2.1651949882507324 -69,2.1633377075195312 -70,2.1615607738494873 -71,2.160078763961792 -72,2.158825397491455 -73,2.157639503479004 -74,2.1564812660217285 -75,2.155333995819092 -76,2.1541969776153564 -77,2.153071403503418 -78,2.15195894241333 -79,2.150865316390991 -80,2.149799108505249 -81,2.1487605571746826 -82,2.1477532386779785 -83,2.146782398223877 -84,2.1458559036254883 -85,2.144977569580078 -86,2.1441478729248047 -87,2.1433699131011963 -88,2.142638921737671 -89,2.1419460773468018 -90,2.141280174255371 -91,2.1406259536743164 -92,2.1399776935577393 -93,2.139338493347168 -94,2.1387195587158203 -95,2.138129949569702 -96,2.1375696659088135 -97,2.1370363235473633 -98,2.1365230083465576 -99,2.1360249519348145 -100,2.1355361938476562 -101,2.135053873062134 -102,2.1345810890197754 -103,2.1341185569763184 -104,2.1336679458618164 -105,2.133230209350586 -106,2.1328060626983643 -107,2.132394790649414 -108,2.131995439529419 -109,2.1316051483154297 -110,2.131220817565918 -111,2.1308414936065674 -112,2.1304643154144287 -113,2.13008975982666 -114,2.1297178268432617 -115,2.12934947013855 -116,2.1289854049682617 -117,2.1286256313323975 -118,2.12827205657959 -119,2.127923011779785 -120,2.1275784969329834 -121,2.1272385120391846 -122,2.1269025802612305 -123,2.1265709400177 -124,2.1262428760528564 -125,2.125917911529541 -126,2.125596046447754 -127,2.125277280807495 -128,2.124962329864502 -129,2.124650239944458 -130,2.1243417263031006 -131,2.124035596847534 -132,2.1237337589263916 -133,2.1234350204467773 -134,2.1231396198272705 -135,2.122847318649292 -136,2.12255859375 -137,2.122272253036499 -138,2.121988296508789 -139,2.121706485748291 -140,2.121427297592163 -141,2.1211512088775635 -142,2.1208784580230713 -143,2.120608329772949 -144,2.120340585708618 -145,2.1200754642486572 -146,2.1198129653930664 -147,2.1195528507232666 -148,2.119295597076416 -149,2.119041681289673 -150,2.1187899112701416 -151,2.1185412406921387 -152,2.118295192718506 -153,2.118051052093506 -154,2.117809534072876 -155,2.1175711154937744 -156,2.1173346042633057 -157,2.117100954055786 -158,2.116868734359741 -159,2.116638660430908 -160,2.116410970687866 -161,2.1161856651306152 -162,2.115962266921997 -163,2.115741014480591 -164,2.1155219078063965 -165,2.115304946899414 -166,2.115089178085327 -167,2.114875078201294 -168,2.1146628856658936 -169,2.1144521236419678 -170,2.1142430305480957 -171,2.1140353679656982 -172,2.1138288974761963 -173,2.113624095916748 -174,2.1134214401245117 -175,2.113219738006592 -176,2.1130199432373047 -177,2.1128220558166504 -178,2.1126253604888916 -179,2.1124303340911865 -180,2.1122379302978516 -181,2.112046718597412 -182,2.1118574142456055 -183,2.1116697788238525 -184,2.1114840507507324 -185,2.1112990379333496 -186,2.111116409301758 -187,2.110934019088745 -188,2.110753297805786 -189,2.1105737686157227 -190,2.1103968620300293 -191,2.1102213859558105 -192,2.1100480556488037 -193,2.1098756790161133 -194,2.1097049713134766 -195,2.1095352172851562 -196,2.1093671321868896 -197,2.1092000007629395 -198,2.109034299850464 
-199,2.108870506286621 -200,2.108708143234253 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.040/training_config.txt deleted file mode 100644 index daafa2a..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.040/training_log.csv deleted file mode 100644 index 8ebdf20..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,42.689205169677734 -2,26.707687377929688 -3,17.172574996948242 -4,10.35483169555664 -5,6.889549732208252 -6,5.390281677246094 -7,4.304958343505859 -8,3.751757860183716 -9,3.4045238494873047 -10,3.1463465690612793 -11,2.940051555633545 -12,2.7922401428222656 -13,2.6790611743927 -14,2.58565092086792 -15,2.504937171936035 -16,2.4411187171936035 -17,2.389284372329712 -18,2.3444430828094482 -19,2.3070006370544434 -20,2.2757601737976074 -21,2.2489490509033203 -22,2.225580930709839 -23,2.204840660095215 -24,2.1874518394470215 -25,2.1752030849456787 
-26,2.1668572425842285 -27,2.1606569290161133 -28,2.1556668281555176 -29,2.151604175567627 -30,2.148218870162964 -31,2.1449286937713623 -32,2.1420040130615234 -33,2.1404318809509277 -34,2.1409902572631836 -35,2.1413509845733643 -36,2.1396360397338867 -37,2.137202739715576 -38,2.135873317718506 -39,2.1352875232696533 -40,2.1345648765563965 -41,2.133269786834717 -42,2.1313390731811523 -43,2.1288821697235107 -44,2.1261653900146484 -45,2.1234700679779053 -46,2.121016263961792 -47,2.118924617767334 -48,2.1172571182250977 -49,2.115985870361328 -50,2.1150619983673096 -51,2.1143875122070312 -52,2.113826036453247 -53,2.1133134365081787 -54,2.1128127574920654 -55,2.112287759780884 -56,2.1117193698883057 -57,2.111093282699585 -58,2.110410213470459 -59,2.1097400188446045 -60,2.1091134548187256 -61,2.108560085296631 -62,2.1081013679504395 -63,2.1077234745025635 -64,2.107405662536621 -65,2.107128620147705 -66,2.1068456172943115 -67,2.106537342071533 -68,2.106192111968994 -69,2.10581374168396 -70,2.105416774749756 -71,2.105015516281128 -72,2.1046292781829834 -73,2.104269027709961 -74,2.1039414405822754 -75,2.1036524772644043 -76,2.1033997535705566 -77,2.1031689643859863 -78,2.1029560565948486 -79,2.1027462482452393 -80,2.102534770965576 -81,2.1023175716400146 -82,2.1020941734313965 -83,2.101865530014038 -84,2.1016368865966797 -85,2.1014225482940674 -86,2.1012210845947266 -87,2.1010286808013916 -88,2.100847005844116 -89,2.100675106048584 -90,2.1005101203918457 -91,2.100348472595215 -92,2.1001899242401123 -93,2.1000335216522217 -94,2.099877119064331 -95,2.0997226238250732 -96,2.0995707511901855 -97,2.099421262741089 -98,2.0992722511291504 -99,2.099123954772949 -100,2.0989770889282227 -101,2.098832130432129 -102,2.0986876487731934 -103,2.098545789718628 -104,2.098405122756958 -105,2.0982666015625 -106,2.0981295108795166 -107,2.0979926586151123 -108,2.09785532951355 -109,2.0977184772491455 -110,2.097581148147583 -111,2.097443103790283 -112,2.097304582595825 -113,2.097165584564209 -114,2.0970258712768555 -115,2.096885919570923 -116,2.096747398376465 -117,2.0966098308563232 -118,2.096472978591919 -119,2.096337080001831 -120,2.0962016582489014 -121,2.0960676670074463 -122,2.0959343910217285 -123,2.0958030223846436 -124,2.095672845840454 -125,2.0955443382263184 -126,2.095416784286499 -127,2.0952911376953125 -128,2.0951666831970215 -129,2.095043420791626 -130,2.0949220657348633 -131,2.094801664352417 -132,2.0946831703186035 -133,2.09456729888916 -134,2.094453811645508 -135,2.0943424701690674 -136,2.094233751296997 -137,2.0941264629364014 -138,2.094020128250122 -139,2.0939157009124756 -140,2.0938124656677246 -141,2.093709945678711 -142,2.0936076641082764 -143,2.093506097793579 -144,2.09340500831604 -145,2.093303918838501 -146,2.093202829360962 -147,2.09310245513916 -148,2.0930018424987793 -149,2.0929014682769775 -150,2.0928006172180176 -151,2.092700242996216 -152,2.092599630355835 -153,2.092498540878296 -154,2.0923972129821777 -155,2.0922958850860596 -156,2.092193841934204 -157,2.0920920372009277 -158,2.0919904708862305 -159,2.091888904571533 -160,2.0917880535125732 -161,2.0916872024536133 -162,2.0915870666503906 -163,2.091487169265747 -164,2.09138822555542 -165,2.091289758682251 -166,2.0911927223205566 -167,2.0910959243774414 -168,2.0910000801086426 -169,2.090904712677002 -170,2.0908095836639404 -171,2.090715169906616 -172,2.0906214714050293 -173,2.0905284881591797 -174,2.0904359817504883 -175,2.0903446674346924 -176,2.090254545211792 -177,2.0901646614074707 -178,2.090075731277466 -179,2.089987277984619 
-180,2.0898988246917725 -181,2.089810609817505 -182,2.0897233486175537 -183,2.0896365642547607 -184,2.089550495147705 -185,2.0894649028778076 -186,2.0893795490264893 -187,2.089294910430908 -188,2.0892107486724854 -189,2.0891273021698 -190,2.0890443325042725 -191,2.088961362838745 -192,2.088878870010376 -193,2.088796854019165 -194,2.0887153148651123 -195,2.088634490966797 -196,2.0885539054870605 -197,2.0884745121002197 -198,2.088395357131958 -199,2.088317394256592 -200,2.088240623474121 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.050/training_config.txt deleted file mode 100644 index 00d7c57..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.050/training_log.csv deleted file mode 100644 index 0f98eaa..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,40.71481704711914 -2,22.224925994873047 -3,11.902595520019531 -4,7.372074604034424 -5,5.357335567474365 
-6,4.165529251098633 -7,3.567863702774048 -8,3.1792259216308594 -9,2.901702404022217 -10,2.71283221244812 -11,2.5687735080718994 -12,2.465667247772217 -13,2.3878426551818848 -14,2.3238561153411865 -15,2.2716445922851562 -16,2.2305712699890137 -17,2.1998484134674072 -18,2.1770665645599365 -19,2.1612961292266846 -20,2.1509780883789062 -21,2.1468844413757324 -22,2.146522045135498 -23,2.1444408893585205 -24,2.1503655910491943 -25,2.1576991081237793 -26,2.155385732650757 -27,2.1441867351531982 -28,2.1316771507263184 -29,2.1247506141662598 -30,2.122708797454834 -31,2.1226139068603516 -32,2.1227688789367676 -33,2.1228106021881104 -34,2.1226627826690674 -35,2.1219146251678467 -36,2.1204094886779785 -37,2.1182830333709717 -38,2.1158642768859863 -39,2.113541603088379 -40,2.1116485595703125 -41,2.1103103160858154 -42,2.109262704849243 -43,2.1085493564605713 -44,2.1082053184509277 -45,2.108008623123169 -46,2.107490062713623 -47,2.1064512729644775 -48,2.105144500732422 -49,2.103935718536377 -50,2.102989673614502 -51,2.1022777557373047 -52,2.101726770401001 -53,2.1012561321258545 -54,2.1008265018463135 -55,2.100499153137207 -56,2.100249767303467 -57,2.1000068187713623 -58,2.099703073501587 -59,2.099303960800171 -60,2.0988032817840576 -61,2.098233699798584 -62,2.0976481437683105 -63,2.0971105098724365 -64,2.0966806411743164 -65,2.096358299255371 -66,2.096113443374634 -67,2.0958938598632812 -68,2.0956578254699707 -69,2.0953848361968994 -70,2.0950703620910645 -71,2.0947227478027344 -72,2.094376564025879 -73,2.0940611362457275 -74,2.093790054321289 -75,2.0935750007629395 -76,2.093411922454834 -77,2.0932600498199463 -78,2.093102216720581 -79,2.092928171157837 -80,2.092743396759033 -81,2.0925540924072266 -82,2.092371702194214 -83,2.092196464538574 -84,2.0920302867889404 -85,2.091867208480835 -86,2.0917043685913086 -87,2.0915417671203613 -88,2.0913784503936768 -89,2.0912282466888428 -90,2.091085910797119 -91,2.090945243835449 -92,2.090808868408203 -93,2.0906784534454346 -94,2.090548038482666 -95,2.0904133319854736 -96,2.0902795791625977 -97,2.090153217315674 -98,2.0900280475616455 -99,2.0899055004119873 -100,2.08978533744812 -101,2.0896658897399902 -102,2.089545726776123 -103,2.0894243717193604 -104,2.0893023014068604 -105,2.089181423187256 -106,2.0890679359436035 -107,2.088954448699951 -108,2.0888378620147705 -109,2.088719367980957 -110,2.0886054039001465 -111,2.088494062423706 -112,2.088383436203003 -113,2.0882720947265625 -114,2.088160991668701 -115,2.0880510807037354 -116,2.087946653366089 -117,2.0878472328186035 -118,2.0877506732940674 -119,2.0876548290252686 -120,2.0875589847564697 -121,2.08746337890625 -122,2.087368965148926 -123,2.0872747898101807 -124,2.0871803760528564 -125,2.0870862007141113 -126,2.0869925022125244 -127,2.086899995803833 -128,2.0868096351623535 -129,2.0867199897766113 -130,2.0866310596466064 -131,2.086543321609497 -132,2.086456298828125 -133,2.086369276046753 -134,2.086282730102539 -135,2.0861964225769043 -136,2.0861117839813232 -137,2.0860280990600586 -138,2.085944890975952 -139,2.085862636566162 -140,2.0857808589935303 -141,2.085700273513794 -142,2.085620403289795 -143,2.085541009902954 -144,2.0854620933532715 -145,2.085383653640747 -146,2.0853052139282227 -147,2.0852272510528564 -148,2.085149049758911 -149,2.085071325302124 -150,2.084994077682495 -151,2.0849170684814453 -152,2.084839105606079 -153,2.084761142730713 -154,2.084681272506714 -155,2.084597587585449 -156,2.084512710571289 -157,2.0844271183013916 -158,2.084340810775757 -159,2.0842537879943848 -160,2.0841667652130127 
-161,2.0840795040130615 -162,2.0839922428131104 -163,2.08390474319458 -164,2.0838170051574707 -165,2.0837295055389404 -166,2.0836410522460938 -167,2.083552837371826 -168,2.0834648609161377 -169,2.0833773612976074 -170,2.083289861679077 -171,2.083203077316284 -172,2.083118200302124 -173,2.0830352306365967 -174,2.082953929901123 -175,2.0828750133514404 -176,2.082798719406128 -177,2.0827248096466064 -178,2.082653045654297 -179,2.0825839042663574 -180,2.082517147064209 -181,2.0824522972106934 -182,2.0823891162872314 -183,2.082327127456665 -184,2.082265853881836 -185,2.0822055339813232 -186,2.082146167755127 -187,2.082087278366089 -188,2.082028865814209 -189,2.081970453262329 -190,2.0819122791290283 -191,2.0818543434143066 -192,2.081796407699585 -193,2.0817384719848633 -194,2.0816810131073 -195,2.08162260055542 -196,2.081564426422119 -197,2.0815064907073975 -198,2.081448793411255 -199,2.081390142440796 -200,2.081331968307495 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.080/training_config.txt deleted file mode 100644 index 70fd739..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git 
a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.080/training_log.csv deleted file mode 100644 index c485102..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,34.9404182434082 -2,13.987346649169922 -3,6.592799186706543 -4,4.402707099914551 -5,3.524557590484619 -6,2.987480401992798 -7,2.682241201400757 -8,2.5073633193969727 -9,2.37965726852417 -10,2.297813653945923 -11,2.237412929534912 -12,2.1905572414398193 -13,2.1597204208374023 -14,2.1436662673950195 -15,2.1395411491394043 -16,2.1522927284240723 -17,2.1736581325531006 -18,2.151597738265991 -19,2.128288507461548 -20,2.1200475692749023 -21,2.1177475452423096 -22,2.11924147605896 -23,2.1237683296203613 -24,2.1281516551971436 -25,2.1298978328704834 -26,2.128230571746826 -27,2.1236650943756104 -28,2.1175696849823 -29,2.1118435859680176 -30,2.1079297065734863 -31,2.105846643447876 -32,2.105513572692871 -33,2.106898784637451 -34,2.108238458633423 -35,2.1074042320251465 -36,2.1054558753967285 -37,2.104160785675049 -38,2.103562355041504 -39,2.103151559829712 -40,2.1027872562408447 -41,2.102459192276001 -42,2.102109670639038 -43,2.101773738861084 -44,2.101443290710449 -45,2.1010959148406982 -46,2.1007416248321533 -47,2.1003963947296143 -48,2.100050449371338 -49,2.0997202396392822 -50,2.09936261177063 -51,2.0989511013031006 -52,2.098522424697876 -53,2.098123550415039 -54,2.097776174545288 -55,2.0974602699279785 -56,2.097177743911743 -57,2.0969038009643555 -58,2.096623659133911 -59,2.096304416656494 -60,2.0959620475769043 -61,2.0956242084503174 -62,2.0953104496002197 -63,2.0950238704681396 -64,2.0947818756103516 -65,2.094574213027954 -66,2.09438419342041 -67,2.0942087173461914 -68,2.0940306186676025 -69,2.093836545944214 -70,2.0936272144317627 -71,2.093407392501831 -72,2.0931899547576904 -73,2.0929694175720215 -74,2.0927529335021973 -75,2.0925445556640625 -76,2.092339515686035 -77,2.092129707336426 -78,2.091916084289551 -79,2.0917043685913086 -80,2.091494560241699 -81,2.0913102626800537 -82,2.0911448001861572 -83,2.090977191925049 -84,2.090801954269409 -85,2.0906267166137695 -86,2.090449094772339 -87,2.090273857116699 -88,2.0900988578796387 -89,2.0899224281311035 -90,2.0897443294525146 -91,2.0895652770996094 -92,2.0893938541412354 -93,2.0892298221588135 -94,2.0890727043151855 -95,2.088914155960083 -96,2.088754653930664 -97,2.0885865688323975 -98,2.088414430618286 -99,2.088252305984497 -100,2.0880985260009766 -101,2.087954521179199 -102,2.0878098011016846 -103,2.087661027908325 -104,2.087507486343384 -105,2.087355613708496 -106,2.087207555770874 -107,2.087064743041992 -108,2.086925745010376 -109,2.086789846420288 -110,2.0866541862487793 -111,2.0865161418914795 -112,2.086378574371338 -113,2.086242198944092 -114,2.0861082077026367 -115,2.0859758853912354 -116,2.0858466625213623 -117,2.0857176780700684 -118,2.0855860710144043 -119,2.085452079772949 -120,2.085314989089966 -121,2.0851762294769287 -122,2.0850369930267334 -123,2.08489727973938 -124,2.0847573280334473 -125,2.0846168994903564 -126,2.0844779014587402 -127,2.0843393802642822 -128,2.084204912185669 -129,2.0840752124786377 -130,2.083951234817505 -131,2.0838327407836914 -132,2.0837180614471436 -133,2.083606243133545 -134,2.08349609375 -135,2.0833873748779297 -136,2.0832793712615967 -137,2.083171844482422 -138,2.083063840866089 -139,2.0829548835754395 -140,2.0828466415405273 
-141,2.0827388763427734 -142,2.0826339721679688 -143,2.0825304985046387 -144,2.0824272632598877 -145,2.0823235511779785 -146,2.082219362258911 -147,2.0821163654327393 -148,2.0820152759552 -149,2.0819153785705566 -150,2.081815719604492 -151,2.0817158222198486 -152,2.081615924835205 -153,2.0815162658691406 -154,2.081416606903076 -155,2.081317663192749 -156,2.081218957901001 -157,2.081120491027832 -158,2.0810210704803467 -159,2.080921173095703 -160,2.0808210372924805 -161,2.080721616744995 -162,2.0806221961975098 -163,2.0805227756500244 -164,2.080422878265381 -165,2.080322265625 -166,2.080221652984619 -167,2.080120801925659 -168,2.0800206661224365 -169,2.0799198150634766 -170,2.0798189640045166 -171,2.079718828201294 -172,2.0796194076538086 -173,2.0795199871063232 -174,2.079420566558838 -175,2.0793216228485107 -176,2.079223871231079 -177,2.0791268348693848 -178,2.0790295600891113 -179,2.078933000564575 -180,2.0788373947143555 -181,2.0787429809570312 -182,2.0786499977111816 -183,2.0785579681396484 -184,2.0784661769866943 -185,2.0783743858337402 -186,2.0782828330993652 -187,2.078191041946411 -188,2.078099012374878 -189,2.0780069828033447 -190,2.077914237976074 -191,2.077821731567383 -192,2.0777289867401123 -193,2.077636480331421 -194,2.0775439739227295 -195,2.077451467514038 -196,2.0773589611053467 -197,2.0772666931152344 -198,2.077174425125122 -199,2.077082395553589 -200,2.0769901275634766 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.100/training_config.txt deleted file mode 100644 index a78745f..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] 
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.100/training_log.csv deleted file mode 100644 index f392a12..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,31.589841842651367 -2,12.353227615356445 -3,6.406772136688232 -4,4.111112117767334 -5,3.3155357837677 -6,2.897693157196045 -7,2.6386303901672363 -8,2.484741449356079 -9,2.3903772830963135 -10,2.328997850418091 -11,2.284961223602295 -12,2.251102924346924 -13,2.2251880168914795 -14,2.2076382637023926 -15,2.196115016937256 -16,2.1836907863616943 -17,2.172995090484619 -18,2.1589934825897217 -19,2.1449108123779297 -20,2.137437582015991 -21,2.137301206588745 -22,2.136183977127075 -23,2.130673885345459 -24,2.121845006942749 -25,2.113802671432495 -26,2.1123440265655518 -27,2.1145129203796387 -28,2.116966962814331 -29,2.118939161300659 -30,2.1193573474884033 -31,2.117875337600708 -32,2.11498761177063 -33,2.111794948577881 -34,2.109379768371582 -35,2.1081700325012207 -36,2.108182430267334 -37,2.109194040298462 -38,2.1096017360687256 -39,2.107769727706909 -40,2.105238437652588 -41,2.103343963623047 -42,2.1022417545318604 -43,2.101654291152954 -44,2.101447343826294 -45,2.1015195846557617 -46,2.1016788482666016 -47,2.1017227172851562 -48,2.1015467643737793 -49,2.1011288166046143 -50,2.10056734085083 -51,2.1000118255615234 -52,2.0995774269104004 -53,2.0992848873138428 -54,2.0990893840789795 -55,2.098942518234253 -56,2.0987884998321533 -57,2.098599672317505 -58,2.098339557647705 -59,2.098020315170288 -60,2.0976722240448 -61,2.0973494052886963 -62,2.0970823764801025 -63,2.096879482269287 -64,2.0967328548431396 -65,2.096593141555786 -66,2.0964038372039795 -67,2.096163272857666 -68,2.095900535583496 -69,2.0956475734710693 -70,2.095416784286499 -71,2.0952179431915283 -72,2.0950183868408203 -73,2.0948193073272705 -74,2.0946176052093506 -75,2.094411849975586 -76,2.0941858291625977 -77,2.0939605236053467 -78,2.0937435626983643 -79,2.0935351848602295 -80,2.09334135055542 -81,2.0931479930877686 -82,2.092949867248535 -83,2.092777729034424 -84,2.0926239490509033 -85,2.0924794673919678 -86,2.0923385620117188 -87,2.092195749282837 -88,2.0920538902282715 -89,2.0919151306152344 -90,2.091771364212036 -91,2.0916178226470947 -92,2.091456174850464 -93,2.091304063796997 -94,2.0911574363708496 -95,2.0910074710845947 -96,2.0908539295196533 -97,2.0906970500946045 -98,2.0905373096466064 -99,2.090376377105713 -100,2.0902161598205566 -101,2.090057134628296 -102,2.0899031162261963 -103,2.089750289916992 -104,2.089594841003418 -105,2.089435338973999 -106,2.089279890060425 -107,2.0891318321228027 -108,2.0889828205108643 -109,2.0888359546661377 -110,2.088688611984253 -111,2.0885424613952637 -112,2.0884029865264893 -113,2.0882680416107178 -114,2.088135004043579 -115,2.0880067348480225 -116,2.087878465652466 -117,2.087747573852539 -118,2.087615966796875 -119,2.087486505508423 -120,2.0873594284057617 -121,2.0872323513031006 -122,2.0871059894561768 
-123,2.0869808197021484 -124,2.0868542194366455 -125,2.086728572845459 -126,2.086601972579956 -127,2.086472272872925 -128,2.0863425731658936 -129,2.086212635040283 -130,2.0860824584960938 -131,2.0859532356262207 -132,2.0858235359191895 -133,2.085695743560791 -134,2.085568428039551 -135,2.085442304611206 -136,2.0853166580200195 -137,2.0851919651031494 -138,2.0850679874420166 -139,2.084948778152466 -140,2.0848307609558105 -141,2.0847153663635254 -142,2.0846011638641357 -143,2.084489107131958 -144,2.084378480911255 -145,2.0842692852020264 -146,2.084160804748535 -147,2.0840535163879395 -148,2.083946943283081 -149,2.0838403701782227 -150,2.0837340354919434 -151,2.083627700805664 -152,2.0835204124450684 -153,2.083414316177368 -154,2.0833094120025635 -155,2.083205461502075 -156,2.083101511001587 -157,2.0829970836639404 -158,2.082892894744873 -159,2.0827884674072266 -160,2.0826847553253174 -161,2.082581043243408 -162,2.0824780464172363 -163,2.0823752880096436 -164,2.0822722911834717 -165,2.0821692943573 -166,2.082066535949707 -167,2.081963539123535 -168,2.0818614959716797 -169,2.081758975982666 -170,2.081655979156494 -171,2.0815539360046387 -172,2.0814521312713623 -173,2.0813510417938232 -174,2.0812504291534424 -175,2.081151008605957 -176,2.0810511112213135 -177,2.080951690673828 -178,2.08085298538208 -179,2.0807549953460693 -180,2.0806572437286377 -181,2.080559492111206 -182,2.0804615020751953 -183,2.080364465713501 -184,2.0802671909332275 -185,2.0801708698272705 -186,2.0800750255584717 -187,2.079979419708252 -188,2.0798845291137695 -189,2.079789161682129 -190,2.0796942710876465 -191,2.0795986652374268 -192,2.0795037746429443 -193,2.07940936088562 -194,2.079315662384033 -195,2.0792229175567627 -196,2.0791304111480713 -197,2.0790393352508545 -198,2.078949451446533 -199,2.078859806060791 -200,2.0787720680236816 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.125/training_config.txt deleted file mode 100644 index 6a41d6c..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, 
-1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.125/training_log.csv deleted file mode 100644 index 50b02f3..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,29.926124572753906 -2,7.4073381423950195 -3,3.9933714866638184 -4,3.059843063354492 -5,2.667717695236206 -6,2.459554433822632 -7,2.350377082824707 -8,2.284081220626831 -9,2.2305774688720703 -10,2.1844942569732666 -11,2.150845527648926 -12,2.1372413635253906 -13,2.1430084705352783 -14,2.185434579849243 -15,2.156684398651123 -16,2.1225624084472656 -17,2.1150758266448975 -18,2.1142804622650146 -19,2.1197266578674316 -20,2.1264543533325195 -21,2.130662679672241 -22,2.130979061126709 -23,2.1274006366729736 -24,2.121098518371582 -25,2.114252805709839 -26,2.1093270778656006 -27,2.1068501472473145 -28,2.1062285900115967 -29,2.108032703399658 -30,2.1108367443084717 -31,2.1103506088256836 -32,2.107194185256958 -33,2.1050941944122314 -34,2.1039958000183105 -35,2.1033775806427 -36,2.103125810623169 -37,2.103111505508423 -38,2.103180408477783 -39,2.1030733585357666 -40,2.102727174758911 -41,2.1021511554718018 -42,2.1014480590820312 -43,2.1008174419403076 -44,2.1003479957580566 -45,2.0999674797058105 -46,2.099597692489624 -47,2.0992870330810547 -48,2.0990726947784424 -49,2.0989325046539307 -50,2.098794460296631 -51,2.0986037254333496 -52,2.098283290863037 -53,2.097872734069824 -54,2.097447156906128 -55,2.0970518589019775 -56,2.0967204570770264 -57,2.0964510440826416 -58,2.0962653160095215 -59,2.096137762069702 -60,2.0959925651550293 -61,2.0958518981933594 -62,2.09567928314209 -63,2.095460891723633 -64,2.095215082168579 -65,2.094996690750122 -66,2.0947868824005127 -67,2.0946171283721924 -68,2.094468116760254 -69,2.094308853149414 -70,2.094144105911255 -71,2.093975067138672 -72,2.0937998294830322 -73,2.093602180480957 -74,2.093395471572876 -75,2.0932130813598633 -76,2.0930333137512207 -77,2.092834711074829 -78,2.0926711559295654 -79,2.0925142765045166 -80,2.0923471450805664 -81,2.092191219329834 -82,2.0920333862304688 -83,2.0918703079223633 -84,2.091704845428467 -85,2.091548442840576 -86,2.0913805961608887 -87,2.091214418411255 -88,2.0910656452178955 -89,2.0909125804901123 -90,2.0907630920410156 -91,2.0906128883361816 -92,2.090458869934082 -93,2.0903050899505615 -94,2.0901522636413574 -95,2.0900003910064697 -96,2.0898494720458984 -97,2.0896987915039062 -98,2.0895485877990723 -99,2.0893969535827637 -100,2.0892443656921387 -101,2.0890913009643555 -102,2.088935613632202 -103,2.088782787322998 
-104,2.088634967803955 -105,2.0884954929351807 -106,2.088359832763672 -107,2.088223457336426 -108,2.0880849361419678 -109,2.087944507598877 -110,2.0878043174743652 -111,2.0876662731170654 -112,2.087528705596924 -113,2.0873897075653076 -114,2.0872509479522705 -115,2.087113618850708 -116,2.086977958679199 -117,2.0868465900421143 -118,2.0867154598236084 -119,2.086583137512207 -120,2.086449146270752 -121,2.086313486099243 -122,2.0861799716949463 -123,2.0860464572906494 -124,2.0859134197235107 -125,2.085784435272217 -126,2.085662364959717 -127,2.0855464935302734 -128,2.0854341983795166 -129,2.0853235721588135 -130,2.0852131843566895 -131,2.0850989818573 -132,2.0849809646606445 -133,2.0848548412323 -134,2.0847301483154297 -135,2.0846102237701416 -136,2.0844931602478027 -137,2.084376811981201 -138,2.084259510040283 -139,2.0841457843780518 -140,2.084030866622925 -141,2.0839176177978516 -142,2.0838029384613037 -143,2.083688735961914 -144,2.083573579788208 -145,2.083458662033081 -146,2.083343744277954 -147,2.0832295417785645 -148,2.083116054534912 -149,2.083003044128418 -150,2.0828893184661865 -151,2.0827760696411133 -152,2.082664966583252 -153,2.082551956176758 -154,2.08243989944458 -155,2.082329034805298 -156,2.0822155475616455 -157,2.082099676132202 -158,2.0819854736328125 -159,2.0818724632263184 -160,2.0817596912384033 -161,2.081648826599121 -162,2.0815348625183105 -163,2.0814223289489746 -164,2.0813088417053223 -165,2.0811941623687744 -166,2.0810813903808594 -167,2.0809667110443115 -168,2.0808537006378174 -169,2.0807411670684814 -170,2.0806262493133545 -171,2.0805106163024902 -172,2.080396890640259 -173,2.08028507232666 -174,2.0801682472229004 -175,2.0800540447235107 -176,2.0799407958984375 -177,2.0798306465148926 -178,2.079723834991455 -179,2.0796194076538086 -180,2.0795178413391113 -181,2.079420566558838 -182,2.079324245452881 -183,2.079225778579712 -184,2.0791242122650146 -185,2.0790183544158936 -186,2.0789122581481934 -187,2.078808307647705 -188,2.0787062644958496 -189,2.078603982925415 -190,2.0785012245178223 -191,2.078399419784546 -192,2.0782999992370605 -193,2.0782017707824707 -194,2.0781025886535645 -195,2.078002452850342 -196,2.07790207862854 -197,2.0778021812438965 -198,2.0777018070220947 -199,2.077601432800293 -200,2.077500343322754 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.160/training_config.txt deleted file mode 100644 index 67a986f..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if 
isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.160/training_log.csv deleted file mode 100644 index 73eaa92..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,27.467309951782227 -2,8.705410957336426 -3,5.094364643096924 -4,3.6148264408111572 -5,3.155255079269409 -6,2.8792176246643066 -7,2.6795547008514404 -8,2.5420968532562256 -9,2.4413821697235107 -10,2.364757776260376 -11,2.3080389499664307 -12,2.2694857120513916 -13,2.246736764907837 -14,2.2303638458251953 -15,2.2131423950195312 -16,2.191362142562866 -17,2.1682989597320557 -18,2.1505286693573 -19,2.1386871337890625 -20,2.1325788497924805 -21,2.130237340927124 -22,2.1306469440460205 -23,2.132218360900879 -24,2.133174419403076 -25,2.132436752319336 -26,2.130251407623291 -27,2.1283960342407227 -28,2.126072883605957 -29,2.126291513442993 -30,2.125915050506592 -31,2.1218998432159424 -32,2.118490695953369 -33,2.1173248291015625 -34,2.116410732269287 -35,2.115522623062134 -36,2.1146011352539062 -37,2.1136856079101562 -38,2.1129088401794434 -39,2.1122500896453857 -40,2.111855983734131 -41,2.111508369445801 -42,2.1111204624176025 -43,2.1105849742889404 -44,2.109767198562622 -45,2.1088240146636963 -46,2.1077845096588135 -47,2.1067678928375244 -48,2.10589337348938 -49,2.105175256729126 -50,2.1046149730682373 -51,2.104149580001831 -52,2.1037373542785645 -53,2.103344202041626 -54,2.1029531955718994 -55,2.1025550365448 -56,2.1021101474761963 -57,2.101668357849121 -58,2.101247787475586 -59,2.10086727142334 -60,2.10050630569458 -61,2.100194215774536 -62,2.0999579429626465 -63,2.0997557640075684 -64,2.0995848178863525 -65,2.0994131565093994 -66,2.0992202758789062 -67,2.0990121364593506 -68,2.098797559738159 -69,2.0985844135284424 -70,2.0983760356903076 -71,2.098160982131958 -72,2.097975730895996 -73,2.0978028774261475 -74,2.097646951675415 -75,2.0974886417388916 -76,2.0973398685455322 -77,2.0971953868865967 -78,2.0970306396484375 -79,2.096846342086792 -80,2.0966806411743164 -81,2.0964763164520264 -82,2.096290349960327 -83,2.0961437225341797 -84,2.0960254669189453 
-85,2.095895528793335 -86,2.095759630203247 -87,2.095623731613159 -88,2.0954792499542236 -89,2.095324993133545 -90,2.0951669216156006 -91,2.09500789642334 -92,2.094858169555664 -93,2.094715118408203 -94,2.0945794582366943 -95,2.094439744949341 -96,2.0942909717559814 -97,2.094130039215088 -98,2.093958854675293 -99,2.0937774181365967 -100,2.093597173690796 -101,2.0934407711029053 -102,2.0933518409729004 -103,2.0932974815368652 -104,2.0932393074035645 -105,2.093165397644043 -106,2.093101978302002 -107,2.0930428504943848 -108,2.0929861068725586 -109,2.092905282974243 -110,2.0928077697753906 -111,2.0927042961120605 -112,2.092602252960205 -113,2.092524528503418 -114,2.092439651489258 -115,2.0923445224761963 -116,2.0922465324401855 -117,2.092155694961548 -118,2.0920848846435547 -119,2.0920310020446777 -120,2.091980218887329 -121,2.091916084289551 -122,2.091843843460083 -123,2.0917677879333496 -124,2.0916950702667236 -125,2.091623544692993 -126,2.0915536880493164 -127,2.091482162475586 -128,2.0914084911346436 -129,2.0913374423980713 -130,2.0912630558013916 -131,2.091193437576294 -132,2.091127395629883 -133,2.091059923171997 -134,2.090991258621216 -135,2.0909221172332764 -136,2.0908474922180176 -137,2.0907537937164307 -138,2.0906918048858643 -139,2.0906264781951904 -140,2.090552568435669 -141,2.0904693603515625 -142,2.0904078483581543 -143,2.09035062789917 -144,2.090273141860962 -145,2.0901832580566406 -146,2.090121030807495 -147,2.090059995651245 -148,2.0899858474731445 -149,2.0898964405059814 -150,2.089820623397827 -151,2.089756727218628 -152,2.0896763801574707 -153,2.089585542678833 -154,2.089529275894165 -155,2.0894620418548584 -156,2.0893778800964355 -157,2.089284896850586 -158,2.0892224311828613 -159,2.089153289794922 -160,2.0890605449676514 -161,2.0889732837677 -162,2.088902235031128 -163,2.0888209342956543 -164,2.0887320041656494 -165,2.088663101196289 -166,2.0885863304138184 -167,2.088489294052124 -168,2.088418483734131 -169,2.0883476734161377 -170,2.088265895843506 -171,2.0881779193878174 -172,2.088085651397705 -173,2.088021755218506 -174,2.0879456996917725 -175,2.087844133377075 -176,2.0877695083618164 -177,2.0876963138580322 -178,2.0876102447509766 -179,2.0875179767608643 -180,2.087425708770752 -181,2.0873537063598633 -182,2.087275266647339 -183,2.087175130844116 -184,2.0870978832244873 -185,2.087026357650757 -186,2.0869624614715576 -187,2.086900472640991 -188,2.0868353843688965 -189,2.0867745876312256 -190,2.0867021083831787 -191,2.0866410732269287 -192,2.086573600769043 -193,2.086494207382202 -194,2.0864081382751465 -195,2.086345911026001 -196,2.0862550735473633 -197,2.086169958114624 -198,2.086099863052368 -199,2.0860140323638916 -200,2.0859429836273193 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.200/training_config.txt deleted file mode 100644 index cc1c314..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # 
Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.200/training_log.csv deleted file mode 100644 index 2d6f637..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,26.188383102416992 -2,13.445636749267578 -3,5.852071762084961 -4,4.525437831878662 -5,3.8560497760772705 -6,3.4507009983062744 -7,3.127166986465454 -8,2.895494222640991 -9,2.729743719100952 -10,2.614635944366455 -11,2.5410938262939453 -12,2.497036933898926 -13,2.4706060886383057 -14,2.4539310932159424 -15,2.4420151710510254 -16,2.4318246841430664 -17,2.4213335514068604 -18,2.408951997756958 -19,2.3940439224243164 -20,2.376523971557617 -21,2.3568646907806396 -22,2.3354506492614746 -23,2.3130953311920166 -24,2.290710687637329 -25,2.2689778804779053 -26,2.248371124267578 -27,2.2290754318237305 -28,2.211076259613037 -29,2.194638967514038 -30,2.1799256801605225 -31,2.1666946411132812 -32,2.1546151638031006 -33,2.143644332885742 -34,2.1341054439544678 -35,2.126232624053955 -36,2.1205575466156006 -37,2.116999864578247 -38,2.1147353649139404 -39,2.1132993698120117 -40,2.1125919818878174 -41,2.1122963428497314 -42,2.112072467803955 -43,2.111196756362915 -44,2.1095869541168213 -45,2.107569456100464 -46,2.1054768562316895 -47,2.1034486293792725 -48,2.1022443771362305 -49,2.101088523864746 -50,2.1004912853240967 -51,2.099961042404175 -52,2.0994882583618164 -53,2.0991783142089844 -54,2.098864793777466 -55,2.0984809398651123 -56,2.0980923175811768 -57,2.097686290740967 -58,2.097245454788208 -59,2.0967965126037598 -60,2.0963547229766846 -61,2.0959503650665283 -62,2.095599889755249 -63,2.095301866531372 -64,2.095099687576294 
-65,2.0949935913085938 -66,2.0949277877807617 -67,2.0949079990386963 -68,2.094928026199341 -69,2.0948896408081055 -70,2.0947980880737305 -71,2.0946483612060547 -72,2.094454288482666 -73,2.0942375659942627 -74,2.094073534011841 -75,2.0939104557037354 -76,2.0937442779541016 -77,2.0935750007629395 -78,2.0934135913848877 -79,2.093251943588257 -80,2.0930895805358887 -81,2.0929276943206787 -82,2.0927724838256836 -83,2.092622756958008 -84,2.092458486557007 -85,2.0923047065734863 -86,2.0922038555145264 -87,2.0921132564544678 -88,2.092028856277466 -89,2.091937780380249 -90,2.091838836669922 -91,2.0917534828186035 -92,2.0916664600372314 -93,2.091578722000122 -94,2.09149432182312 -95,2.0914065837860107 -96,2.091311454772949 -97,2.0912208557128906 -98,2.0911457538604736 -99,2.091071128845215 -100,2.091003656387329 -101,2.0909335613250732 -102,2.090858221054077 -103,2.090773105621338 -104,2.0906925201416016 -105,2.0906147956848145 -106,2.0905399322509766 -107,2.090468406677246 -108,2.090407371520996 -109,2.0903470516204834 -110,2.090273141860962 -111,2.0902082920074463 -112,2.090143918991089 -113,2.0900802612304688 -114,2.0900158882141113 -115,2.089948892593384 -116,2.0898818969726562 -117,2.0898141860961914 -118,2.089750289916992 -119,2.0896897315979004 -120,2.0896263122558594 -121,2.089564800262451 -122,2.089503288269043 -123,2.0894408226013184 -124,2.0893776416778564 -125,2.0893218517303467 -126,2.08925461769104 -127,2.0891945362091064 -128,2.089132785797119 -129,2.0890698432922363 -130,2.0890164375305176 -131,2.08894944190979 -132,2.0888895988464355 -133,2.0888285636901855 -134,2.088766574859619 -135,2.0887062549591064 -136,2.0886447429656982 -137,2.088585376739502 -138,2.0885252952575684 -139,2.0884628295898438 -140,2.0883994102478027 -141,2.0883469581604004 -142,2.088283061981201 -143,2.088221311569214 -144,2.088164806365967 -145,2.0881052017211914 -146,2.0880417823791504 -147,2.08797550201416 -148,2.0879313945770264 -149,2.0878729820251465 -150,2.087799072265625 -151,2.0877437591552734 -152,2.0876917839050293 -153,2.0876352787017822 -154,2.0875699520111084 -155,2.0874993801116943 -156,2.0874412059783936 -157,2.087388038635254 -158,2.0873196125030518 -159,2.087261915206909 -160,2.0872106552124023 -161,2.0871481895446777 -162,2.087080478668213 -163,2.0870373249053955 -164,2.0869858264923096 -165,2.086921215057373 -166,2.086848735809326 -167,2.0867981910705566 -168,2.086738348007202 -169,2.0866832733154297 -170,2.086627721786499 -171,2.086568832397461 -172,2.08651065826416 -173,2.0864574909210205 -174,2.0863962173461914 -175,2.086341381072998 -176,2.0862843990325928 -177,2.086228609085083 -178,2.0861809253692627 -179,2.086120843887329 -180,2.0860636234283447 -181,2.0860109329223633 -182,2.0859522819519043 -183,2.0858943462371826 -184,2.085836410522461 -185,2.0857856273651123 -186,2.0857295989990234 -187,2.0856680870056152 -188,2.085604667663574 -189,2.0855557918548584 -190,2.0854952335357666 -191,2.0854225158691406 -192,2.08536434173584 -193,2.085303544998169 -194,2.085237741470337 -195,2.0851776599884033 -196,2.0851094722747803 -197,2.0850508213043213 -198,2.084984540939331 -199,2.0849087238311768 -200,2.084841251373291 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.250/training_config.txt deleted file mode 100644 index 4388fc6..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller 
path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.250/training_log.csv deleted file mode 100644 index 07b392c..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,27.98554229736328 -2,18.967601776123047 -3,6.282639026641846 -4,5.182474613189697 -5,4.375563144683838 -6,3.8707172870635986 -7,3.5095088481903076 -8,3.2252230644226074 -9,3.009218454360962 -10,2.849764585494995 -11,2.7307088375091553 -12,2.6424596309661865 -13,2.578634023666382 -14,2.5328257083892822 -15,2.499756336212158 -16,2.475395679473877 -17,2.4568183422088623 -18,2.4416866302490234 -19,2.428255558013916 -20,2.4153802394866943 -21,2.4023611545562744 -22,2.388843297958374 -23,2.374521493911743 -24,2.3595070838928223 -25,2.344026565551758 -26,2.328549861907959 -27,2.31339955329895 -28,2.2987992763519287 -29,2.28485369682312 -30,2.2718918323516846 -31,2.260114908218384 -32,2.249366521835327 -33,2.2395942211151123 -34,2.2306480407714844 -35,2.22249698638916 -36,2.215219736099243 -37,2.2089929580688477 -38,2.2040348052978516 -39,2.199904203414917 -40,2.1963419914245605 -41,2.1926934719085693 -42,2.188960313796997 -43,2.1851398944854736 -44,2.1812286376953125 
-45,2.1774678230285645 -46,2.1742055416107178 -47,2.171121597290039 -48,2.1684815883636475 -49,2.165987253189087 -50,2.163562059402466 -51,2.160961866378784 -52,2.158137559890747 -53,2.155097246170044 -54,2.1518630981445312 -55,2.1485698223114014 -56,2.145354986190796 -57,2.1421849727630615 -58,2.1387696266174316 -59,2.1351749897003174 -60,2.131690502166748 -61,2.1283161640167236 -62,2.1249940395355225 -63,2.121857166290283 -64,2.118994951248169 -65,2.1163222789764404 -66,2.1137731075286865 -67,2.1116039752960205 -68,2.1097400188446045 -69,2.1082353591918945 -70,2.1073341369628906 -71,2.1068077087402344 -72,2.106396436691284 -73,2.1061463356018066 -74,2.1059937477111816 -75,2.105677604675293 -76,2.1053764820098877 -77,2.105070114135742 -78,2.104696035385132 -79,2.1042656898498535 -80,2.1037964820861816 -81,2.103311777114868 -82,2.1028425693511963 -83,2.102356195449829 -84,2.1018571853637695 -85,2.101367950439453 -86,2.1008946895599365 -87,2.100440263748169 -88,2.1001083850860596 -89,2.099796772003174 -90,2.099607229232788 -91,2.0994057655334473 -92,2.0991828441619873 -93,2.099039316177368 -94,2.098910331726074 -95,2.098762273788452 -96,2.098609685897827 -97,2.0985255241394043 -98,2.0984301567077637 -99,2.0982983112335205 -100,2.098160743713379 -101,2.0980238914489746 -102,2.0978612899780273 -103,2.097707748413086 -104,2.097553014755249 -105,2.097444534301758 -106,2.097357988357544 -107,2.097264528274536 -108,2.097177267074585 -109,2.0970797538757324 -110,2.09696888923645 -111,2.0968523025512695 -112,2.0967142581939697 -113,2.0965919494628906 -114,2.0965116024017334 -115,2.0964348316192627 -116,2.0963542461395264 -117,2.096280574798584 -118,2.0961875915527344 -119,2.096113920211792 -120,2.0960371494293213 -121,2.095935821533203 -122,2.095842123031616 -123,2.095752716064453 -124,2.0956597328186035 -125,2.095571279525757 -126,2.0954837799072266 -127,2.0954041481018066 -128,2.095350742340088 -129,2.0952742099761963 -130,2.095172643661499 -131,2.095097303390503 -132,2.0950331687927246 -133,2.0949597358703613 -134,2.0948801040649414 -135,2.0947952270507812 -136,2.0947015285491943 -137,2.0946364402770996 -138,2.0945801734924316 -139,2.0945045948028564 -140,2.094420909881592 -141,2.0943617820739746 -142,2.0942881107330322 -143,2.0942037105560303 -144,2.094130039215088 -145,2.094066858291626 -146,2.093991756439209 -147,2.0939292907714844 -148,2.093863010406494 -149,2.093780994415283 -150,2.093712329864502 -151,2.0936529636383057 -152,2.0935795307159424 -153,2.0935163497924805 -154,2.0934598445892334 -155,2.0933876037597656 -156,2.093341588973999 -157,2.093278169631958 -158,2.0931968688964844 -159,2.093120813369751 -160,2.0930421352386475 -161,2.0929460525512695 -162,2.0928289890289307 -163,2.0927045345306396 -164,2.092576742172241 -165,2.092456102371216 -166,2.092392921447754 -167,2.0923383235931396 -168,2.0922632217407227 -169,2.0922014713287354 -170,2.0921380519866943 -171,2.0920472145080566 -172,2.0919809341430664 -173,2.0919127464294434 -174,2.0918428897857666 -175,2.0917701721191406 -176,2.0917208194732666 -177,2.09165620803833 -178,2.0915839672088623 -179,2.091536283493042 -180,2.0914762020111084 -181,2.0914063453674316 -182,2.0913472175598145 -183,2.0912888050079346 -184,2.0912129878997803 -185,2.0911641120910645 -186,2.0911037921905518 -187,2.091041088104248 -188,2.090972423553467 -189,2.090904951095581 -190,2.090841770172119 -191,2.090782642364502 -192,2.0907201766967773 -193,2.0906593799591064 -194,2.090594530105591 -195,2.0905344486236572 -196,2.0904674530029297 -197,2.0904040336608887 
-198,2.090343952178955 -199,2.090282917022705 -200,2.0902209281921387 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.300/training_config.txt deleted file mode 100644 index 7ff5973..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.300/training_log.csv deleted file mode 100644 index 3a340f0..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.300/training_log.csv +++ /dev/null @@ -1,31 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,30.139062881469727 -2,8.120911598205566 -3,6.760048866271973 -4,5.555811882019043 -5,4.8922600746154785 -6,4.511008262634277 -7,4.26880407333374 -8,4.072194576263428 -9,3.905367851257324 -10,3.75980544090271 -11,3.6289446353912354 -12,3.5134615898132324 -13,3.410323143005371 -14,3.316193103790283 -15,3.2288949489593506 -16,3.1484992504119873 -17,3.0738227367401123 -18,3.004420518875122 -19,2.940128803253174 -20,2.8799304962158203 -21,2.823472261428833 -22,2.7705068588256836 -23,2.7210042476654053 -24,2.674558162689209 
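The cubic_mirrored weighting shared by all of these runs is front-loaded: it equals 1 at t = 0 and decays cubically to min_val at t = t_max, so early tracking error dominates the weighted loss. A standalone sketch of the deleted function, evaluated over the configured 0 to 10 s, 1000-point span:

    from typing import List, Union

    import torch

    def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
        t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
        t_max = t_max if t_max is not None else t_span[-1]
        return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3

    t = torch.linspace(0, 10, 1000)   # matches "Time Span: 0 to 10, Points: 1000"
    w = cubic_mirrored(t)
    print(w[0].item(), w[-1].item())  # 1.0 at t = 0, min_val = 0.01 at t = t_max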
-25,2.520832061767578 -26,nan -27,nan -28,nan -29,nan diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.400/training_config.txt deleted file mode 100644 index a92edea..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.400/training_log.csv deleted file mode 100644 index a1ba538..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.400/training_log.csv +++ /dev/null @@ -1,45 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,50.648563385009766 -2,8.55200481414795 -3,6.872082233428955 -4,5.9745049476623535 -5,5.3439764976501465 -6,5.111774921417236 -7,4.931632995605469 -8,4.773301124572754 -9,4.634979248046875 -10,4.5111083984375 -11,4.389819622039795 -12,4.1133856773376465 -13,3.946038007736206 -14,3.9413301944732666 -15,3.9684090614318848 -16,3.97147798538208 -17,3.827153444290161 -18,3.966064691543579 -19,3.928583860397339 -20,4.086202621459961 -21,4.303488731384277 -22,4.490424633026123 -23,4.50736141204834 -24,4.263305187225342 -25,4.020894527435303 
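At lr 0.3 the cubic_mirrored runs begin producing nan losses partway through training, and the hunks below show the same failure in some form at every higher rate: nan after a partial descent, saturation at 4710.92, or an immediate freeze at 4744.68. A small helper for locating the first divergent epoch in logs with this Epoch,Loss layout (a sketch; the glob pattern and working directory are assumptions):

    import csv
    import glob
    import math

    # Scan each training_log.csv for the first epoch whose loss is nan,
    # the divergence signature visible in the higher-learning-rate hunks.
    for path in sorted(glob.glob("training/time_weighting_learning_rate_sweep/*/lr_*/training_log.csv")):
        with open(path) as f:
            rows = csv.DictReader(f)  # header: Epoch,Loss
            first_nan = next((r["Epoch"] for r in rows if math.isnan(float(r["Loss"]))), None)
        if first_nan is None:
            print(f"{path}: no nan (converged or saturated)")
        else:
            print(f"{path}: first nan at epoch {first_nan}")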
-26,3.8511240482330322 -27,3.6205127239227295 -28,3.5539779663085938 -29,3.5821735858917236 -30,3.1319730281829834 -31,3.0026583671569824 -32,2.6853814125061035 -33,2.535036325454712 -34,2.5979716777801514 -35,2.5717380046844482 -36,2.619135618209839 -37,2.6504404544830322 -38,2.6620218753814697 -39,2.660367250442505 -40,nan -41,nan -42,nan -43,nan diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.500/training_config.txt deleted file mode 100644 index 6fc6d83..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.500/training_log.csv deleted file mode 100644 index 1254829..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.500/training_log.csv +++ /dev/null @@ -1,15 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,314.34442138671875 -2,4145.65869140625 -3,8.102828025817871 -4,7.494869232177734 -5,8.68233871459961 -6,9.140067100524902 -7,9.622515678405762 -8,10.304070472717285 -9,9.292510986328125 -10,nan -11,nan -12,nan -13,nan diff --git 
a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.600/training_config.txt deleted file mode 100644 index ea4c997..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.600/training_log.csv deleted file mode 100644 index 31d9945..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.600/training_log.csv +++ /dev/null @@ -1,16 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,1242.067138671875 -2,4695.6806640625 -3,4699.46923828125 -4,4691.0185546875 -5,4677.1220703125 -6,4644.107421875 -7,4316.83544921875 -8,3478.238525390625 -9,2243.554931640625 -10,847.5971069335938 -11,nan -12,nan -13,nan -14,nan diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.700/training_config.txt deleted file mode 100644 index 89212f7..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.700/training_log.csv deleted file mode 100644 index a5e1950..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,2172.13330078125 -2,4710.068359375 -3,4710.923828125 -4,4710.923828125 -5,4710.923828125 -6,4710.923828125 -7,4710.923828125 -8,4710.923828125 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.800/training_config.txt deleted file mode 100644 index f8b92d5..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: 
[t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.800/training_log.csv deleted file mode 100644 index 277151e..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,2888.184326171875 -2,4710.923828125 -3,4710.923828125 -4,4710.923828125 -5,4710.923828125 -6,4710.923828125 -7,4710.923828125 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.900/training_config.txt deleted file mode 100644 index 5ef36b2..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if 
t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.900/training_log.csv deleted file mode 100644 index d8c3847..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,3639.0068359375 -2,4710.923828125 -3,4710.923828125 -4,4710.923828125 -5,4710.923828125 -6,4710.923828125 -7,4710.923828125 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_1.000/training_config.txt deleted file mode 100644 index 39b3a63..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 
6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_1.000/training_log.csv deleted file mode 100644 index a9fa338..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,4119.43798828125 -2,4710.923828125 -3,4710.923828125 -4,4710.923828125 -5,4710.923828125 -6,4710.923828125 -7,4710.923828125 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_16.000/training_config.txt deleted file mode 100644 index 20141ae..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 
3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_16.000/training_log.csv deleted file mode 100644 index 54942f2..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,4744.6787109375 -2,4744.6787109375 -3,4744.6787109375 -4,4744.6787109375 -5,4744.6787109375 -6,4744.6787109375 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_2.000/training_config.txt deleted file mode 100644 index 02798dc..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_2.000/training_log.csv deleted file mode 100644 index fd20884..0000000 --- 
a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,4730.83251953125 -2,5.124730110168457 -3,nan -4,nan -5,nan -6,nan diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_4.000/training_config.txt deleted file mode 100644 index b735cdf..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_4.000/training_log.csv deleted file mode 100644 index 54942f2..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,4744.6787109375 -2,4744.6787109375 -3,4744.6787109375 -4,4744.6787109375 -5,4744.6787109375 -6,4744.6787109375 diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_8.000/training_config.txt deleted file mode 100644 index cabb59f..0000000 --- 
a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**3) * (-t_span + t_max)**3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_8.000/training_log.csv deleted file mode 100644 index 54942f2..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_mirrored/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,53.319801330566406 -1,4744.6787109375 -2,4744.6787109375 -3,4744.6787109375 -4,4744.6787109375 -5,4744.6787109375 -6,4744.6787109375 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.010/training_config.txt deleted file mode 100644 index 79b070d..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are 
on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.010/training_log.csv deleted file mode 100644 index 0169f23..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,844.246337890625 -2,689.88427734375 -3,616.203369140625 -4,545.285400390625 -5,467.0516052246094 -6,342.44189453125 -7,277.3167419433594 -8,196.2884979248047 -9,171.53024291992188 -10,174.66787719726562 -11,142.81532287597656 -12,145.81613159179688 -13,152.0638885498047 -14,153.22366333007812 -15,144.40333557128906 -16,144.14523315429688 -17,144.87802124023438 -18,144.5191192626953 -19,143.5105743408203 -20,140.55393981933594 -21,139.20608520507812 -22,137.84413146972656 -23,141.32672119140625 -24,139.1470489501953 -25,129.20738220214844 -26,130.4302520751953 -27,135.62530517578125 -28,143.06919860839844 -29,140.81373596191406 -30,126.30345153808594 -31,119.91436004638672 -32,127.7955551147461 -33,126.18103790283203 -34,119.65446472167969 -35,120.00751495361328 -36,117.3320541381836 -37,123.51850128173828 -38,123.52086639404297 -39,119.82781982421875 -40,119.85333251953125 -41,119.72835540771484 -42,119.23233795166016 -43,118.49816131591797 -44,117.58313751220703 -45,116.43563842773438 -46,114.95794677734375 -47,112.67789459228516 -48,111.21722412109375 -49,110.43455505371094 -50,100.91736602783203 -51,104.0487289428711 -52,102.49471282958984 -53,103.21795654296875 -54,97.009521484375 -55,89.70427703857422 -56,89.86699676513672 -57,89.4579849243164 -58,88.61250305175781 -59,87.99237823486328 -60,87.2115249633789 -61,86.2200927734375 -62,85.04248809814453 
-63,83.80249786376953 -64,82.70280456542969 -65,81.67107391357422 -66,80.54993438720703 -67,79.41398620605469 -68,78.71388244628906 -69,77.77943420410156 -70,76.31857299804688 -71,71.31938934326172 -72,72.13584899902344 -73,72.94686126708984 -74,62.223018646240234 -75,59.76129150390625 -76,59.10198211669922 -77,59.56736755371094 -78,60.387123107910156 -79,60.56388473510742 -80,59.905879974365234 -81,59.0920295715332 -82,57.74520492553711 -83,56.63352966308594 -84,56.029136657714844 -85,55.75819778442383 -86,55.66976547241211 -87,55.65519332885742 -88,55.60401153564453 -89,55.43602752685547 -90,55.16923141479492 -91,54.87907791137695 -92,54.622501373291016 -93,54.42460250854492 -94,54.286170959472656 -95,54.193016052246094 -96,54.12345504760742 -97,54.05649948120117 -98,53.975318908691406 -99,53.86977767944336 -100,53.73677444458008 -101,53.579620361328125 -102,53.40647888183594 -103,53.228363037109375 -104,53.0563850402832 -105,52.898677825927734 -106,52.758426666259766 -107,52.63368225097656 -108,52.51831817626953 -109,52.40481948852539 -110,52.286869049072266 -111,52.16133499145508 -112,52.02861785888672 -113,51.891937255859375 -114,51.75523376464844 -115,51.62187576293945 -116,51.49368667602539 -117,51.370574951171875 -118,51.25094223022461 -119,51.13224411010742 -120,51.011558532714844 -121,50.88605499267578 -122,50.75311279296875 -123,50.61022186279297 -124,50.45438003540039 -125,50.28146743774414 -126,50.08494186401367 -127,49.853084564208984 -128,49.566715240478516 -129,49.19980239868164 -130,48.744171142578125 -131,48.25745391845703 -132,47.77861404418945 -133,47.26713562011719 -134,46.79916000366211 -135,46.719547271728516 -136,46.518489837646484 -137,46.286964416503906 -138,46.04633712768555 -139,45.79692459106445 -140,45.533199310302734 -141,45.24312973022461 -142,43.90646743774414 -143,43.66738510131836 -144,43.43806838989258 -145,43.10002517700195 -146,42.672916412353516 -147,42.26101303100586 -148,41.912925720214844 -149,41.617027282714844 -150,41.34645462036133 -151,41.07709884643555 -152,40.79483413696289 -153,40.54144287109375 -154,40.33445739746094 -155,40.12773132324219 -156,39.98930358886719 -157,39.81016540527344 -158,39.665245056152344 -159,39.55458068847656 -160,39.46306610107422 -161,39.29935073852539 -162,39.33951187133789 -163,39.32726287841797 -164,39.285579681396484 -165,39.25545120239258 -166,39.23554611206055 -167,39.16789245605469 -168,39.06855010986328 -169,38.97882080078125 -170,38.893558502197266 -171,38.80242156982422 -172,38.65714645385742 -173,38.814022064208984 -174,38.70610809326172 -175,38.72895431518555 -176,38.737728118896484 -177,38.604793548583984 -178,38.43113708496094 -179,38.33748245239258 -180,38.33158493041992 -181,38.302494049072266 -182,38.20853805541992 -183,38.06789016723633 -184,37.98075866699219 -185,37.97711944580078 -186,37.904361724853516 -187,37.76381301879883 -188,37.8531379699707 -189,37.683921813964844 -190,37.751976013183594 -191,37.52957534790039 -192,37.42583084106445 -193,37.45579147338867 -194,37.38665008544922 -195,37.175392150878906 -196,37.14020919799805 -197,37.148895263671875 -198,37.045936584472656 -199,36.91967010498047 -200,36.92351531982422 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.020/training_config.txt deleted file mode 100644 index d6e0001..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.020/training_log.csv deleted file mode 100644 index de316a7..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,766.4987182617188 -2,512.4585571289062 -3,427.76251220703125 -4,291.212646484375 -5,260.40130615234375 -6,231.02163696289062 -7,220.06431579589844 -8,186.619140625 -9,160.47030639648438 -10,137.65219116210938 -11,132.1453399658203 -12,107.505615234375 -13,90.97319030761719 -14,76.17688751220703 -15,66.6996078491211 -16,61.54448699951172 -17,55.51731872558594 -18,51.224552154541016 -19,43.194950103759766 -20,41.79030227661133 -21,39.19074249267578 -22,35.42741012573242 -23,33.44292068481445 -24,32.671104431152344 -25,31.42974090576172 -26,29.21426773071289 -27,25.864395141601562 -28,22.4057559967041 -29,21.223241806030273 -30,23.151193618774414 -31,23.366804122924805 -32,23.407020568847656 -33,22.12375831604004 -34,22.744388580322266 -35,22.492673873901367 -36,22.2268009185791 -37,21.986820220947266 -38,21.7813777923584 -39,21.607566833496094 -40,21.34050941467285 -41,19.671838760375977 -42,18.993080139160156 -43,18.60042953491211 -44,18.28285026550293 -45,18.018545150756836 -46,17.803897857666016 
-47,17.645963668823242 -48,17.575014114379883 -49,17.65956687927246 -50,17.850486755371094 -51,17.42022132873535 -52,17.076650619506836 -53,16.868864059448242 -54,16.513946533203125 -55,16.346065521240234 -56,16.248332977294922 -57,16.169723510742188 -58,16.093952178955078 -59,16.01508140563965 -60,15.931467056274414 -61,15.844213485717773 -62,15.756089210510254 -63,15.66973876953125 -64,15.581108093261719 -65,15.445436477661133 -66,15.28542709350586 -67,15.197897911071777 -68,15.020055770874023 -69,14.912190437316895 -70,14.870917320251465 -71,14.83729076385498 -72,14.803251266479492 -73,14.771690368652344 -74,14.742326736450195 -75,14.648201942443848 -76,14.63634204864502 -77,14.620156288146973 -78,14.602731704711914 -79,14.688214302062988 -80,14.681187629699707 -81,14.61548137664795 -82,14.51023006439209 -83,14.51628589630127 -84,14.499714851379395 -85,14.47482967376709 -86,14.445208549499512 -87,14.410751342773438 -88,14.368385314941406 -89,14.30798053741455 -90,14.217535972595215 -91,14.245731353759766 -92,14.20979118347168 -93,14.120113372802734 -94,14.180169105529785 -95,14.16828441619873 -96,14.140604972839355 -97,14.108428001403809 -98,14.072542190551758 -99,14.031304359436035 -100,13.981035232543945 -101,13.910632133483887 -102,13.766046524047852 -103,13.794388771057129 -104,13.790613174438477 -105,13.745748519897461 -106,13.68355941772461 -107,13.666831016540527 -108,13.662221908569336 -109,13.627544403076172 -110,13.568716049194336 -111,13.535385131835938 -112,13.503632545471191 -113,13.455705642700195 -114,13.510699272155762 -115,13.470000267028809 -116,13.338988304138184 -117,13.237439155578613 -118,13.286654472351074 -119,13.119019508361816 -120,13.067473411560059 -121,13.24512767791748 -122,13.255318641662598 -123,13.203353881835938 -124,13.220683097839355 -125,13.223358154296875 -126,13.194474220275879 -127,13.09576416015625 -128,13.069415092468262 -129,13.13467025756836 -130,13.174728393554688 -131,13.164997100830078 -132,13.098125457763672 -133,13.166149139404297 -134,13.156603813171387 -135,13.108048439025879 -136,12.994366645812988 -137,12.949178695678711 -138,12.821748733520508 -139,12.854886054992676 -140,12.840729713439941 -141,12.806573867797852 -142,12.741873741149902 -143,12.814371109008789 -144,12.778440475463867 -145,12.761968612670898 -146,12.69194507598877 -147,12.659276008605957 -148,12.660530090332031 -149,12.681295394897461 -150,12.663443565368652 -151,12.611316680908203 -152,12.446513175964355 -153,12.524041175842285 -154,12.398167610168457 -155,12.46153450012207 -156,12.418298721313477 -157,12.318769454956055 -158,12.385418891906738 -159,12.36744213104248 -160,12.34592342376709 -161,12.307043075561523 -162,12.20747184753418 -163,12.27938461303711 -164,12.270434379577637 -165,12.24746036529541 -166,12.211441993713379 -167,12.178204536437988 -168,12.156525611877441 -169,12.14030647277832 -170,12.129096984863281 -171,12.120763778686523 -172,12.108942985534668 -173,12.085822105407715 -174,12.054736137390137 -175,12.022656440734863 -176,11.99025821685791 -177,11.955538749694824 -178,11.917613983154297 -179,11.876781463623047 -180,11.833724021911621 -181,11.7867431640625 -182,11.715088844299316 -183,11.480684280395508 -184,10.834538459777832 -185,10.856181144714355 -186,10.943841934204102 -187,10.942207336425781 -188,10.917472839355469 -189,10.882866859436035 -190,10.834554672241211 -191,10.772253036499023 -192,10.715110778808594 -193,10.660158157348633 -194,10.598357200622559 -195,10.523133277893066 -196,10.43217658996582 -197,10.328412055969238 
-198,10.218062400817871 -199,10.100641250610352 -200,10.033790588378906 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.040/training_config.txt deleted file mode 100644 index 398fb5c..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.040/training_log.csv deleted file mode 100644 index 9da7daf..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,622.0982055664062 -2,211.7611541748047 -3,133.14845275878906 -4,119.39192962646484 -5,79.73006439208984 -6,61.542022705078125 -7,51.57270431518555 -8,41.835479736328125 -9,33.281089782714844 -10,33.81135177612305 -11,23.55055046081543 -12,18.39958953857422 -13,17.41179656982422 -14,15.52609634399414 -15,11.32191276550293 -16,10.455880165100098 -17,10.727644920349121 -18,10.57848834991455 -19,10.415877342224121 -20,10.236998558044434 -21,10.041669845581055 -22,9.807246208190918 -23,9.601117134094238 -24,9.478333473205566 -25,9.259076118469238 -26,9.021961212158203 
-27,8.906279563903809 -28,8.760568618774414 -29,8.594926834106445 -30,8.417007446289062 -31,8.211894989013672 -32,7.846872329711914 -33,6.856323719024658 -34,6.612576484680176 -35,6.420161247253418 -36,6.225329399108887 -37,6.018374919891357 -38,5.968487739562988 -39,5.864835739135742 -40,5.7379679679870605 -41,5.580863952636719 -42,5.546374320983887 -43,5.459306240081787 -44,5.418848514556885 -45,5.408276081085205 -46,5.3340864181518555 -47,5.226267337799072 -48,5.177030563354492 -49,5.124180316925049 -50,5.03151798248291 -51,5.024331092834473 -52,4.999168395996094 -53,4.933956146240234 -54,4.741913318634033 -55,4.638735294342041 -56,4.516496658325195 -57,4.497771739959717 -58,4.461549758911133 -59,4.431782245635986 -60,4.37719202041626 -61,4.2929768562316895 -62,4.190616607666016 -63,4.058609962463379 -64,4.0610575675964355 -65,4.030840873718262 -66,4.017683029174805 -67,4.00667667388916 -68,3.9765102863311768 -69,3.873121500015259 -70,3.7683231830596924 -71,3.735887050628662 -72,3.696169137954712 -73,3.6398167610168457 -74,3.5761303901672363 -75,3.531883955001831 -76,3.5016119480133057 -77,3.4545462131500244 -78,3.4726078510284424 -79,3.464862823486328 -80,3.436096668243408 -81,3.3867552280426025 -82,3.3327109813690186 -83,3.3195958137512207 -84,3.2992632389068604 -85,3.279618263244629 -86,3.2589073181152344 -87,3.234527587890625 -88,3.2059361934661865 -89,3.1732230186462402 -90,3.135713815689087 -91,3.10876202583313 -92,3.097240447998047 -93,3.0702850818634033 -94,3.036245822906494 -95,3.0109992027282715 -96,2.9797606468200684 -97,2.941847801208496 -98,2.9009196758270264 -99,2.8620786666870117 -100,2.832920551300049 -101,2.8054697513580322 -102,2.781327724456787 -103,2.755966901779175 -104,2.7316083908081055 -105,2.7108025550842285 -106,2.6877994537353516 -107,2.6586074829101562 -108,2.6237876415252686 -109,2.585482120513916 -110,2.556143045425415 -111,2.5350091457366943 -112,2.5157601833343506 -113,2.4958577156066895 -114,2.4741270542144775 -115,2.451697587966919 -116,2.4295387268066406 -117,2.4073095321655273 -118,2.385119676589966 -119,2.363604784011841 -120,2.343003988265991 -121,2.3227062225341797 -122,2.3023862838745117 -123,2.282243490219116 -124,2.2622604370117188 -125,2.242161750793457 -126,2.221951961517334 -127,2.2017369270324707 -128,2.1812946796417236 -129,2.1604528427124023 -130,2.1393914222717285 -131,2.1181936264038086 -132,2.0967183113098145 -133,2.0749847888946533 -134,2.0530874729156494 -135,2.031017303466797 -136,2.0086865425109863 -137,1.9860374927520752 -138,1.9630423784255981 -139,1.9395889043807983 -140,1.915594458580017 -141,1.891105055809021 -142,1.8661876916885376 -143,1.8409123420715332 -144,1.8154300451278687 -145,1.7899483442306519 -146,1.7648595571517944 -147,1.7403823137283325 -148,1.7166942358016968 -149,1.694209337234497 -150,1.673193335533142 -151,1.6534345149993896 -152,1.634814977645874 -153,1.6175854206085205 -154,1.6017441749572754 -155,1.5870896577835083 -156,1.5734708309173584 -157,1.560799479484558 -158,1.5489975214004517 -159,1.5379307270050049 -160,1.5274858474731445 -161,1.5175729990005493 -162,1.508124589920044 -163,1.499104380607605 -164,1.4904977083206177 -165,1.4822759628295898 -166,1.4743891954421997 -167,1.466793179512024 -168,1.4594534635543823 -169,1.452345848083496 -170,1.44545578956604 -171,1.4387776851654053 -172,1.4323012828826904 -173,1.4260125160217285 -174,1.4198881387710571 -175,1.413914442062378 -176,1.4080826044082642 -177,1.402389407157898 -178,1.3968303203582764 -179,1.391398549079895 -180,1.386086344718933 
-181,1.3808867931365967 -182,1.3757927417755127 -183,1.3708010911941528 -184,1.3659064769744873 -185,1.3611067533493042 -186,1.3563992977142334 -187,1.3517805337905884 -188,1.347243309020996 -189,1.3427844047546387 -190,1.3383996486663818 -191,1.334087610244751 -192,1.3298449516296387 -193,1.3256721496582031 -194,1.3215662240982056 -195,1.3175249099731445 -196,1.3135461807250977 -197,1.309625267982483 -198,1.3057550191879272 -199,1.3019386529922485 -200,1.2981737852096558 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.050/training_config.txt deleted file mode 100644 index dacbb8e..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.050/training_log.csv deleted file mode 100644 index 6e09e04..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,567.132080078125 -2,181.69277954101562 -3,106.49693298339844 -4,67.69905853271484 -5,36.893287658691406 -6,28.78817367553711 -7,21.41722297668457 -8,15.730152130126953 
-9,13.227381706237793 -10,10.309151649475098 -11,8.90881061553955 -12,8.518481254577637 -13,7.9125518798828125 -14,7.319889068603516 -15,6.861821174621582 -16,5.663227558135986 -17,5.4666900634765625 -18,5.294188976287842 -19,5.142066955566406 -20,5.003450870513916 -21,4.874537944793701 -22,4.751710414886475 -23,4.631827354431152 -24,4.511779308319092 -25,4.388729572296143 -26,4.261944770812988 -27,4.137828350067139 -28,4.024770736694336 -29,3.9151275157928467 -30,3.805830717086792 -31,3.6960062980651855 -32,3.5846800804138184 -33,3.4699368476867676 -34,3.345604658126831 -35,3.189636707305908 -36,2.901756763458252 -37,2.753326416015625 -38,2.735452175140381 -39,2.7058300971984863 -40,2.6440515518188477 -41,2.538806200027466 -42,2.4942336082458496 -43,2.4092965126037598 -44,2.3547770977020264 -45,2.32704496383667 -46,2.319629669189453 -47,2.31874942779541 -48,2.3055977821350098 -49,2.2752246856689453 -50,2.2340359687805176 -51,2.1928603649139404 -52,2.1592140197753906 -53,2.1317031383514404 -54,2.1035687923431396 -55,2.070169687271118 -56,2.030918598175049 -57,1.9892579317092896 -58,1.949471116065979 -59,1.9115337133407593 -60,1.8765627145767212 -61,1.8473879098892212 -62,1.8239264488220215 -63,1.80325448513031 -64,1.7832890748977661 -65,1.7662416696548462 -66,1.7470591068267822 -67,1.7191925048828125 -68,1.6852636337280273 -69,1.6539199352264404 -70,1.6271772384643555 -71,1.6069554090499878 -72,1.5912123918533325 -73,1.5775848627090454 -74,1.5648503303527832 -75,1.5523908138275146 -76,1.5396956205368042 -77,1.5269893407821655 -78,1.5144662857055664 -79,1.5022144317626953 -80,1.4903870820999146 -81,1.4791399240493774 -82,1.468691110610962 -83,1.459215760231018 -84,1.4507843255996704 -85,1.4432600736618042 -86,1.4363362789154053 -87,1.4295989274978638 -88,1.4226882457733154 -89,1.4154460430145264 -90,1.4079678058624268 -91,1.4004344940185547 -92,1.392983078956604 -93,1.3857364654541016 -94,1.378791332244873 -95,1.3721998929977417 -96,1.3659253120422363 -97,1.3598524332046509 -98,1.3538521528244019 -99,1.347845196723938 -100,1.341820240020752 -101,1.3358149528503418 -102,1.3298804759979248 -103,1.3240559101104736 -104,1.3183437585830688 -105,1.3127405643463135 -106,1.3072493076324463 -107,1.3018710613250732 -108,1.2966217994689941 -109,1.2914994955062866 -110,1.2864911556243896 -111,1.2815704345703125 -112,1.2767084836959839 -113,1.271891474723816 -114,1.267126202583313 -115,1.2624269723892212 -116,1.2578065395355225 -117,1.2532702684402466 -118,1.248816728591919 -119,1.2444381713867188 -120,1.2401388883590698 -121,1.2359187602996826 -122,1.231777310371399 -123,1.2277095317840576 -124,1.2237060070037842 -125,1.2197445631027222 -126,1.2158364057540894 -127,1.2120193243026733 -128,1.2082629203796387 -129,1.2045273780822754 -130,1.2008841037750244 -131,1.1973079442977905 -132,1.1938029527664185 -133,1.190383791923523 -134,1.187061071395874 -135,1.1838562488555908 -136,1.1808357238769531 -137,1.1780530214309692 -138,1.1753981113433838 -139,1.1728270053863525 -140,1.1703039407730103 -141,1.1678133010864258 -142,1.1653389930725098 -143,1.1628367900848389 -144,1.160256028175354 -145,1.1576486825942993 -146,1.1550344228744507 -147,1.1524138450622559 -148,1.1497881412506104 -149,1.1471630334854126 -150,1.144545316696167 -151,1.1419283151626587 -152,1.1393275260925293 -153,1.1367677450180054 -154,1.1342580318450928 -155,1.1318162679672241 -156,1.1294710636138916 -157,1.12722909450531 -158,1.1250933408737183 -159,1.123061180114746 -160,1.1211304664611816 -161,1.1192986965179443 -162,1.1175521612167358 
-163,1.1158888339996338 -164,1.1143032312393188 -165,1.1127896308898926 -166,1.1113471984863281 -167,1.1099655628204346 -168,1.1086394786834717 -169,1.1073671579360962 -170,1.1061413288116455 -171,1.1049543619155884 -172,1.1038010120391846 -173,1.102684497833252 -174,1.1015950441360474 -175,1.1005276441574097 -176,1.0994800329208374 -177,1.0984477996826172 -178,1.0974291563034058 -179,1.0964219570159912 -180,1.0954257249832153 -181,1.0944397449493408 -182,1.093462347984314 -183,1.0924922227859497 -184,1.0915300846099854 -185,1.090574026107788 -186,1.0896248817443848 -187,1.088682770729065 -188,1.0877491235733032 -189,1.0868241786956787 -190,1.0859073400497437 -191,1.0849995613098145 -192,1.0840977430343628 -193,1.0832024812698364 -194,1.0823122262954712 -195,1.081428050994873 -196,1.0805493593215942 -197,1.0796757936477661 -198,1.0788084268569946 -199,1.0779480934143066 -200,1.077093482017517 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.080/training_config.txt deleted file mode 100644 index 8c41d6a..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.080/training_log.csv 
b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.080/training_log.csv deleted file mode 100644 index 8a40d45..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,408.72760009765625 -2,163.36441040039062 -3,101.57183837890625 -4,51.35537338256836 -5,41.825469970703125 -6,47.87309265136719 -7,50.92573165893555 -8,35.68601989746094 -9,33.67103576660156 -10,36.18947219848633 -11,25.970991134643555 -12,19.707778930664062 -13,18.066774368286133 -14,16.927871704101562 -15,16.003225326538086 -16,14.570568084716797 -17,12.565103530883789 -18,9.488736152648926 -19,9.111454010009766 -20,8.789711952209473 -21,8.508281707763672 -22,8.253022193908691 -23,8.010781288146973 -24,7.774399280548096 -25,7.540799617767334 -26,7.306301116943359 -27,7.07142448425293 -28,6.836880683898926 -29,6.604305267333984 -30,6.376166820526123 -31,6.1552534103393555 -32,5.942960262298584 -33,5.739234924316406 -34,5.541881561279297 -35,5.346580505371094 -36,5.1345953941345215 -37,4.817389965057373 -38,3.689685821533203 -39,3.4971399307250977 -40,3.3445916175842285 -41,3.2152631282806396 -42,3.1006555557250977 -43,2.995535135269165 -44,2.895801544189453 -45,2.800723075866699 -46,2.7089602947235107 -47,2.620023727416992 -48,2.5330517292022705 -49,2.446760892868042 -50,2.358837604522705 -51,2.2636220455169678 -52,2.1497929096221924 -53,1.9821124076843262 -54,1.8790905475616455 -55,1.9668753147125244 -56,2.004673480987549 -57,2.038184404373169 -58,2.070826292037964 -59,2.1026670932769775 -60,2.1295506954193115 -61,2.142897367477417 -62,2.1345677375793457 -63,2.1014456748962402 -64,2.048659086227417 -65,1.985025405883789 -66,1.918718695640564 -67,1.8536382913589478 -68,1.791877269744873 -69,1.7334237098693848 -70,1.678092122077942 -71,1.62496817111969 -72,1.5726796388626099 -73,1.519728660583496 -74,1.46133553981781 -75,1.3852126598358154 -76,1.5394208431243896 -77,1.588350534439087 -78,1.6159957647323608 -79,1.6286619901657104 -80,1.6303385496139526 -81,1.623138189315796 -82,1.607497215270996 -83,1.582822561264038 -84,1.54926335811615 -85,1.5075010061264038 -86,1.3762896060943604 -87,1.3641711473464966 -88,1.4016587734222412 -89,1.4373390674591064 -90,1.4572025537490845 -91,1.4666903018951416 -92,1.4679138660430908 -93,1.4618419408798218 -94,1.4489754438400269 -95,1.4294345378875732 -96,1.402706265449524 -97,1.36654531955719 -98,1.3489664793014526 -99,1.3543713092803955 -100,1.3529837131500244 -101,1.3499640226364136 -102,1.3565996885299683 -103,1.3508963584899902 -104,1.3205993175506592 -105,1.2944608926773071 -106,1.2921525239944458 -107,1.3015978336334229 -108,1.3029894828796387 -109,1.2955127954483032 -110,1.2798526287078857 -111,1.2603557109832764 -112,1.2705219984054565 -113,1.2773391008377075 -114,1.2544444799423218 -115,1.2535399198532104 -116,1.2602784633636475 -117,1.2593291997909546 -118,1.2503753900527954 -119,1.240799903869629 -120,1.2437434196472168 -121,1.2455487251281738 -122,1.235278606414795 -123,1.2320739030838013 -124,1.233909010887146 -125,1.232154369354248 -126,1.226192831993103 -127,1.2210783958435059 -128,1.2215131521224976 -129,1.2199559211730957 -130,1.2140275239944458 -131,1.2116897106170654 -132,1.2110588550567627 -133,1.2084922790527344 -134,1.2042264938354492 -135,1.2012355327606201 -136,1.2002975940704346 -137,1.1975045204162598 -138,1.193672776222229 -139,1.1916441917419434 -140,1.1898913383483887 -141,1.186932921409607 -142,1.1835918426513672 -143,1.1813503503799438 
-144,1.1792606115341187 -145,1.175972819328308 -146,1.173060417175293 -147,1.1708005666732788 -148,1.1680690050125122 -149,1.1648287773132324 -150,1.1620088815689087 -151,1.1595165729522705 -152,1.1564432382583618 -153,1.1533832550048828 -154,1.150748610496521 -155,1.147958755493164 -156,1.1449310779571533 -157,1.1421929597854614 -158,1.1396639347076416 -159,1.1368964910507202 -160,1.1342270374298096 -161,1.1318113803863525 -162,1.1292906999588013 -163,1.1266911029815674 -164,1.1242704391479492 -165,1.1218751668930054 -166,1.1193456649780273 -167,1.1169300079345703 -168,1.1145678758621216 -169,1.1121165752410889 -170,1.1096853017807007 -171,1.1073698997497559 -172,1.105036973953247 -173,1.102725625038147 -174,1.1005394458770752 -175,1.0983916521072388 -176,1.0962952375411987 -177,1.094327688217163 -178,1.092376708984375 -179,1.0904597043991089 -180,1.088671326637268 -181,1.087056279182434 -182,1.0855354070663452 -183,1.0841264724731445 -184,1.0828330516815186 -185,1.0816107988357544 -186,1.0804835557937622 -187,1.0794432163238525 -188,1.0784417390823364 -189,1.0774909257888794 -190,1.0766019821166992 -191,1.0757476091384888 -192,1.0749306678771973 -193,1.0741732120513916 -194,1.0734496116638184 -195,1.0727574825286865 -196,1.0720974206924438 -197,1.071452021598816 -198,1.0708197355270386 -199,1.0702135562896729 -200,1.069631576538086 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.100/training_config.txt deleted file mode 100644 index 9aa0032..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] 
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.100/training_log.csv deleted file mode 100644 index ab6b46c..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,354.40814208984375 -2,282.45159912109375 -3,882.5856323242188 -4,153.9576416015625 -5,38.17165756225586 -6,20.04821014404297 -7,15.542011260986328 -8,17.13170623779297 -9,17.109827041625977 -10,17.911563873291016 -11,18.07530403137207 -12,19.22838020324707 -13,18.04988670349121 -14,16.822629928588867 -15,16.344532012939453 -16,15.957139015197754 -17,15.406258583068848 -18,14.033894538879395 -19,14.253972053527832 -20,12.787099838256836 -21,11.907875061035156 -22,11.297150611877441 -23,10.781728744506836 -24,10.257652282714844 -25,9.565768241882324 -26,8.781947135925293 -27,8.30178451538086 -28,7.914223670959473 -29,7.534208297729492 -30,7.13007116317749 -31,6.673430442810059 -32,6.125728607177734 -33,5.5253825187683105 -34,5.079728126525879 -35,4.750982761383057 -36,4.46519660949707 -37,4.199586391448975 -38,3.9444470405578613 -39,3.6881747245788574 -40,3.4191036224365234 -41,3.2293877601623535 -42,3.1635899543762207 -43,3.140289306640625 -44,3.1251754760742188 -45,3.114244222640991 -46,3.10508131980896 -47,3.0923960208892822 -48,3.06848406791687 -49,3.0288374423980713 -50,2.974882125854492 -51,2.9118518829345703 -52,2.8453750610351562 -53,2.779770851135254 -54,2.7167491912841797 -55,2.6576685905456543 -56,2.6037960052490234 -57,2.5549840927124023 -58,2.512411117553711 -59,2.4787356853485107 -60,2.454819679260254 -61,2.438415050506592 -62,2.4257760047912598 -63,2.4129374027252197 -64,2.3966662883758545 -65,2.3754305839538574 -66,2.349102258682251 -67,2.318676471710205 -68,2.2858855724334717 -69,2.252882719039917 -70,2.221736192703247 -71,2.193984031677246 -72,2.1702260971069336 -73,2.150290012359619 -74,2.133462429046631 -75,2.1186673641204834 -76,2.104719638824463 -77,2.0905520915985107 -78,2.0754032135009766 -79,2.058866024017334 -80,2.0408987998962402 -81,2.0217809677124023 -82,2.0020017623901367 -83,1.9821484088897705 -84,1.9627764225006104 -85,1.9443233013153076 -86,1.9270504713058472 -87,1.9109753370285034 -88,1.895947813987732 -89,1.8816803693771362 -90,1.8678463697433472 -91,1.8541345596313477 -92,1.8403096199035645 -93,1.8262287378311157 -94,1.8118590116500854 -95,1.7972990274429321 -96,1.7827250957489014 -97,1.7683396339416504 -98,1.7543244361877441 -99,1.7407945394515991 -100,1.7277945280075073 -101,1.7152940034866333 -102,1.703193187713623 -103,1.6913671493530273 -104,1.6796832084655762 -105,1.6680446863174438 -106,1.6564092636108398 -107,1.6447848081588745 -108,1.633225440979004 -109,1.621816635131836 -110,1.6106239557266235 -111,1.5996931791305542 -112,1.5890426635742188 -113,1.5786592960357666 -114,1.5685012340545654 -115,1.5585193634033203 -116,1.5486631393432617 -117,1.538895606994629 -118,1.5292123556137085 -119,1.519624948501587 -120,1.510161280632019 -121,1.500858187675476 -122,1.4917347431182861 -123,1.4828037023544312 -124,1.474046230316162 
-125,1.465446949005127 -126,1.4569815397262573 -127,1.4486255645751953 -128,1.4403733015060425 -129,1.4322268962860107 -130,1.4242035150527954 -131,1.4163153171539307 -132,1.408576488494873 -133,1.4009950160980225 -134,1.3935596942901611 -135,1.3862675428390503 -136,1.3791042566299438 -137,1.3720651865005493 -138,1.3651576042175293 -139,1.3583847284317017 -140,1.3517471551895142 -141,1.3452428579330444 -142,1.338879108428955 -143,1.3326480388641357 -144,1.326552391052246 -145,1.3205899000167847 -146,1.3147556781768799 -147,1.3090442419052124 -148,1.3034617900848389 -149,1.298012375831604 -150,1.2926963567733765 -151,1.2875139713287354 -152,1.2824559211730957 -153,1.277521014213562 -154,1.2727059125900269 -155,1.2680072784423828 -156,1.263425350189209 -157,1.2589460611343384 -158,1.254568338394165 -159,1.2502872943878174 -160,1.2460988759994507 -161,1.2419953346252441 -162,1.237939715385437 -163,1.2339365482330322 -164,1.2299911975860596 -165,1.226117491722107 -166,1.2223225831985474 -167,1.2186030149459839 -168,1.214949369430542 -169,1.211358904838562 -170,1.2078338861465454 -171,1.2043733596801758 -172,1.2009797096252441 -173,1.197651743888855 -174,1.1943817138671875 -175,1.191169261932373 -176,1.1880168914794922 -177,1.1849201917648315 -178,1.1818821430206299 -179,1.1789000034332275 -180,1.1759793758392334 -181,1.173119306564331 -182,1.1703191995620728 -183,1.1675790548324585 -184,1.1649008989334106 -185,1.1622834205627441 -186,1.1597273349761963 -187,1.1572284698486328 -188,1.1547895669937134 -189,1.152408242225647 -190,1.150083303451538 -191,1.1478207111358643 -192,1.1456196308135986 -193,1.1434745788574219 -194,1.1413817405700684 -195,1.1393401622772217 -196,1.1373517513275146 -197,1.1354137659072876 -198,1.1335281133651733 -199,1.1316888332366943 -200,1.1298893690109253 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.125/training_config.txt deleted file mode 100644 index 1e24fad..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 
0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.125/training_log.csv deleted file mode 100644 index fd0b75b..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,335.4588928222656 -2,77.29049682617188 -3,104.57303619384766 -4,3229.751708984375 -5,88.52523803710938 -6,86.800537109375 -7,258.7391357421875 -8,75.96363067626953 -9,59.280967712402344 -10,48.0887565612793 -11,38.10236358642578 -12,35.53825378417969 -13,33.30115509033203 -14,28.630786895751953 -15,26.191835403442383 -16,25.023029327392578 -17,24.087730407714844 -18,23.228904724121094 -19,22.499561309814453 -20,21.79509925842285 -21,21.093990325927734 -22,20.36196517944336 -23,19.503490447998047 -24,18.20163917541504 -25,16.820650100708008 -26,14.937601089477539 -27,12.636967658996582 -28,11.025629043579102 -29,10.065333366394043 -30,9.383214950561523 -31,8.80827522277832 -32,8.222687721252441 -33,7.815186023712158 -34,7.461164951324463 -35,7.114322662353516 -36,6.676290988922119 -37,6.385209560394287 -38,6.195677757263184 -39,6.0027995109558105 -40,5.5740227699279785 -41,4.739547252655029 -42,4.544857025146484 -43,4.411434173583984 -44,4.305829048156738 -45,4.2147908210754395 -46,4.1320343017578125 -47,4.052953243255615 -48,3.9767088890075684 -49,3.9019787311553955 -50,3.827104330062866 -51,3.751203775405884 -52,3.6743876934051514 -53,3.598630666732788 -54,3.5264337062835693 -55,3.45565128326416 -56,3.38411283493042 -57,3.31040096282959 -58,3.2339913845062256 -59,3.1548869609832764 -60,3.0730607509613037 -61,2.987565040588379 -62,2.8978271484375 -63,2.801121234893799 -64,2.687869071960449 -65,2.5349831581115723 -66,2.4243836402893066 -67,2.166700601577759 -68,2.179901361465454 -69,2.1799604892730713 -70,2.1657190322875977 -71,2.1440682411193848 -72,2.1210227012634277 -73,2.0959227085113525 -74,2.0650906562805176 -75,2.0311036109924316 -76,2.00533390045166 -77,2.150731086730957 -78,1.975610613822937 -79,1.9610458612442017 -80,1.9457006454467773 -81,1.9265248775482178 -82,1.903960108757019 -83,1.8797584772109985 -84,1.8565810918807983 -85,1.8367780447006226 -86,1.8211548328399658 -87,1.8094900846481323 -88,1.801039218902588 -89,1.7946979999542236 -90,1.7892448902130127 -91,1.783428430557251 -92,1.7763774394989014 -93,1.7677631378173828 -94,1.757766842842102 -95,1.747009038925171 -96,1.7361137866973877 -97,1.7254585027694702 -98,1.7152801752090454 -99,1.705670952796936 -100,1.6966078281402588 -101,1.6880266666412354 -102,1.6798045635223389 -103,1.6718169450759888 -104,1.6640113592147827 -105,1.6563242673873901 -106,1.648648738861084 
-107,1.6409039497375488 -108,1.6330262422561646 -109,1.6250083446502686 -110,1.6168650388717651 -111,1.6086088418960571 -112,1.6002651453018188 -113,1.5918805599212646 -114,1.5835206508636475 -115,1.5752485990524292 -116,1.5670769214630127 -117,1.5590204000473022 -118,1.5510536432266235 -119,1.5431534051895142 -120,1.5352849960327148 -121,1.5274115800857544 -122,1.5194971561431885 -123,1.5114654302597046 -124,1.5032655000686646 -125,1.4949694871902466 -126,1.4865814447402954 -127,1.4781622886657715 -128,1.4698796272277832 -129,1.4618377685546875 -130,1.4540506601333618 -131,1.4464948177337646 -132,1.4391263723373413 -133,1.43189537525177 -134,1.4247581958770752 -135,1.4176896810531616 -136,1.4107177257537842 -137,1.4038323163986206 -138,1.3970333337783813 -139,1.3903111219406128 -140,1.383657455444336 -141,1.3770740032196045 -142,1.3705614805221558 -143,1.3641141653060913 -144,1.3577368259429932 -145,1.35142982006073 -146,1.3451956510543823 -147,1.3390405178070068 -148,1.3329607248306274 -149,1.3269582986831665 -150,1.3210264444351196 -151,1.3151735067367554 -152,1.309390664100647 -153,1.3036772012710571 -154,1.2980397939682007 -155,1.2924813032150269 -156,1.2870008945465088 -157,1.2815994024276733 -158,1.2762818336486816 -159,1.2710566520690918 -160,1.2659181356430054 -161,1.2608654499053955 -162,1.255893588066101 -163,1.2509968280792236 -164,1.246175765991211 -165,1.2414252758026123 -166,1.2367552518844604 -167,1.2321901321411133 -168,1.2277330160140991 -169,1.2233688831329346 -170,1.2190990447998047 -171,1.2149405479431152 -172,1.2108904123306274 -173,1.206917643547058 -174,1.203033685684204 -175,1.1992275714874268 -176,1.195501446723938 -177,1.1918599605560303 -178,1.1882927417755127 -179,1.1848045587539673 -180,1.1813966035842896 -181,1.178070068359375 -182,1.1748204231262207 -183,1.1716516017913818 -184,1.1685631275177002 -185,1.1655504703521729 -186,1.162613868713379 -187,1.1597585678100586 -188,1.1569819450378418 -189,1.154283881187439 -190,1.151663064956665 -191,1.1491186618804932 -192,1.146649718284607 -193,1.1442549228668213 -194,1.141931176185608 -195,1.1396763324737549 -196,1.137489914894104 -197,1.1353708505630493 -198,1.1333190202713013 -199,1.1313334703445435 -200,1.1294103860855103 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.160/training_config.txt deleted file mode 100644 index fa18a3c..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - 
t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.160/training_log.csv deleted file mode 100644 index 0d5ae8f..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,355.962646484375 -2,33.75537109375 -3,29.817005157470703 -4,33.62453842163086 -5,18.917964935302734 -6,14.690279960632324 -7,11.216883659362793 -8,8.888655662536621 -9,7.599806308746338 -10,6.7503132820129395 -11,6.099143981933594 -12,5.564679145812988 -13,5.12078332901001 -14,4.762061595916748 -15,4.483892917633057 -16,4.268274307250977 -17,4.093628406524658 -18,3.9461071491241455 -19,3.8163976669311523 -20,3.6977124214172363 -21,3.5884058475494385 -22,3.4895081520080566 -23,3.3944311141967773 -24,3.3009626865386963 -25,3.207763910293579 -26,3.1177115440368652 -27,3.0297691822052 -28,2.943061351776123 -29,2.857811689376831 -30,2.7732298374176025 -31,2.689772605895996 -32,2.6082444190979004 -33,2.5295379161834717 -34,2.4542055130004883 -35,2.382512331008911 -36,2.3144631385803223 -37,2.2503914833068848 -38,2.1902074813842773 -39,2.1335926055908203 -40,2.0799615383148193 -41,2.0294668674468994 -42,1.9819923639297485 -43,1.9373890161514282 -44,1.895520567893982 -45,1.8560935258865356 -46,1.81894052028656 -47,1.7839133739471436 -48,1.7507120370864868 -49,1.7191276550292969 -50,1.6890802383422852 -51,1.6604686975479126 -52,1.6331597566604614 -53,1.6068105697631836 -54,1.5814850330352783 -55,1.5572848320007324 -56,1.5341812372207642 -57,1.5121413469314575 -58,1.4911125898361206 -59,1.4713492393493652 -60,1.453080654144287 -61,1.4363646507263184 -62,1.4211317300796509 -63,1.4073418378829956 -64,1.394868016242981 -65,1.3836947679519653 -66,1.3738409280776978 -67,1.3653451204299927 -68,1.358068585395813 -69,1.351873755455017 -70,1.3466036319732666 -71,1.3421034812927246 -72,1.3382266759872437 -73,1.3348325490951538 -74,1.331780195236206 -75,1.3289374113082886 -76,1.3262102603912354 -77,1.3235125541687012 -78,1.3207769393920898 -79,1.317962646484375 -80,1.3150432109832764 -81,1.3120135068893433 -82,1.3088659048080444 -83,1.3056176900863647 -84,1.3022856712341309 -85,1.2988799810409546 -86,1.295415997505188 -87,1.291932463645935 
-88,1.2884529829025269 -89,1.2849953174591064 -90,1.2815760374069214 -91,1.2782057523727417 -92,1.274893879890442 -93,1.271649718284607 -94,1.2684766054153442 -95,1.2653732299804688 -96,1.2623292207717896 -97,1.259331464767456 -98,1.2563527822494507 -99,1.2533819675445557 -100,1.2504053115844727 -101,1.2474290132522583 -102,1.2444387674331665 -103,1.2414284944534302 -104,1.2383990287780762 -105,1.2353507280349731 -106,1.2322874069213867 -107,1.2292085886001587 -108,1.2261238098144531 -109,1.2230472564697266 -110,1.2199769020080566 -111,1.216919183731079 -112,1.2138845920562744 -113,1.2108726501464844 -114,1.2078838348388672 -115,1.2049195766448975 -116,1.2019774913787842 -117,1.199057936668396 -118,1.1961712837219238 -119,1.1933090686798096 -120,1.1904573440551758 -121,1.1876195669174194 -122,1.1848115921020508 -123,1.1820192337036133 -124,1.179246425628662 -125,1.1764864921569824 -126,1.1737449169158936 -127,1.1710318326950073 -128,1.1683330535888672 -129,1.165631890296936 -130,1.1629626750946045 -131,1.160323977470398 -132,1.1576895713806152 -133,1.1550668478012085 -134,1.1524896621704102 -135,1.1499357223510742 -136,1.1473925113677979 -137,1.1448832750320435 -138,1.1424133777618408 -139,1.1399710178375244 -140,1.1375702619552612 -141,1.1352486610412598 -142,1.1329907178878784 -143,1.1308114528656006 -144,1.1286944150924683 -145,1.1266340017318726 -146,1.1246387958526611 -147,1.1227061748504639 -148,1.1208446025848389 -149,1.1190521717071533 -150,1.1173285245895386 -151,1.1156723499298096 -152,1.1140837669372559 -153,1.1125606298446655 -154,1.1110928058624268 -155,1.1096816062927246 -156,1.1083184480667114 -157,1.106999158859253 -158,1.105726718902588 -159,1.1044976711273193 -160,1.1033265590667725 -161,1.1021945476531982 -162,1.1010910272598267 -163,1.1000258922576904 -164,1.0990012884140015 -165,1.098008394241333 -166,1.0970516204833984 -167,1.096117377281189 -168,1.09518301486969 -169,1.0942809581756592 -170,1.093379259109497 -171,1.0925016403198242 -172,1.0916386842727661 -173,1.0907957553863525 -174,1.089978575706482 -175,1.0891786813735962 -176,1.0883712768554688 -177,1.0875812768936157 -178,1.0868130922317505 -179,1.0860587358474731 -180,1.0853060483932495 -181,1.0845648050308228 -182,1.08385169506073 -183,1.0831525325775146 -184,1.0824644565582275 -185,1.0817842483520508 -186,1.0811043977737427 -187,1.0804295539855957 -188,1.0797686576843262 -189,1.0791139602661133 -190,1.0784580707550049 -191,1.0778017044067383 -192,1.0771405696868896 -193,1.0764782428741455 -194,1.075819730758667 -195,1.0751615762710571 -196,1.0745023488998413 -197,1.0738425254821777 -198,1.073182225227356 -199,1.0725189447402954 -200,1.071852207183838 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.200/training_config.txt deleted file mode 100644 index c4faa22..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or 
expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.200/training_log.csv deleted file mode 100644 index b78cd80..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,348.31707763671875 -2,78326.78125 -3,49759.35546875 -4,2084.00244140625 -5,316.4538269042969 -6,264.2972717285156 -7,334.8058166503906 -8,2895.165283203125 -9,172.0658416748047 -10,100.74688720703125 -11,79.45128631591797 -12,69.84681701660156 -13,64.04385375976562 -14,60.8437385559082 -15,58.78610610961914 -16,56.118080139160156 -17,53.354183197021484 -18,50.19590759277344 -19,46.91566467285156 -20,43.9587287902832 -21,41.32122802734375 -22,39.126224517822266 -23,37.34713363647461 -24,35.860984802246094 -25,34.557334899902344 -26,33.39616394042969 -27,32.385013580322266 -28,31.53998374938965 -29,30.808645248413086 -30,30.164804458618164 -31,29.589218139648438 -32,29.07107925415039 -33,28.60604476928711 -34,28.186113357543945 -35,27.799909591674805 -36,27.443885803222656 -37,27.116329193115234 -38,26.811187744140625 -39,26.523460388183594 -40,26.249958038330078 -41,25.98814582824707 -42,25.736034393310547 -43,25.492088317871094 -44,25.255361557006836 -45,25.02512550354004 -46,24.803525924682617 -47,24.590152740478516 -48,24.383264541625977 -49,24.18157386779785 -50,23.9847469329834 -51,23.792312622070312 -52,23.604326248168945 -53,23.419965744018555 -54,23.2387638092041 -55,23.06024932861328 -56,22.88467788696289 -57,22.712703704833984 -58,22.545654296875 -59,22.383981704711914 -60,22.227157592773438 -61,22.074203491210938 -62,21.923892974853516 -63,21.77629280090332 -64,21.63149070739746 -65,21.489416122436523 -66,21.349943161010742 -67,21.212892532348633 -68,21.07823371887207 
-69,20.945972442626953 -70,20.816064834594727 -71,20.688480377197266 -72,20.563119888305664 -73,20.439884185791016 -74,20.318891525268555 -75,20.20014190673828 -76,20.083498001098633 -77,19.96875 -78,19.85577392578125 -79,19.74456024169922 -80,19.635149002075195 -81,19.527509689331055 -82,19.4216365814209 -83,19.317481994628906 -84,19.215036392211914 -85,19.114179611206055 -86,19.01477813720703 -87,18.916831970214844 -88,18.820159912109375 -89,18.724761962890625 -90,18.630617141723633 -91,18.537708282470703 -92,18.44605827331543 -93,18.355712890625 -94,18.26662826538086 -95,18.17876434326172 -96,18.09196662902832 -97,18.006168365478516 -98,17.921375274658203 -99,17.837553024291992 -100,17.754613876342773 -101,17.672502517700195 -102,17.59117317199707 -103,17.510555267333984 -104,17.43060302734375 -105,17.351276397705078 -106,17.272546768188477 -107,17.194339752197266 -108,17.116605758666992 -109,17.039348602294922 -110,16.962587356567383 -111,16.886287689208984 -112,16.810422897338867 -113,16.734983444213867 -114,16.65995216369629 -115,16.585289001464844 -116,16.51096534729004 -117,16.43695831298828 -118,16.363311767578125 -119,16.28997802734375 -120,16.216856002807617 -121,16.143917083740234 -122,16.071083068847656 -123,15.998322486877441 -124,15.925542831420898 -125,15.852752685546875 -126,15.779966354370117 -127,15.707173347473145 -128,15.634342193603516 -129,15.561432838439941 -130,15.488551139831543 -131,15.415699005126953 -132,15.342733383178711 -133,15.269594192504883 -134,15.196243286132812 -135,15.122658729553223 -136,15.048833847045898 -137,14.974701881408691 -138,14.900236129760742 -139,14.825397491455078 -140,14.750188827514648 -141,14.674653053283691 -142,14.598793983459473 -143,14.522668838500977 -144,14.446170806884766 -145,14.369366645812988 -146,14.292057037353516 -147,14.214263916015625 -148,14.135869026184082 -149,14.05677318572998 -150,13.976907730102539 -151,13.896263122558594 -152,13.814813613891602 -153,13.732494354248047 -154,13.649187088012695 -155,13.564825057983398 -156,13.479334831237793 -157,13.392621994018555 -158,13.304603576660156 -159,13.215204238891602 -160,13.12435245513916 -161,13.032132148742676 -162,12.938329696655273 -163,12.842656135559082 -164,12.744946479797363 -165,12.645008087158203 -166,12.542596817016602 -167,12.437247276306152 -168,12.328529357910156 -169,12.215224266052246 -170,12.09840202331543 -171,11.977771759033203 -172,11.852816581726074 -173,11.722708702087402 -174,11.5873441696167 -175,11.443388938903809 -176,11.288337707519531 -177,11.121275901794434 -178,10.938008308410645 -179,10.742257118225098 -180,10.533748626708984 -181,10.308773040771484 -182,10.082805633544922 -183,9.863691329956055 -184,9.656180381774902 -185,9.461019515991211 -186,9.272970199584961 -187,9.084118843078613 -188,8.899006843566895 -189,8.717607498168945 -190,8.55229377746582 -191,8.403168678283691 -192,8.263175010681152 -193,8.130739212036133 -194,8.004711151123047 -195,7.883773326873779 -196,7.767160415649414 -197,7.654236316680908 -198,7.544306755065918 -199,7.436367034912109 -200,7.330254077911377 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.250/training_config.txt deleted file mode 100644 index e20f859..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 
0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.250/training_log.csv deleted file mode 100644 index f641817..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,439.2963562011719 -2,40.7820930480957 -3,40.199790954589844 -4,32.96226501464844 -5,21.523969650268555 -6,16.067054748535156 -7,13.284873962402344 -8,11.927014350891113 -9,10.876352310180664 -10,10.101446151733398 -11,9.523722648620605 -12,9.068228721618652 -13,8.68891429901123 -14,8.351362228393555 -15,8.040414810180664 -16,7.752610683441162 -17,7.485681056976318 -18,7.235045433044434 -19,6.9991350173950195 -20,6.776625156402588 -21,6.560699462890625 -22,6.350017070770264 -23,6.145439147949219 -24,5.948259353637695 -25,5.757363319396973 -26,5.570921897888184 -27,5.38864278793335 -28,5.209728240966797 -29,5.0336833000183105 -30,4.859591484069824 -31,4.6874189376831055 -32,4.517662525177002 -33,4.350035667419434 -34,4.184586524963379 -35,4.021640777587891 -36,3.861103057861328 -37,3.703458786010742 -38,3.549623966217041 -39,3.401587963104248 -40,3.2578139305114746 -41,3.11877703666687 -42,2.983827829360962 -43,2.854388475418091 -44,2.731436014175415 -45,2.613713502883911 -46,2.5017659664154053 -47,2.39652156829834 -48,2.298124313354492 -49,2.205796241760254 -50,2.1195366382598877 
-51,2.039501428604126 -52,1.9657126665115356 -53,1.8989920616149902 -54,1.8380770683288574 -55,1.7827118635177612 -56,1.7321743965148926 -57,1.68647301197052 -58,1.645578145980835 -59,1.608578085899353 -60,1.5748845338821411 -61,1.543736457824707 -62,1.5150179862976074 -63,1.4885753393173218 -64,1.4656482934951782 -65,1.445891261100769 -66,1.4290724992752075 -67,1.414405345916748 -68,1.4022496938705444 -69,1.3916306495666504 -70,1.3824290037155151 -71,1.3743127584457397 -72,1.3670833110809326 -73,1.3605527877807617 -74,1.3546327352523804 -75,1.3490891456604004 -76,1.3437283039093018 -77,1.3383277654647827 -78,1.3327175378799438 -79,1.3268284797668457 -80,1.3206342458724976 -81,1.3141626119613647 -82,1.307512640953064 -83,1.3007919788360596 -84,1.2940295934677124 -85,1.2872422933578491 -86,1.2804756164550781 -87,1.2737172842025757 -88,1.2670071125030518 -89,1.260383129119873 -90,1.2538907527923584 -91,1.247626543045044 -92,1.2415728569030762 -93,1.235821008682251 -94,1.230304479598999 -95,1.2250819206237793 -96,1.2200723886489868 -97,1.2152937650680542 -98,1.2107373476028442 -99,1.2063753604888916 -100,1.2021983861923218 -101,1.198187232017517 -102,1.1943256855010986 -103,1.1906124353408813 -104,1.1870397329330444 -105,1.1835821866989136 -106,1.1802202463150024 -107,1.1769523620605469 -108,1.1737645864486694 -109,1.1706371307373047 -110,1.167568564414978 -111,1.1645599603652954 -112,1.1616102457046509 -113,1.158717155456543 -114,1.155875563621521 -115,1.1530721187591553 -116,1.1502958536148071 -117,1.1475692987442017 -118,1.1448853015899658 -119,1.1422477960586548 -120,1.139655351638794 -121,1.137120246887207 -122,1.134635090827942 -123,1.1321860551834106 -124,1.1297856569290161 -125,1.1274127960205078 -126,1.1250638961791992 -127,1.1227381229400635 -128,1.1204371452331543 -129,1.1181552410125732 -130,1.1159164905548096 -131,1.1137778759002686 -132,1.1117253303527832 -133,1.1097772121429443 -134,1.107913851737976 -135,1.1060892343521118 -136,1.1042869091033936 -137,1.1025065183639526 -138,1.1007628440856934 -139,1.0990256071090698 -140,1.0973026752471924 -141,1.0955886840820312 -142,1.0938823223114014 -143,1.0921841859817505 -144,1.090490698814392 -145,1.0888047218322754 -146,1.0871305465698242 -147,1.0854676961898804 -148,1.0838185548782349 -149,1.0821847915649414 -150,1.0805758237838745 -151,1.078992486000061 -152,1.07743239402771 -153,1.075903058052063 -154,1.0744041204452515 -155,1.07292902469635 -156,1.0714738368988037 -157,1.0700383186340332 -158,1.0686205625534058 -159,1.0672181844711304 -160,1.06582510471344 -161,1.0644443035125732 -162,1.063076138496399 -163,1.0617228746414185 -164,1.0604034662246704 -165,1.0591343641281128 -166,1.0578886270523071 -167,1.056670069694519 -168,1.0554827451705933 -169,1.05431067943573 -170,1.0531574487686157 -171,1.0520166158676147 -172,1.0508822202682495 -173,1.0497575998306274 -174,1.048651099205017 -175,1.0475658178329468 -176,1.0464941263198853 -177,1.0454384088516235 -178,1.0444011688232422 -179,1.043371319770813 -180,1.0423414707183838 -181,1.0413111448287964 -182,1.0402836799621582 -183,1.0392587184906006 -184,1.0382349491119385 -185,1.0372090339660645 -186,1.036166787147522 -187,1.0351135730743408 -188,1.0340452194213867 -189,1.032962679862976 -190,1.0318700075149536 -191,1.030769944190979 -192,1.029659390449524 -193,1.02853524684906 -194,1.027403712272644 -195,1.0262619256973267 -196,1.0251115560531616 -197,1.0239412784576416 -198,1.0227508544921875 -199,1.0215506553649902 -200,1.0203485488891602 diff --git 
a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.300/training_config.txt deleted file mode 100644 index fc2a8ac..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.300/training_log.csv deleted file mode 100644 index 2cd9c58..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.300/training_log.csv +++ /dev/null @@ -1,24 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,575.8009643554688 -2,29.52476692199707 -3,32.54547882080078 -4,33.23890686035156 -5,32.45545196533203 -6,31.14879608154297 -7,29.671154022216797 -8,27.96967124938965 -9,26.075618743896484 -10,24.240398406982422 -11,22.628101348876953 -12,21.22007179260254 -13,19.946874618530273 -14,18.779733657836914 -15,17.70958137512207 -16,16.718036651611328 -17,15.80046558380127 -18,13.184805870056152 -19,nan -20,nan -21,nan -22,nan diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.400/training_config.txt deleted file mode 100644 index d2e16c5..0000000 
--- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.400/training_log.csv deleted file mode 100644 index e5099a0..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.400/training_log.csv +++ /dev/null @@ -1,151 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,4885.07275390625 -2,31.96577262878418 -3,38.095245361328125 -4,109.5927963256836 -5,28.153841018676758 -6,29.09343719482422 -7,30.026750564575195 -8,29.886137008666992 -9,28.98607635498047 -10,27.665761947631836 -11,21.816307067871094 -12,20.861499786376953 -13,129.41175842285156 -14,2895.73193359375 -15,662.2291870117188 -16,255.7066650390625 -17,140.0929718017578 -18,107.21492004394531 -19,86.8718032836914 -20,67.14876556396484 -21,68.0505142211914 -22,65.17466735839844 -23,60.96403503417969 -24,56.970272064208984 -25,53.35028839111328 -26,50.94762420654297 -27,319.476806640625 -28,47.74399948120117 -29,59.24235916137695 -30,47.093238830566406 -31,47.51905822753906 -32,48.14256286621094 -33,48.91954040527344 -34,49.78811264038086 -35,85.58912658691406 -36,53.98323440551758 -37,56.9304084777832 -38,59.81212615966797 -39,62.092742919921875 -40,63.902313232421875 
-41,65.22808074951172 -42,66.06009674072266 -43,66.38654327392578 -44,66.22676086425781 -45,65.6283950805664 -46,64.65744018554688 -47,63.390777587890625 -48,61.897708892822266 -49,60.2253532409668 -50,58.434295654296875 -51,56.546783447265625 -52,54.65489959716797 -53,52.86362075805664 -54,51.229408264160156 -55,49.82234191894531 -56,48.598880767822266 -57,47.50993728637695 -58,46.5412712097168 -59,63.01492691040039 -60,44.97109603881836 -61,44.301124572753906 -62,43.673160552978516 -63,43.09675979614258 -64,42.54724884033203 -65,42.01335144042969 -66,41.48794937133789 -67,40.95132827758789 -68,40.428531646728516 -69,39.8534049987793 -70,39.25021743774414 -71,38.67750549316406 -72,38.111087799072266 -73,37.537933349609375 -74,36.97267150878906 -75,36.43173599243164 -76,35.9174690246582 -77,35.42326354980469 -78,34.94718933105469 -79,34.49827575683594 -80,34.07233428955078 -81,33.6589469909668 -82,33.25757598876953 -83,32.86680221557617 -84,32.48341751098633 -85,32.10552215576172 -86,31.74177360534668 -87,31.38666534423828 -88,31.037147521972656 -89,30.684383392333984 -90,30.33445167541504 -91,29.992168426513672 -92,29.64798927307129 -93,29.303789138793945 -94,28.959869384765625 -95,28.612844467163086 -96,28.23381233215332 -97,27.829187393188477 -98,27.415925979614258 -99,26.993772506713867 -100,26.563003540039062 -101,26.114065170288086 -102,25.658897399902344 -103,25.20625877380371 -104,24.752233505249023 -105,24.27825927734375 -106,23.794254302978516 -107,23.314842224121094 -108,22.81679916381836 -109,22.317733764648438 -110,21.830581665039062 -111,21.358922958374023 -112,20.902528762817383 -113,20.464311599731445 -114,20.042251586914062 -115,19.6354923248291 -116,19.242074966430664 -117,18.856246948242188 -118,18.473154067993164 -119,18.067455291748047 -120,31.008028030395508 -121,30.735931396484375 -122,11.690407752990723 -123,11.472602844238281 -124,11.236275672912598 -125,11.05695629119873 -126,10.834949493408203 -127,10.635348320007324 -128,11.245280265808105 -129,117.14779663085938 -130,146.35467529296875 -131,165.65176391601562 -132,296.9743347167969 -133,361.50994873046875 -134,359.9589538574219 -135,375.14703369140625 -136,306.9670104980469 -137,173.6864013671875 -138,115.34821319580078 -139,96.90460205078125 -140,75.94974517822266 -141,53.37372589111328 -142,37.05149841308594 -143,30.79978370666504 -144,27.73476219177246 -145,24.76385498046875 -146,nan -147,nan -148,nan -149,nan diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.500/training_config.txt deleted file mode 100644 index bf2acac..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: 
Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.500/training_log.csv deleted file mode 100644 index 0b3c419..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.500/training_log.csv +++ /dev/null @@ -1,15 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,45421.4765625 -2,181493.6875 -3,165112.390625 -4,68631.09375 -5,538.5633544921875 -6,35.20182418823242 -7,30.77528953552246 -8,31.533828735351562 -9,31.939693450927734 -10,nan -11,nan -12,nan -13,nan diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.600/training_config.txt deleted file mode 100644 index 5d8622a..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 
0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.600/training_log.csv deleted file mode 100644 index 729423a..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.600/training_log.csv +++ /dev/null @@ -1,11 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,99731.953125 -2,184977.625 -3,185303.015625 -4,185319.609375 -5,185319.609375 -6,185319.609375 -7,185319.609375 -8,185319.609375 -9,185319.609375 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.700/training_config.txt deleted file mode 100644 index 93e9df1..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 
0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.700/training_log.csv deleted file mode 100644 index ada5fc5..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,136868.609375 -2,185308.875 -3,185319.609375 -4,185319.609375 -5,185319.609375 -6,185319.609375 -7,185319.609375 -8,185319.609375 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.800/training_config.txt deleted file mode 100644 index 3d7efac..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git 
a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.800/training_log.csv deleted file mode 100644 index 8a49d1d..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,156535.015625 -2,185319.609375 -3,185319.609375 -4,185319.609375 -5,185319.609375 -6,185319.609375 -7,185319.609375 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.900/training_config.txt deleted file mode 100644 index b250f94..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.900/training_log.csv deleted file mode 100644 index 59b7007..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,169302.75 -2,185319.609375 -3,185319.609375 -4,185319.609375 -5,185319.609375 -6,185319.609375 -7,185319.609375 diff --git 
a/training/time_weighting_learning_rate_sweep/cubic_root/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_1.000/training_config.txt deleted file mode 100644 index f6bd732..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_1.000/training_log.csv deleted file mode 100644 index c29c7db..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,176661.546875 -2,185319.609375 -3,185319.609375 -4,185319.609375 -5,185319.609375 -6,185319.609375 -7,185319.609375 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_16.000/training_config.txt deleted file mode 100644 index 3c0e27f..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss 
Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_16.000/training_log.csv deleted file mode 100644 index 1d9e0af..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,185947.046875 -2,185947.046875 -3,185947.046875 -4,185947.046875 -5,185947.046875 -6,185947.046875 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_2.000/training_config.txt deleted file mode 100644 index 6fb0927..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight 
Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_2.000/training_log.csv deleted file mode 100644 index d93d661..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,185857.34375 -2,5.012078762054443 -3,nan -4,nan -5,nan -6,nan diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_4.000/training_config.txt deleted file mode 100644 index e252144..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 
0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_4.000/training_log.csv deleted file mode 100644 index 1d9e0af..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,185947.046875 -2,185947.046875 -3,185947.046875 -4,185947.046875 -5,185947.046875 -6,185947.046875 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root/lr_8.000/training_config.txt deleted file mode 100644 index 4d5319b..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * t_span**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 
3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root/lr_8.000/training_log.csv deleted file mode 100644 index 1d9e0af..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,993.4888305664062 -1,185947.046875 -2,185947.046875 -3,185947.046875 -4,185947.046875 -5,185947.046875 -6,185947.046875 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.010/training_config.txt deleted file mode 100644 index f21c274..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.010/training_log.csv 
b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.010/training_log.csv deleted file mode 100644 index d11e690..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,531.0726318359375 -2,448.5474548339844 -3,406.7068176269531 -4,343.07464599609375 -5,277.4829406738281 -6,233.6134490966797 -7,189.57369995117188 -8,161.74795532226562 -9,126.97151184082031 -10,125.95574951171875 -11,107.56097412109375 -12,102.9612045288086 -13,97.05455780029297 -14,92.48214721679688 -15,88.82876586914062 -16,82.52992248535156 -17,81.27387237548828 -18,78.04610443115234 -19,70.4742202758789 -20,66.2538070678711 -21,63.52556228637695 -22,52.9744873046875 -23,44.73141860961914 -24,40.44987106323242 -25,37.82699966430664 -26,35.765663146972656 -27,33.808677673339844 -28,32.48520278930664 -29,31.170013427734375 -30,29.194087982177734 -31,27.701454162597656 -32,27.147851943969727 -33,26.10187530517578 -34,25.424057006835938 -35,24.87906837463379 -36,24.889137268066406 -37,24.296377182006836 -38,24.81645965576172 -39,24.306249618530273 -40,23.9532527923584 -41,24.495914459228516 -42,25.108428955078125 -43,25.294971466064453 -44,25.952266693115234 -45,26.704029083251953 -46,26.60462760925293 -47,26.962678909301758 -48,26.656829833984375 -49,26.5229434967041 -50,26.578983306884766 -51,27.29755401611328 -52,27.316707611083984 -53,27.35036849975586 -54,27.378759384155273 -55,27.297178268432617 -56,27.18161392211914 -57,27.065658569335938 -58,26.95009422302246 -59,26.836164474487305 -60,26.72576904296875 -61,26.6210880279541 -62,26.52469253540039 -63,26.439746856689453 -64,26.369522094726562 -65,26.316184997558594 -66,26.276880264282227 -67,26.238548278808594 -68,26.1873722076416 -69,26.12529182434082 -70,26.06270408630371 -71,26.003313064575195 -72,25.94232749938965 -73,25.870738983154297 -74,25.77541732788086 -75,25.65049171447754 -76,25.926193237304688 -77,25.90821647644043 -78,25.430736541748047 -79,24.142314910888672 -80,23.95331573486328 -81,23.62998390197754 -82,23.640169143676758 -83,22.749406814575195 -84,22.59686279296875 -85,22.47738265991211 -86,22.34101676940918 -87,21.94681167602539 -88,20.539085388183594 -89,19.836034774780273 -90,19.74501609802246 -91,19.812341690063477 -92,19.64289665222168 -93,19.306896209716797 -94,19.217294692993164 -95,19.14746856689453 -96,19.074037551879883 -97,18.98023223876953 -98,18.786088943481445 -99,18.95464515686035 -100,18.5150089263916 -101,18.360240936279297 -102,18.27552032470703 -103,18.21095085144043 -104,18.155622482299805 -105,18.104787826538086 -106,18.054765701293945 -107,17.99602508544922 -108,17.78567886352539 -109,17.765546798706055 -110,17.742734909057617 -111,17.713531494140625 -112,17.67836570739746 -113,17.638341903686523 -114,17.59543800354004 -115,17.553497314453125 -116,17.668594360351562 -117,17.492795944213867 -118,17.471708297729492 -119,17.44716453552246 -120,17.416749954223633 -121,17.38025665283203 -122,17.338109970092773 -123,17.2917423248291 -124,17.245582580566406 -125,17.33269500732422 -126,17.23104476928711 -127,17.260189056396484 -128,17.27799415588379 -129,17.287778854370117 -130,17.291624069213867 -131,17.29034423828125 -132,17.284442901611328 -133,17.27439308166504 -134,17.260841369628906 -135,17.244531631469727 -136,17.226272583007812 -137,17.206727981567383 -138,17.18636703491211 -139,17.165498733520508 -140,17.144258499145508 -141,17.122648239135742 -142,17.10055160522461 -143,17.077770233154297 
-144,17.053998947143555 -145,17.02887725830078 -146,17.001916885375977 -147,16.972503662109375 -148,16.939794540405273 -149,16.902698516845703 -150,16.859745025634766 -151,16.809167861938477 -152,16.7493953704834 -153,16.680227279663086 -154,16.590612411499023 -155,16.729555130004883 -156,16.647499084472656 -157,16.559452056884766 -158,16.476394653320312 -159,16.401338577270508 -160,16.333908081054688 -161,16.273056030273438 -162,16.21722984313965 -163,16.165170669555664 -164,16.11595344543457 -165,16.069021224975586 -166,16.024030685424805 -167,15.980987548828125 -168,15.939902305603027 -169,15.900973320007324 -170,15.864558219909668 -171,15.831087112426758 -172,15.8011474609375 -173,15.775873184204102 -174,15.755934715270996 -175,15.72114086151123 -176,15.694332122802734 -177,15.66650104522705 -178,15.65141773223877 -179,15.483380317687988 -180,15.581511497497559 -181,15.703150749206543 -182,15.790902137756348 -183,15.75944709777832 -184,15.64530086517334 -185,15.529763221740723 -186,15.438637733459473 -187,15.372636795043945 -188,15.32660961151123 -189,15.294731140136719 -190,15.270404815673828 -191,15.249526977539062 -192,15.23000717163086 -193,15.210783958435059 -194,15.191232681274414 -195,15.17104434967041 -196,15.150245666503906 -197,15.129416465759277 -198,15.10912799835205 -199,15.08875560760498 -200,15.067582130432129 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.020/training_config.txt deleted file mode 100644 index 9dc3cf8..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, 
-3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.020/training_log.csv deleted file mode 100644 index 8399db0..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,491.532958984375 -2,357.74468994140625 -3,236.98834228515625 -4,151.89468383789062 -5,120.88611602783203 -6,98.08069610595703 -7,87.50118255615234 -8,80.45437622070312 -9,78.29913330078125 -10,56.63778305053711 -11,44.950782775878906 -12,37.541080474853516 -13,34.69722366333008 -14,30.37797737121582 -15,27.784957885742188 -16,27.677284240722656 -17,25.51268768310547 -18,23.544824600219727 -19,22.75017738342285 -20,22.047630310058594 -21,20.45467185974121 -22,19.760826110839844 -23,19.238555908203125 -24,18.847965240478516 -25,18.556447982788086 -26,18.417823791503906 -27,18.423870086669922 -28,18.526447296142578 -29,19.319997787475586 -30,18.500944137573242 -31,18.311634063720703 -32,18.176876068115234 -33,18.091806411743164 -34,17.951086044311523 -35,17.785358428955078 -36,17.56186294555664 -37,15.435376167297363 -38,14.91695785522461 -39,14.425725936889648 -40,13.937217712402344 -41,13.389803886413574 -42,11.976431846618652 -43,11.551027297973633 -44,10.611583709716797 -45,10.430851936340332 -46,10.210704803466797 -47,9.998390197753906 -48,9.826210021972656 -49,9.684340476989746 -50,9.552083969116211 -51,9.415749549865723 -52,9.261321067810059 -53,9.121001243591309 -54,9.00881576538086 -55,8.914031028747559 -56,8.829047203063965 -57,8.743722915649414 -58,8.747849464416504 -59,8.703509330749512 -60,8.637246131896973 -61,8.594902038574219 -62,8.58739948272705 -63,8.56157398223877 -64,8.528776168823242 -65,8.484768867492676 -66,8.431571006774902 -67,8.37831974029541 -68,8.324762344360352 -69,8.269639015197754 -70,8.210556983947754 -71,8.142610549926758 -72,8.046590805053711 -73,7.970126628875732 -74,7.924332618713379 -75,7.866212844848633 -76,7.831223011016846 -77,7.766080856323242 -78,7.755893230438232 -79,7.7302422523498535 -80,7.685664653778076 -81,7.663659572601318 -82,7.634199619293213 -83,7.593028545379639 -84,7.596865177154541 -85,7.601314544677734 -86,7.596348762512207 -87,7.574789047241211 -88,7.537031173706055 -89,7.4777655601501465 -90,7.372236728668213 -91,7.347907066345215 -92,7.325854301452637 -93,7.304512023925781 -94,7.28115701675415 -95,7.254715919494629 -96,7.225220203399658 -97,7.193394184112549 -98,7.105127811431885 -99,7.2105512619018555 -100,7.247499942779541 -101,7.273057460784912 -102,7.281238555908203 -103,7.253745079040527 -104,7.280553340911865 -105,7.298073768615723 -106,7.304454326629639 -107,7.30397891998291 -108,7.297031879425049 -109,7.283960342407227 -110,7.265817642211914 -111,7.24392032623291 -112,7.219700813293457 -113,7.18568229675293 -114,7.128349304199219 -115,7.133162498474121 -116,7.117612361907959 -117,7.102394104003906 -118,7.090926647186279 -119,7.08110237121582 -120,7.069242000579834 -121,7.053996562957764 -122,7.03037166595459 -123,7.010382175445557 
-124,6.986774921417236 -125,6.973050594329834 -126,6.95114278793335 -127,6.9341511726379395 -128,6.908241271972656 -129,6.893868446350098 -130,6.873828887939453 -131,6.8668599128723145 -132,6.855545520782471 -133,6.835297107696533 -134,6.821957588195801 -135,6.7963762283325195 -136,6.7812371253967285 -137,6.762612342834473 -138,6.748215675354004 -139,6.733607292175293 -140,6.716850280761719 -141,6.7007060050964355 -142,6.681904315948486 -143,6.664395809173584 -144,6.647495746612549 -145,6.631406784057617 -146,6.616123676300049 -147,6.599778175354004 -148,6.5842790603637695 -149,6.56634521484375 -150,6.550471782684326 -151,6.531512260437012 -152,6.515493392944336 -153,6.4961042404174805 -154,6.479337215423584 -155,6.4574408531188965 -156,6.438920974731445 -157,6.414641380310059 -158,6.392064571380615 -159,6.361533164978027 -160,6.353409290313721 -161,6.352992057800293 -162,6.353855133056641 -163,6.348988056182861 -164,6.335885524749756 -165,6.332275390625 -166,6.309744834899902 -167,6.3013715744018555 -168,6.2748541831970215 -169,6.258798122406006 -170,6.23621940612793 -171,6.238771438598633 -172,6.213137626647949 -173,6.179778575897217 -174,6.277191638946533 -175,6.129601001739502 -176,6.231684684753418 -177,6.305120944976807 -178,6.3767313957214355 -179,6.413938045501709 -180,6.431994438171387 -181,6.4417195320129395 -182,6.449671268463135 -183,6.452303886413574 -184,6.445984363555908 -185,6.427504062652588 -186,6.3879618644714355 -187,6.34894323348999 -188,6.334661960601807 -189,6.314087390899658 -190,6.288924694061279 -191,6.267126560211182 -192,6.242397785186768 -193,6.211144924163818 -194,6.1702399253845215 -195,6.115541934967041 -196,6.030866622924805 -197,6.075636863708496 -198,6.08508825302124 -199,6.079102039337158 -200,6.054478168487549 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.040/training_config.txt deleted file mode 100644 index 1b6d8db..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 
6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.040/training_log.csv deleted file mode 100644 index 0e596df..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,417.1199035644531 -2,181.62867736816406 -3,100.37950897216797 -4,83.3403091430664 -5,59.19188690185547 -6,54.518836975097656 -7,45.56072235107422 -8,35.55010986328125 -9,29.44100570678711 -10,25.114145278930664 -11,23.975919723510742 -12,23.2408390045166 -13,23.48114013671875 -14,24.279577255249023 -15,24.91008186340332 -16,24.575258255004883 -17,21.658618927001953 -18,20.996469497680664 -19,20.50006866455078 -20,20.068368911743164 -21,19.771818161010742 -22,19.50188446044922 -23,19.235130310058594 -24,18.975040435791016 -25,18.714479446411133 -26,18.45114517211914 -27,18.18340492248535 -28,17.90570640563965 -29,17.60823631286621 -30,17.277502059936523 -31,16.90406608581543 -32,16.49563980102539 -33,16.072385787963867 -34,15.425130844116211 -35,14.143509864807129 -36,11.334772109985352 -37,10.734360694885254 -38,10.31445026397705 -39,9.91151237487793 -40,9.475993156433105 -41,9.030149459838867 -42,8.567642211914062 -43,8.066296577453613 -44,7.39056396484375 -45,6.31207799911499 -46,6.049027919769287 -47,5.8394246101379395 -48,5.653088569641113 -49,5.479311943054199 -50,5.317417144775391 -51,5.166835308074951 -52,5.026725769042969 -53,4.8951802253723145 -54,4.771106719970703 -55,4.653627872467041 -56,4.5419721603393555 -57,4.433633804321289 -58,4.325052261352539 -59,4.205707550048828 -60,4.061861038208008 -61,3.954967737197876 -62,3.9220950603485107 -63,3.8931918144226074 -64,3.8561251163482666 -65,3.814678907394409 -66,3.770634174346924 -67,3.725092649459839 -68,3.6796441078186035 -69,3.6376545429229736 -70,3.6047067642211914 -71,3.5848324298858643 -72,3.5752670764923096 -73,3.5692155361175537 -74,3.5620148181915283 -75,3.551445245742798 -76,3.5368857383728027 -77,3.518437147140503 -78,3.496386766433716 -79,3.471036195755005 -80,3.4428317546844482 -81,3.4122719764709473 -82,3.3799359798431396 -83,3.3464720249176025 -84,3.3126370906829834 -85,3.279162883758545 -86,3.2464587688446045 -87,3.214284896850586 -88,3.181199312210083 -89,3.163118600845337 -90,3.163954257965088 -91,3.1504807472229004 -92,3.144127130508423 -93,3.156022787094116 -94,3.151697874069214 -95,3.1391000747680664 -96,3.115041971206665 -97,3.0937983989715576 -98,3.092242956161499 -99,3.087130308151245 -100,3.080451726913452 -101,3.073241949081421 -102,3.0657806396484375 -103,3.058095932006836 -104,3.0499913692474365 
-105,3.0411829948425293 -106,3.031224489212036 -107,3.0192408561706543 -108,3.0047144889831543 -109,3.0026967525482178 -110,2.998563289642334 -111,2.9807450771331787 -112,2.9723446369171143 -113,2.9626471996307373 -114,2.9557015895843506 -115,2.953397035598755 -116,2.9435477256774902 -117,2.9246160984039307 -118,2.9310202598571777 -119,2.922053337097168 -120,2.9104020595550537 -121,2.9150032997131348 -122,2.9012980461120605 -123,2.898306369781494 -124,2.897679090499878 -125,2.882826566696167 -126,2.8859713077545166 -127,2.8779730796813965 -128,2.8701956272125244 -129,2.869762897491455 -130,2.8601527214050293 -131,2.8604073524475098 -132,2.852426052093506 -133,2.852938652038574 -134,2.8447341918945312 -135,2.8429362773895264 -136,2.8374783992767334 -137,2.8339526653289795 -138,2.829885721206665 -139,2.8263423442840576 -140,2.822747230529785 -141,2.8179750442504883 -142,2.8147881031036377 -143,2.8108603954315186 -144,2.807647466659546 -145,2.8040056228637695 -146,2.7999823093414307 -147,2.7968506813049316 -148,2.7928481101989746 -149,2.79034686088562 -150,2.786001682281494 -151,2.78326416015625 -152,2.778996706008911 -153,2.7767364978790283 -154,2.772712469100952 -155,2.769679307937622 -156,2.766220808029175 -157,2.7628960609436035 -158,2.759977340698242 -159,2.756296157836914 -160,2.7534961700439453 -161,2.7498857975006104 -162,2.746938943862915 -163,2.7435929775238037 -164,2.7401838302612305 -165,2.7371270656585693 -166,2.733614921569824 -167,2.7305009365081787 -168,2.7271785736083984 -169,2.7239830493927 -170,2.720987558364868 -171,2.7177634239196777 -172,2.714888334274292 -173,2.7120118141174316 -174,2.7091164588928223 -175,2.7064948081970215 -176,2.7038309574127197 -177,2.701310634613037 -178,2.6989338397979736 -179,2.6965625286102295 -180,2.6944093704223633 -181,2.692305326461792 -182,2.6902780532836914 -183,2.6884446144104004 -184,2.6866204738616943 -185,2.684920072555542 -186,2.683337926864624 -187,2.681763172149658 -188,2.680307149887085 -189,2.6789162158966064 -190,2.6775596141815186 -191,2.676295757293701 -192,2.6750638484954834 -193,2.6738855838775635 -194,2.672765016555786 -195,2.671665906906128 -196,2.670620918273926 -197,2.6696038246154785 -198,2.6686089038848877 -199,2.667656898498535 -200,2.6667165756225586 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.050/training_config.txt deleted file mode 100644 index 7cb4efe..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if 
isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.050/training_log.csv deleted file mode 100644 index d5ef1fc..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,386.5570373535156 -2,143.16336059570312 -3,89.31861877441406 -4,59.62860107421875 -5,33.614471435546875 -6,24.603496551513672 -7,20.50646209716797 -8,16.325374603271484 -9,14.616580963134766 -10,14.503169059753418 -11,14.035441398620605 -12,12.900599479675293 -13,10.074438095092773 -14,9.280579566955566 -15,8.617180824279785 -16,8.046723365783691 -17,7.4195942878723145 -18,6.311125755310059 -19,5.7919511795043945 -20,5.626206874847412 -21,5.4868855476379395 -22,5.361550331115723 -23,5.245227336883545 -24,5.135778427124023 -25,5.031583309173584 -26,4.9314141273498535 -27,4.83445405960083 -28,4.739890098571777 -29,4.646982192993164 -30,4.555368423461914 -31,4.464277267456055 -32,4.373220443725586 -33,4.281729698181152 -34,4.189207553863525 -35,4.095970630645752 -36,4.005731105804443 -37,3.9202752113342285 -38,3.8385725021362305 -39,3.760744571685791 -40,3.684286594390869 -41,3.6079509258270264 -42,3.5315349102020264 -43,3.4590604305267334 -44,3.398378849029541 -45,3.355722188949585 -46,3.3341164588928223 -47,3.326916456222534 -48,3.3233299255371094 -49,3.316770553588867 -50,3.305354356765747 -51,3.2891769409179688 -52,3.2689931392669678 -53,3.245856285095215 -54,3.221038818359375 -55,3.196091651916504 -56,3.1728029251098633 -57,3.1528873443603516 -58,3.136821746826172 -59,3.1232824325561523 -60,3.1104624271392822 -61,3.096893787384033 -62,3.0817503929138184 -63,3.065068483352661 -64,3.0476202964782715 -65,3.0306875705718994 -66,3.0155816078186035 -67,3.003019094467163 -68,2.992776393890381 -69,2.9838969707489014 -70,2.9752309322357178 -71,2.9659135341644287 -72,2.9555704593658447 -73,2.9443018436431885 -74,2.932617664337158 -75,2.9210877418518066 -76,2.910137176513672 -77,2.899911642074585 -78,2.89030122756958 -79,2.8810510635375977 -80,2.871878147125244 -81,2.8625588417053223 -82,2.853057861328125 -83,2.843475580215454 
-84,2.8340163230895996 -85,2.8248543739318848 -86,2.816103219985962 -87,2.807786703109741 -88,2.79986834526062 -89,2.7922701835632324 -90,2.7849466800689697 -91,2.7779033184051514 -92,2.7711856365203857 -93,2.7648134231567383 -94,2.7588188648223877 -95,2.753196954727173 -96,2.747926950454712 -97,2.742950916290283 -98,2.7382123470306396 -99,2.7336509227752686 -100,2.7292399406433105 -101,2.724963665008545 -102,2.720811128616333 -103,2.7167727947235107 -104,2.712820053100586 -105,2.7089016437530518 -106,2.7049176692962646 -107,2.700827121734619 -108,2.696647882461548 -109,2.6923844814300537 -110,2.6880483627319336 -111,2.6836700439453125 -112,2.6792638301849365 -113,2.6748640537261963 -114,2.670515537261963 -115,2.6662492752075195 -116,2.662100076675415 -117,2.6580970287323 -118,2.654256820678711 -119,2.6505801677703857 -120,2.647064447402954 -121,2.643697500228882 -122,2.640491008758545 -123,2.6374430656433105 -124,2.6345200538635254 -125,2.631741523742676 -126,2.629134178161621 -127,2.626690149307251 -128,2.6243958473205566 -129,2.6222429275512695 -130,2.6202263832092285 -131,2.618340253829956 -132,2.6165833473205566 -133,2.6149544715881348 -134,2.613450050354004 -135,2.6120553016662598 -136,2.610753059387207 -137,2.609525203704834 -138,2.608354091644287 -139,2.607224464416504 -140,2.6061275005340576 -141,2.60506272315979 -142,2.6040279865264893 -143,2.6030261516571045 -144,2.602055549621582 -145,2.601109266281128 -146,2.600187063217163 -147,2.5992870330810547 -148,2.5984015464782715 -149,2.5975265502929688 -150,2.596665382385254 -151,2.595815420150757 -152,2.5949788093566895 -153,2.594155788421631 -154,2.5933477878570557 -155,2.5925521850585938 -156,2.591768741607666 -157,2.5909945964813232 -158,2.59022855758667 -159,2.589470386505127 -160,2.588719367980957 -161,2.5879764556884766 -162,2.5872397422790527 -163,2.5865113735198975 -164,2.585792303085327 -165,2.5850822925567627 -166,2.5843818187713623 -167,2.5836901664733887 -168,2.583007335662842 -169,2.582333564758301 -170,2.5816686153411865 -171,2.581012010574341 -172,2.580364227294922 -173,2.5797250270843506 -174,2.579094171524048 -175,2.5784716606140137 -176,2.577857732772827 -177,2.5772511959075928 -178,2.5766541957855225 -179,2.576068162918091 -180,2.575491189956665 -181,2.5749218463897705 -182,2.574361801147461 -183,2.573810338973999 -184,2.57326602935791 -185,2.5727288722991943 -186,2.5721988677978516 -187,2.5716753005981445 -188,2.5711584091186523 -189,2.570647716522217 -190,2.570143938064575 -191,2.569645643234253 -192,2.5691535472869873 -193,2.5686681270599365 -194,2.568190336227417 -195,2.567718029022217 -196,2.567251205444336 -197,2.566788911819458 -198,2.566331386566162 -199,2.565877914428711 -200,2.565429210662842 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.080/training_config.txt deleted file mode 100644 index d5438f7..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = 
weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.080/training_log.csv deleted file mode 100644 index 67901e0..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,297.18231201171875 -2,108.13129425048828 -3,278.8375244140625 -4,87.31847381591797 -5,47.934322357177734 -6,31.710405349731445 -7,19.439434051513672 -8,16.322093963623047 -9,15.573362350463867 -10,14.412281036376953 -11,14.837568283081055 -12,14.382562637329102 -13,13.877470016479492 -14,13.385870933532715 -15,12.882034301757812 -16,12.408281326293945 -17,11.81762409210205 -18,10.878596305847168 -19,10.218636512756348 -20,9.783973693847656 -21,9.445239067077637 -22,9.161712646484375 -23,8.914918899536133 -24,8.694201469421387 -25,8.49359130859375 -26,8.30770206451416 -27,8.133277893066406 -28,7.969070911407471 -29,7.812870502471924 -30,7.66295051574707 -31,7.518159866333008 -32,7.377208232879639 -33,7.238448143005371 -34,7.100360870361328 -35,6.961169242858887 -36,6.81735372543335 -37,6.663356304168701 -38,6.484767913818359 -39,6.234774112701416 -40,5.772953987121582 -41,5.495012283325195 -42,5.327165603637695 -43,5.191157817840576 -44,5.071699619293213 -45,4.963095188140869 -46,4.862576484680176 -47,4.769013404846191 -48,4.681704044342041 -49,4.599965572357178 -50,4.52292537689209 -51,4.449398517608643 -52,4.378969669342041 -53,4.310953617095947 -54,4.244653701782227 -55,4.179534912109375 -56,4.114026069641113 -57,4.044349193572998 -58,3.9633374214172363 -59,3.900566339492798 -60,3.893601894378662 -61,3.8954591751098633 -62,3.8922030925750732 
-63,3.8844361305236816 -64,3.873098611831665 -65,3.8586575984954834 -66,3.8414247035980225 -67,3.821695566177368 -68,3.799645185470581 -69,3.775446653366089 -70,3.7492949962615967 -71,3.721414804458618 -72,3.6919445991516113 -73,3.660987615585327 -74,3.6284680366516113 -75,3.5942206382751465 -76,3.5580451488494873 -77,3.5202109813690186 -78,3.4842472076416016 -79,3.4669883251190186 -80,3.467388153076172 -81,3.4623095989227295 -82,3.4484453201293945 -83,3.4268341064453125 -84,3.398306131362915 -85,3.3654396533966064 -86,3.3373188972473145 -87,3.3239526748657227 -88,3.3153622150421143 -89,3.304088830947876 -90,3.2893192768096924 -91,3.2712161540985107 -92,3.250143527984619 -93,3.2269985675811768 -94,3.2058160305023193 -95,3.192553997039795 -96,3.182570695877075 -97,3.170006275177002 -98,3.153512716293335 -99,3.1341054439544678 -100,3.115116596221924 -101,3.100342273712158 -102,3.088751792907715 -103,3.0763771533966064 -104,3.0615787506103516 -105,3.0450148582458496 -106,3.0289905071258545 -107,3.0155155658721924 -108,3.003761053085327 -109,2.991431713104248 -110,2.9775984287261963 -111,2.9629321098327637 -112,2.9490585327148438 -113,2.9366815090179443 -114,2.9249119758605957 -115,2.912571430206299 -116,2.899482488632202 -117,2.8862671852111816 -118,2.8737237453460693 -119,2.861828565597534 -120,2.8497376441955566 -121,2.836918592453003 -122,2.8236207962036133 -123,2.8104796409606934 -124,2.7978804111480713 -125,2.785795211791992 -126,2.7739756107330322 -127,2.7622733116149902 -128,2.7507755756378174 -129,2.739790678024292 -130,2.729567527770996 -131,2.7201294898986816 -132,2.711433172225952 -133,2.703518867492676 -134,2.6965413093566895 -135,2.6905086040496826 -136,2.685162305831909 -137,2.680072546005249 -138,2.675111770629883 -139,2.6703059673309326 -140,2.6658692359924316 -141,2.661912679672241 -142,2.6583287715911865 -143,2.6550164222717285 -144,2.6519086360931396 -145,2.6489970684051514 -146,2.6462349891662598 -147,2.6436569690704346 -148,2.6413161754608154 -149,2.6392393112182617 -150,2.63735032081604 -151,2.6355819702148438 -152,2.633875846862793 -153,2.6322124004364014 -154,2.6305742263793945 -155,2.628960132598877 -156,2.627368688583374 -157,2.625800609588623 -158,2.6242549419403076 -159,2.622729778289795 -160,2.6212353706359863 -161,2.6197941303253174 -162,2.6184186935424805 -163,2.6171209812164307 -164,2.615863800048828 -165,2.614645481109619 -166,2.6134631633758545 -167,2.6123099327087402 -168,2.6111788749694824 -169,2.6100778579711914 -170,2.609002113342285 -171,2.6079516410827637 -172,2.6069247722625732 -173,2.6059186458587646 -174,2.6049327850341797 -175,2.603966236114502 -176,2.603018045425415 -177,2.602083206176758 -178,2.6011602878570557 -179,2.6002492904663086 -180,2.5993499755859375 -181,2.5984609127044678 -182,2.5975828170776367 -183,2.5967154502868652 -184,2.5958573818206787 -185,2.5950076580047607 -186,2.594165802001953 -187,2.5933308601379395 -188,2.592503786087036 -189,2.591684579849243 -190,2.5908758640289307 -191,2.5900769233703613 -192,2.5892887115478516 -193,2.588510751724243 -194,2.5877435207366943 -195,2.5869882106781006 -196,2.5862460136413574 -197,2.585516929626465 -198,2.5847997665405273 -199,2.5841000080108643 -200,2.583421230316162 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.100/training_config.txt deleted file mode 100644 index 352f197..0000000 --- 
a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.100/training_log.csv deleted file mode 100644 index 943b621..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,278.4961853027344 -2,181.52444458007812 -3,151.3619384765625 -4,45.46152114868164 -5,20.54496192932129 -6,14.931583404541016 -7,14.587371826171875 -8,11.164422988891602 -9,10.991003036499023 -10,9.840625762939453 -11,8.711243629455566 -12,8.091817855834961 -13,7.691790580749512 -14,7.355234146118164 -15,7.06831169128418 -16,6.824971675872803 -17,6.635426044464111 -18,6.573780059814453 -19,6.444064140319824 -20,6.175961017608643 -21,5.869910717010498 -22,5.6131134033203125 -23,5.395617961883545 -24,5.204375743865967 -25,5.03225564956665 -26,4.876045227050781 -27,4.731790542602539 -28,4.598686695098877 -29,4.476219654083252 -30,4.365889072418213 -31,4.271640300750732 -32,4.200459003448486 -33,4.155755519866943 -34,4.126640319824219 -35,4.102468013763428 -36,4.077729225158691 -37,4.049590587615967 
-38,4.016994476318359 -39,3.979759931564331 -40,3.9381589889526367 -41,3.892789363861084 -42,3.8445842266082764 -43,3.7950053215026855 -44,3.7455849647521973 -45,3.698207378387451 -46,3.655038595199585 -47,3.617374897003174 -48,3.585106611251831 -49,3.557135581970215 -50,3.5321204662323 -51,3.5088040828704834 -52,3.486220121383667 -53,3.4637060165405273 -54,3.4408934116363525 -55,3.417614698410034 -56,3.393864631652832 -57,3.3697478771209717 -58,3.3454275131225586 -59,3.321134328842163 -60,3.297065258026123 -61,3.2734274864196777 -62,3.2504076957702637 -63,3.228142738342285 -64,3.2067294120788574 -65,3.1861934661865234 -66,3.166499614715576 -67,3.1475448608398438 -68,3.1291980743408203 -69,3.111332654953003 -70,3.093822717666626 -71,3.0765583515167236 -72,3.0594475269317627 -73,3.042473077774048 -74,3.0256800651550293 -75,3.0091278553009033 -76,2.9928464889526367 -77,2.9767889976501465 -78,2.9610047340393066 -79,2.9455337524414062 -80,2.93040132522583 -81,2.9155993461608887 -82,2.9011073112487793 -83,2.8868610858917236 -84,2.8728291988372803 -85,2.858982563018799 -86,2.845306158065796 -87,2.8318121433258057 -88,2.8185341358184814 -89,2.805501937866211 -90,2.792752742767334 -91,2.7803053855895996 -92,2.7681808471679688 -93,2.7563915252685547 -94,2.744939088821411 -95,2.7338321208953857 -96,2.723055124282837 -97,2.712606906890869 -98,2.702486038208008 -99,2.692819595336914 -100,2.683712959289551 -101,2.675156593322754 -102,2.667161703109741 -103,2.6597206592559814 -104,2.652801752090454 -105,2.646372079849243 -106,2.640380620956421 -107,2.6347908973693848 -108,2.629568099975586 -109,2.6246492862701416 -110,2.619980573654175 -111,2.61550235748291 -112,2.6111721992492676 -113,2.6069586277008057 -114,2.602843999862671 -115,2.5987985134124756 -116,2.5948033332824707 -117,2.590737819671631 -118,2.586756467819214 -119,2.582990884780884 -120,2.579465866088867 -121,2.576180934906006 -122,2.573131799697876 -123,2.570298671722412 -124,2.5676581859588623 -125,2.5652031898498535 -126,2.5629308223724365 -127,2.560845375061035 -128,2.5589394569396973 -129,2.557194709777832 -130,2.5556068420410156 -131,2.5541787147521973 -132,2.552889585494995 -133,2.5517237186431885 -134,2.550670623779297 -135,2.5497193336486816 -136,2.548861503601074 -137,2.548082113265991 -138,2.5473713874816895 -139,2.5467216968536377 -140,2.5461196899414062 -141,2.545552968978882 -142,2.5450165271759033 -143,2.544504165649414 -144,2.5440144538879395 -145,2.543538808822632 -146,2.5430750846862793 -147,2.5426197052001953 -148,2.542173147201538 -149,2.541733980178833 -150,2.5413055419921875 -151,2.540886640548706 -152,2.5404775142669678 -153,2.5400795936584473 -154,2.53969144821167 -155,2.5393152236938477 -156,2.538949489593506 -157,2.5385947227478027 -158,2.5382490158081055 -159,2.5379130840301514 -160,2.5375852584838867 -161,2.5372650623321533 -162,2.536952495574951 -163,2.536648988723755 -164,2.536351203918457 -165,2.536060094833374 -166,2.5357754230499268 -167,2.5354959964752197 -168,2.535219430923462 -169,2.5349466800689697 -170,2.5346763134002686 -171,2.5344066619873047 -172,2.5341384410858154 -173,2.5338728427886963 -174,2.5336079597473145 -175,2.533346176147461 -176,2.5330865383148193 -177,2.5328283309936523 -178,2.5325710773468018 -179,2.5323150157928467 -180,2.5320608615875244 -181,2.5318071842193604 -182,2.5315535068511963 -183,2.531301498413086 -184,2.531049966812134 -185,2.5308003425598145 -186,2.530552625656128 -187,2.530305862426758 -188,2.5300607681274414 -189,2.5298166275024414 -190,2.529574155807495 
-191,2.5293326377868652 -192,2.529092788696289 -193,2.5288541316986084 -194,2.5286169052124023 -195,2.52838397026062 -196,2.5281543731689453 -197,2.5279271602630615 -198,2.5277013778686523 -199,2.5274760723114014 -200,2.527252197265625 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.125/training_config.txt deleted file mode 100644 index 39832f4..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.125/training_log.csv deleted file mode 100644 index 7e3143a..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,260.1337890625 -2,64.39563751220703 -3,41.76185607910156 -4,26.769020080566406 -5,25.424041748046875 -6,29.78365135192871 -7,30.260086059570312 -8,31.889842987060547 -9,23.54204750061035 -10,20.790756225585938 -11,15.486087799072266 -12,14.219724655151367 -13,12.733826637268066 -14,10.481193542480469 -15,7.608456611633301 
-16,6.833279132843018 -17,6.276886463165283 -18,5.819813251495361 -19,5.417050361633301 -20,5.05330753326416 -21,4.671348571777344 -22,4.2466535568237305 -23,3.884809732437134 -24,3.644195556640625 -25,3.4709010124206543 -26,3.3329052925109863 -27,3.2180659770965576 -28,3.1194097995758057 -29,3.0325756072998047 -30,2.954193115234375 -31,2.8828816413879395 -32,2.819568157196045 -33,2.764004945755005 -34,2.718332529067993 -35,2.6836392879486084 -36,2.6613104343414307 -37,2.6545522212982178 -38,2.6593453884124756 -39,2.6677517890930176 -40,2.674032211303711 -41,2.6768906116485596 -42,2.6759531497955322 -43,2.6713526248931885 -44,2.66296648979187 -45,2.6514699459075928 -46,2.638766050338745 -47,2.6264219284057617 -48,2.61564564704895 -49,2.6069891452789307 -50,2.6006076335906982 -51,2.5964860916137695 -52,2.5940418243408203 -53,2.592836380004883 -54,2.592402696609497 -55,2.5920565128326416 -56,2.5914697647094727 -57,2.5904483795166016 -58,2.5889194011688232 -59,2.5867700576782227 -60,2.5840749740600586 -61,2.5810835361480713 -62,2.5781683921813965 -63,2.575601816177368 -64,2.5734944343566895 -65,2.5717990398406982 -66,2.5704715251922607 -67,2.5694024562835693 -68,2.568516731262207 -69,2.5676932334899902 -70,2.5668821334838867 -71,2.566035032272339 -72,2.565099000930786 -73,2.5641117095947266 -74,2.5629963874816895 -75,2.5617940425872803 -76,2.560528516769409 -77,2.5592267513275146 -78,2.557968854904175 -79,2.556772232055664 -80,2.555704116821289 -81,2.5548040866851807 -82,2.5540807247161865 -83,2.5534822940826416 -84,2.552917003631592 -85,2.552332639694214 -86,2.5516934394836426 -87,2.550968647003174 -88,2.5501718521118164 -89,2.5493392944335938 -90,2.5485129356384277 -91,2.5477612018585205 -92,2.547081470489502 -93,2.546476125717163 -94,2.5459342002868652 -95,2.5454416275024414 -96,2.544968843460083 -97,2.544491767883301 -98,2.5439975261688232 -99,2.543475389480591 -100,2.542921781539917 -101,2.5423672199249268 -102,2.5418224334716797 -103,2.5412864685058594 -104,2.5407862663269043 -105,2.5403335094451904 -106,2.539912700653076 -107,2.5395050048828125 -108,2.5391058921813965 -109,2.5386908054351807 -110,2.5382559299468994 -111,2.537811517715454 -112,2.537374973297119 -113,2.536952495574951 -114,2.5365428924560547 -115,2.5361461639404297 -116,2.535773277282715 -117,2.535412311553955 -118,2.5350501537323 -119,2.534681797027588 -120,2.534309148788452 -121,2.5339367389678955 -122,2.533569097518921 -123,2.533212661743164 -124,2.532865524291992 -125,2.5325253009796143 -126,2.532195568084717 -127,2.5318682193756104 -128,2.531552314758301 -129,2.531240463256836 -130,2.5309267044067383 -131,2.5306193828582764 -132,2.530318021774292 -133,2.5300192832946777 -134,2.5297229290008545 -135,2.529428005218506 -136,2.529135227203369 -137,2.528843641281128 -138,2.528554677963257 -139,2.528268575668335 -140,2.527985095977783 -141,2.5277044773101807 -142,2.5274269580841064 -143,2.5271520614624023 -144,2.52687931060791 -145,2.526606798171997 -146,2.5263354778289795 -147,2.526066541671753 -148,2.525801181793213 -149,2.525538921356201 -150,2.525280237197876 -151,2.525022029876709 -152,2.524764060974121 -153,2.5245072841644287 -154,2.52425217628479 -155,2.523998737335205 -156,2.5237457752227783 -157,2.523494243621826 -158,2.5232436656951904 -159,2.522993564605713 -160,2.522745370864868 -161,2.5224967002868652 -162,2.522250175476074 -163,2.5220062732696533 -164,2.521763801574707 -165,2.52152156829834 -166,2.5212783813476562 -167,2.5210366249084473 -168,2.520796537399292 -169,2.5205588340759277 -170,2.520321846008301 
-171,2.520085334777832 -172,2.519850254058838 -173,2.5196163654327393 -174,2.519383430480957 -175,2.5191500186920166 -176,2.518918037414551 -177,2.5186879634857178 -178,2.518460273742676 -179,2.5182340145111084 -180,2.518007755279541 -181,2.517782211303711 -182,2.5175583362579346 -183,2.51733660697937 -184,2.5171167850494385 -185,2.516897678375244 -186,2.5166802406311035 -187,2.516464948654175 -188,2.516252040863037 -189,2.5160419940948486 -190,2.515833854675293 -191,2.5156264305114746 -192,2.515422821044922 -193,2.515220880508423 -194,2.515021562576294 -195,2.5148234367370605 -196,2.514625072479248 -197,2.514427900314331 -198,2.5142323970794678 -199,2.514038562774658 -200,2.5138466358184814 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.160/training_config.txt deleted file mode 100644 index e7c65c6..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.160/training_log.csv deleted file mode 100644 index ec812c6..0000000 --- 
a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,271.3787536621094 -2,34.26557159423828 -3,29.031230926513672 -4,26.572959899902344 -5,24.54200553894043 -6,21.020755767822266 -7,16.849517822265625 -8,14.529926300048828 -9,13.163427352905273 -10,12.231792449951172 -11,11.418268203735352 -12,10.534174919128418 -13,9.595291137695312 -14,8.753190040588379 -15,8.069551467895508 -16,7.535299777984619 -17,7.107607841491699 -18,6.756683349609375 -19,6.465115070343018 -20,6.211913585662842 -21,5.981085777282715 -22,5.762630939483643 -23,5.550121307373047 -24,5.340010166168213 -25,5.1322712898254395 -26,4.928611755371094 -27,4.731381893157959 -28,4.543166160583496 -29,4.366037845611572 -30,4.207062721252441 -31,4.06414794921875 -32,3.935600519180298 -33,3.819779396057129 -34,3.7150497436523438 -35,3.6198878288269043 -36,3.532503843307495 -37,3.451014280319214 -38,3.3745944499969482 -39,3.3027961254119873 -40,3.2355329990386963 -41,3.1731796264648438 -42,3.1167166233062744 -43,3.0662074089050293 -44,3.0213372707366943 -45,2.9817001819610596 -46,2.946953535079956 -47,2.917022466659546 -48,2.8915317058563232 -49,2.870316982269287 -50,2.8528082370758057 -51,2.838505268096924 -52,2.8269498348236084 -53,2.81760573387146 -54,2.810014486312866 -55,2.803920030593872 -56,2.7990689277648926 -57,2.7950828075408936 -58,2.791740655899048 -59,2.788766860961914 -60,2.7860093116760254 -61,2.7832655906677246 -62,2.780381679534912 -63,2.7772834300994873 -64,2.7739367485046387 -65,2.770331621170044 -66,2.7664687633514404 -67,2.7623534202575684 -68,2.758000612258911 -69,2.7534279823303223 -70,2.7486934661865234 -71,2.74385142326355 -72,2.738957643508911 -73,2.734062910079956 -74,2.7291579246520996 -75,2.724277973175049 -76,2.7194743156433105 -77,2.714768171310425 -78,2.710186243057251 -79,2.7057149410247803 -80,2.701378107070923 -81,2.6971664428710938 -82,2.6930992603302 -83,2.689178466796875 -84,2.6854641437530518 -85,2.6820123195648193 -86,2.6787354946136475 -87,2.675567626953125 -88,2.6725027561187744 -89,2.6695284843444824 -90,2.666628837585449 -91,2.663797378540039 -92,2.6610219478607178 -93,2.658291816711426 -94,2.65559720993042 -95,2.6529650688171387 -96,2.650404930114746 -97,2.6479272842407227 -98,2.6455421447753906 -99,2.6432101726531982 -100,2.640907049179077 -101,2.6386284828186035 -102,2.6363720893859863 -103,2.634140729904175 -104,2.6319265365600586 -105,2.6297295093536377 -106,2.627549648284912 -107,2.6253879070281982 -108,2.6232454776763916 -109,2.621128797531128 -110,2.619027614593506 -111,2.616947889328003 -112,2.614891529083252 -113,2.6128604412078857 -114,2.610856294631958 -115,2.6088755130767822 -116,2.606924295425415 -117,2.604995012283325 -118,2.6030924320220947 -119,2.601222515106201 -120,2.5993752479553223 -121,2.597531318664551 -122,2.5956923961639404 -123,2.593851089477539 -124,2.5920729637145996 -125,2.5902671813964844 -126,2.5883853435516357 -127,2.586444616317749 -128,2.5845019817352295 -129,2.5825412273406982 -130,2.580570936203003 -131,2.578580379486084 -132,2.576584577560425 -133,2.5745904445648193 -134,2.5726075172424316 -135,2.5706424713134766 -136,2.568690776824951 -137,2.566761016845703 -138,2.564842462539673 -139,2.562945604324341 -140,2.561112642288208 -141,2.559286594390869 -142,2.5574402809143066 -143,2.555577039718628 -144,2.5537049770355225 -145,2.551819086074829 -146,2.5499074459075928 -147,2.5478413105010986 -148,2.5457775592803955 -149,2.5437445640563965 
-150,2.541759729385376 -151,2.5398106575012207 -152,2.5379040241241455 -153,2.5360538959503174 -154,2.5342698097229004 -155,2.5325565338134766 -156,2.5309269428253174 -157,2.529391288757324 -158,2.527926445007324 -159,2.52652645111084 -160,2.5251948833465576 -161,2.5239415168762207 -162,2.522773504257202 -163,2.5216970443725586 -164,2.520700454711914 -165,2.5197808742523193 -166,2.5189387798309326 -167,2.518172264099121 -168,2.51747727394104 -169,2.516848087310791 -170,2.5162789821624756 -171,2.5157907009124756 -172,2.515404224395752 -173,2.515048027038574 -174,2.5147082805633545 -175,2.514376163482666 -176,2.5140540599823 -177,2.5137314796447754 -178,2.5134148597717285 -179,2.513101816177368 -180,2.512794256210327 -181,2.512493848800659 -182,2.512190818786621 -183,2.5118911266326904 -184,2.5115973949432373 -185,2.511310338973999 -186,2.5110316276550293 -187,2.5107569694519043 -188,2.510486602783203 -189,2.5102217197418213 -190,2.509958028793335 -191,2.5097033977508545 -192,2.50945782661438 -193,2.50921893119812 -194,2.508986711502075 -195,2.508758068084717 -196,2.5085291862487793 -197,2.50830078125 -198,2.508070230484009 -199,2.5078375339508057 -200,2.507603406906128 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.200/training_config.txt deleted file mode 100644 index 94990b4..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 
6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.200/training_log.csv deleted file mode 100644 index fd3c8e6..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,271.8916015625 -2,29.792179107666016 -3,16.146120071411133 -4,13.42953109741211 -5,10.219978332519531 -6,8.865829467773438 -7,7.75886869430542 -8,6.82590389251709 -9,5.977773666381836 -10,5.289205551147461 -11,4.831647872924805 -12,4.511669635772705 -13,4.286991119384766 -14,4.123298168182373 -15,3.988435745239258 -16,3.8663203716278076 -17,3.7506375312805176 -18,3.6429851055145264 -19,3.5473406314849854 -20,3.4680163860321045 -21,3.407707452774048 -22,3.3657467365264893 -23,3.339012861251831 -24,3.3230435848236084 -25,3.313408136367798 -26,3.3062474727630615 -27,3.2985501289367676 -28,3.2884042263031006 -29,3.275271415710449 -30,3.258927583694458 -31,3.240243911743164 -32,3.2203874588012695 -33,3.2003486156463623 -34,3.1808125972747803 -35,3.1619482040405273 -36,3.143545150756836 -37,3.125126361846924 -38,3.105269193649292 -39,3.0844507217407227 -40,3.062791109085083 -41,3.040808916091919 -42,3.018775463104248 -43,2.996936082839966 -44,2.975680351257324 -45,2.9554712772369385 -46,2.936431407928467 -47,2.9189882278442383 -48,2.9037957191467285 -49,2.8908157348632812 -50,2.8798742294311523 -51,2.8705461025238037 -52,2.862290143966675 -53,2.8545610904693604 -54,2.847339630126953 -55,2.8408355712890625 -56,2.835193157196045 -57,2.830383777618408 -58,2.8268706798553467 -59,2.8241240978240967 -60,2.8219006061553955 -61,2.819941759109497 -62,2.817582845687866 -63,2.814777135848999 -64,2.8114140033721924 -65,2.8074800968170166 -66,2.802992582321167 -67,2.798163414001465 -68,2.7932543754577637 -69,2.7885522842407227 -70,2.7840585708618164 -71,2.7797744274139404 -72,2.775804281234741 -73,2.7721266746520996 -74,2.76869797706604 -75,2.7654454708099365 -76,2.7622299194335938 -77,2.759129285812378 -78,2.7560408115386963 -79,2.752952814102173 -80,2.7497735023498535 -81,2.746579885482788 -82,2.7433910369873047 -83,2.740231513977051 -84,2.7371115684509277 -85,2.734035015106201 -86,2.730942726135254 -87,2.727870464324951 -88,2.724853754043579 -89,2.7218220233917236 -90,2.718846321105957 -91,2.7159292697906494 -92,2.7129569053649902 -93,2.7099850177764893 -94,2.706934928894043 -95,2.7036631107330322 -96,2.700143575668335 -97,2.6964216232299805 -98,2.692577362060547 -99,2.688706636428833 -100,2.684929847717285 -101,2.6811819076538086 -102,2.6775166988372803 -103,2.67386531829834 -104,2.6702046394348145 -105,2.6665008068084717 -106,2.662750005722046 -107,2.6589884757995605 -108,2.655219554901123 -109,2.6514406204223633 -110,2.647629737854004 -111,2.6437745094299316 -112,2.639909029006958 -113,2.6360244750976562 -114,2.6320207118988037 -115,2.6280078887939453 -116,2.623948812484741 -117,2.6197590827941895 -118,2.6155264377593994 -119,2.611227512359619 -120,2.6068077087402344 -121,2.602332353591919 -122,2.597839593887329 -123,2.593318223953247 -124,2.5889086723327637 -125,2.5845398902893066 -126,2.580230236053467 -127,2.5760202407836914 -128,2.5717389583587646 -129,2.567469358444214 
-130,2.563157081604004 -131,2.558790922164917 -132,2.554579257965088 -133,2.5504331588745117 -134,2.546522378921509 -135,2.5426950454711914 -136,2.538975954055786 -137,2.535541296005249 -138,2.532534122467041 -139,2.5297653675079346 -140,2.5273008346557617 -141,2.52508282661438 -142,2.523236036300659 -143,2.521698474884033 -144,2.520451068878174 -145,2.519416093826294 -146,2.518547296524048 -147,2.5178797245025635 -148,2.5173189640045166 -149,2.5168538093566895 -150,2.5163917541503906 -151,2.515829563140869 -152,2.5152456760406494 -153,2.5146279335021973 -154,2.513988494873047 -155,2.513331651687622 -156,2.5127151012420654 -157,2.512174129486084 -158,2.5116846561431885 -159,2.5112593173980713 -160,2.5108895301818848 -161,2.510547399520874 -162,2.510207414627075 -163,2.509880542755127 -164,2.509561538696289 -165,2.509244918823242 -166,2.5089266300201416 -167,2.5086092948913574 -168,2.5082809925079346 -169,2.507941484451294 -170,2.5075972080230713 -171,2.5072519779205322 -172,2.506911516189575 -173,2.506577730178833 -174,2.5062522888183594 -175,2.5059354305267334 -176,2.5056376457214355 -177,2.5053772926330566 -178,2.505119800567627 -179,2.5048587322235107 -180,2.50460147857666 -181,2.5043606758117676 -182,2.5041348934173584 -183,2.503915786743164 -184,2.5037026405334473 -185,2.5034940242767334 -186,2.503286838531494 -187,2.5030789375305176 -188,2.5028700828552246 -189,2.5026607513427734 -190,2.5024540424346924 -191,2.502251148223877 -192,2.5020506381988525 -193,2.501854181289673 -194,2.501662015914917 -195,2.501474142074585 -196,2.5012876987457275 -197,2.5011043548583984 -198,2.5009241104125977 -199,2.500749111175537 -200,2.5005717277526855 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.250/training_config.txt deleted file mode 100644 index 5ef5270..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 
0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.250/training_log.csv deleted file mode 100644 index 7f67a1c..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,355.09771728515625 -2,33.25216293334961 -3,45.993099212646484 -4,21.697818756103516 -5,14.625507354736328 -6,12.8707914352417 -7,12.243583679199219 -8,11.447431564331055 -9,10.730199813842773 -10,10.151763916015625 -11,9.722772598266602 -12,9.364788055419922 -13,9.005243301391602 -14,8.633445739746094 -15,8.273977279663086 -16,7.946038246154785 -17,7.64614725112915 -18,7.369170665740967 -19,7.108803749084473 -20,6.863198280334473 -21,6.6316447257995605 -22,6.413092136383057 -23,6.20560359954834 -24,6.007829666137695 -25,5.818274974822998 -26,5.635011196136475 -27,5.455123424530029 -28,5.280218124389648 -29,5.1110944747924805 -30,4.94709587097168 -31,4.787132740020752 -32,4.628997802734375 -33,4.474498271942139 -34,4.325277328491211 -35,4.184556484222412 -36,4.0526957511901855 -37,3.9292244911193848 -38,3.8138234615325928 -39,3.704984664916992 -40,3.6022255420684814 -41,3.5059633255004883 -42,3.4169938564300537 -43,3.3361544609069824 -44,3.263150215148926 -45,3.1985976696014404 -46,3.1410398483276367 -47,3.0900793075561523 -48,3.0466222763061523 -49,3.011373996734619 -50,2.9853198528289795 -51,2.967749834060669 -52,2.9577481746673584 -53,2.9531350135803223 -54,2.9511196613311768 -55,2.9490535259246826 -56,2.9446542263031006 -57,2.9368162155151367 -58,2.9249565601348877 -59,2.909228801727295 -60,2.89009165763855 -61,2.868224620819092 -62,2.844650983810425 -63,2.821315050125122 -64,2.79905366897583 -65,2.7776596546173096 -66,2.7575292587280273 -67,2.738896608352661 -68,2.7218003273010254 -69,2.705796957015991 -70,2.6905343532562256 -71,2.675840139389038 -72,2.6613876819610596 -73,2.6471316814422607 -74,2.633225917816162 -75,2.619898557662964 -76,2.6075751781463623 -77,2.596684217453003 -78,2.587714195251465 -79,2.581047534942627 -80,2.5766117572784424 -81,2.573321580886841 -82,2.5699446201324463 -83,2.5656943321228027 -84,2.5605597496032715 -85,2.5551962852478027 -86,2.550549268722534 -87,2.5471067428588867 -88,2.5447802543640137 -89,2.543048143386841 -90,2.5413289070129395 -91,2.53920316696167 -92,2.5366547107696533 -93,2.5338528156280518 -94,2.5311412811279297 -95,2.5288124084472656 -96,2.527088165283203 -97,2.5257980823516846 -98,2.5245230197906494 -99,2.5229129791259766 -100,2.5208990573883057 -101,2.518749952316284 -102,2.5167438983917236 -103,2.5151162147521973 -104,2.5138916969299316 -105,2.5128281116485596 -106,2.5117406845092773 -107,2.510554313659668 -108,2.509303331375122 -109,2.5080864429473877 
-110,2.5069899559020996 -111,2.5061116218566895 -112,2.5054166316986084 -113,2.5047905445098877 -114,2.504154920578003 -115,2.5034892559051514 -116,2.502833127975464 -117,2.502262592315674 -118,2.5017855167388916 -119,2.5013792514801025 -120,2.5010128021240234 -121,2.500633955001831 -122,2.500239610671997 -123,2.499842882156372 -124,2.4994497299194336 -125,2.4991037845611572 -126,2.498817205429077 -127,2.498546838760376 -128,2.4982762336730957 -129,2.497971296310425 -130,2.497650623321533 -131,2.4973337650299072 -132,2.4970316886901855 -133,2.4967422485351562 -134,2.496455669403076 -135,2.49615740776062 -136,2.4958479404449463 -137,2.4955365657806396 -138,2.4952280521392822 -139,2.4949207305908203 -140,2.494614362716675 -141,2.494307041168213 -142,2.493994951248169 -143,2.493677854537964 -144,2.493356466293335 -145,2.4930341243743896 -146,2.492710590362549 -147,2.4924051761627197 -148,2.4921793937683105 -149,2.491931915283203 -150,2.4916350841522217 -151,2.4913883209228516 -152,2.491199254989624 -153,2.4909610748291016 -154,2.4907052516937256 -155,2.490511178970337 -156,2.4903180599212646 -157,2.4900858402252197 -158,2.4898645877838135 -159,2.489690065383911 -160,2.4894940853118896 -161,2.4892830848693848 -162,2.4891085624694824 -163,2.488922357559204 -164,2.4886960983276367 -165,2.4884555339813232 -166,2.4882328510284424 -167,2.4880130290985107 -168,2.4877705574035645 -169,2.4875121116638184 -170,2.487273693084717 -171,2.487027645111084 -172,2.486762285232544 -173,2.486494541168213 -174,2.486250877380371 -175,2.4859752655029297 -176,2.485689163208008 -177,2.485422134399414 -178,2.4851484298706055 -179,2.4848361015319824 -180,2.4846386909484863 -181,2.484391689300537 -182,2.484124183654785 -183,2.4839255809783936 -184,2.4837207794189453 -185,2.4834468364715576 -186,2.483222484588623 -187,2.4830427169799805 -188,2.482804775238037 -189,2.4825189113616943 -190,2.4822838306427 -191,2.4820730686187744 -192,2.481806993484497 -193,2.481553554534912 -194,2.481325149536133 -195,2.4810707569122314 -196,2.480806589126587 -197,2.480579137802124 -198,2.480316638946533 -199,2.480048656463623 -200,2.47979474067688 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.300/training_config.txt deleted file mode 100644 index e4a44d2..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + 
((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.300/training_log.csv deleted file mode 100644 index b35aea3..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.300/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,471.6612854003906 -2,23.00252914428711 -3,27.078798294067383 -4,23.66132164001465 -5,18.63482093811035 -6,16.150075912475586 -7,14.964597702026367 -8,14.276823997497559 -9,13.714733123779297 -10,13.213746070861816 -11,12.760565757751465 -12,12.35844612121582 -13,11.996943473815918 -14,11.669873237609863 -15,11.372967720031738 -16,11.099198341369629 -17,10.842599868774414 -18,10.600417137145996 -19,10.366345405578613 -20,10.136759757995605 -21,9.908153533935547 -22,9.681231498718262 -23,9.467360496520996 -24,73.6868667602539 -25,2478.372802734375 -26,1167.7396240234375 -27,421.5982666015625 -28,252.62109375 -29,207.45602416992188 -30,168.4712677001953 -31,121.76538848876953 -32,98.69564819335938 -33,83.87957000732422 -34,63.68681335449219 -35,50.40849304199219 -36,39.95701599121094 -37,32.14857864379883 -38,28.875354766845703 -39,26.95559310913086 -40,25.32404327392578 -41,24.126235961914062 -42,23.25377082824707 -43,22.576465606689453 -44,22.379201889038086 -45,21.908870697021484 -46,21.5030517578125 -47,21.14948844909668 -48,20.81096649169922 -49,20.484588623046875 -50,20.120601654052734 -51,19.853925704956055 -52,19.61568832397461 -53,19.309412002563477 -54,19.210054397583008 -55,18.96631622314453 -56,18.767667770385742 -57,18.60054588317871 -58,18.488311767578125 -59,18.743558883666992 -60,19.37874412536621 -61,21.10749053955078 -62,26.27532196044922 -63,29.525562286376953 -64,32.204307556152344 -65,32.75871276855469 -66,32.34585952758789 -67,31.242210388183594 -68,29.23996925354004 -69,27.899551391601562 -70,27.536907196044922 -71,28.425039291381836 -72,27.539718627929688 -73,25.473852157592773 -74,25.994863510131836 -75,25.905702590942383 -76,24.578033447265625 -77,24.0378475189209 -78,24.068693161010742 -79,24.277385711669922 -80,23.61006736755371 -81,22.614017486572266 -82,22.4334716796875 -83,22.545886993408203 -84,22.26801109313965 -85,22.153575897216797 -86,22.092817306518555 -87,21.71979522705078 -88,21.5398006439209 -89,20.81610679626465 
-90,19.60283851623535 -91,17.234291076660156 -92,15.855135917663574 -93,15.423468589782715 -94,14.335714340209961 -95,13.01332950592041 -96,13.289395332336426 -97,13.568232536315918 -98,13.678468704223633 -99,13.477156639099121 -100,13.288290977478027 -101,12.9901123046875 -102,12.694053649902344 -103,12.179972648620605 -104,11.728004455566406 -105,11.389742851257324 -106,11.189838409423828 -107,11.029342651367188 -108,10.858057022094727 -109,10.697242736816406 -110,10.513890266418457 -111,10.301393508911133 -112,10.09469985961914 -113,9.921070098876953 -114,9.746050834655762 -115,9.552938461303711 -116,9.403939247131348 -117,9.2467679977417 -118,9.10406494140625 -119,8.959546089172363 -120,8.81676959991455 -121,8.67989730834961 -122,8.555961608886719 -123,8.435626983642578 -124,8.329627990722656 -125,8.230923652648926 -126,8.145867347717285 -127,8.071218490600586 -128,7.997625827789307 -129,7.940179824829102 -130,7.873055458068848 -131,7.776669979095459 -132,7.719196796417236 -133,7.6693196296691895 -134,7.619132995605469 -135,7.566268444061279 -136,7.515756607055664 -137,7.482010841369629 -138,7.460355281829834 -139,7.43874979019165 -140,7.421947479248047 -141,7.393260478973389 -142,7.362057685852051 -143,7.333127021789551 -144,7.3053669929504395 -145,7.277162551879883 -146,7.249178886413574 -147,7.230945110321045 -148,7.233561992645264 -149,7.20446252822876 -150,7.172256946563721 -151,7.150623798370361 -152,7.127945899963379 -153,7.102389335632324 -154,7.074929714202881 -155,7.047362804412842 -156,7.020300388336182 -157,6.988565444946289 -158,6.953955173492432 -159,6.916121482849121 -160,6.876855850219727 -161,6.845049858093262 -162,6.8113813400268555 -163,6.77463436126709 -164,6.712974548339844 -165,6.672788143157959 -166,6.630098819732666 -167,6.5842671394348145 -168,6.539709568023682 -169,6.489234447479248 -170,6.413600921630859 -171,6.295468807220459 -172,6.182491302490234 -173,6.04374361038208 -174,5.9318528175354 -175,5.823639869689941 -176,5.704588890075684 -177,5.615278244018555 -178,5.541597843170166 -179,5.604612827301025 -180,5.677299499511719 -181,5.755727767944336 -182,5.808108806610107 -183,5.862032890319824 -184,5.909805774688721 -185,5.945312976837158 -186,6.002069473266602 -187,6.06873083114624 -188,6.135275363922119 -189,6.19083309173584 -190,6.21349573135376 -191,6.19719123840332 -192,6.168421745300293 -193,6.138680934906006 -194,6.101579666137695 -195,6.035576820373535 -196,5.9707350730896 -197,5.911670684814453 -198,5.858095645904541 -199,5.794510364532471 -200,5.726263046264648 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.400/training_config.txt deleted file mode 100644 index d39e487..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] 
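# --- editor's note: not part of the original training_config.txt (the same
# --- loss_fn listing recurs verbatim in every lr_* config of this sweep).
# The two shape comments above are inconsistent: weights comes from
# weight_fn(t_span) with length t_points, so weights.view(-1, 1) has shape
# [t_points, 1], not [batch_size, t_points]. The multiply below then only
# broadcasts if state_traj is time-major, i.e. [t_points, batch_size,
# state_dim] (the layout torchdiffeq-style ODE solvers return), which makes
# theta [t_points, batch_size]. A quick check under those assumed sizes
# (t_points=1000 from the config's time span, batch_size=22 training cases):
#     weights = torch.rand(1000).view(-1, 1)   # [t_points, 1]
#     theta   = torch.zeros(1000, 22)          # [t_points, batch_size]
#     (weights * theta).shape                  # -> torch.Size([1000, 22])
# With the batch-major shapes the comments claim, the product would raise a
# broadcasting RuntimeError, so the comments, not the code, appear to be wrong.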
- - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.400/training_log.csv deleted file mode 100644 index dd53ca7..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.400/training_log.csv +++ /dev/null @@ -1,46 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,3019.009521484375 -2,38.42874526977539 -3,39.22597122192383 -4,48.9142951965332 -5,25.242408752441406 -6,24.58867073059082 -7,23.745019912719727 -8,22.72548484802246 -9,21.57259750366211 -10,20.401016235351562 -11,19.269248962402344 -12,18.279972076416016 -13,17.465425491333008 -14,16.786096572875977 -15,21.093902587890625 -16,90.79993438720703 -17,275.0513610839844 -18,116.63509368896484 -19,63.87993240356445 -20,52.246009826660156 -21,55.965126037597656 -22,62.273216247558594 -23,71.46532440185547 -24,78.7964859008789 -25,83.91603088378906 -26,88.38800048828125 -27,91.44149780273438 -28,94.4592514038086 -29,96.16133880615234 -30,96.1873779296875 -31,95.09724426269531 -32,94.97596740722656 -33,92.1351089477539 -34,90.3102035522461 -35,88.52037811279297 -36,86.15780639648438 -37,83.72998046875 -38,80.62232971191406 -39,70.8019790649414 -40,63.90696716308594 -41,nan -42,nan -43,nan -44,nan diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.500/training_config.txt deleted file mode 100644 index 05416de..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - 
desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.500/training_log.csv deleted file mode 100644 index 0ebd32e..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.500/training_log.csv +++ /dev/null @@ -1,21 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,20722.400390625 -2,94059.9140625 -3,87355.1875 -4,43245.546875 -5,1076.577392578125 -6,43.8785400390625 -7,47.48427963256836 -8,48.926353454589844 -9,59.5208625793457 -10,44.5113639831543 -11,41.65046691894531 -12,40.13322067260742 -13,52.385528564453125 -14,356.2956848144531 -15,54.39165496826172 -16,nan -17,nan -18,nan -19,nan diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.600/training_config.txt deleted file mode 100644 index 499b5a9..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights 
to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.600/training_log.csv deleted file mode 100644 index 8cd66dd..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.600/training_log.csv +++ /dev/null @@ -1,11 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,45775.93359375 -2,96178.5546875 -3,96369.59375 -4,96380.984375 -5,96380.984375 -6,96380.984375 -7,96380.984375 -8,96380.984375 -9,96380.984375 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.700/training_config.txt deleted file mode 100644 index e9bd782..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else 
torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.700/training_log.csv deleted file mode 100644 index 0c84739..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,66367.7109375 -2,96368.578125 -3,96380.984375 -4,96380.984375 -5,96380.984375 -6,96380.984375 -7,96380.984375 -8,96380.984375 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.800/training_config.txt deleted file mode 100644 index f9498b9..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 
6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.800/training_log.csv deleted file mode 100644 index 96437d4..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,78400.9296875 -2,96380.984375 -3,96380.984375 -4,96380.984375 -5,96380.984375 -6,96380.984375 -7,96380.984375 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.900/training_config.txt deleted file mode 100644 index faca672..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 
1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.900/training_log.csv deleted file mode 100644 index 229aee6..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,84804.3515625 -2,96380.984375 -3,96380.984375 -4,96380.984375 -5,96380.984375 -6,96380.984375 -7,96380.984375 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_1.000/training_config.txt deleted file mode 100644 index ea65694..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_1.000/training_log.csv 
b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_1.000/training_log.csv deleted file mode 100644 index 3a9df19..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,89158.015625 -2,96380.984375 -3,96380.984375 -4,96380.984375 -5,96380.984375 -6,96380.984375 -7,96380.984375 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_16.000/training_config.txt deleted file mode 100644 index f9d6af6..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_16.000/training_log.csv deleted file mode 100644 index 334597a..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,96770.1328125 -2,96770.1328125 -3,96770.1328125 -4,96770.1328125 -5,96770.1328125 -6,96770.1328125 diff --git 
a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_2.000/training_config.txt deleted file mode 100644 index f82b310..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_2.000/training_log.csv deleted file mode 100644 index 9190af6..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,96646.5625 -2,7.2633161544799805 -3,nan -4,nan -5,nan -6,nan diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_4.000/training_config.txt deleted file mode 100644 index 5199930..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 
-Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_4.000/training_log.csv deleted file mode 100644 index 334597a..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,96770.1328125 -2,96770.1328125 -3,96770.1328125 -4,96770.1328125 -5,96770.1328125 -6,96770.1328125 diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_8.000/training_config.txt deleted file mode 100644 index ce3976f..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, 
t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def cubic_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/3)) * (-t_span + t_max)**(1/3) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_8.000/training_log.csv deleted file mode 100644 index 334597a..0000000 --- a/training/time_weighting_learning_rate_sweep/cubic_root_mirrored/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,606.1986083984375 -1,96770.1328125 -2,96770.1328125 -3,96770.1328125 -4,96770.1328125 -5,96770.1328125 -6,96770.1328125 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.010/training_config.txt deleted file mode 100644 index 13131e9..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, 
desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.010/training_log.csv deleted file mode 100644 index 2fa083a..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,13.763581275939941 -2,11.73384952545166 -3,10.836784362792969 -4,9.466296195983887 -5,7.722275733947754 -6,6.524634838104248 -7,5.502102375030518 -8,4.847158908843994 -9,4.081435680389404 -10,3.897671937942505 -11,3.482429027557373 -12,3.393237590789795 -13,3.135890245437622 -14,3.0342886447906494 -15,2.861006498336792 -16,2.708923816680908 -17,2.4860432147979736 -18,2.345407247543335 -19,2.174870014190674 -20,2.037766218185425 -21,1.92116117477417 -22,1.7937120199203491 -23,1.699816107749939 -24,1.5501412153244019 -25,1.5124584436416626 -26,1.4707242250442505 -27,1.43644118309021 -28,1.3858717679977417 -29,1.3223499059677124 -30,1.280295968055725 -31,1.2569609880447388 -32,1.2297996282577515 -33,1.204512357711792 -34,1.1815470457077026 -35,1.1637660264968872 -36,1.1571478843688965 -37,1.147878885269165 -38,1.1050268411636353 -39,1.082077980041504 -40,1.0655897855758667 -41,1.043971061706543 -42,1.0328165292739868 -43,1.0229405164718628 -44,1.0136914253234863 -45,1.0063350200653076 -46,0.9758747220039368 -47,0.9539047479629517 -48,0.9406636357307434 -49,0.9320182204246521 -50,0.9278836846351624 -51,0.9224196672439575 -52,0.9171828031539917 -53,0.9122323989868164 -54,0.9076048731803894 -55,0.9032882452011108 -56,0.8992444276809692 -57,0.8953991532325745 -58,0.8903710246086121 -59,0.8869714736938477 -60,0.8868290781974792 -61,0.883935272693634 -62,0.8845046758651733 -63,0.8837236166000366 -64,0.8828096389770508 -65,0.8819745182991028 -66,0.8810811638832092 -67,0.879948079586029 -68,0.8785377144813538 -69,0.8769556283950806 -70,0.8752871751785278 -71,0.8735478520393372 -72,0.8716910481452942 -73,0.8696111440658569 -74,0.867116391658783 -75,0.8641587495803833 -76,0.8614251613616943 -77,0.8579245209693909 -78,0.8543555736541748 -79,0.850910484790802 -80,0.84784996509552 -81,0.8463753461837769 -82,0.8434226512908936 -83,0.8390172123908997 -84,0.8395360708236694 -85,0.8384261131286621 -86,0.8371247053146362 -87,0.8369617462158203 -88,0.8363243937492371 -89,0.8355402946472168 -90,0.8346731066703796 -91,0.833746075630188 -92,0.8327823877334595 -93,0.831815242767334 -94,0.8309152126312256 
-95,0.8301321268081665 -96,0.8294010758399963 -97,0.8286560773849487 -98,0.8278859257698059 -99,0.8271031379699707 -100,0.8263087868690491 -101,0.825486958026886 -102,0.8246251940727234 -103,0.8237083554267883 -104,0.8225077390670776 -105,0.8230896592140198 -106,0.8232205510139465 -107,0.8229151964187622 -108,0.8223220109939575 -109,0.821517825126648 -110,0.8204795718193054 -111,0.8177556991577148 -112,0.8179928064346313 -113,0.8171201944351196 -114,0.8158769607543945 -115,0.8133878707885742 -116,0.8131742477416992 -117,0.8091431260108948 -118,0.8069778084754944 -119,0.8068113327026367 -120,0.8062925934791565 -121,0.8053321242332458 -122,0.805131733417511 -123,0.8047508597373962 -124,0.8039847016334534 -125,0.8028149008750916 -126,0.8011186718940735 -127,0.7994829416275024 -128,0.7991377711296082 -129,0.7991141080856323 -130,0.7994288802146912 -131,0.7987305521965027 -132,0.7979806661605835 -133,0.7966620326042175 -134,0.7956897020339966 -135,0.7954036593437195 -136,0.7950471639633179 -137,0.7948750257492065 -138,0.7941837906837463 -139,0.793526291847229 -140,0.7925565242767334 -141,0.7916857004165649 -142,0.7911099195480347 -143,0.790877103805542 -144,0.7903392314910889 -145,0.7896106839179993 -146,0.7887363433837891 -147,0.7882260084152222 -148,0.7877829074859619 -149,0.7873098254203796 -150,0.7865927815437317 -151,0.7858453392982483 -152,0.7851324677467346 -153,0.7845830917358398 -154,0.7840098738670349 -155,0.7833256125450134 -156,0.7825748324394226 -157,0.7818554639816284 -158,0.7812287211418152 -159,0.7805797457695007 -160,0.7799019813537598 -161,0.779143214225769 -162,0.778394341468811 -163,0.7776601314544678 -164,0.7769424915313721 -165,0.7762043476104736 -166,0.7754265666007996 -167,0.7746424674987793 -168,0.7738561630249023 -169,0.7730950117111206 -170,0.7723222374916077 -171,0.7715266346931458 -172,0.7706940770149231 -173,0.7698401212692261 -174,0.7689772844314575 -175,0.7680975794792175 -176,0.7671889066696167 -177,0.7662276029586792 -178,0.7652096748352051 -179,0.7641189694404602 -180,0.7629440426826477 -181,0.7616450786590576 -182,0.760158121585846 -183,0.7583853602409363 -184,0.756174623966217 -185,0.7533580660820007 -186,0.7498317360877991 -187,0.7457112669944763 -188,0.7412624359130859 -189,0.7368377447128296 -190,0.7326577305793762 -191,0.7287655472755432 -192,0.725142240524292 -193,0.7217444181442261 -194,0.7185381054878235 -195,0.715479850769043 -196,0.7124356627464294 -197,0.7087085843086243 -198,0.7061402797698975 -199,0.703540027141571 -200,0.7007264494895935 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.020/training_config.txt deleted file mode 100644 index 50fc976..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return 
torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.020/training_log.csv deleted file mode 100644 index 4cc2679..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,12.758044242858887 -2,9.488778114318848 -3,6.606973648071289 -4,4.619569778442383 -5,3.859963893890381 -6,3.456387758255005 -7,3.1410515308380127 -8,2.9764933586120605 -9,2.741509199142456 -10,2.485708713531494 -11,2.246647834777832 -12,1.8057876825332642 -13,1.6914048194885254 -14,1.564077377319336 -15,1.5024460554122925 -16,1.3844445943832397 -17,1.3127005100250244 -18,1.275898814201355 -19,1.2065529823303223 -20,1.1483855247497559 -21,1.087403655052185 -22,1.0543193817138672 -23,1.0075653791427612 -24,0.9435848593711853 -25,0.912631630897522 -26,0.8840352296829224 -27,0.8619718551635742 -28,0.8494780659675598 -29,0.8179140686988831 -30,0.8041591048240662 -31,0.7938401699066162 -32,0.7885138392448425 -33,0.7819388508796692 -34,0.7752399444580078 -35,0.7679809927940369 -36,0.762419581413269 -37,0.7559440732002258 -38,0.7514055371284485 -39,0.7468715310096741 -40,0.7424175143241882 -41,0.7380679845809937 -42,0.7338201999664307 -43,0.7296591997146606 -44,0.7255719304084778 -45,0.7215541005134583 -46,0.7175939083099365 -47,0.713671088218689 -48,0.7097588181495667 -49,0.7058230042457581 -50,0.7018164396286011 -51,0.697638988494873 -52,0.692916989326477 -53,0.6880387663841248 -54,0.6825558543205261 -55,0.6794713735580444 -56,0.6763638854026794 -57,0.6732127666473389 -58,0.6701591610908508 -59,0.6676899194717407 -60,0.6649507284164429 -61,0.6621050834655762 -62,0.6591322422027588 -63,0.6555188894271851 -64,0.6534298062324524 -65,0.6507207751274109 -66,0.6478711366653442 -67,0.6448257565498352 -68,0.6413606405258179 -69,0.6390074491500854 -70,0.6362504363059998 -71,0.633590817451477 -72,0.6318575739860535 -73,0.6294819116592407 -74,0.627937376499176 
-75,0.6263719201087952 -76,0.6246053576469421 -77,0.6227391958236694 -78,0.6207658648490906 -79,0.6184967756271362 -80,0.6168748736381531 -81,0.6149178147315979 -82,0.614966869354248 -83,0.6140624284744263 -84,0.6128005385398865 -85,0.6113187074661255 -86,0.6096569895744324 -87,0.6078012585639954 -88,0.6056855916976929 -89,0.6029314398765564 -90,0.6017065644264221 -91,0.6001688838005066 -92,0.5977965593338013 -93,0.5965966582298279 -94,0.5965027809143066 -95,0.5958618521690369 -96,0.5948502421379089 -97,0.5935042500495911 -98,0.5917373895645142 -99,0.5907390713691711 -100,0.5899432897567749 -101,0.5885322093963623 -102,0.5868629813194275 -103,0.5856683850288391 -104,0.5840418338775635 -105,0.5826201438903809 -106,0.5815742015838623 -107,0.5801595449447632 -108,0.5788367986679077 -109,0.5778759717941284 -110,0.576783299446106 -111,0.5755336880683899 -112,0.5743212699890137 -113,0.5732454657554626 -114,0.5720621347427368 -115,0.570724368095398 -116,0.5691633224487305 -117,0.5681285262107849 -118,0.567102313041687 -119,0.5660144686698914 -120,0.5651145577430725 -121,0.5640745759010315 -122,0.5630728602409363 -123,0.5620977282524109 -124,0.5609026551246643 -125,0.5603090524673462 -126,0.5596908926963806 -127,0.5591352581977844 -128,0.5585699677467346 -129,0.5579407811164856 -130,0.5574309229850769 -131,0.5568625926971436 -132,0.5563626885414124 -133,0.5558903813362122 -134,0.5553867816925049 -135,0.5549440979957581 -136,0.5544948577880859 -137,0.554033100605011 -138,0.553606390953064 -139,0.5531778335571289 -140,0.5527390837669373 -141,0.5523162484169006 -142,0.5519077777862549 -143,0.5514960289001465 -144,0.5510846972465515 -145,0.550679087638855 -146,0.5502741932868958 -147,0.5498622059822083 -148,0.5494346618652344 -149,0.5489841103553772 -150,0.5485137104988098 -151,0.5480697154998779 -152,0.5476617813110352 -153,0.5472671985626221 -154,0.5468740463256836 -155,0.546481192111969 -156,0.5460963249206543 -157,0.5457315444946289 -158,0.5453928709030151 -159,0.545080304145813 -160,0.544791042804718 -161,0.5445201992988586 -162,0.5442651510238647 -163,0.5440238118171692 -164,0.5437947511672974 -165,0.543577253818512 -166,0.5433703660964966 -167,0.543173611164093 -168,0.5429862141609192 -169,0.5428066253662109 -170,0.5426342487335205 -171,0.542468249797821 -172,0.5423082113265991 -173,0.5421536564826965 -174,0.5420037508010864 -175,0.5418581962585449 -176,0.5417168140411377 -177,0.5415791273117065 -178,0.5414454936981201 -179,0.541316032409668 -180,0.541191041469574 -181,0.5410702228546143 -182,0.5409530401229858 -183,0.5408393740653992 -184,0.5407292246818542 -185,0.5406225323677063 -186,0.5405190587043762 -187,0.5404185652732849 -188,0.540320873260498 -189,0.5402257442474365 -190,0.5401330590248108 -191,0.5400426387786865 -192,0.5399546027183533 -193,0.5398685932159424 -194,0.5397845506668091 -195,0.5397025942802429 -196,0.5396225452423096 -197,0.5395443439483643 -198,0.5394678115844727 -199,0.5393930077552795 -200,0.5393196940422058 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.040/training_config.txt deleted file mode 100644 index d6564a6..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, 
t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.040/training_log.csv deleted file mode 100644 index d679b24..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,10.914260864257812 -2,5.244596004486084 -3,3.3786869049072266 -4,2.6988303661346436 -5,2.164201498031616 -6,1.7004746198654175 -7,1.3965837955474854 -8,1.3509955406188965 -9,1.2013323307037354 -10,1.166407585144043 -11,1.1405653953552246 -12,1.1061850786209106 -13,1.0800546407699585 -14,1.0473992824554443 -15,1.0230071544647217 -16,1.0018409490585327 -17,0.9611281752586365 -18,0.9027965068817139 -19,0.8600149750709534 -20,0.8132787346839905 -21,0.7911035418510437 -22,0.770679235458374 -23,0.7610421776771545 -24,0.7483933568000793 -25,0.7379231452941895 -26,0.728775143623352 -27,0.7203161120414734 -28,0.7123416066169739 -29,0.7047644257545471 -30,0.6975376009941101 -31,0.6906236410140991 -32,0.6839851140975952 -33,0.6775622963905334 -34,0.6712390184402466 -35,0.6648097634315491 -36,0.6578519344329834 -37,0.6482688188552856 -38,0.6395882964134216 -39,0.6333963871002197 -40,0.6284952759742737 -41,0.6243594884872437 -42,0.6203129887580872 -43,0.61640465259552 -44,0.6126103401184082 -45,0.608876645565033 -46,0.6051594614982605 -47,0.6014026999473572 -48,0.5975567102432251 -49,0.5934041142463684 -50,0.589432418346405 -51,0.5865110158920288 -52,0.5855473875999451 -53,0.5846422910690308 -54,0.5834765434265137 
-55,0.5821650624275208 -56,0.5807623267173767 -57,0.579292356967926 -58,0.5777754783630371 -59,0.5762054324150085 -60,0.5746024250984192 -61,0.5729721188545227 -62,0.5712212324142456 -63,0.5693925023078918 -64,0.5680357813835144 -65,0.5666892528533936 -66,0.5654094815254211 -67,0.5643497705459595 -68,0.5633573532104492 -69,0.5622709393501282 -70,0.5612390041351318 -71,0.5601575970649719 -72,0.5589836835861206 -73,0.557942271232605 -74,0.5567107796669006 -75,0.5552029609680176 -76,0.554167628288269 -77,0.5536254644393921 -78,0.5532194375991821 -79,0.5528250336647034 -80,0.5524103045463562 -81,0.5519726276397705 -82,0.5515171885490417 -83,0.5510422587394714 -84,0.5505472421646118 -85,0.5500382781028748 -86,0.5495564341545105 -87,0.5491603016853333 -88,0.5488303899765015 -89,0.5484747886657715 -90,0.5480709075927734 -91,0.5476765632629395 -92,0.5473436713218689 -93,0.547046959400177 -94,0.5467432737350464 -95,0.5464121699333191 -96,0.5460594892501831 -97,0.5456931591033936 -98,0.5453264117240906 -99,0.5449727177619934 -100,0.5446356534957886 -101,0.5443152189254761 -102,0.5440138578414917 -103,0.5437303185462952 -104,0.5434637069702148 -105,0.5432133674621582 -106,0.5429778099060059 -107,0.542755663394928 -108,0.5425458550453186 -109,0.5423479080200195 -110,0.5421607494354248 -111,0.541982889175415 -112,0.5418136715888977 -113,0.5416521430015564 -114,0.5414972901344299 -115,0.5413483381271362 -116,0.5412043929100037 -117,0.5410647392272949 -118,0.5409289002418518 -119,0.5407967567443848 -120,0.5406691431999207 -121,0.5405468940734863 -122,0.5404302477836609 -123,0.5403189659118652 -124,0.5402125120162964 -125,0.5401105284690857 -126,0.540012538433075 -127,0.5399179458618164 -128,0.5398269295692444 -129,0.5397393703460693 -130,0.5396549105644226 -131,0.5395734906196594 -132,0.5394949913024902 -133,0.5394193530082703 -134,0.5393465161323547 -135,0.5392760038375854 -136,0.5392072796821594 -137,0.5391403436660767 -138,0.5390750169754028 -139,0.5390109419822693 -140,0.5389483571052551 -141,0.5388869643211365 -142,0.5388269424438477 -143,0.5387681722640991 -144,0.5387108325958252 -145,0.5386547446250916 -146,0.5385997891426086 -147,0.5385459065437317 -148,0.5384930968284607 -149,0.5384413599967957 -150,0.5383905172348022 -151,0.5383405685424805 -152,0.5382915735244751 -153,0.5382435321807861 -154,0.5381963849067688 -155,0.5381501913070679 -156,0.5381046533584595 -157,0.5380599498748779 -158,0.5380162596702576 -159,0.5379729866981506 -160,0.5379306077957153 -161,0.5378888249397278 -162,0.537847638130188 -163,0.5378071069717407 -164,0.5377674102783203 -165,0.5377282500267029 -166,0.537689745426178 -167,0.5376518368721008 -168,0.5376144051551819 -169,0.5375773906707764 -170,0.5375410318374634 -171,0.537505030632019 -172,0.5374695062637329 -173,0.5374345183372498 -174,0.5373998880386353 -175,0.5373657941818237 -176,0.5373319983482361 -177,0.5372986793518066 -178,0.5372657775878906 -179,0.537233293056488 -180,0.5372012257575989 -181,0.5371695160865784 -182,0.5371382832527161 -183,0.5371072292327881 -184,0.5370766520500183 -185,0.5370464324951172 -186,0.5370165109634399 -187,0.5369870662689209 -188,0.536957859992981 -189,0.5369291305541992 -190,0.5369006991386414 -191,0.5368725061416626 -192,0.536844789981842 -193,0.5368173718452454 -194,0.5367901921272278 -195,0.536763608455658 -196,0.5367372632026672 -197,0.5367112755775452 -198,0.5366857647895813 -199,0.5366604924201965 -200,0.5366356372833252 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.050/training_config.txt 
b/training/time_weighting_learning_rate_sweep/inverse/lr_0.050/training_config.txt deleted file mode 100644 index f8a9a0f..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.050/training_log.csv deleted file mode 100644 index 28ceeb4..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,10.166067123413086 -2,4.3827996253967285 -3,2.8631653785705566 -4,2.013965129852295 -5,1.3893450498580933 -6,1.2622448205947876 -7,1.06297767162323 -8,0.9528818130493164 -9,0.8766962289810181 -10,0.8442513942718506 -11,0.8192172646522522 -12,0.7903127670288086 -13,0.7462661862373352 -14,0.720564603805542 -15,0.7063114643096924 -16,0.693117618560791 -17,0.682266354560852 -18,0.6727904081344604 -19,0.6635510325431824 -20,0.6543563008308411 -21,0.643268883228302 -22,0.6358655691146851 -23,0.6288796067237854 -24,0.6236181855201721 -25,0.619987428188324 -26,0.616507887840271 -27,0.613010048866272 -28,0.6095123887062073 -29,0.6060062050819397 -30,0.6024858951568604 -31,0.5989385843276978 -32,0.5953536033630371 -33,0.591716468334198 -34,0.5879954099655151 
-35,0.5841774940490723 -36,0.580234706401825 -37,0.5760708451271057 -38,0.5714420676231384 -39,0.5664193630218506 -40,0.5630466341972351 -41,0.5618035793304443 -42,0.5605217218399048 -43,0.5590160489082336 -44,0.5572813153266907 -45,0.5553396940231323 -46,0.5532674193382263 -47,0.5512763857841492 -48,0.5495353937149048 -49,0.5481131672859192 -50,0.5470036864280701 -51,0.546258270740509 -52,0.5458714365959167 -53,0.5456602573394775 -54,0.5456184148788452 -55,0.545609712600708 -56,0.5454017519950867 -57,0.5449248552322388 -58,0.5442174673080444 -59,0.5433782935142517 -60,0.5425552129745483 -61,0.5419207215309143 -62,0.5414698123931885 -63,0.5411083698272705 -64,0.5408107042312622 -65,0.5405710935592651 -66,0.5403643250465393 -67,0.5401808619499207 -68,0.5400170087814331 -69,0.5398671627044678 -70,0.5397282838821411 -71,0.5395984053611755 -72,0.5394790768623352 -73,0.5393690466880798 -74,0.5392639636993408 -75,0.5391578674316406 -76,0.5390481352806091 -77,0.5389344692230225 -78,0.5388157963752747 -79,0.5386937260627747 -80,0.5385693311691284 -81,0.5384443998336792 -82,0.5383217334747314 -83,0.5382022261619568 -84,0.5380861759185791 -85,0.5379737615585327 -86,0.5378664135932922 -87,0.5377649664878845 -88,0.5376696586608887 -89,0.5375808477401733 -90,0.5374986529350281 -91,0.5374225378036499 -92,0.5373522043228149 -93,0.5372860431671143 -94,0.5372236967086792 -95,0.5371647477149963 -96,0.5371086001396179 -97,0.537054181098938 -98,0.5370007753372192 -99,0.5369482040405273 -100,0.536896288394928 -101,0.5368449091911316 -102,0.5367937684059143 -103,0.5367429852485657 -104,0.53669273853302 -105,0.536643385887146 -106,0.5365948677062988 -107,0.5365476012229919 -108,0.5365016460418701 -109,0.5364568829536438 -110,0.5364134311676025 -111,0.5363712906837463 -112,0.5363301634788513 -113,0.5362906455993652 -114,0.5362527966499329 -115,0.536216139793396 -116,0.5361803770065308 -117,0.5361453294754028 -118,0.5361106395721436 -119,0.5360763669013977 -120,0.5360425710678101 -121,0.5360089540481567 -122,0.5359757542610168 -123,0.5359428524971008 -124,0.5359103083610535 -125,0.5358781218528748 -126,0.5358462333679199 -127,0.535814642906189 -128,0.5357834696769714 -129,0.535752534866333 -130,0.535722017288208 -131,0.5356918573379517 -132,0.5356619954109192 -133,0.5356326103210449 -134,0.5356035828590393 -135,0.5355748534202576 -136,0.5355464220046997 -137,0.5355184078216553 -138,0.5354907512664795 -139,0.5354634523391724 -140,0.5354364514350891 -141,0.5354099869728088 -142,0.535383939743042 -143,0.5353583693504333 -144,0.5353333353996277 -145,0.5353086590766907 -146,0.5352844595909119 -147,0.5352606773376465 -148,0.5352373123168945 -149,0.535214364528656 -150,0.5351917743682861 -151,0.5351696014404297 -152,0.5351477265357971 -153,0.5351261496543884 -154,0.5351049304008484 -155,0.535084068775177 -156,0.5350633263587952 -157,0.535042941570282 -158,0.5350225567817688 -159,0.535002589225769 -160,0.5349827408790588 -161,0.534963071346283 -162,0.5349435210227966 -163,0.5349242687225342 -164,0.534905195236206 -165,0.5348863005638123 -166,0.5348677039146423 -167,0.5348491668701172 -168,0.5348308086395264 -169,0.5348127484321594 -170,0.5347946286201477 -171,0.5347766876220703 -172,0.5347588062286377 -173,0.5347411036491394 -174,0.5347235202789307 -175,0.5347059369087219 -176,0.5346885323524475 -177,0.5346711874008179 -178,0.5346539616584778 -179,0.534636914730072 -180,0.5346198678016663 -181,0.5346029996871948 -182,0.5345863103866577 -183,0.5345696806907654 -184,0.5345532894134521 -185,0.5345368385314941 
-186,0.5345206260681152 -187,0.5345045328140259 -188,0.5344886183738708 -189,0.534472644329071 -190,0.5344569683074951 -191,0.5344414114952087 -192,0.5344259738922119 -193,0.5344105958938599 -194,0.5343953967094421 -195,0.5343802571296692 -196,0.5343652367591858 -197,0.5343502163887024 -198,0.5343353152275085 -199,0.5343204140663147 -200,0.5343055129051208 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.080/training_config.txt deleted file mode 100644 index 09cc7d1..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.080/training_log.csv deleted file mode 100644 index d2c7f9f..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,8.005406379699707 -2,3.600041151046753 -3,1.9742019176483154 -4,1.4582751989364624 -5,1.0735445022583008 -6,0.9060229659080505 -7,0.8226025104522705 -8,0.7719898819923401 -9,0.7347301840782166 -10,0.6962923407554626 -11,0.6552360653877258 -12,0.6408777236938477 -13,0.6287147998809814 -14,0.618496835231781 
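
[Annotation, not part of the recorded files] After simplifying the degenerate (1/1) exponent in the definition above (presumably a power parameter fixed at 1 for this member of the weight-function family), the inverse weighting used throughout this sweep is w(t) = 1 / ((1/min_val - 1) * t/t_max + 1), which decays from w(0) = 1 to w(t_max) = min_val = 0.01. A short endpoint check, adapted directly from the recorded definition:

import torch

def inverse(t_span, t_max=None, min_val=0.01):
    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
    t_max = t_max if t_max is not None else t_span[-1]
    return ((1.0 / min_val - 1.0) / t_max * t_span + 1.0) ** -1

t = torch.linspace(0, 10, 1000)
w = inverse(t)
print(w[0].item(), w[-1].item())  # 1.0 and 0.01: early tracking error dominates the loss
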
-15,0.6102163195610046 -16,0.6033396124839783 -17,0.5973661541938782 -18,0.5919492840766907 -19,0.5871340036392212 -20,0.5846588015556335 -21,0.5823773741722107 -22,0.5800563097000122 -23,0.5776730179786682 -24,0.5752077698707581 -25,0.5726892948150635 -26,0.5701332092285156 -27,0.5675666332244873 -28,0.5650053024291992 -29,0.5624473094940186 -30,0.5599318146705627 -31,0.5574765801429749 -32,0.5551079511642456 -33,0.5528184771537781 -34,0.5506194233894348 -35,0.5485414862632751 -36,0.5465967655181885 -37,0.5447912812232971 -38,0.5431067943572998 -39,0.5415099859237671 -40,0.5401231646537781 -41,0.539017379283905 -42,0.5382255911827087 -43,0.5377309322357178 -44,0.5376549959182739 -45,0.5381971597671509 -46,0.538644015789032 -47,0.5387260317802429 -48,0.5384795665740967 -49,0.5379958152770996 -50,0.5373379588127136 -51,0.5367394685745239 -52,0.5365185737609863 -53,0.5364288687705994 -54,0.5363189578056335 -55,0.5362046957015991 -56,0.5361159443855286 -57,0.5360487699508667 -58,0.535995364189148 -59,0.5359446406364441 -60,0.5358901619911194 -61,0.5358306765556335 -62,0.535767138004303 -63,0.5357009768486023 -64,0.5356349945068359 -65,0.5355700254440308 -66,0.5355071425437927 -67,0.5354470610618591 -68,0.5353889465332031 -69,0.5353267192840576 -70,0.5352568626403809 -71,0.5351812839508057 -72,0.5351001620292664 -73,0.535015881061554 -74,0.534927487373352 -75,0.5348384976387024 -76,0.5347534418106079 -77,0.5346778035163879 -78,0.5346160531044006 -79,0.5345764756202698 -80,0.5345596075057983 -81,0.5345537662506104 -82,0.5345385074615479 -83,0.534504234790802 -84,0.534456729888916 -85,0.5344085693359375 -86,0.5343694686889648 -87,0.5343417525291443 -88,0.5343220829963684 -89,0.5343053340911865 -90,0.5342881083488464 -91,0.5342690348625183 -92,0.5342464447021484 -93,0.5342209935188293 -94,0.5341935157775879 -95,0.5341655611991882 -96,0.534138023853302 -97,0.534111499786377 -98,0.5340874195098877 -99,0.534066379070282 -100,0.5340478420257568 -101,0.5340316891670227 -102,0.5340158939361572 -103,0.5339986085891724 -104,0.5339798331260681 -105,0.5339601635932922 -106,0.5339404940605164 -107,0.5339218974113464 -108,0.5339045524597168 -109,0.5338885188102722 -110,0.5338732004165649 -111,0.5338584184646606 -112,0.5338433384895325 -113,0.5338283777236938 -114,0.5338133573532104 -115,0.5337982177734375 -116,0.533782958984375 -117,0.533767819404602 -118,0.5337531566619873 -119,0.5337387323379517 -120,0.5337244272232056 -121,0.5337104797363281 -122,0.5336967706680298 -123,0.5336832404136658 -124,0.533669650554657 -125,0.533656120300293 -126,0.5336424708366394 -127,0.5336287021636963 -128,0.533614993095398 -129,0.5336012840270996 -130,0.533587634563446 -131,0.533574104309082 -132,0.5335604548454285 -133,0.5335466861724854 -134,0.5335327982902527 -135,0.5335189700126648 -136,0.533505380153656 -137,0.533491849899292 -138,0.5334786772727966 -139,0.5334656238555908 -140,0.533452570438385 -141,0.5334396958351135 -142,0.5334267616271973 -143,0.5334139466285706 -144,0.5334011316299438 -145,0.5333884358406067 -146,0.5333758592605591 -147,0.5333633422851562 -148,0.5333507061004639 -149,0.5333380103111267 -150,0.5333251357078552 -151,0.5333121418952942 -152,0.5332990288734436 -153,0.5332860350608826 -154,0.5332732796669006 -155,0.5332604646682739 -156,0.5332478880882263 -157,0.5332355499267578 -158,0.5332232117652893 -159,0.5332112312316895 -160,0.5331992506980896 -161,0.5331875085830688 -162,0.5331759452819824 -163,0.5331644415855408 -164,0.533153235912323 -165,0.5331421494483948 -166,0.5331312417984009 
-167,0.5331206321716309 -168,0.5331100821495056 -169,0.5330997705459595 -170,0.5330896973609924 -171,0.5330795645713806 -172,0.5330696105957031 -173,0.5330597758293152 -174,0.5330497622489929 -175,0.533039927482605 -176,0.5330298542976379 -177,0.5330197811126709 -178,0.5330094695091248 -179,0.5329990386962891 -180,0.5329884886741638 -181,0.5329778790473938 -182,0.5329673290252686 -183,0.5329569578170776 -184,0.5329467058181763 -185,0.5329366326332092 -186,0.5329265594482422 -187,0.5329167246818542 -188,0.5329070687294006 -189,0.5328974723815918 -190,0.5328880548477173 -191,0.532878577709198 -192,0.5328692197799683 -193,0.5328598022460938 -194,0.5328502655029297 -195,0.5328408479690552 -196,0.5328312516212463 -197,0.5328219532966614 -198,0.5328125357627869 -199,0.5328032970428467 -200,0.5327941179275513 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.100/training_config.txt deleted file mode 100644 index b335353..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.100/training_log.csv deleted file mode 100644 index e4c023c..0000000 --- 
a/training/time_weighting_learning_rate_sweep/inverse/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,7.5930376052856445 -2,5.123776435852051 -3,3.1087546348571777 -4,1.5090582370758057 -5,1.0794459581375122 -6,0.9458938241004944 -7,0.8778306841850281 -8,0.8431394100189209 -9,0.8097102046012878 -10,0.7803521156311035 -11,0.7487252950668335 -12,0.7236608862876892 -13,0.7060392498970032 -14,0.6910995244979858 -15,0.6791704297065735 -16,0.6701375246047974 -17,0.6631845831871033 -18,0.6576836705207825 -19,0.6524495482444763 -20,0.6466902494430542 -21,0.6404284238815308 -22,0.6339802742004395 -23,0.6277294158935547 -24,0.6220076084136963 -25,0.6168485879898071 -26,0.6121901869773865 -27,0.6079438328742981 -28,0.604061484336853 -29,0.6005080342292786 -30,0.5972511768341064 -31,0.5942515730857849 -32,0.5914899706840515 -33,0.5889431834220886 -34,0.5865669846534729 -35,0.5843381881713867 -36,0.5822528004646301 -37,0.5802915096282959 -38,0.5784358382225037 -39,0.5766746997833252 -40,0.5749916434288025 -41,0.5733861923217773 -42,0.5718417763710022 -43,0.5703340768814087 -44,0.5688199400901794 -45,0.567277193069458 -46,0.5656947493553162 -47,0.5640793442726135 -48,0.5624439120292664 -49,0.5608099102973938 -50,0.5591983795166016 -51,0.5576245784759521 -52,0.5561021566390991 -53,0.554639458656311 -54,0.553230345249176 -55,0.5518527030944824 -56,0.5505136847496033 -57,0.5492233037948608 -58,0.5479876399040222 -59,0.5468147397041321 -60,0.5457027554512024 -61,0.5446452498435974 -62,0.5436456799507141 -63,0.5427139401435852 -64,0.5418607592582703 -65,0.5410976409912109 -66,0.540431559085846 -67,0.5398427248001099 -68,0.5392922759056091 -69,0.5387824177742004 -70,0.5383158922195435 -71,0.5378831028938293 -72,0.5374553799629211 -73,0.5370433926582336 -74,0.5366591215133667 -75,0.5363046526908875 -76,0.5359874963760376 -77,0.5357241034507751 -78,0.5355169773101807 -79,0.5353670716285706 -80,0.5352509617805481 -81,0.5351521968841553 -82,0.535071611404419 -83,0.5350278615951538 -84,0.5350499749183655 -85,0.5351370573043823 -86,0.5352175831794739 -87,0.5351942181587219 -88,0.5350516438484192 -89,0.5348454117774963 -90,0.5346609950065613 -91,0.5345310568809509 -92,0.5344489812850952 -93,0.5343970060348511 -94,0.5343641042709351 -95,0.5343411564826965 -96,0.5343244075775146 -97,0.5343117117881775 -98,0.5343025922775269 -99,0.5342963337898254 -100,0.5342919230461121 -101,0.5342881679534912 -102,0.5342835783958435 -103,0.5342769622802734 -104,0.5342674255371094 -105,0.5342543721199036 -106,0.5342380404472351 -107,0.5342190861701965 -108,0.5341978073120117 -109,0.5341752767562866 -110,0.5341523289680481 -111,0.5341293811798096 -112,0.5341070890426636 -113,0.5340856313705444 -114,0.5340648293495178 -115,0.5340449213981628 -116,0.534026026725769 -117,0.5340083837509155 -118,0.5339913368225098 -119,0.5339754223823547 -120,0.5339611172676086 -121,0.5339478850364685 -122,0.5339356064796448 -123,0.5339246988296509 -124,0.5339144468307495 -125,0.5339047312736511 -126,0.5338947772979736 -127,0.5338846445083618 -128,0.5338739156723022 -129,0.5338626503944397 -130,0.5338507890701294 -131,0.5338386297225952 -132,0.5338261127471924 -133,0.5338137149810791 -134,0.5338011980056763 -135,0.5337890386581421 -136,0.5337772369384766 -137,0.5337657332420349 -138,0.5337544679641724 -139,0.5337435007095337 -140,0.5337328314781189 -141,0.5337221622467041 -142,0.5337116718292236 -143,0.5337011218070984 -144,0.5336905717849731 -145,0.5336800813674927 -146,0.533669650554657 
-147,0.5336592197418213 -148,0.5336487889289856 -149,0.5336384773254395 -150,0.5336281061172485 -151,0.5336177945137024 -152,0.5336076617240906 -153,0.5335975885391235 -154,0.5335876941680908 -155,0.5335778594017029 -156,0.5335681438446045 -157,0.5335584878921509 -158,0.533548891544342 -159,0.5335394144058228 -160,0.5335301160812378 -161,0.5335208177566528 -162,0.533511757850647 -163,0.5335028171539307 -164,0.5334939360618591 -165,0.5334852337837219 -166,0.5334765911102295 -167,0.5334680676460266 -168,0.5334597229957581 -169,0.5334513783454895 -170,0.5334431529045105 -171,0.5334349870681763 -172,0.5334268808364868 -173,0.5334189534187317 -174,0.5334110260009766 -175,0.5334031581878662 -176,0.5333953499794006 -177,0.5333877205848694 -178,0.5333800315856934 -179,0.5333725214004517 -180,0.5333649516105652 -181,0.5333573818206787 -182,0.5333499908447266 -183,0.5333423614501953 -184,0.5333349108695984 -185,0.5333274006843567 -186,0.5333199501037598 -187,0.5333125591278076 -188,0.5333051681518555 -189,0.5332978367805481 -190,0.5332905054092407 -191,0.5332832932472229 -192,0.5332759618759155 -193,0.5332687497138977 -194,0.5332616567611694 -195,0.5332544445991516 -196,0.5332473516464233 -197,0.5332402586936951 -198,0.533233106136322 -199,0.5332260727882385 -200,0.5332190990447998 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.125/training_config.txt deleted file mode 100644 index 85ccfa2..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] 
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.125/training_log.csv deleted file mode 100644 index 2422423..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,7.046892166137695 -2,1.5866422653198242 -3,0.9400674700737 -4,0.7677825689315796 -5,1.6439863443374634 -6,0.9129355549812317 -7,0.719156801700592 -8,0.6656500101089478 -9,0.6139358878135681 -10,0.5982386469841003 -11,0.5861831903457642 -12,0.5775803327560425 -13,0.5705454349517822 -14,0.5682281255722046 -15,0.5667890906333923 -16,0.5654447674751282 -17,0.5634278655052185 -18,0.5609577298164368 -19,0.5581797957420349 -20,0.5551574230194092 -21,0.5521496534347534 -22,0.5493422150611877 -23,0.5469457507133484 -24,0.5450204610824585 -25,0.5434583425521851 -26,0.5420481562614441 -27,0.5412802696228027 -28,0.5425816178321838 -29,0.5425231456756592 -30,0.5426490306854248 -31,0.5430760979652405 -32,0.5416017174720764 -33,0.5399476885795593 -34,0.5392233729362488 -35,0.5385292768478394 -36,0.5382980108261108 -37,0.5383700132369995 -38,0.5385274887084961 -39,0.5386059284210205 -40,0.5385481119155884 -41,0.5383706092834473 -42,0.5381152629852295 -43,0.5378226041793823 -44,0.537522554397583 -45,0.5372406840324402 -46,0.5369712114334106 -47,0.5367110967636108 -48,0.5364850759506226 -49,0.5363161563873291 -50,0.5362229943275452 -51,0.5362130403518677 -52,0.5362146496772766 -53,0.5361140370368958 -54,0.5358998775482178 -55,0.535679042339325 -56,0.5355184078216553 -57,0.5354208946228027 -58,0.5353586673736572 -59,0.5353144407272339 -60,0.5352743864059448 -61,0.535228967666626 -62,0.5351725816726685 -63,0.5351045727729797 -64,0.5350270867347717 -65,0.5349448919296265 -66,0.5348619222640991 -67,0.5347828269004822 -68,0.5347122550010681 -69,0.5346530675888062 -70,0.5346058011054993 -71,0.5345720648765564 -72,0.5345443487167358 -73,0.5345131158828735 -74,0.5344706773757935 -75,0.5344166159629822 -76,0.5343571305274963 -77,0.5343016982078552 -78,0.5342509746551514 -79,0.5342072248458862 -80,0.5341697931289673 -81,0.5341339707374573 -82,0.5340986847877502 -83,0.5340617895126343 -84,0.5340232849121094 -85,0.5339838266372681 -86,0.5339453220367432 -87,0.5339094400405884 -88,0.5338786244392395 -89,0.5338532328605652 -90,0.5338301658630371 -91,0.5338076949119568 -92,0.5337844491004944 -93,0.5337597727775574 -94,0.5337346196174622 -95,0.5337099432945251 -96,0.5336870551109314 -97,0.5336675047874451 -98,0.5336499214172363 -99,0.5336331725120544 -100,0.53361576795578 -101,0.5335978269577026 -102,0.5335794687271118 -103,0.5335609316825867 -104,0.5335428714752197 -105,0.5335256457328796 -106,0.5335088968276978 -107,0.5334923267364502 -108,0.5334756970405579 -109,0.5334591865539551 -110,0.533442497253418 -111,0.5334256291389465 -112,0.5334093570709229 -113,0.5333936214447021 -114,0.5333783030509949 -115,0.533363401889801 -116,0.5333486199378967 -117,0.5333341956138611 -118,0.5333203077316284 -119,0.5333065986633301 -120,0.5332927703857422 -121,0.5332788825035095 -122,0.5332645773887634 -123,0.5332499742507935 -124,0.5332359075546265 -125,0.5332225561141968 -126,0.5332099795341492 
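
[Annotation, not part of the recorded files] The 22 training cases repeated verbatim in every config of this sweep are round multiples of pi. Rewritten symbolically, they are four pure initial-angle offsets (plus/minus pi/6 and 2pi/3), four pure initial-rate offsets (plus/minus pi/3 and 2pi), six pure step commands (plus/minus 2pi, pi/2, pi/3), and eight mixed cases (note that the two pi/4-angle mixed cases both command +2pi rather than forming a plus/minus pair). A sketch of assembling them into the batch the loss function consumes; variable names here are illustrative:

import math
import torch

pi = math.pi
# [theta0, omega0, alpha0, desired_theta] for the 22 cases listed above.
cases = [
    [ pi/6, 0, 0, 0], [-pi/6, 0, 0, 0],
    [ 2*pi/3, 0, 0, 0], [-2*pi/3, 0, 0, 0],
    [0,  pi/3, 0, 0], [0, -pi/3, 0, 0],
    [0,  2*pi, 0, 0], [0, -2*pi, 0, 0],
    [0, 0, 0,  2*pi], [0, 0, 0, -2*pi],
    [0, 0, 0,  pi/2], [0, 0, 0, -pi/2],
    [0, 0, 0,  pi/3], [0, 0, 0, -pi/3],
    [ pi/4,  pi, 0, 0], [-pi/4, -pi, 0, 0],
    [ pi/2, -pi, 0,  pi/3], [-pi/2,  pi, 0, -pi/3],
    [ pi/4,  pi, 0,  2*pi], [-pi/4, -pi, 0,  2*pi],  # both command +2*pi
    [ pi/2, -pi, 0,  4*pi], [-pi/2,  pi, 0, -4*pi],
]
x0 = torch.tensor(cases)  # [22, 4] batch of initial conditions plus commands
print(x0.shape)
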
-127,0.5331974625587463 -128,0.5331851243972778 -129,0.5331726670265198 -130,0.5331604480743408 -131,0.5331484079360962 -132,0.533136785030365 -133,0.5331252813339233 -134,0.5331138968467712 -135,0.5331026911735535 -136,0.5330914258956909 -137,0.5330803990364075 -138,0.5330694317817688 -139,0.5330585241317749 -140,0.5330475568771362 -141,0.5330366492271423 -142,0.5330258011817932 -143,0.5330150723457336 -144,0.5330044627189636 -145,0.5329940319061279 -146,0.532983660697937 -147,0.5329732298851013 -148,0.5329627990722656 -149,0.5329523086547852 -150,0.5329416990280151 -151,0.5329314470291138 -152,0.5329211354255676 -153,0.5329110622406006 -154,0.5329009890556335 -155,0.5328910946846008 -156,0.5328812599182129 -157,0.5328715443611145 -158,0.5328618288040161 -159,0.532852292060852 -160,0.5328426957130432 -161,0.5328333377838135 -162,0.5328239798545837 -163,0.5328149199485779 -164,0.532805860042572 -165,0.5327968001365662 -166,0.5327877402305603 -167,0.5327785611152649 -168,0.5327696800231934 -169,0.5327607989311218 -170,0.5327519774436951 -171,0.5327432751655579 -172,0.5327343940734863 -173,0.5327256321907043 -174,0.5327168703079224 -175,0.5327081680297852 -176,0.532699465751648 -177,0.5326909422874451 -178,0.5326825976371765 -179,0.5326741337776184 -180,0.5326657295227051 -181,0.5326573848724365 -182,0.5326491594314575 -183,0.5326410531997681 -184,0.5326328277587891 -185,0.5326247215270996 -186,0.5326166749000549 -187,0.532608687877655 -188,0.5326005816459656 -189,0.5325927138328552 -190,0.5325846672058105 -191,0.532576858997345 -192,0.5325689911842346 -193,0.5325611233711243 -194,0.5325531959533691 -195,0.5325453877449036 -196,0.532537579536438 -197,0.5325297117233276 -198,0.5325218439102173 -199,0.5325140357017517 -200,0.5325061678886414 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.160/training_config.txt deleted file mode 100644 index b09e375..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 
0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.160/training_log.csv deleted file mode 100644 index af2f8d3..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,7.381719589233398 -2,1.4295058250427246 -3,1.2057560682296753 -4,1.115397334098816 -5,1.014309287071228 -6,0.9192431569099426 -7,0.8490802645683289 -8,0.778067409992218 -9,0.7373127937316895 -10,0.716012716293335 -11,0.7021580934524536 -12,0.690902829170227 -13,0.677562415599823 -14,0.6616995930671692 -15,0.6456526517868042 -16,0.6308917999267578 -17,0.6178440451622009 -18,0.6066527962684631 -19,0.5972227454185486 -20,0.5892493724822998 -21,0.5825273394584656 -22,0.5767869353294373 -23,0.5718555450439453 -24,0.5675526261329651 -25,0.5637614727020264 -26,0.5604379177093506 -27,0.5575516223907471 -28,0.5551580786705017 -29,0.5532214045524597 -30,0.5516469478607178 -31,0.5503448843955994 -32,0.5492594838142395 -33,0.5483353734016418 -34,0.5475409626960754 -35,0.5468378067016602 -36,0.5462165474891663 -37,0.5456780791282654 -38,0.5452181696891785 -39,0.5447918772697449 -40,0.5443904399871826 -41,0.5440336465835571 -42,0.5437269806861877 -43,0.5434456467628479 -44,0.5431784391403198 -45,0.5429166555404663 -46,0.5426515340805054 -47,0.5423702001571655 -48,0.5420675277709961 -49,0.5417467951774597 -50,0.5414108633995056 -51,0.5410637855529785 -52,0.5407010316848755 -53,0.540325939655304 -54,0.5399388670921326 -55,0.5395416617393494 -56,0.5391259789466858 -57,0.5386993288993835 -58,0.5382744073867798 -59,0.5378575921058655 -60,0.5374548435211182 -61,0.5370759963989258 -62,0.5367168188095093 -63,0.5363848805427551 -64,0.5360750555992126 -65,0.5357959270477295 -66,0.5355322957038879 -67,0.5352860689163208 -68,0.5350475907325745 -69,0.5348258018493652 -70,0.5346237421035767 -71,0.5344525575637817 -72,0.5343066453933716 -73,0.5341877937316895 -74,0.5340886116027832 -75,0.5340109467506409 -76,0.5339484810829163 -77,0.5338956713676453 -78,0.5338528752326965 -79,0.5338225960731506 -80,0.5337920784950256 -81,0.5337625741958618 -82,0.5337302088737488 -83,0.5336945056915283 -84,0.533653736114502 -85,0.5336090922355652 -86,0.5335618257522583 -87,0.5335138440132141 -88,0.5334693789482117 -89,0.5334296226501465 -90,0.5333936214447021 -91,0.533359706401825 -92,0.533329427242279 -93,0.5333025455474854 -94,0.5332785248756409 -95,0.5332556962966919 -96,0.5332351922988892 -97,0.5332154035568237 -98,0.5331950187683105 -99,0.5331741571426392 -100,0.5331534147262573 -101,0.5331326127052307 -102,0.5331118702888489 -103,0.5330905318260193 -104,0.5330690741539001 -105,0.5330480933189392 -106,0.5330277681350708 -107,0.5330081582069397 
-108,0.5329891443252563 -109,0.5329705476760864 -110,0.5329525470733643 -111,0.5329353213310242 -112,0.5329186916351318 -113,0.5329025983810425 -114,0.5328869223594666 -115,0.5328719019889832 -116,0.5328570008277893 -117,0.5328420400619507 -118,0.5328274369239807 -119,0.5328129529953003 -120,0.5327984094619751 -121,0.532784104347229 -122,0.5327697396278381 -123,0.5327553153038025 -124,0.5327409505844116 -125,0.5327267646789551 -126,0.5327126979827881 -127,0.5326986312866211 -128,0.532684862613678 -129,0.5326712131500244 -130,0.5326577425003052 -131,0.5326451063156128 -132,0.53263258934021 -133,0.5326201319694519 -134,0.5326079726219177 -135,0.532595157623291 -136,0.5325822234153748 -137,0.5325701832771301 -138,0.532558262348175 -139,0.5325465798377991 -140,0.5325348973274231 -141,0.5325232744216919 -142,0.5325115919113159 -143,0.5324996709823608 -144,0.5324875116348267 -145,0.532475471496582 -146,0.532463550567627 -147,0.5324514508247375 -148,0.532439112663269 -149,0.5324270129203796 -150,0.5324153900146484 -151,0.5324033498764038 -152,0.5323917269706726 -153,0.5323805212974548 -154,0.5323694348335266 -155,0.5323584079742432 -156,0.5323476791381836 -157,0.5323368906974792 -158,0.5323259234428406 -159,0.5323151350021362 -160,0.5323041677474976 -161,0.5322932004928589 -162,0.5322827696800232 -163,0.5322720408439636 -164,0.5322616696357727 -165,0.5322515964508057 -166,0.5322414636611938 -167,0.5322311520576477 -168,0.5322207808494568 -169,0.5322104096412659 -170,0.5322005152702332 -171,0.5321906208992004 -172,0.5321802496910095 -173,0.532170295715332 -174,0.5321604013442993 -175,0.5321505665779114 -176,0.5321409106254578 -177,0.5321313142776489 -178,0.5321217775344849 -179,0.5321125984191895 -180,0.5321033596992493 -181,0.5320945978164673 -182,0.5320855975151062 -183,0.5320764780044556 -184,0.5320675373077393 -185,0.5320586562156677 -186,0.5320494771003723 -187,0.5320407748222351 -188,0.5320318937301636 -189,0.5320225358009338 -190,0.5320138931274414 -191,0.5320048928260803 -192,0.531995415687561 -193,0.5319868326187134 -194,0.5319792032241821 -195,0.5319717526435852 -196,0.5319642424583435 -197,0.531956672668457 -198,0.531948983669281 -199,0.531941294670105 -200,0.5319343209266663 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.200/training_config.txt deleted file mode 100644 index be80fe2..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None 
else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.200/training_log.csv deleted file mode 100644 index 0a04f28..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,7.420783519744873 -2,1.422228217124939 -3,1.5498942136764526 -4,1.142134189605713 -5,0.958436906337738 -6,0.8418040871620178 -7,0.7454504370689392 -8,0.7004188299179077 -9,0.6681489944458008 -10,0.6434193849563599 -11,0.6264261603355408 -12,0.6143813729286194 -13,0.6051505208015442 -14,0.5977157950401306 -15,0.5915635824203491 -16,0.5863670706748962 -17,0.5819429159164429 -18,0.5781809687614441 -19,0.5749149322509766 -20,0.5721027255058289 -21,0.5697097182273865 -22,0.5676867961883545 -23,0.5659503936767578 -24,0.5643233060836792 -25,0.5628169178962708 -26,0.5613908767700195 -27,0.5600264668464661 -28,0.5587082505226135 -29,0.5574482679367065 -30,0.5562002658843994 -31,0.5549660921096802 -32,0.5537395477294922 -33,0.5524669885635376 -34,0.5512259006500244 -35,0.550021767616272 -36,0.5488715767860413 -37,0.5478010177612305 -38,0.5468302369117737 -39,0.5460166931152344 -40,0.5454111099243164 -41,0.5449510216712952 -42,0.5445765256881714 -43,0.5442412495613098 -44,0.5439135432243347 -45,0.5435380339622498 -46,0.5431046485900879 -47,0.5425626039505005 -48,0.5419072508811951 -49,0.5411766171455383 -50,0.5404496788978577 -51,0.5397981405258179 -52,0.5392448306083679 -53,0.5387588143348694 -54,0.538294792175293 -55,0.5378589630126953 -56,0.53743976354599 -57,0.5370530486106873 -58,0.5366860628128052 -59,0.5363450050354004 -60,0.5360422730445862 -61,0.5357760190963745 -62,0.5355463624000549 -63,0.5353520512580872 -64,0.5351895689964294 -65,0.5350561738014221 -66,0.534942626953125 -67,0.5348339676856995 -68,0.5347305536270142 -69,0.5346440076828003 -70,0.5345667004585266 -71,0.5344927310943604 -72,0.5344268679618835 -73,0.5343717932701111 -74,0.5343140363693237 -75,0.5342605710029602 -76,0.5342110395431519 -77,0.5341616868972778 -78,0.5341129899024963 -79,0.5340663194656372 -80,0.5340215563774109 -81,0.5339791178703308 -82,0.5339384078979492 -83,0.5338994264602661 -84,0.5338613986968994 -85,0.5338234305381775 -86,0.5337809920310974 -87,0.5337391495704651 -88,0.5336980223655701 
-89,0.5336535573005676 -90,0.5336147546768188 -91,0.5335814356803894 -92,0.5335570573806763 -93,0.5335351824760437 -94,0.533514142036438 -95,0.5334958434104919 -96,0.5334798693656921 -97,0.5334624648094177 -98,0.5334421396255493 -99,0.5334205031394958 -100,0.5334002375602722 -101,0.5333815813064575 -102,0.5333657264709473 -103,0.5333520770072937 -104,0.5333386659622192 -105,0.5333257913589478 -106,0.5333141088485718 -107,0.5333004593849182 -108,0.5332852602005005 -109,0.533272385597229 -110,0.533260703086853 -111,0.5332493782043457 -112,0.533237636089325 -113,0.5332260727882385 -114,0.5332155227661133 -115,0.5332059860229492 -116,0.5331969857215881 -117,0.5331881046295166 -118,0.5331782698631287 -119,0.5331684350967407 -120,0.5331591963768005 -121,0.5331509113311768 -122,0.5331423878669739 -123,0.5331335067749023 -124,0.5331248641014099 -125,0.5331169962882996 -126,0.5331104397773743 -127,0.53310227394104 -128,0.5330932140350342 -129,0.5330847501754761 -130,0.5330764651298523 -131,0.533069372177124 -132,0.5330621600151062 -133,0.5330541133880615 -134,0.5330458283424377 -135,0.5330380797386169 -136,0.5330308675765991 -137,0.5330232381820679 -138,0.5330154895782471 -139,0.5330076813697815 -140,0.5330003499984741 -141,0.5329930782318115 -142,0.532985508441925 -143,0.5329779982566833 -144,0.5329704284667969 -145,0.5329633951187134 -146,0.5329561233520508 -147,0.5329490303993225 -148,0.5329418778419495 -149,0.5329347252845764 -150,0.5329277515411377 -151,0.5329206585884094 -152,0.532913863658905 -153,0.5329070687294006 -154,0.5329000353813171 -155,0.5328933596611023 -156,0.5328864455223083 -157,0.5328797698020935 -158,0.5328731536865234 -159,0.5328665971755981 -160,0.5328600406646729 -161,0.5328534245491028 -162,0.5328468084335327 -163,0.5328405499458313 -164,0.5328340530395508 -165,0.5328274369239807 -166,0.5328211188316345 -167,0.5328147411346436 -168,0.5328083634376526 -169,0.5328018665313721 -170,0.5327957272529602 -171,0.5327894687652588 -172,0.5327832698822021 -173,0.5327768325805664 -174,0.532770574092865 -175,0.5327643156051636 -176,0.5327579975128174 -177,0.5327520370483398 -178,0.5327460169792175 -179,0.5327397584915161 -180,0.5327336192131042 -181,0.5327277183532715 -182,0.5327216982841492 -183,0.5327154994010925 -184,0.5327093601226807 -185,0.532703161239624 -186,0.5326970219612122 -187,0.5326911211013794 -188,0.5326851010322571 -189,0.5326788425445557 -190,0.5326728224754333 -191,0.532666802406311 -192,0.5326608419418335 -193,0.532654881477356 -194,0.5326489210128784 -195,0.5326429605484009 -196,0.5326371788978577 -197,0.5326312184333801 -198,0.5326253771781921 -199,0.5326194167137146 -200,0.5326137542724609 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.250/training_config.txt deleted file mode 100644 index 2047261..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match 
theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.250/training_log.csv deleted file mode 100644 index e4c60bc..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,9.630842208862305 -2,1.293585181236267 -3,0.8407876491546631 -4,0.7716218829154968 -5,0.8539767265319824 -6,0.6912668943405151 -7,0.6998931765556335 -8,0.7124837636947632 -9,0.7152310013771057 -10,0.6958581209182739 -11,0.668372631072998 -12,0.6413559317588806 -13,0.6179375648498535 -14,0.6037614941596985 -15,0.6168044209480286 -16,0.613329291343689 -17,0.6001871228218079 -18,0.5844969153404236 -19,0.576032817363739 -20,0.5701853632926941 -21,0.5676423907279968 -22,0.5657768845558167 -23,0.5638224482536316 -24,0.5621351003646851 -25,0.5606006979942322 -26,0.5592136979103088 -27,0.5579319000244141 -28,0.5567043423652649 -29,0.5555098652839661 -30,0.5544753074645996 -31,0.5536235570907593 -32,0.5530064702033997 -33,0.5526127219200134 -34,0.552400529384613 -35,0.5521727204322815 -36,0.5518407821655273 -37,0.5513308048248291 -38,0.5505537986755371 -39,0.5494983196258545 -40,0.548301100730896 -41,0.5470821857452393 -42,0.5459317564964294 -43,0.5449081063270569 -44,0.5439919233322144 -45,0.543174684047699 -46,0.5424376726150513 -47,0.5417799949645996 -48,0.5411744713783264 -49,0.5406152606010437 -50,0.5400134325027466 -51,0.5394185185432434 -52,0.53880375623703 -53,0.5382470488548279 -54,0.5379741787910461 -55,0.5379382967948914 -56,0.5378961563110352 -57,0.537635087966919 -58,0.5372862815856934 -59,0.5369976758956909 -60,0.5367431044578552 -61,0.5365477800369263 -62,0.5364014506340027 -63,0.5362547636032104 -64,0.5361434817314148 -65,0.5360262989997864 -66,0.5358940362930298 -67,0.5357417464256287 -68,0.5355757474899292 
-69,0.535410463809967 -70,0.535239577293396 -71,0.535065770149231 -72,0.5349043011665344 -73,0.5347684621810913 -74,0.5346689224243164 -75,0.5345906615257263 -76,0.5345256328582764 -77,0.5344388484954834 -78,0.5343246459960938 -79,0.5341978073120117 -80,0.5340851545333862 -81,0.5339871644973755 -82,0.533906877040863 -83,0.5338382720947266 -84,0.533776581287384 -85,0.5337153077125549 -86,0.5336490869522095 -87,0.5335778594017029 -88,0.5335061550140381 -89,0.5334336757659912 -90,0.5333621501922607 -91,0.5332955718040466 -92,0.5332369208335876 -93,0.5331846475601196 -94,0.5331330895423889 -95,0.5330818295478821 -96,0.5330276489257812 -97,0.5329736471176147 -98,0.5329204201698303 -99,0.5328723788261414 -100,0.5328265428543091 -101,0.5327838063240051 -102,0.5327404141426086 -103,0.5326964855194092 -104,0.5326524376869202 -105,0.5326080918312073 -106,0.5325623750686646 -107,0.532520055770874 -108,0.5324788689613342 -109,0.5324380993843079 -110,0.5324004292488098 -111,0.5323655605316162 -112,0.5323350429534912 -113,0.5323064923286438 -114,0.5322806239128113 -115,0.5322563052177429 -116,0.5322306156158447 -117,0.532203197479248 -118,0.5321725606918335 -119,0.5321422815322876 -120,0.5321335196495056 -121,0.5321200489997864 -122,0.5320972800254822 -123,0.5320762395858765 -124,0.5320751667022705 -125,0.53206467628479 -126,0.5320443511009216 -127,0.5320369601249695 -128,0.5320319533348083 -129,0.5320196747779846 -130,0.5320040583610535 -131,0.5319973230361938 -132,0.5319913625717163 -133,0.531975507736206 -134,0.5319682359695435 -135,0.531960129737854 -136,0.5319471955299377 -137,0.5319339036941528 -138,0.5319281220436096 -139,0.5319185256958008 -140,0.5319033861160278 -141,0.5318964719772339 -142,0.5318890810012817 -143,0.5318771600723267 -144,0.5318683981895447 -145,0.5318616032600403 -146,0.5318505167961121 -147,0.531844437122345 -148,0.5318359136581421 -149,0.5318272709846497 -150,0.5318223834037781 -151,0.5318152904510498 -152,0.5318036675453186 -153,0.5317960977554321 -154,0.5317901372909546 -155,0.531782865524292 -156,0.5317750573158264 -157,0.5317676663398743 -158,0.5317580103874207 -159,0.5317494869232178 -160,0.5317431688308716 -161,0.5317358374595642 -162,0.5317281484603882 -163,0.5317193865776062 -164,0.5317127108573914 -165,0.5317081212997437 -166,0.5317018628120422 -167,0.531693696975708 -168,0.5316867828369141 -169,0.5316813588142395 -170,0.5316761136054993 -171,0.5316689610481262 -172,0.5316629409790039 -173,0.531657338142395 -174,0.531650722026825 -175,0.5316439270973206 -176,0.5316373109817505 -177,0.5316315293312073 -178,0.5316247940063477 -179,0.531618595123291 -180,0.5316129326820374 -181,0.531606912612915 -182,0.5316011905670166 -183,0.531595766544342 -184,0.5315898060798645 -185,0.5315836071968079 -186,0.5315790176391602 -187,0.531573474407196 -188,0.5315673351287842 -189,0.5315620303153992 -190,0.5315568447113037 -191,0.5315508842468262 -192,0.53154456615448 -193,0.531538724899292 -194,0.5315329432487488 -195,0.5315266847610474 -196,0.5315219759941101 -197,0.5315151214599609 -198,0.5315104126930237 -199,0.5315039157867432 -200,0.531498372554779 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.300/training_config.txt deleted file mode 100644 index db0171e..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.300/training_log.csv deleted file mode 100644 index b69669e..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.300/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,14.190373420715332 -2,1.5477083921432495 -3,1.6018882989883423 -4,1.5128339529037476 -5,1.3737558126449585 -6,1.2457767724990845 -7,1.1492875814437866 -8,1.0688718557357788 -9,1.0108827352523804 -10,0.972564160823822 -11,0.954389214515686 -12,0.9528423547744751 -13,0.9539101719856262 -14,0.9465901851654053 -15,0.9299183487892151 -16,0.9104065299034119 -17,0.8930310606956482 -18,0.8795483112335205 -19,0.869507372379303 -20,0.8617298603057861 -21,0.8551152944564819 -22,0.8490244746208191 -23,0.8430647253990173 -24,0.8369614481925964 -25,0.8306054472923279 -26,0.8239301443099976 -27,0.8170323967933655 -28,0.8100444078445435 -29,0.803104817867279 -30,0.7962931394577026 -31,0.7896708250045776 -32,0.7832671403884888 -33,0.7770463228225708 -34,0.7709817886352539 -35,0.7650260925292969 -36,0.7590638995170593 -37,0.7530615329742432 -38,0.7520860433578491 -39,0.7513324618339539 -40,0.7498714327812195 -41,0.7474290728569031 -42,0.7443657517433167 -43,0.7408053278923035 -44,0.7370375394821167 -45,0.7332032322883606 
-46,0.7292467951774597 -47,0.7252479791641235 -48,0.7213395237922668 -49,0.7192968130111694 -50,0.7216966152191162 -51,0.7262151837348938 -52,0.7340003848075867 -53,0.7416191101074219 -54,0.7447016835212708 -55,0.7408653497695923 -56,0.7342751026153564 -57,0.7295021414756775 -58,0.7254579067230225 -59,0.7214283347129822 -60,0.7175831198692322 -61,0.7139841318130493 -62,0.7105733752250671 -63,0.7072120904922485 -64,0.7037486433982849 -65,0.700066864490509 -66,0.6960952281951904 -67,0.6917694211006165 -68,0.6870980262756348 -69,0.6820971965789795 -70,0.6767934560775757 -71,0.6712372899055481 -72,0.6654689311981201 -73,0.6595342755317688 -74,0.6534972190856934 -75,0.647431492805481 -76,0.6414169669151306 -77,0.6354735493659973 -78,0.6296139359474182 -79,0.623859703540802 -80,0.6182323098182678 -81,0.6127521991729736 -82,0.607428789138794 -83,0.6022523045539856 -84,0.5972080230712891 -85,0.5922945737838745 -86,0.5877925753593445 -87,0.5832438468933105 -88,0.5786503553390503 -89,0.5740542411804199 -90,0.5694942474365234 -91,0.565134584903717 -92,0.5610465407371521 -93,0.5572947263717651 -94,0.553909957408905 -95,0.5507605671882629 -96,0.5478434562683105 -97,0.5451583862304688 -98,0.5427184104919434 -99,0.5405362248420715 -100,0.5386410355567932 -101,0.5370972752571106 -102,0.5360867977142334 -103,0.5358947515487671 -104,0.5364925265312195 -105,0.5376942753791809 -106,0.5389371514320374 -107,0.5392091870307922 -108,0.5383981466293335 -109,0.5373734831809998 -110,0.536592960357666 -111,0.5359536409378052 -112,0.535391092300415 -113,0.5349954962730408 -114,0.5347760319709778 -115,0.5347340106964111 -116,0.5348228216171265 -117,0.5349106788635254 -118,0.5349779725074768 -119,0.5350115895271301 -120,0.5349972248077393 -121,0.5349427461624146 -122,0.5348589420318604 -123,0.5347546339035034 -124,0.5346325039863586 -125,0.5345024466514587 -126,0.5343719124794006 -127,0.5342553853988647 -128,0.5341427326202393 -129,0.5340335369110107 -130,0.5339352488517761 -131,0.5338563323020935 -132,0.5338083505630493 -133,0.5337881445884705 -134,0.5337770581245422 -135,0.5337761640548706 -136,0.5337677597999573 -137,0.5337541103363037 -138,0.5337283611297607 -139,0.5336930751800537 -140,0.533649742603302 -141,0.5335996150970459 -142,0.5335509777069092 -143,0.5335034728050232 -144,0.5334674119949341 -145,0.5334338545799255 -146,0.5334012508392334 -147,0.5333709120750427 -148,0.5333489179611206 -149,0.533329963684082 -150,0.5333152413368225 -151,0.5332990288734436 -152,0.5332818627357483 -153,0.5332639217376709 -154,0.5332446098327637 -155,0.5332239270210266 -156,0.5332025289535522 -157,0.5331832766532898 -158,0.5331599712371826 -159,0.5331341624259949 -160,0.533112645149231 -161,0.53309565782547 -162,0.5330777764320374 -163,0.5330629944801331 -164,0.5330482125282288 -165,0.5330333709716797 -166,0.5330178141593933 -167,0.5330016016960144 -168,0.5329853296279907 -169,0.5329679250717163 -170,0.532952606678009 -171,0.5329364538192749 -172,0.5329192280769348 -173,0.5329015254974365 -174,0.5328841209411621 -175,0.5328711867332458 -176,0.5328562259674072 -177,0.5328413844108582 -178,0.5328256487846375 -179,0.5328102111816406 -180,0.5327978134155273 -181,0.5327860116958618 -182,0.5327737331390381 -183,0.5327611565589905 -184,0.5327497124671936 -185,0.5327402949333191 -186,0.5327275991439819 -187,0.5327149629592896 -188,0.5327017307281494 -189,0.5326900482177734 -190,0.5326790809631348 -191,0.5326690077781677 -192,0.5326597094535828 -193,0.5326511263847351 -194,0.5326419472694397 -195,0.5326317548751831 
-196,0.5326211452484131 -197,0.5326103568077087 -198,0.5326015949249268 -199,0.5325918793678284 -200,0.5325819849967957 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.400/training_config.txt deleted file mode 100644 index cef7537..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.400/training_log.csv deleted file mode 100644 index 5ae09ed..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.400/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,123.9660873413086 -2,1.7883206605911255 -3,1.941957950592041 -4,2.667868137359619 -5,1.3499913215637207 -6,1.3162555694580078 -7,1.2820606231689453 -8,1.248679518699646 -9,1.21902334690094 -10,1.1914639472961426 -11,1.1656776666641235 -12,1.1418938636779785 -13,1.119570255279541 -14,1.0167979001998901 -15,0.9376616477966309 -16,0.87749183177948 -17,0.8373358249664307 -18,0.8196196556091309 -19,0.824779748916626 -20,0.8434734344482422 -21,0.8652281761169434 -22,0.8806150555610657 -23,0.8704774379730225 -24,0.8330709934234619 
-25,0.9270583987236023 -26,0.7829014658927917 -27,0.6919918060302734 -28,0.7304421663284302 -29,0.7341864705085754 -30,0.809571385383606 -31,0.8655474185943604 -32,0.9191471934318542 -33,0.9936061501502991 -34,1.1267056465148926 -35,1.2250665426254272 -36,1.3500316143035889 -37,1.594939112663269 -38,1.7090855836868286 -39,1.777233362197876 -40,1.8051341772079468 -41,1.8011603355407715 -42,1.7193506956100464 -43,1.7261978387832642 -44,1.7077186107635498 -45,1.6651253700256348 -46,1.595107913017273 -47,1.533681869506836 -48,1.473475456237793 -49,1.4386221170425415 -50,1.4579380750656128 -51,1.419021487236023 -52,1.4558217525482178 -53,1.4712274074554443 -54,1.4445757865905762 -55,1.376896858215332 -56,1.3325958251953125 -57,1.2967675924301147 -58,1.2629960775375366 -59,1.2311627864837646 -60,1.206283688545227 -61,1.1826070547103882 -62,1.1699433326721191 -63,1.170303225517273 -64,1.1595500707626343 -65,1.1382614374160767 -66,1.1243680715560913 -67,1.1136091947555542 -68,1.1025546789169312 -69,1.0901648998260498 -70,1.076958417892456 -71,1.0639320611953735 -72,1.0519386529922485 -73,1.042627215385437 -74,1.0349621772766113 -75,1.027609944343567 -76,1.0197614431381226 -77,1.0113763809204102 -78,1.0029910802841187 -79,0.9949613213539124 -80,0.9872164130210876 -81,0.9797716736793518 -82,0.972869336605072 -83,0.9666418433189392 -84,0.9610180258750916 -85,0.9559313058853149 -86,0.951134204864502 -87,0.9463974833488464 -88,0.9417183995246887 -89,0.9370009899139404 -90,0.9321072697639465 -91,0.9269697070121765 -92,0.9216359257698059 -93,0.9161748886108398 -94,0.9106491208076477 -95,0.9052108526229858 -96,0.8998879790306091 -97,0.8946253657341003 -98,0.889411211013794 -99,0.8842877149581909 -100,0.8792240619659424 -101,0.8743700385093689 -102,0.8697242736816406 -103,0.8651250004768372 -104,0.8605311512947083 -105,0.8560567498207092 -106,0.8517547845840454 -107,0.8477166891098022 -108,0.8439161777496338 -109,0.8404728174209595 -110,0.8375291228294373 -111,0.8351860046386719 -112,0.8322899341583252 -113,0.828387439250946 -114,0.8246425986289978 -115,0.8214296698570251 -116,0.8185172080993652 -117,0.8157424330711365 -118,0.8129852414131165 -119,0.8101993203163147 -120,0.8073225617408752 -121,0.8043550252914429 -122,0.8013208508491516 -123,0.798215925693512 -124,0.7951106429100037 -125,0.7919869422912598 -126,0.7888756394386292 -127,0.7857377529144287 -128,0.7825775146484375 -129,0.7794362306594849 -130,0.7763460874557495 -131,0.7732894420623779 -132,0.7702781558036804 -133,0.7673158049583435 -134,0.7643649578094482 -135,0.7614122629165649 -136,0.7583968639373779 -137,0.7552924156188965 -138,0.7521733045578003 -139,0.7491359114646912 -140,0.7461866736412048 -141,0.7433079481124878 -142,0.7404709458351135 -143,0.7376577854156494 -144,0.7348494529724121 -145,0.7319665551185608 -146,0.728966236114502 -147,0.725823163986206 -148,0.7225452065467834 -149,0.719081699848175 -150,0.7155917882919312 -151,0.7121793627738953 -152,0.7086041569709778 -153,0.7047650814056396 -154,0.7005911469459534 -155,0.6960095763206482 -156,0.6909719705581665 -157,0.6854075789451599 -158,0.6792826056480408 -159,0.6725711822509766 -160,0.6654119491577148 -161,0.6578754186630249 -162,0.6499868631362915 -163,0.6418189406394958 -164,0.6211366057395935 -165,0.6117371320724487 -166,0.6069350838661194 -167,0.6032911539077759 -168,0.6003710031509399 -169,0.5979785323143005 -170,0.5976272821426392 -171,0.5962992906570435 -172,0.5947534441947937 -173,0.5932744145393372 -174,0.5829609632492065 -175,0.5820610523223877 -176,0.5810994505882263 
-177,0.5801158547401428 -178,0.5791382193565369 -179,0.5782069563865662 -180,0.5775375366210938 -181,0.576643705368042 -182,0.5758063197135925 -183,0.5750418305397034 -184,0.5743659734725952 -185,0.5738294720649719 -186,0.5733508467674255 -187,0.5726826786994934 -188,0.5742297768592834 -189,0.572918176651001 -190,0.5717883110046387 -191,0.5727546811103821 -192,0.5733367204666138 -193,0.5735069513320923 -194,0.5733843445777893 -195,0.5729946494102478 -196,0.5722845196723938 -197,0.5712608098983765 -198,0.5715585350990295 -199,0.571658730506897 -200,0.5715134739875793 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.500/training_config.txt deleted file mode 100644 index d779442..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.500/training_log.csv deleted file mode 100644 index f14adfd..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.500/training_log.csv +++ /dev/null @@ -1,17 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,686.8750610351562 -2,2339.684814453125 -3,2294.93115234375 -4,1737.578369140625 
-5,795.0547485351562 -6,72.47624206542969 -7,4.031853199005127 -8,1.4220737218856812 -9,1.207348346710205 -10,1.1726288795471191 -11,1.1506325006484985 -12,nan -13,nan -14,nan -15,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.600/training_config.txt deleted file mode 100644 index fb03540..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.600/training_log.csv deleted file mode 100644 index 9c9a647..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.600/training_log.csv +++ /dev/null @@ -1,11 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,1352.3265380859375 -2,2383.4765625 -3,2389.241455078125 -4,2389.562255859375 -5,2389.562255859375 -6,2389.562255859375 -7,2389.562255859375 -8,2389.562255859375 -9,2389.562255859375 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.700/training_config.txt deleted file mode 100644 index a698236..0000000 --- 
a/training/time_weighting_learning_rate_sweep/inverse/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.700/training_log.csv deleted file mode 100644 index af80184..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,1817.6221923828125 -2,2389.271728515625 -3,2389.562255859375 -4,2389.562255859375 -5,2389.562255859375 -6,2389.562255859375 -7,2389.562255859375 -8,2389.562255859375 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.800/training_config.txt deleted file mode 100644 index ff7ac45..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights 
are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.800/training_log.csv deleted file mode 100644 index 683492d..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,2046.8402099609375 -2,2389.562255859375 -3,2389.562255859375 -4,2389.562255859375 -5,2389.562255859375 -6,2389.562255859375 -7,2389.562255859375 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_0.900/training_config.txt deleted file mode 100644 index 4c7e63e..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, 
torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_0.900/training_log.csv deleted file mode 100644 index 2ad7b3c..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,2200.873291015625 -2,2389.562255859375 -3,2389.562255859375 -4,2389.562255859375 -5,2389.562255859375 -6,2389.562255859375 -7,2389.562255859375 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_1.000/training_config.txt deleted file mode 100644 index 4faa36b..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 
6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_1.000/training_log.csv deleted file mode 100644 index 3b5c3ea..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,2286.216064453125 -2,2389.562255859375 -3,2389.562255859375 -4,2389.562255859375 -5,2389.562255859375 -6,2389.562255859375 -7,2389.562255859375 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_16.000/training_config.txt deleted file mode 100644 index e34bdba..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] 
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_16.000/training_log.csv deleted file mode 100644 index 3126582..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,2399.245361328125 -2,2399.245361328125 -3,2399.245361328125 -4,2399.245361328125 -5,2399.245361328125 -6,2399.245361328125 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_2.000/training_config.txt deleted file mode 100644 index 612bee3..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_2.000/training_log.csv deleted file mode 100644 index 0a5301c..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ 
-Epoch,Loss -0,15.663372039794922 -1,2398.761474609375 -2,5.0204572677612305 -3,nan -4,nan -5,nan -6,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_4.000/training_config.txt deleted file mode 100644 index 35b46f5..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_4.000/training_log.csv deleted file mode 100644 index 3126582..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,2399.245361328125 -2,2399.245361328125 -3,2399.245361328125 -4,2399.245361328125 -5,2399.245361328125 -6,2399.245361328125 diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse/lr_8.000/training_config.txt deleted file mode 100644 index 1cf7e61..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * t_span + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse/lr_8.000/training_log.csv deleted file mode 100644 index 3126582..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,15.663372039794922 -1,2399.245361328125 -2,2399.245361328125 -3,2399.245361328125 -4,2399.245361328125 -5,2399.245361328125 -6,2399.245361328125 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.010/training_config.txt deleted file mode 100644 index 1f6c863..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta 
dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.010/training_log.csv deleted file mode 100644 index 3e75d80..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,24.59404754638672 -2,21.721113204956055 -3,19.98810386657715 -4,18.69507598876953 -5,15.963247299194336 -6,14.82723617553711 -7,13.150909423828125 -8,11.933369636535645 -9,10.499534606933594 -10,9.812870025634766 -11,8.509976387023926 -12,7.735812664031982 -13,7.361204624176025 -14,6.75435209274292 -15,6.247467517852783 -16,5.852933883666992 -17,5.478476524353027 -18,5.142022609710693 -19,4.832690238952637 -20,4.481014728546143 -21,4.129587650299072 -22,3.9299423694610596 -23,3.813892364501953 -24,3.662464141845703 -25,3.5557825565338135 -26,3.4408628940582275 -27,3.362147808074951 -28,3.3055224418640137 -29,3.268260955810547 -30,3.197384834289551 -31,3.127349853515625 -32,3.0491409301757812 -33,2.977081060409546 -34,2.9369356632232666 -35,2.864226818084717 -36,2.8227341175079346 -37,2.783499002456665 -38,2.7524404525756836 -39,2.714684009552002 -40,2.6784989833831787 -41,2.648019790649414 -42,2.6198477745056152 -43,2.5937259197235107 -44,2.5469117164611816 -45,2.502246379852295 -46,2.4716575145721436 -47,2.444472551345825 -48,2.4183170795440674 -49,2.3926000595092773 -50,2.3776838779449463 -51,2.3487708568573 -52,2.3260209560394287 -53,2.309412717819214 -54,2.291546106338501 -55,2.2749571800231934 -56,2.259263753890991 -57,2.2417876720428467 -58,2.2277348041534424 -59,2.213700532913208 -60,2.2025463581085205 -61,2.1902554035186768 -62,2.1774773597717285 -63,2.1638689041137695 -64,2.148393392562866 -65,2.134671449661255 -66,2.1229188442230225 -67,2.1176931858062744 -68,2.0990028381347656 -69,2.084625482559204 
-70,2.073085308074951 -71,2.0611345767974854 -72,2.048490524291992 -73,2.035383462905884 -74,2.0197248458862305 -75,2.011431932449341 -76,2.002713680267334 -77,1.9917230606079102 -78,1.9903788566589355 -79,1.985385775566101 -80,1.9790340662002563 -81,1.9726417064666748 -82,1.96604585647583 -83,1.9591947793960571 -84,1.9520751237869263 -85,1.9446606636047363 -86,1.9368215799331665 -87,1.9283336400985718 -88,1.9207344055175781 -89,1.9152127504348755 -90,1.9087142944335938 -91,1.9018663167953491 -92,1.8941351175308228 -93,1.8857277631759644 -94,1.8817739486694336 -95,1.8773235082626343 -96,1.8719353675842285 -97,1.867002248764038 -98,1.8629993200302124 -99,1.8594732284545898 -100,1.8557318449020386 -101,1.85203218460083 -102,1.8477486371994019 -103,1.8455857038497925 -104,1.8428059816360474 -105,1.8391566276550293 -106,1.8345553874969482 -107,1.830152153968811 -108,1.8267533779144287 -109,1.8244744539260864 -110,1.821568489074707 -111,1.8192024230957031 -112,1.8159825801849365 -113,1.812424659729004 -114,1.8103652000427246 -115,1.8065364360809326 -116,1.803069829940796 -117,1.7999987602233887 -118,1.7963215112686157 -119,1.7938001155853271 -120,1.790767788887024 -121,1.7874165773391724 -122,1.7838839292526245 -123,1.7802716493606567 -124,1.7765474319458008 -125,1.7728999853134155 -126,1.769561767578125 -127,1.7661795616149902 -128,1.7618604898452759 -129,1.7591809034347534 -130,1.7566187381744385 -131,1.7538609504699707 -132,1.7509737014770508 -133,1.7479743957519531 -134,1.7448538541793823 -135,1.7416280508041382 -136,1.738302230834961 -137,1.734796166419983 -138,1.7306914329528809 -139,1.7273287773132324 -140,1.7262436151504517 -141,1.7250335216522217 -142,1.722247838973999 -143,1.717787504196167 -144,1.718681812286377 -145,1.7168490886688232 -146,1.716112732887268 -147,1.7143076658248901 -148,1.7123700380325317 -149,1.7113691568374634 -150,1.7100335359573364 -151,1.7081210613250732 -152,1.7054293155670166 -153,1.7022711038589478 -154,1.69928777217865 -155,1.6971757411956787 -156,1.6947720050811768 -157,1.6931674480438232 -158,1.6911051273345947 -159,1.688768982887268 -160,1.6862531900405884 -161,1.6832611560821533 -162,1.6808719635009766 -163,1.6784272193908691 -164,1.6756534576416016 -165,1.6729463338851929 -166,1.6702297925949097 -167,1.6680173873901367 -168,1.6651971340179443 -169,1.6624140739440918 -170,1.6593432426452637 -171,1.656605839729309 -172,1.6553702354431152 -173,1.6542080640792847 -174,1.6529905796051025 -175,1.651242733001709 -176,1.6489697694778442 -177,1.646538496017456 -178,1.645870327949524 -179,1.6448032855987549 -180,1.6435611248016357 -181,1.6420884132385254 -182,1.6404298543930054 -183,1.6377824544906616 -184,1.6366876363754272 -185,1.6359829902648926 -186,1.6343398094177246 -187,1.632550835609436 -188,1.6304033994674683 -189,1.629538893699646 -190,1.6282341480255127 -191,1.6271518468856812 -192,1.6268646717071533 -193,1.6260223388671875 -194,1.6247421503067017 -195,1.6231448650360107 -196,1.621718168258667 -197,1.621120572090149 -198,1.6200248003005981 -199,1.619065523147583 -200,1.6180816888809204 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.020/training_config.txt deleted file mode 100644 index 0d085a1..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time 
Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.020/training_log.csv deleted file mode 100644 index bc7f699..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,23.219585418701172 -2,18.52353286743164 -3,14.260229110717773 -4,10.952372550964355 -5,8.726709365844727 -6,7.9034857749938965 -7,6.925170421600342 -8,6.08951997756958 -9,5.294270038604736 -10,4.584722518920898 -11,3.914167642593384 -12,3.4916579723358154 -13,3.2804253101348877 -14,3.0717787742614746 -15,2.797048330307007 -16,2.6310930252075195 -17,2.508080244064331 -18,2.413106918334961 -19,2.319782257080078 -20,2.2306747436523438 -21,2.1562447547912598 -22,2.0871760845184326 -23,2.027160882949829 -24,1.9832935333251953 -25,1.9389705657958984 -26,1.906236171722412 -27,1.876302719116211 -28,1.8483210802078247 -29,1.8226715326309204 -30,1.7989577054977417 -31,1.778012990951538 -32,1.76016104221344 -33,1.742494821548462 -34,1.7264816761016846 -35,1.7112189531326294 -36,1.6956876516342163 -37,1.6802345514297485 -38,1.6647210121154785 -39,1.6498264074325562 -40,1.6366578340530396 -41,1.6247276067733765 -42,1.6134909391403198 -43,1.6033207178115845 -44,1.594236135482788 -45,1.5858327150344849 -46,1.5776066780090332 -47,1.5675784349441528 -48,1.5618810653686523 -49,1.558152198791504 
-50,1.5542651414871216 -51,1.5501888990402222 -52,1.5461926460266113 -53,1.5422910451889038 -54,1.5389753580093384 -55,1.5365060567855835 -56,1.534253716468811 -57,1.5317679643630981 -58,1.5289714336395264 -59,1.526029109954834 -60,1.5229898691177368 -61,1.5198603868484497 -62,1.5167115926742554 -63,1.5137066841125488 -64,1.5119106769561768 -65,1.5103728771209717 -66,1.5088071823120117 -67,1.5072331428527832 -68,1.505626916885376 -69,1.5039957761764526 -70,1.5023815631866455 -71,1.500840187072754 -72,1.4993669986724854 -73,1.4979838132858276 -74,1.4966654777526855 -75,1.4953728914260864 -76,1.4942426681518555 -77,1.4934799671173096 -78,1.4925158023834229 -79,1.491339087486267 -80,1.4902623891830444 -81,1.4893056154251099 -82,1.4883625507354736 -83,1.487492322921753 -84,1.4866960048675537 -85,1.4859718084335327 -86,1.4853041172027588 -87,1.4846774339675903 -88,1.4840787649154663 -89,1.4834989309310913 -90,1.4829319715499878 -91,1.482378602027893 -92,1.4818412065505981 -93,1.4813206195831299 -94,1.4808156490325928 -95,1.480324387550354 -96,1.4798439741134644 -97,1.4793760776519775 -98,1.4789212942123413 -99,1.4784824848175049 -100,1.4780595302581787 -101,1.4776523113250732 -102,1.477259874343872 -103,1.4768800735473633 -104,1.4765102863311768 -105,1.476149082183838 -106,1.4757968187332153 -107,1.475454330444336 -108,1.4751225709915161 -109,1.4748015403747559 -110,1.4744917154312134 -111,1.4741932153701782 -112,1.4739065170288086 -113,1.4736295938491821 -114,1.4733604192733765 -115,1.4730987548828125 -116,1.472843885421753 -117,1.472594976425171 -118,1.4723514318466187 -119,1.4721134901046753 -120,1.4718812704086304 -121,1.4716547727584839 -122,1.4714339971542358 -123,1.4712185859680176 -124,1.4710097312927246 -125,1.4708045721054077 -126,1.4706029891967773 -127,1.4704049825668335 -128,1.470211148262024 -129,1.4700205326080322 -130,1.4698330163955688 -131,1.4696482419967651 -132,1.4694665670394897 -133,1.469287395477295 -134,1.4691131114959717 -135,1.4689419269561768 -136,1.4687737226486206 -137,1.4686084985733032 -138,1.4684455394744873 -139,1.4682846069335938 -140,1.468125581741333 -141,1.4679687023162842 -142,1.4678137302398682 -143,1.4676605463027954 -144,1.467509150505066 -145,1.4673597812652588 -146,1.4672119617462158 -147,1.4670665264129639 -148,1.4669229984283447 -149,1.4667812585830688 -150,1.4666413068771362 -151,1.466503620147705 -152,1.4663678407669067 -153,1.4662338495254517 -154,1.4661015272140503 -155,1.4659711122512817 -156,1.465842366218567 -157,1.465714454650879 -158,1.4655877351760864 -159,1.465462565422058 -160,1.4653394222259521 -161,1.465217113494873 -162,1.465096116065979 -163,1.4649767875671387 -164,1.4648584127426147 -165,1.4647413492202759 -166,1.4646254777908325 -167,1.4645110368728638 -168,1.464397668838501 -169,1.4642853736877441 -170,1.4641743898391724 -171,1.4640648365020752 -172,1.4639564752578735 -173,1.463849663734436 -174,1.463743805885315 -175,1.4636387825012207 -176,1.463534951210022 -177,1.4634326696395874 -178,1.463330626487732 -179,1.463229775428772 -180,1.4631301164627075 -181,1.463031530380249 -182,1.4629335403442383 -183,1.4628361463546753 -184,1.4627394676208496 -185,1.4626433849334717 -186,1.462548017501831 -187,1.4624536037445068 -188,1.4623597860336304 -189,1.462266445159912 -190,1.4621734619140625 -191,1.4620813131332397 -192,1.4619898796081543 -193,1.4618985652923584 -194,1.4618078470230103 -195,1.4617178440093994 -196,1.4616286754608154 -197,1.4615401029586792 -198,1.4614518880844116 -199,1.4613642692565918 -200,1.4612778425216675 diff --git 
a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.040/training_config.txt deleted file mode 100644 index 95bc9a0..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.040/training_log.csv deleted file mode 100644 index 22762bc..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,20.65635871887207 -2,12.073951721191406 -3,8.055334091186523 -4,6.302430629730225 -5,4.790444850921631 -6,3.6872165203094482 -7,3.1565771102905273 -8,2.766801118850708 -9,2.4543612003326416 -10,2.265716791152954 -11,2.1468851566314697 -12,2.0592284202575684 -13,1.9827542304992676 -14,1.9161738157272339 -15,1.8559377193450928 -16,1.8107503652572632 -17,1.7709119319915771 -18,1.735372543334961 -19,1.703719139099121 -20,1.6753342151641846 -21,1.6526416540145874 -22,1.632872462272644 -23,1.6144652366638184 -24,1.5969154834747314 -25,1.5804009437561035 -26,1.5650688409805298 -27,1.5516279935836792 -28,1.5398591756820679 
-29,1.529477596282959 -30,1.520896315574646 -31,1.513649821281433 -32,1.5074646472930908 -33,1.5019654035568237 -34,1.4975835084915161 -35,1.4946426153182983 -36,1.4926238059997559 -37,1.4908021688461304 -38,1.4900686740875244 -39,1.490281581878662 -40,1.489404559135437 -41,1.4880008697509766 -42,1.4868794679641724 -43,1.4865838289260864 -44,1.4867167472839355 -45,1.4867297410964966 -46,1.4864076375961304 -47,1.485693097114563 -48,1.484632134437561 -49,1.4833335876464844 -50,1.4819313287734985 -51,1.4805651903152466 -52,1.4793237447738647 -53,1.4782017469406128 -54,1.477181077003479 -55,1.476244330406189 -56,1.475417971611023 -57,1.4747883081436157 -58,1.4743884801864624 -59,1.4740984439849854 -60,1.473713994026184 -61,1.473165512084961 -62,1.472537875175476 -63,1.4719524383544922 -64,1.4714716672897339 -65,1.4710924625396729 -66,1.4707766771316528 -67,1.4704958200454712 -68,1.4702327251434326 -69,1.4699655771255493 -70,1.4696706533432007 -71,1.4693354368209839 -72,1.468959093093872 -73,1.4685630798339844 -74,1.4681791067123413 -75,1.4678373336791992 -76,1.4675495624542236 -77,1.4673116207122803 -78,1.467100739479065 -79,1.4668982028961182 -80,1.4666906595230103 -81,1.466477632522583 -82,1.4662648439407349 -83,1.4660598039627075 -84,1.4658597707748413 -85,1.4656621217727661 -86,1.4654620885849 -87,1.4652584791183472 -88,1.4650529623031616 -89,1.4648467302322388 -90,1.464643120765686 -91,1.4644471406936646 -92,1.464261770248413 -93,1.4640882015228271 -94,1.4639266729354858 -95,1.4637728929519653 -96,1.4636223316192627 -97,1.463471531867981 -98,1.4633214473724365 -99,1.4631731510162354 -100,1.4630287885665894 -101,1.4628955125808716 -102,1.462769865989685 -103,1.4626489877700806 -104,1.4625300168991089 -105,1.4624115228652954 -106,1.4622937440872192 -107,1.4621777534484863 -108,1.4620634317398071 -109,1.4619531631469727 -110,1.4618476629257202 -111,1.4617464542388916 -112,1.461647391319275 -113,1.461549997329712 -114,1.4614537954330444 -115,1.461358904838562 -116,1.461264967918396 -117,1.4611726999282837 -118,1.4610834121704102 -119,1.4609959125518799 -120,1.460909128189087 -121,1.4608224630355835 -122,1.4607361555099487 -123,1.4606512784957886 -124,1.460567593574524 -125,1.460485577583313 -126,1.4604049921035767 -127,1.4603253602981567 -128,1.4602469205856323 -129,1.4601693153381348 -130,1.4600927829742432 -131,1.460017204284668 -132,1.4599424600601196 -133,1.4598687887191772 -134,1.4597959518432617 -135,1.4597234725952148 -136,1.4596515893936157 -137,1.4595803022384644 -138,1.4595097303390503 -139,1.4594398736953735 -140,1.4593706130981445 -141,1.4593020677566528 -142,1.4592341184616089 -143,1.4591668844223022 -144,1.4591001272201538 -145,1.459033727645874 -146,1.4589680433273315 -147,1.4589028358459473 -148,1.458837866783142 -149,1.4587732553482056 -150,1.458708643913269 -151,1.4586445093154907 -152,1.4585809707641602 -153,1.4585177898406982 -154,1.458454966545105 -155,1.458392858505249 -156,1.4583312273025513 -157,1.4582701921463013 -158,1.45820951461792 -159,1.4581491947174072 -160,1.4580894708633423 -161,1.4580304622650146 -162,1.4579718112945557 -163,1.4579137563705444 -164,1.4578558206558228 -165,1.4577981233596802 -166,1.4577409029006958 -167,1.4576842784881592 -168,1.4576281309127808 -169,1.4575729370117188 -170,1.4575185775756836 -171,1.4574651718139648 -172,1.457414150238037 -173,1.4573628902435303 -174,1.4573111534118652 -175,1.4572595357894897 -176,1.4572080373764038 -177,1.4571566581726074 -178,1.4571055173873901 -179,1.4570544958114624 -180,1.4570035934448242 
-181,1.4569531679153442 -182,1.4569026231765747 -183,1.4568524360656738 -184,1.4568028450012207 -185,1.4567534923553467 -186,1.4567044973373413 -187,1.4566558599472046 -188,1.4566071033477783 -189,1.4565584659576416 -190,1.456510066986084 -191,1.456461787223816 -192,1.4564136266708374 -193,1.4563655853271484 -194,1.4563175439834595 -195,1.4562698602676392 -196,1.4562219381332397 -197,1.4561742544174194 -198,1.4561268091201782 -199,1.4560790061950684 -200,1.4560316801071167 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.050/training_config.txt deleted file mode 100644 index dc42b00..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.050/training_log.csv deleted file mode 100644 index 14352c0..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,19.568864822387695 -2,10.105947494506836 -3,7.239600658416748 -4,4.941810131072998 -5,3.7073497772216797 -6,3.1183090209960938 -7,2.5880401134490967 
-8,2.32934832572937 -9,2.1578192710876465 -10,2.0380048751831055 -11,1.947862982749939 -12,1.876535177230835 -13,1.8185397386550903 -14,1.773360252380371 -15,1.7358598709106445 -16,1.7014356851577759 -17,1.6700321435928345 -18,1.6408665180206299 -19,1.6150639057159424 -20,1.591325283050537 -21,1.5691680908203125 -22,1.549878716468811 -23,1.5342566967010498 -24,1.5217281579971313 -25,1.5121235847473145 -26,1.5044822692871094 -27,1.4985557794570923 -28,1.4998356103897095 -29,1.5070990324020386 -30,1.5083929300308228 -31,1.504131555557251 -32,1.4890128374099731 -33,1.4841396808624268 -34,1.4839550256729126 -35,1.484250545501709 -36,1.4852585792541504 -37,1.4863895177841187 -38,1.4868214130401611 -39,1.4860942363739014 -40,1.4842301607131958 -41,1.4816713333129883 -42,1.4791182279586792 -43,1.476956844329834 -44,1.4751405715942383 -45,1.4736629724502563 -46,1.4731744527816772 -47,1.4736109972000122 -48,1.4736769199371338 -49,1.4730292558670044 -50,1.4717836380004883 -51,1.4705435037612915 -52,1.4699872732162476 -53,1.4698082208633423 -54,1.4696094989776611 -55,1.4692989587783813 -56,1.4688868522644043 -57,1.4683926105499268 -58,1.467841625213623 -59,1.4672523736953735 -60,1.4666423797607422 -61,1.4660497903823853 -62,1.465538740158081 -63,1.46522057056427 -64,1.4651235342025757 -65,1.4650613069534302 -66,1.4648313522338867 -67,1.4644349813461304 -68,1.4640377759933472 -69,1.4637633562088013 -70,1.4635930061340332 -71,1.4634506702423096 -72,1.4632847309112549 -73,1.4630799293518066 -74,1.462839126586914 -75,1.4625763893127441 -76,1.4623212814331055 -77,1.4620983600616455 -78,1.4619271755218506 -79,1.4617897272109985 -80,1.461647629737854 -81,1.461483120918274 -82,1.4613021612167358 -83,1.4611263275146484 -84,1.4609665870666504 -85,1.4608250856399536 -86,1.4606881141662598 -87,1.4605509042739868 -88,1.460410714149475 -89,1.460265040397644 -90,1.460125207901001 -91,1.4599896669387817 -92,1.4598602056503296 -93,1.4597382545471191 -94,1.4596203565597534 -95,1.4595013856887817 -96,1.4593793153762817 -97,1.4592537879943848 -98,1.4591283798217773 -99,1.4590057134628296 -100,1.4588875770568848 -101,1.4587775468826294 -102,1.4586740732192993 -103,1.4585741758346558 -104,1.4584753513336182 -105,1.458377718925476 -106,1.4582821130752563 -107,1.4581918716430664 -108,1.4581079483032227 -109,1.4580273628234863 -110,1.4579479694366455 -111,1.4578685760498047 -112,1.4577891826629639 -113,1.45771062374115 -114,1.4576334953308105 -115,1.457558274269104 -116,1.4574846029281616 -117,1.457412600517273 -118,1.4573427438735962 -119,1.4572741985321045 -120,1.4572064876556396 -121,1.457139253616333 -122,1.4570726156234741 -123,1.4570066928863525 -124,1.456942081451416 -125,1.4568785429000854 -126,1.4568160772323608 -127,1.4567545652389526 -128,1.4566937685012817 -129,1.45663321018219 -130,1.4565730094909668 -131,1.4565129280090332 -132,1.4564536809921265 -133,1.4563950300216675 -134,1.4563366174697876 -135,1.4562791585922241 -136,1.4562214612960815 -137,1.4561642408370972 -138,1.4561070203781128 -139,1.456049919128418 -140,1.4559932947158813 -141,1.4559365510940552 -142,1.4558799266815186 -143,1.455823540687561 -144,1.455767035484314 -145,1.4557108879089355 -146,1.4556546211242676 -147,1.4555983543395996 -148,1.4555422067642212 -149,1.4554862976074219 -150,1.4554303884506226 -151,1.4553744792938232 -152,1.4553189277648926 -153,1.4552627801895142 -154,1.4552065134048462 -155,1.4551502466201782 -156,1.455093264579773 -157,1.4550367593765259 -158,1.4549802541732788 -159,1.4549232721328735 -160,1.4548662900924683 
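
Written out as a formula rather than code, the weight schedule recorded in every config is, with m = min_val,

    w(t) = \left( \left( (1/m)^{1/3} - 1 \right) \frac{t}{t_{\max}} + 1 \right)^{-3}, \qquad w(0) = 1, \qquad w(t_{\max}) = \left( (1/m)^{1/3} \right)^{-3} = m,

which confirms the "[min_weight, 1]" range claimed in the loss-function comment: with m = 0.01 over the 0 to 10 s horizon, tracking error at the final time counts one hundredth as much as error at t = 0.
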
-161,1.4548094272613525 -162,1.4547526836395264 -163,1.454695701599121 -164,1.4546401500701904 -165,1.454586148262024 -166,1.454533576965332 -167,1.454482078552246 -168,1.4544308185577393 -169,1.454379916191101 -170,1.4543293714523315 -171,1.4542791843414307 -172,1.4542292356491089 -173,1.4541796445846558 -174,1.4541302919387817 -175,1.4540807008743286 -176,1.4540317058563232 -177,1.453982949256897 -178,1.4539344310760498 -179,1.4538863897323608 -180,1.4538383483886719 -181,1.4537904262542725 -182,1.4537427425384521 -183,1.4536954164505005 -184,1.4536478519439697 -185,1.453600525856018 -186,1.453553318977356 -187,1.4535059928894043 -188,1.4534586668014526 -189,1.45341157913208 -190,1.4533650875091553 -191,1.4533185958862305 -192,1.4532725811004639 -193,1.4532266855239868 -194,1.4531809091567993 -195,1.453135371208191 -196,1.4530904293060303 -197,1.4530456066131592 -198,1.4530009031295776 -199,1.4529563188552856 -200,1.4529117345809937 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.080/training_config.txt deleted file mode 100644 index 0ce9b8a..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git 
a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.080/training_log.csv deleted file mode 100644 index 2aab853..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,16.414777755737305 -2,7.493873596191406 -3,4.688365936279297 -4,3.4558095932006836 -5,2.7619240283966064 -6,2.405653238296509 -7,2.1855032444000244 -8,1.9671211242675781 -9,1.8501248359680176 -10,1.757287621498108 -11,1.6849395036697388 -12,1.6292791366577148 -13,1.5876693725585938 -14,1.5665310621261597 -15,1.5520392656326294 -16,1.5408016443252563 -17,1.5316195487976074 -18,1.5247377157211304 -19,1.5199769735336304 -20,1.5171470642089844 -21,1.5147453546524048 -22,1.512649655342102 -23,1.513277292251587 -24,1.5134532451629639 -25,1.510260820388794 -26,1.5008769035339355 -27,1.4888893365859985 -28,1.4800214767456055 -29,1.4742356538772583 -30,1.470223069190979 -31,1.467859148979187 -32,1.4666200876235962 -33,1.4661648273468018 -34,1.4662635326385498 -35,1.4667552709579468 -36,1.4674102067947388 -37,1.4680118560791016 -38,1.4683830738067627 -39,1.4684361219406128 -40,1.4681644439697266 -41,1.4676264524459839 -42,1.4669393301010132 -43,1.4662266969680786 -44,1.465582251548767 -45,1.465039610862732 -46,1.4643833637237549 -47,1.4634640216827393 -48,1.4623627662658691 -49,1.4613662958145142 -50,1.4606276750564575 -51,1.4601454734802246 -52,1.4598416090011597 -53,1.4596407413482666 -54,1.4594875574111938 -55,1.4593504667282104 -56,1.4592303037643433 -57,1.4591153860092163 -58,1.4590086936950684 -59,1.4589234590530396 -60,1.4588794708251953 -61,1.4588639736175537 -62,1.4588408470153809 -63,1.4587681293487549 -64,1.4586409330368042 -65,1.458466649055481 -66,1.4582606554031372 -67,1.4580708742141724 -68,1.4578980207443237 -69,1.4577637910842896 -70,1.457653284072876 -71,1.457557201385498 -72,1.457460880279541 -73,1.4573684930801392 -74,1.457282543182373 -75,1.4572052955627441 -76,1.45713472366333 -77,1.457072138786316 -78,1.4570170640945435 -79,1.4569679498672485 -80,1.4569101333618164 -81,1.456844449043274 -82,1.456771731376648 -83,1.4566956758499146 -84,1.4566227197647095 -85,1.456555724143982 -86,1.4564917087554932 -87,1.4564297199249268 -88,1.4563673734664917 -89,1.456305980682373 -90,1.4562443494796753 -91,1.4561851024627686 -92,1.4561301469802856 -93,1.4560792446136475 -94,1.4560304880142212 -95,1.4559836387634277 -96,1.455937147140503 -97,1.4558906555175781 -98,1.4558424949645996 -99,1.455792784690857 -100,1.4557418823242188 -101,1.4556903839111328 -102,1.4556375741958618 -103,1.4555872678756714 -104,1.4555387496948242 -105,1.455491065979004 -106,1.4554433822631836 -107,1.4553954601287842 -108,1.4553475379943848 -109,1.4552994966506958 -110,1.455253005027771 -111,1.455208420753479 -112,1.455163836479187 -113,1.455118179321289 -114,1.4550726413726807 -115,1.4550288915634155 -116,1.454984188079834 -117,1.4549388885498047 -118,1.4548931121826172 -119,1.4548463821411133 -120,1.4547995328903198 -121,1.454752802848816 -122,1.4547051191329956 -123,1.4546558856964111 -124,1.4546054601669312 -125,1.4545555114746094 -126,1.4545093774795532 -127,1.454468011856079 -128,1.4544326066970825 -129,1.4544007778167725 -130,1.4543671607971191 -131,1.4543319940567017 -132,1.4542959928512573 -133,1.4542607069015503 -134,1.454224705696106 -135,1.4541887044906616 -136,1.4541521072387695 -137,1.4541147947311401 -138,1.4540765285491943 
-139,1.4540380239486694 -140,1.4539995193481445 -141,1.4539613723754883 -142,1.4539231061935425 -143,1.4538850784301758 -144,1.453848958015442 -145,1.4538146257400513 -146,1.4537814855575562 -147,1.4537489414215088 -148,1.4537160396575928 -149,1.453682780265808 -150,1.4536495208740234 -151,1.4536155462265015 -152,1.453581690788269 -153,1.4535473585128784 -154,1.453513264656067 -155,1.4534794092178345 -156,1.4534460306167603 -157,1.4534133672714233 -158,1.4533811807632446 -159,1.4533493518829346 -160,1.4533175230026245 -161,1.4532853364944458 -162,1.4532538652420044 -163,1.4532220363616943 -164,1.4531902074813843 -165,1.4531584978103638 -166,1.4531267881393433 -167,1.4530954360961914 -168,1.4530644416809082 -169,1.4530333280563354 -170,1.453002691268921 -171,1.4529718160629272 -172,1.4529411792755127 -173,1.4529107809066772 -174,1.4528800249099731 -175,1.4528493881225586 -176,1.452818751335144 -177,1.4527884721755981 -178,1.4527578353881836 -179,1.452727198600769 -180,1.452696681022644 -181,1.452666163444519 -182,1.452635645866394 -183,1.4526053667068481 -184,1.4525752067565918 -185,1.452544927597046 -186,1.4525152444839478 -187,1.45248544216156 -188,1.4524562358856201 -189,1.4524264335632324 -190,1.452397108078003 -191,1.4523674249649048 -192,1.4523380994796753 -193,1.4523091316223145 -194,1.452279806137085 -195,1.452250599861145 -196,1.4522218704223633 -197,1.452192783355713 -198,1.452163577079773 -199,1.4521349668502808 -200,1.452106237411499 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.100/training_config.txt deleted file mode 100644 index 490c1cf..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] 
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.100/training_log.csv deleted file mode 100644 index 6b152ed..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,15.473681449890137 -2,8.638729095458984 -3,4.782788276672363 -4,2.969635248184204 -5,2.449263334274292 -6,2.1345300674438477 -7,1.969503402709961 -8,1.8761775493621826 -9,1.81075119972229 -10,1.7622418403625488 -11,1.7221604585647583 -12,1.688661813735962 -13,1.6620664596557617 -14,1.6411365270614624 -15,1.6242457628250122 -16,1.6097257137298584 -17,1.5964046716690063 -18,1.5835758447647095 -19,1.571230173110962 -20,1.5593093633651733 -21,1.547702431678772 -22,1.5363924503326416 -23,1.5253757238388062 -24,1.5150058269500732 -25,1.505464792251587 -26,1.4971555471420288 -27,1.4900511503219604 -28,1.4846168756484985 -29,1.4799104928970337 -30,1.476759910583496 -31,1.4740079641342163 -32,1.4725329875946045 -33,1.4717905521392822 -34,1.472001552581787 -35,1.4729137420654297 -36,1.4727662801742554 -37,1.4696221351623535 -38,1.465174913406372 -39,1.4622507095336914 -40,1.462092638015747 -41,1.4630013704299927 -42,1.464141845703125 -43,1.4652445316314697 -44,1.4660539627075195 -45,1.4663732051849365 -46,1.4661054611206055 -47,1.4653152227401733 -48,1.4641708135604858 -49,1.4628831148147583 -50,1.4616371393203735 -51,1.4605485200881958 -52,1.4596846103668213 -53,1.4591317176818848 -54,1.4590377807617188 -55,1.459372878074646 -56,1.4597800970077515 -57,1.4598475694656372 -58,1.459493637084961 -59,1.458949089050293 -60,1.4585394859313965 -61,1.4583613872528076 -62,1.4583306312561035 -63,1.4583650827407837 -64,1.4583989381790161 -65,1.458391547203064 -66,1.4583425521850586 -67,1.4582480192184448 -68,1.458108901977539 -69,1.4579384326934814 -70,1.4577478170394897 -71,1.4575438499450684 -72,1.4573545455932617 -73,1.457197666168213 -74,1.457086443901062 -75,1.4570283889770508 -76,1.4569975137710571 -77,1.4569687843322754 -78,1.4569140672683716 -79,1.4568300247192383 -80,1.456725835800171 -81,1.4566370248794556 -82,1.4565656185150146 -83,1.456520915031433 -84,1.456484317779541 -85,1.456443190574646 -86,1.4563872814178467 -87,1.4563132524490356 -88,1.456230878829956 -89,1.456148386001587 -90,1.4560718536376953 -91,1.4560085535049438 -92,1.4559587240219116 -93,1.4559094905853271 -94,1.4558600187301636 -95,1.4558066129684448 -96,1.4557461738586426 -97,1.4556834697723389 -98,1.4556212425231934 -99,1.455560564994812 -100,1.4555058479309082 -101,1.4554561376571655 -102,1.4554071426391602 -103,1.455358624458313 -104,1.4553070068359375 -105,1.4552518129348755 -106,1.4551955461502075 -107,1.4551403522491455 -108,1.4550868272781372 -109,1.455034852027893 -110,1.4549829959869385 -111,1.454932451248169 -112,1.4548819065093994 -113,1.4548310041427612 -114,1.4547795057296753 -115,1.4547276496887207 -116,1.4546751976013184 -117,1.4546234607696533 -118,1.454573392868042 
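
The training-case constants are float32 renderings of simple multiples of pi: 3.1415927410125732 is float32 pi, and the remaining values are exact scalings of it (pi/6, pi/4, pi/3, pi/2, 2pi/3, 2pi, 4pi). A small sketch reproducing them, assuming the constants were produced by default-dtype (float32) torch tensors:

    import math
    import torch

    # torch.tensor(float) defaults to float32; .item() widens back to a Python
    # float, reproducing the digits stored in the training cases above.
    for frac, name in [(1/6, "pi/6"), (1/4, "pi/4"), (1/3, "pi/3"), (1/2, "pi/2"),
                       (2/3, "2pi/3"), (1, "pi"), (2, "2pi"), (4, "4pi")]:
        print(name, torch.tensor(frac * math.pi).item())
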
-119,1.4545245170593262 -120,1.454476237297058 -121,1.4544280767440796 -122,1.454379677772522 -123,1.4543309211730957 -124,1.4542819261550903 -125,1.454233169555664 -126,1.4541850090026855 -127,1.4541376829147339 -128,1.4540910720825195 -129,1.4540445804595947 -130,1.4539977312088013 -131,1.4539512395858765 -132,1.453904628753662 -133,1.4538582563400269 -134,1.4538118839263916 -135,1.453765630722046 -136,1.453720211982727 -137,1.4536746740341187 -138,1.453628659248352 -139,1.4535826444625854 -140,1.4535375833511353 -141,1.4534928798675537 -142,1.4534482955932617 -143,1.4534034729003906 -144,1.4533588886260986 -145,1.4533140659332275 -146,1.4532692432403564 -147,1.453224778175354 -148,1.4531805515289307 -149,1.453136682510376 -150,1.4530928134918213 -151,1.453048825263977 -152,1.4530047178268433 -153,1.4529603719711304 -154,1.452916145324707 -155,1.4528717994689941 -156,1.4528275728225708 -157,1.452783226966858 -158,1.4527379274368286 -159,1.4526926279067993 -160,1.45264732837677 -161,1.4526020288467407 -162,1.4525566101074219 -163,1.452511191368103 -164,1.4524657726287842 -165,1.4524203538894653 -166,1.4523752927780151 -167,1.452330470085144 -168,1.452285647392273 -169,1.452241063117981 -170,1.4521965980529785 -171,1.452152132987976 -172,1.4521079063415527 -173,1.452063798904419 -174,1.4520199298858643 -175,1.4519758224487305 -176,1.4519315958023071 -177,1.4518874883651733 -178,1.4518433809280396 -179,1.4517993927001953 -180,1.4517555236816406 -181,1.4517115354537964 -182,1.4516676664352417 -183,1.4516241550445557 -184,1.4515807628631592 -185,1.4515376091003418 -186,1.451494812965393 -187,1.4514520168304443 -188,1.4514098167419434 -189,1.451367735862732 -190,1.45132577419281 -191,1.4512840509414673 -192,1.4512423276901245 -193,1.4512012004852295 -194,1.451160192489624 -195,1.451119303703308 -196,1.4510788917541504 -197,1.4510382413864136 -198,1.4509977102279663 -199,1.4509574174880981 -200,1.4509172439575195 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.125/training_config.txt deleted file mode 100644 index 798f14c..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] 
-[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.125/training_log.csv deleted file mode 100644 index f792e17..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,14.904146194458008 -2,5.107444763183594 -3,2.8419971466064453 -4,2.4583566188812256 -5,2.2579047679901123 -6,2.09039306640625 -7,1.9657574892044067 -8,1.8685749769210815 -9,1.7787773609161377 -10,1.7054009437561035 -11,1.6564843654632568 -12,1.6179243326187134 -13,1.5866111516952515 -14,1.5608689785003662 -15,1.5387951135635376 -16,1.5199588537216187 -17,1.5064730644226074 -18,1.4998161792755127 -19,1.4978829622268677 -20,1.4993282556533813 -21,1.5070605278015137 -22,1.51156485080719 -23,1.5102463960647583 -24,1.5024312734603882 -25,1.4929180145263672 -26,1.4843984842300415 -27,1.4767051935195923 -28,1.4724608659744263 -29,1.4693667888641357 -30,1.4676892757415771 -31,1.4670628309249878 -32,1.4671103954315186 -33,1.4674614667892456 -34,1.467982292175293 -35,1.468399167060852 -36,1.4684895277023315 -37,1.4681214094161987 -38,1.46732497215271 -39,1.4662374258041382 -40,1.4650527238845825 -41,1.4639616012573242 -42,1.46314537525177 -43,1.4626864194869995 -44,1.462755560874939 -45,1.4634103775024414 -46,1.4639030694961548 -47,1.4633128643035889 -48,1.4621764421463013 -49,1.4612983465194702 -50,1.4608370065689087 -51,1.4606225490570068 -52,1.4604607820510864 -53,1.460297703742981 -54,1.4601224660873413 -55,1.459932565689087 -56,1.4597264528274536 -57,1.459525465965271 -58,1.459342122077942 -59,1.4591939449310303 -60,1.4591004848480225 -61,1.4590387344360352 -62,1.4589879512786865 -63,1.458919882774353 -64,1.4588068723678589 -65,1.4586519002914429 -66,1.4584969282150269 -67,1.4583574533462524 -68,1.458247423171997 -69,1.4581743478775024 -70,1.45811927318573 -71,1.4580553770065308 -72,1.4579752683639526 -73,1.4578782320022583 -74,1.4577760696411133 -75,1.4576678276062012 -76,1.4575600624084473 -77,1.457473635673523 -78,1.4574048519134521 -79,1.4573403596878052 -80,1.4572904109954834 -81,1.4572396278381348 -82,1.4571791887283325 -83,1.4571086168289185 -84,1.4570341110229492 -85,1.4569646120071411 -86,1.456900954246521 -87,1.4568417072296143 -88,1.4567841291427612 -89,1.45672607421875 -90,1.4566636085510254 -91,1.4565980434417725 -92,1.4565361738204956 -93,1.4564756155014038 -94,1.4564135074615479 -95,1.4563500881195068 -96,1.4562830924987793 -97,1.4562127590179443 -98,1.456142544746399 
-99,1.4560754299163818 -100,1.4560128450393677 -101,1.4559534788131714 -102,1.4558963775634766 -103,1.455839991569519 -104,1.4557830095291138 -105,1.4557267427444458 -106,1.4556715488433838 -107,1.4556173086166382 -108,1.4555622339248657 -109,1.4555119276046753 -110,1.4554638862609863 -111,1.455415964126587 -112,1.4553683996200562 -113,1.4553217887878418 -114,1.4552744626998901 -115,1.4552271366119385 -116,1.4551832675933838 -117,1.4551401138305664 -118,1.4550955295562744 -119,1.4550501108169556 -120,1.4550042152404785 -121,1.4549568891525269 -122,1.4549083709716797 -123,1.4548594951629639 -124,1.4548097848892212 -125,1.4547600746154785 -126,1.4547110795974731 -127,1.4546619653701782 -128,1.4546140432357788 -129,1.4545663595199585 -130,1.454520344734192 -131,1.4544750452041626 -132,1.454430341720581 -133,1.4543869495391846 -134,1.4543429613113403 -135,1.4543004035949707 -136,1.4542585611343384 -137,1.454217791557312 -138,1.454177737236023 -139,1.4541380405426025 -140,1.4540987014770508 -141,1.4540603160858154 -142,1.4540224075317383 -143,1.4539841413497925 -144,1.4539453983306885 -145,1.4539064168930054 -146,1.453866958618164 -147,1.453827977180481 -148,1.4537898302078247 -149,1.4537521600723267 -150,1.4537146091461182 -151,1.4536770582199097 -152,1.4536408185958862 -153,1.453604817390442 -154,1.453568935394287 -155,1.4535331726074219 -156,1.453497290611267 -157,1.4534615278244019 -158,1.4534263610839844 -159,1.4533908367156982 -160,1.4533554315567017 -161,1.4533202648162842 -162,1.4532848596572876 -163,1.4532489776611328 -164,1.4532142877578735 -165,1.4531794786453247 -166,1.4531439542770386 -167,1.4531091451644897 -168,1.45307457447052 -169,1.4530400037765503 -170,1.453005313873291 -171,1.4529705047607422 -172,1.4529359340667725 -173,1.4529021978378296 -174,1.452867865562439 -175,1.4528343677520752 -176,1.4528006315231323 -177,1.4527662992477417 -178,1.4527314901351929 -179,1.4526981115341187 -180,1.4526640176773071 -181,1.4526290893554688 -182,1.4525936841964722 -183,1.4525604248046875 -184,1.4525266885757446 -185,1.452492117881775 -186,1.4524565935134888 -187,1.4524213075637817 -188,1.4523862600326538 -189,1.452350378036499 -190,1.4523142576217651 -191,1.4522782564163208 -192,1.4522417783737183 -193,1.4522043466567993 -194,1.4521673917770386 -195,1.4521316289901733 -196,1.4520962238311768 -197,1.4520609378814697 -198,1.4520246982574463 -199,1.4519883394241333 -200,1.4519507884979248 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.160/training_config.txt deleted file mode 100644 index a02f355..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def 
inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.160/training_log.csv deleted file mode 100644 index d0a6fe1..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,14.672582626342773 -2,3.865825891494751 -3,3.0047595500946045 -4,2.521855115890503 -5,2.097691059112549 -6,1.939280390739441 -7,1.8345550298690796 -8,1.7583540678024292 -9,1.6987954378128052 -10,1.656506061553955 -11,1.6256083250045776 -12,1.602416753768921 -13,1.5845447778701782 -14,1.5704729557037354 -15,1.5605870485305786 -16,1.5517972707748413 -17,1.5419172048568726 -18,1.5310823917388916 -19,1.5196737051010132 -20,1.5086108446121216 -21,1.4981060028076172 -22,1.4886990785598755 -23,1.4811439514160156 -24,1.475969672203064 -25,1.4724974632263184 -26,1.4700887203216553 -27,1.46867036819458 -28,1.4679460525512695 -29,1.467441201210022 -30,1.4672014713287354 -31,1.4673515558242798 -32,1.4678970575332642 -33,1.4685533046722412 -34,1.4686604738235474 -35,1.4680495262145996 -36,1.4671015739440918 -37,1.466199517250061 -38,1.465516209602356 -39,1.4649726152420044 -40,1.4644595384597778 -41,1.4639945030212402 -42,1.4636108875274658 -43,1.4633822441101074 -44,1.4632694721221924 -45,1.4633561372756958 -46,1.4634389877319336 -47,1.4634495973587036 -48,1.46321439743042 -49,1.462780475616455 -50,1.4621917009353638 -51,1.4616469144821167 -52,1.461159348487854 -53,1.460787057876587 -54,1.4605377912521362 -55,1.4603389501571655 -56,1.4601777791976929 -57,1.4600703716278076 -58,1.4599559307098389 -59,1.4598102569580078 -60,1.4596480131149292 -61,1.459515929222107 -62,1.4593753814697266 -63,1.4592349529266357 -64,1.459098219871521 -65,1.4589662551879883 -66,1.4588512182235718 -67,1.45875883102417 -68,1.4586644172668457 -69,1.4585477113723755 -70,1.4584341049194336 -71,1.4583271741867065 -72,1.458221197128296 -73,1.4581143856048584 -74,1.4579893350601196 -75,1.4578465223312378 -76,1.457704782485962 -77,1.4576091766357422 
-78,1.4575165510177612 -79,1.4574284553527832 -80,1.457343339920044 -81,1.4572536945343018 -82,1.457151174545288 -83,1.4570478200912476 -84,1.456963300704956 -85,1.4568860530853271 -86,1.456816554069519 -87,1.4567511081695557 -88,1.4566843509674072 -89,1.4566195011138916 -90,1.4565601348876953 -91,1.456497073173523 -92,1.4564298391342163 -93,1.4563676118850708 -94,1.4563031196594238 -95,1.456233024597168 -96,1.4561707973480225 -97,1.4561126232147217 -98,1.4560562372207642 -99,1.455998182296753 -100,1.4559510946273804 -101,1.4559005498886108 -102,1.455847144126892 -103,1.455796241760254 -104,1.4557551145553589 -105,1.455709457397461 -106,1.4556593894958496 -107,1.4556161165237427 -108,1.4555686712265015 -109,1.4555175304412842 -110,1.4554654359817505 -111,1.4554152488708496 -112,1.4553613662719727 -113,1.4553048610687256 -114,1.4552414417266846 -115,1.4551715850830078 -116,1.4550957679748535 -117,1.4550225734710693 -118,1.4549732208251953 -119,1.4549249410629272 -120,1.4548851251602173 -121,1.4548348188400269 -122,1.4547868967056274 -123,1.4547433853149414 -124,1.454700231552124 -125,1.4546566009521484 -126,1.4546171426773071 -127,1.4545750617980957 -128,1.4545314311981201 -129,1.4544862508773804 -130,1.4544472694396973 -131,1.454419493675232 -132,1.454384207725525 -133,1.454353928565979 -134,1.4543243646621704 -135,1.4542912244796753 -136,1.454257845878601 -137,1.4542279243469238 -138,1.4541946649551392 -139,1.454158902168274 -140,1.4541237354278564 -141,1.454093337059021 -142,1.4540644884109497 -143,1.4540345668792725 -144,1.4540014266967773 -145,1.453973412513733 -146,1.4539459943771362 -147,1.4539170265197754 -148,1.4538846015930176 -149,1.4538506269454956 -150,1.4538185596466064 -151,1.4537969827651978 -152,1.4537620544433594 -153,1.4537312984466553 -154,1.4537029266357422 -155,1.4536688327789307 -156,1.4536304473876953 -157,1.4535943269729614 -158,1.4535644054412842 -159,1.453538417816162 -160,1.453508734703064 -161,1.4534753561019897 -162,1.4534430503845215 -163,1.4534131288528442 -164,1.4533872604370117 -165,1.453359603881836 -166,1.4533292055130005 -167,1.4532976150512695 -168,1.4532684087753296 -169,1.4532395601272583 -170,1.45320725440979 -171,1.4531724452972412 -172,1.4531365633010864 -173,1.4531009197235107 -174,1.4530653953552246 -175,1.4530282020568848 -176,1.4529871940612793 -177,1.4529500007629395 -178,1.4529318809509277 -179,1.4529057741165161 -180,1.452872633934021 -181,1.4528337717056274 -182,1.4527933597564697 -183,1.452770471572876 -184,1.4527441263198853 -185,1.4527137279510498 -186,1.452679991722107 -187,1.452642798423767 -188,1.4526034593582153 -189,1.4525784254074097 -190,1.4525485038757324 -191,1.4525123834609985 -192,1.4524754285812378 -193,1.4524458646774292 -194,1.4524110555648804 -195,1.4523805379867554 -196,1.4523487091064453 -197,1.452312707901001 -198,1.4522818326950073 -199,1.452248454093933 -200,1.452214002609253 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.200/training_config.txt deleted file mode 100644 index d990f86..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - 
desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.200/training_log.csv deleted file mode 100644 index 83dc18e..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,14.85738468170166 -2,4.611105918884277 -3,3.342634916305542 -4,2.743663787841797 -5,2.3084845542907715 -6,2.0602571964263916 -7,1.9174151420593262 -8,1.8079882860183716 -9,1.7252837419509888 -10,1.6671680212020874 -11,1.6253772974014282 -12,1.5962486267089844 -13,1.5755362510681152 -14,1.559843897819519 -15,1.5464948415756226 -16,1.535754919052124 -17,1.5277925729751587 -18,1.5202704668045044 -19,1.5145714282989502 -20,1.5110653638839722 -21,1.5085582733154297 -22,1.5064224004745483 -23,1.5044347047805786 -24,1.5025129318237305 -25,1.500649094581604 -26,1.4985381364822388 -27,1.4964089393615723 -28,1.4944214820861816 -29,1.49258291721344 -30,1.4908151626586914 -31,1.4890642166137695 -32,1.487343430519104 -33,1.4856173992156982 -34,1.483666181564331 -35,1.481330394744873 -36,1.4787448644638062 -37,1.4765238761901855 -38,1.4748507738113403 -39,1.472959280014038 -40,1.4707229137420654 -41,1.4688951969146729 -42,1.4682410955429077 -43,1.4679656028747559 -44,1.4676203727722168 -45,1.4669264554977417 -46,1.4657565355300903 -47,1.4641833305358887 -48,1.4626590013504028 -49,1.4617520570755005 -50,1.4613076448440552 -51,1.461014986038208 -52,1.4607456922531128 -53,1.4604803323745728 -54,1.4601846933364868 -55,1.4599086046218872 -56,1.4596244096755981 
-57,1.4593297243118286 -58,1.45902681350708 -59,1.4587119817733765 -60,1.4583908319473267 -61,1.458069920539856 -62,1.4577528238296509 -63,1.4574589729309082 -64,1.4571833610534668 -65,1.4569495916366577 -66,1.4567211866378784 -67,1.456527590751648 -68,1.4563549757003784 -69,1.4562203884124756 -70,1.4560824632644653 -71,1.4559662342071533 -72,1.4558682441711426 -73,1.4557844400405884 -74,1.455689787864685 -75,1.4555792808532715 -76,1.4554730653762817 -77,1.4553738832473755 -78,1.4552810192108154 -79,1.4551910161972046 -80,1.4551031589508057 -81,1.4550161361694336 -82,1.4549272060394287 -83,1.4548344612121582 -84,1.4547468423843384 -85,1.4546722173690796 -86,1.4545927047729492 -87,1.4545010328292847 -88,1.454407811164856 -89,1.4543278217315674 -90,1.45425283908844 -91,1.4541758298873901 -92,1.4541003704071045 -93,1.4540297985076904 -94,1.4539614915847778 -95,1.4538888931274414 -96,1.4538155794143677 -97,1.4537512063980103 -98,1.4536852836608887 -99,1.4536129236221313 -100,1.4535359144210815 -101,1.453461766242981 -102,1.4533904790878296 -103,1.4533191919326782 -104,1.4532474279403687 -105,1.453180193901062 -106,1.4531129598617554 -107,1.453041672706604 -108,1.4529732465744019 -109,1.4529081583023071 -110,1.4528404474258423 -111,1.452776312828064 -112,1.452713966369629 -113,1.4526511430740356 -114,1.452589750289917 -115,1.4525312185287476 -116,1.45247483253479 -117,1.4524143934249878 -118,1.4523571729660034 -119,1.4523001909255981 -120,1.4522417783737183 -121,1.452182412147522 -122,1.4521267414093018 -123,1.4520683288574219 -124,1.452007532119751 -125,1.451946496963501 -126,1.4518835544586182 -127,1.4518160820007324 -128,1.4517436027526855 -129,1.451667308807373 -130,1.4515862464904785 -131,1.451498031616211 -132,1.4514002799987793 -133,1.4513039588928223 -134,1.4512784481048584 -135,1.4512605667114258 -136,1.451228380203247 -137,1.4511806964874268 -138,1.4511218070983887 -139,1.4510434865951538 -140,1.450958251953125 -141,1.4508861303329468 -142,1.4508559703826904 -143,1.450826644897461 -144,1.4507761001586914 -145,1.4507156610488892 -146,1.450646996498108 -147,1.4505870342254639 -148,1.450543999671936 -149,1.450497031211853 -150,1.4504433870315552 -151,1.450392246246338 -152,1.4503393173217773 -153,1.4502841234207153 -154,1.4502313137054443 -155,1.4501843452453613 -156,1.4501408338546753 -157,1.4500879049301147 -158,1.4500319957733154 -159,1.449982762336731 -160,1.4499346017837524 -161,1.4498894214630127 -162,1.4498437643051147 -163,1.4497923851013184 -164,1.4497417211532593 -165,1.4496952295303345 -166,1.4496512413024902 -167,1.4496055841445923 -168,1.4495586156845093 -169,1.449509620666504 -170,1.4494619369506836 -171,1.4494171142578125 -172,1.4493721723556519 -173,1.4493272304534912 -174,1.449282169342041 -175,1.4492368698120117 -176,1.4491897821426392 -177,1.4491435289382935 -178,1.4490991830825806 -179,1.4490547180175781 -180,1.449008822441101 -181,1.4489636421203613 -182,1.4489197731018066 -183,1.4488756656646729 -184,1.44883131980896 -185,1.4487872123718262 -186,1.448742151260376 -187,1.448697805404663 -188,1.4486538171768188 -189,1.4486100673675537 -190,1.4485664367675781 -191,1.448522686958313 -192,1.4484790563583374 -193,1.4484353065490723 -194,1.4483904838562012 -195,1.4483453035354614 -196,1.448303461074829 -197,1.4482613801956177 -198,1.4482182264328003 -199,1.4481732845306396 -200,1.4481265544891357 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.250/training_config.txt 
b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.250/training_config.txt deleted file mode 100644 index 034a831..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.250/training_log.csv deleted file mode 100644 index 1b3d74f..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,18.62130355834961 -2,6.563620567321777 -3,4.391988754272461 -4,3.7104592323303223 -5,3.2490975856781006 -6,2.9006855487823486 -7,2.688847064971924 -8,2.554342269897461 -9,2.4545998573303223 -10,2.3717305660247803 -11,2.2976598739624023 -12,2.2273144721984863 -13,2.1606178283691406 -14,2.0988855361938477 -15,2.0423660278320312 -16,1.9907923936843872 -17,1.9441150426864624 -18,1.902077555656433 -19,1.8642871379852295 -20,1.8300970792770386 -21,1.7989946603775024 -22,1.7707217931747437 -23,1.7449357509613037 -24,1.721273422241211 -25,1.6996279954910278 -26,1.6799992322921753 -27,1.662862777709961 -28,1.6475951671600342 -29,1.6338409185409546 -30,1.6214492321014404 -31,1.6101324558258057 -32,1.5999035835266113 
-33,1.5902620553970337 -34,1.5809611082077026 -35,1.572109341621399 -36,1.563739538192749 -37,1.5560314655303955 -38,1.5489468574523926 -39,1.542533040046692 -40,1.536821722984314 -41,1.5317723751068115 -42,1.527252435684204 -43,1.5233675241470337 -44,1.5199520587921143 -45,1.5169639587402344 -46,1.514402151107788 -47,1.512110948562622 -48,1.5099942684173584 -49,1.5080156326293945 -50,1.5062631368637085 -51,1.5046945810317993 -52,1.5032800436019897 -53,1.5020115375518799 -54,1.5008010864257812 -55,1.4995853900909424 -56,1.4983923435211182 -57,1.497204065322876 -58,1.49601149559021 -59,1.4948073625564575 -60,1.493589997291565 -61,1.4923655986785889 -62,1.4911404848098755 -63,1.4898918867111206 -64,1.4886276721954346 -65,1.4873372316360474 -66,1.486045241355896 -67,1.4847798347473145 -68,1.4835195541381836 -69,1.4822678565979004 -70,1.4810062646865845 -71,1.479749083518982 -72,1.4784221649169922 -73,1.47707998752594 -74,1.475744366645813 -75,1.4744606018066406 -76,1.4732189178466797 -77,1.4720242023468018 -78,1.4708726406097412 -79,1.4697670936584473 -80,1.4687275886535645 -81,1.4677298069000244 -82,1.4668477773666382 -83,1.465997338294983 -84,1.4652044773101807 -85,1.4644551277160645 -86,1.463746428489685 -87,1.4630273580551147 -88,1.462410569190979 -89,1.461899995803833 -90,1.4614770412445068 -91,1.4611307382583618 -92,1.460848331451416 -93,1.4605934619903564 -94,1.4603427648544312 -95,1.4600998163223267 -96,1.4598397016525269 -97,1.4595847129821777 -98,1.459359884262085 -99,1.4591747522354126 -100,1.4590195417404175 -101,1.4588662385940552 -102,1.4587148427963257 -103,1.458571434020996 -104,1.4584308862686157 -105,1.4582945108413696 -106,1.4581670761108398 -107,1.4580589532852173 -108,1.457946538925171 -109,1.4578306674957275 -110,1.4577252864837646 -111,1.457646131515503 -112,1.4575673341751099 -113,1.457506537437439 -114,1.4574310779571533 -115,1.45733642578125 -116,1.4572515487670898 -117,1.4571844339370728 -118,1.4571326971054077 -119,1.4570738077163696 -120,1.4570034742355347 -121,1.4569417238235474 -122,1.4568817615509033 -123,1.4568216800689697 -124,1.4567718505859375 -125,1.4567159414291382 -126,1.4566597938537598 -127,1.4566043615341187 -128,1.4565486907958984 -129,1.4564932584762573 -130,1.4564344882965088 -131,1.4563703536987305 -132,1.4563013315200806 -133,1.4562273025512695 -134,1.4561468362808228 -135,1.456062912940979 -136,1.4559956789016724 -137,1.4559392929077148 -138,1.4558802843093872 -139,1.4558175802230835 -140,1.4557520151138306 -141,1.4556832313537598 -142,1.455610752105713 -143,1.4555352926254272 -144,1.4554580450057983 -145,1.4553797245025635 -146,1.4553003311157227 -147,1.45521879196167 -148,1.455136775970459 -149,1.4550557136535645 -150,1.4549751281738281 -151,1.454920768737793 -152,1.4548890590667725 -153,1.4548536539077759 -154,1.45481538772583 -155,1.4547743797302246 -156,1.4547312259674072 -157,1.454684853553772 -158,1.4546363353729248 -159,1.4545865058898926 -160,1.4545373916625977 -161,1.4544894695281982 -162,1.4544408321380615 -163,1.4543918371200562 -164,1.4543378353118896 -165,1.4542824029922485 -166,1.4542268514633179 -167,1.4541717767715454 -168,1.454116702079773 -169,1.4540627002716064 -170,1.4540112018585205 -171,1.4539726972579956 -172,1.4539470672607422 -173,1.4539079666137695 -174,1.453870415687561 -175,1.4538288116455078 -176,1.4537807703018188 -177,1.4537239074707031 -178,1.4536728858947754 -179,1.4536280632019043 -180,1.4535901546478271 -181,1.4535484313964844 -182,1.4535008668899536 -183,1.4534517526626587 -184,1.453405499458313 
-185,1.4533565044403076 -186,1.453305959701538 -187,1.4532604217529297 -188,1.4532183408737183 -189,1.453181505203247 -190,1.4531474113464355 -191,1.4531102180480957 -192,1.4530709981918335 -193,1.4530316591262817 -194,1.4529943466186523 -195,1.4529578685760498 -196,1.4529207944869995 -197,1.4528849124908447 -198,1.4528496265411377 -199,1.452812910079956 -200,1.452775239944458 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.300/training_config.txt deleted file mode 100644 index fdb0f95..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.300/training_log.csv deleted file mode 100644 index 0eba951..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.300/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,28.31456184387207 -2,4.624358177185059 -3,4.472574710845947 -4,3.4743733406066895 -5,3.1341066360473633 -6,2.900212526321411 -7,2.736121654510498 -8,2.6175377368927 -9,2.5248260498046875 -10,2.451521396636963 -11,2.3880770206451416 
-12,2.3322958946228027 -13,2.2832882404327393 -14,2.2365870475769043 -15,2.1888673305511475 -16,2.141124963760376 -17,2.0941431522369385 -18,2.049769401550293 -19,2.008622884750366 -20,1.9704400300979614 -21,1.9352329969406128 -22,1.9023891687393188 -23,1.8714975118637085 -24,1.8423701524734497 -25,1.8147335052490234 -26,1.788216233253479 -27,1.7624095678329468 -28,1.7370816469192505 -29,1.7116608619689941 -30,1.6862266063690186 -31,1.6611186265945435 -32,1.636823296546936 -33,1.614192247390747 -34,1.5938137769699097 -35,1.5754289627075195 -36,1.5591521263122559 -37,1.5453964471817017 -38,1.5343327522277832 -39,1.5259932279586792 -40,1.5204418897628784 -41,1.5170179605484009 -42,1.515358328819275 -43,1.515322208404541 -44,1.5161738395690918 -45,1.5173074007034302 -46,1.5181193351745605 -47,1.518006443977356 -48,1.516695261001587 -49,1.5144057273864746 -50,1.5111500024795532 -51,1.5071334838867188 -52,1.5026408433914185 -53,1.498045802116394 -54,1.4936494827270508 -55,1.4896495342254639 -56,1.4861034154891968 -57,1.4830317497253418 -58,1.4804835319519043 -59,1.4784451723098755 -60,1.4767119884490967 -61,1.4752143621444702 -62,1.4739216566085815 -63,1.4728127717971802 -64,1.4717626571655273 -65,1.4707109928131104 -66,1.4696836471557617 -67,1.468770980834961 -68,1.4679901599884033 -69,1.4673171043395996 -70,1.4668083190917969 -71,1.4663769006729126 -72,1.4660398960113525 -73,1.4656763076782227 -74,1.4653409719467163 -75,1.465029239654541 -76,1.4647167921066284 -77,1.464383840560913 -78,1.4641069173812866 -79,1.463821291923523 -80,1.463529109954834 -81,1.463271141052246 -82,1.4630175828933716 -83,1.4627861976623535 -84,1.4625765085220337 -85,1.4623838663101196 -86,1.462209939956665 -87,1.4620174169540405 -88,1.4618231058120728 -89,1.4616235494613647 -90,1.461408019065857 -91,1.4611775875091553 -92,1.4609588384628296 -93,1.4607830047607422 -94,1.460598111152649 -95,1.4604212045669556 -96,1.4602363109588623 -97,1.4600383043289185 -98,1.459878921508789 -99,1.4597110748291016 -100,1.4595319032669067 -101,1.4593396186828613 -102,1.45918869972229 -103,1.459030270576477 -104,1.4588590860366821 -105,1.4587026834487915 -106,1.458552598953247 -107,1.4583989381790161 -108,1.4582765102386475 -109,1.4581526517868042 -110,1.4580074548721313 -111,1.4578672647476196 -112,1.4577311277389526 -113,1.4576222896575928 -114,1.457503318786621 -115,1.4573739767074585 -116,1.457261323928833 -117,1.4571597576141357 -118,1.457054615020752 -119,1.456949234008789 -120,1.4568428993225098 -121,1.4567526578903198 -122,1.4566528797149658 -123,1.4565461874008179 -124,1.4564520120620728 -125,1.456356167793274 -126,1.4562594890594482 -127,1.456159234046936 -128,1.4560763835906982 -129,1.4559845924377441 -130,1.455885410308838 -131,1.455801248550415 -132,1.4557145833969116 -133,1.455633521080017 -134,1.4555481672286987 -135,1.455468773841858 -136,1.4553909301757812 -137,1.455310344696045 -138,1.4552210569381714 -139,1.4551485776901245 -140,1.4550683498382568 -141,1.4549899101257324 -142,1.4549195766448975 -143,1.4548455476760864 -144,1.4547592401504517 -145,1.4546915292739868 -146,1.4546215534210205 -147,1.4545331001281738 -148,1.4544669389724731 -149,1.454410195350647 -150,1.4543206691741943 -151,1.4542489051818848 -152,1.4541900157928467 -153,1.4541378021240234 -154,1.4540811777114868 -155,1.4540026187896729 -156,1.453916311264038 -157,1.4538499116897583 -158,1.4537807703018188 -159,1.453707218170166 -160,1.4536340236663818 -161,1.4535635709762573 -162,1.4534927606582642 -163,1.4534209966659546 -164,1.4533571004867554 
-165,1.453279972076416 -166,1.45321786403656 -167,1.4531553983688354 -168,1.4530932903289795 -169,1.4530519247055054 -170,1.4529595375061035 -171,1.452883243560791 -172,1.4528295993804932 -173,1.4527634382247925 -174,1.4526901245117188 -175,1.4526125192642212 -176,1.4525638818740845 -177,1.452520728111267 -178,1.452444076538086 -179,1.4523649215698242 -180,1.4523135423660278 -181,1.4522624015808105 -182,1.4522018432617188 -183,1.4521418809890747 -184,1.452079176902771 -185,1.452016830444336 -186,1.4519546031951904 -187,1.4518766403198242 -188,1.4518215656280518 -189,1.4517689943313599 -190,1.4517123699188232 -191,1.451651692390442 -192,1.4516005516052246 -193,1.4515419006347656 -194,1.4514849185943604 -195,1.45142662525177 -196,1.4513764381408691 -197,1.4513226747512817 -198,1.4512690305709839 -199,1.4512134790420532 -200,1.4511653184890747 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.400/training_config.txt deleted file mode 100644 index 3c6dc7f..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.400/training_log.csv 
b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.400/training_log.csv deleted file mode 100644 index 3480c0a..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.400/training_log.csv +++ /dev/null @@ -1,57 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,266.26031494140625 -2,4.686159133911133 -3,4.821897506713867 -4,4.320993423461914 -5,3.4523444175720215 -6,3.3167638778686523 -7,3.2215054035186768 -8,3.1422550678253174 -9,3.0761942863464355 -10,3.0191023349761963 -11,2.9699223041534424 -12,2.9856200218200684 -13,8.732401847839355 -14,82.9572525024414 -15,713.96875 -16,2025.201416015625 -17,2717.021484375 -18,2953.56396484375 -19,2994.17919921875 -20,2881.20947265625 -21,2567.093505859375 -22,2146.655029296875 -23,1619.5660400390625 -24,1102.51123046875 -25,717.6436157226562 -26,468.6149597167969 -27,298.03350830078125 -28,188.01480102539062 -29,117.47369384765625 -30,76.25564575195312 -31,53.97468566894531 -32,39.29145050048828 -33,29.31096839904785 -34,22.757909774780273 -35,17.081716537475586 -36,11.887883186340332 -37,8.052499771118164 -38,5.802879810333252 -39,4.410678386688232 -40,3.8419556617736816 -41,3.979961633682251 -42,3.9832403659820557 -43,3.9598305225372314 -44,3.9112958908081055 -45,3.8712735176086426 -46,3.8381664752960205 -47,3.8110408782958984 -48,3.787302255630493 -49,3.783799409866333 -50,3.77535343170166 -51,3.769078493118286 -52,nan -53,nan -54,nan -55,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.500/training_config.txt deleted file mode 100644 index bcffbdd..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 
0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.500/training_log.csv deleted file mode 100644 index 891989b..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.500/training_log.csv +++ /dev/null @@ -1,17 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,1080.4974365234375 -2,3169.412109375 -3,3093.86083984375 -4,2324.89404296875 -5,1297.96337890625 -6,76.08409118652344 -7,2.7033472061157227 -8,2.826526403427124 -9,3.013382911682129 -10,3.149374008178711 -11,3.2575950622558594 -12,nan -13,nan -14,nan -15,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.600/training_config.txt deleted file mode 100644 index 9d8080b..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] 
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.600/training_log.csv deleted file mode 100644 index 260d422..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.600/training_log.csv +++ /dev/null @@ -1,12 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,1931.40673828125 -2,3233.32373046875 -3,3241.359375 -4,3242.302490234375 -5,3242.41162109375 -6,3242.41162109375 -7,3242.41162109375 -8,3242.41162109375 -9,3242.41162109375 -10,3242.41162109375 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.700/training_config.txt deleted file mode 100644 index 83f88cd..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.700/training_log.csv deleted file mode 100644 index bd14c28..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss 
-0,27.12971305847168 -1,2545.7900390625 -2,3241.390625 -3,3242.41162109375 -4,3242.41162109375 -5,3242.41162109375 -6,3242.41162109375 -7,3242.41162109375 -8,3242.41162109375 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.800/training_config.txt deleted file mode 100644 index a1a0ed0..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.800/training_log.csv deleted file mode 100644 index 0a1ac92..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,2841.9189453125 -2,3242.41162109375 -3,3242.41162109375 -4,3242.41162109375 -5,3242.41162109375 -6,3242.41162109375 -7,3242.41162109375 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.900/training_config.txt deleted file mode 100644 index 73e44b5..0000000 --- 
a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.900/training_log.csv deleted file mode 100644 index 45211c0..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,3038.469970703125 -2,3242.41162109375 -3,3242.41162109375 -4,3242.41162109375 -5,3242.41162109375 -6,3242.41162109375 -7,3242.41162109375 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_1.000/training_config.txt deleted file mode 100644 index e7c95b8..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight 
= 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_1.000/training_log.csv deleted file mode 100644 index 5866d0b..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,3140.62646484375 -2,3242.41162109375 -3,3242.41162109375 -4,3242.41162109375 -5,3242.41162109375 -6,3242.41162109375 -7,3242.41162109375 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_16.000/training_config.txt deleted file mode 100644 index e2d8f07..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - 
t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_16.000/training_log.csv deleted file mode 100644 index b1f4742..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,3258.946533203125 -2,3258.946533203125 -3,3258.946533203125 -4,3258.946533203125 -5,3258.946533203125 -6,3258.946533203125 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_2.000/training_config.txt deleted file mode 100644 index a7e094d..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 
6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_2.000/training_log.csv deleted file mode 100644 index 8ec294b..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,3258.858154296875 -2,296.68267822265625 -3,nan -4,nan -5,nan -6,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_4.000/training_config.txt deleted file mode 100644 index ce301ac..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 
3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_4.000/training_log.csv deleted file mode 100644 index b1f4742..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,3258.946533203125 -2,3258.946533203125 -3,3258.946533203125 -4,3258.946533203125 -5,3258.946533203125 -6,3258.946533203125 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_8.000/training_config.txt deleted file mode 100644 index 94dda4c..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * t_span + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_8.000/training_log.csv deleted file mode 100644 index b1f4742..0000000 --- 
a/training/time_weighting_learning_rate_sweep/inverse_cubed/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,27.12971305847168 -1,3258.946533203125 -2,3258.946533203125 -3,3258.946533203125 -4,3258.946533203125 -5,3258.946533203125 -6,3258.946533203125 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.010/training_config.txt deleted file mode 100644 index 74ce38a..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.010/training_log.csv deleted file mode 100644 index f80a217..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,275.0838317871094 -2,226.7275848388672 -3,181.3385009765625 -4,154.5103759765625 -5,126.86194610595703 -6,96.60025787353516 -7,71.22163391113281 -8,46.789398193359375 -9,53.51068878173828 -10,37.55705642700195 -11,40.26057815551758 -12,39.22757339477539 -13,42.504966735839844 
-14,45.25449752807617 -15,43.611873626708984 -16,47.15289306640625 -17,49.78810119628906 -18,53.6796760559082 -19,58.950103759765625 -20,59.53542709350586 -21,58.54838562011719 -22,57.4407844543457 -23,57.1434211730957 -24,53.93942642211914 -25,53.1650276184082 -26,51.78904724121094 -27,49.129913330078125 -28,47.65816879272461 -29,46.449222564697266 -30,44.72037124633789 -31,43.611698150634766 -32,42.5721435546875 -33,43.66789245605469 -34,42.67402648925781 -35,39.44016647338867 -36,38.176307678222656 -37,37.5357780456543 -38,36.20994186401367 -39,35.555015563964844 -40,35.39585876464844 -41,35.92770767211914 -42,35.68026351928711 -43,35.4999885559082 -44,35.43817138671875 -45,35.14554214477539 -46,34.29414367675781 -47,34.01017761230469 -48,33.92128372192383 -49,33.808387756347656 -50,33.558082580566406 -51,33.13623046875 -52,32.563438415527344 -53,31.15009880065918 -54,32.237640380859375 -55,29.66178321838379 -56,31.561351776123047 -57,30.269256591796875 -58,32.751895904541016 -59,31.35386848449707 -60,29.884607315063477 -61,36.92993927001953 -62,29.008005142211914 -63,31.616300582885742 -64,30.33638572692871 -65,31.90616798400879 -66,30.71432876586914 -67,28.810091018676758 -68,29.163551330566406 -69,28.73668098449707 -70,28.500675201416016 -71,28.550874710083008 -72,20.55232810974121 -73,18.75281524658203 -74,15.934911727905273 -75,15.591341972351074 -76,13.016403198242188 -77,14.288409233093262 -78,14.464521408081055 -79,13.572062492370605 -80,13.360575675964355 -81,13.682679176330566 -82,13.529097557067871 -83,13.075002670288086 -84,12.600320816040039 -85,12.244044303894043 -86,11.704170227050781 -87,11.458518028259277 -88,11.174753189086914 -89,10.805512428283691 -90,10.368586540222168 -91,9.913079261779785 -92,9.411075592041016 -93,9.182594299316406 -94,8.775422096252441 -95,8.57878303527832 -96,8.069701194763184 -97,7.185676097869873 -98,7.006459712982178 -99,6.9091596603393555 -100,6.850782871246338 -101,6.7907586097717285 -102,6.725147724151611 -103,6.6661696434021 -104,6.609242916107178 -105,6.550628662109375 -106,6.489836692810059 -107,6.42885160446167 -108,6.370622158050537 -109,6.317517280578613 -110,6.26821756362915 -111,6.219040393829346 -112,6.170483589172363 -113,6.125767707824707 -114,6.085538387298584 -115,6.048455238342285 -116,6.013321876525879 -117,5.979485988616943 -118,5.946709156036377 -119,5.915010452270508 -120,5.8844194412231445 -121,5.854683876037598 -122,5.82512092590332 -123,5.795010089874268 -124,5.76404333114624 -125,5.73231840133667 -126,5.699886798858643 -127,5.66663932800293 -128,5.6322736740112305 -129,5.596395015716553 -130,5.558547496795654 -131,5.518427848815918 -132,5.4757232666015625 -133,5.430072784423828 -134,5.381085395812988 -135,5.328406810760498 -136,5.271753787994385 -137,5.211078643798828 -138,5.146767616271973 -139,5.080181121826172 -140,5.014427185058594 -141,4.954380035400391 -142,4.903672218322754 -143,4.853315830230713 -144,4.792120933532715 -145,4.736128330230713 -146,4.694891452789307 -147,4.665212154388428 -148,4.642251968383789 -149,4.622623920440674 -150,4.604206562042236 -151,4.585912704467773 -152,4.567272663116455 -153,4.548190116882324 -154,4.528778553009033 -155,4.509394645690918 -156,4.490363121032715 -157,4.471975326538086 -158,4.454426288604736 -159,4.437708377838135 -160,4.421684265136719 -161,4.406135082244873 -162,4.390941619873047 -163,4.37611722946167 -164,4.361776828765869 -165,4.348079681396484 -166,4.335125923156738 -167,4.322951793670654 -168,4.311593055725098 -169,4.300959587097168 -170,4.29089879989624 
-171,4.281135082244873 -172,4.271332263946533 -173,4.261223316192627 -174,4.250696182250977 -175,4.239800930023193 -176,4.228647232055664 -177,4.217285633087158 -178,4.205714702606201 -179,4.193961143493652 -180,4.182162761688232 -181,4.17051362991333 -182,4.159147262573242 -183,4.148146152496338 -184,4.137485980987549 -185,4.127127647399902 -186,4.116962432861328 -187,4.106935501098633 -188,4.09696102142334 -189,4.0870184898376465 -190,4.077086448669434 -191,4.067191123962402 -192,4.057347774505615 -193,4.047576904296875 -194,4.037909507751465 -195,4.028326034545898 -196,4.018808364868164 -197,4.009352207183838 -198,3.9999425411224365 -199,3.9906182289123535 -200,3.9813995361328125 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.020/training_config.txt deleted file mode 100644 index dd860fc..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.020/training_log.csv deleted file mode 100644 index 6549f4c..0000000 --- 
a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,241.7369842529297 -2,147.81350708007812 -3,68.56905364990234 -4,42.215049743652344 -5,42.066959381103516 -6,44.2579231262207 -7,46.84860610961914 -8,50.628543853759766 -9,47.362518310546875 -10,35.413970947265625 -11,34.2098503112793 -12,30.296411514282227 -13,30.351545333862305 -14,28.34673500061035 -15,29.165786743164062 -16,29.39647674560547 -17,27.778324127197266 -18,26.1652774810791 -19,24.5626163482666 -20,23.025299072265625 -21,21.6649169921875 -22,20.562326431274414 -23,20.73691749572754 -24,26.223966598510742 -25,23.357988357543945 -26,21.365575790405273 -27,32.48175048828125 -28,32.4604377746582 -29,38.35187911987305 -30,48.99205017089844 -31,52.10971450805664 -32,49.01251220703125 -33,32.69997024536133 -34,34.254547119140625 -35,30.487571716308594 -36,28.182710647583008 -37,25.303071975708008 -38,29.09435272216797 -39,34.179813385009766 -40,27.863542556762695 -41,29.621553421020508 -42,28.734289169311523 -43,20.972997665405273 -44,17.90471649169922 -45,23.080720901489258 -46,24.82501220703125 -47,26.167516708374023 -48,22.853023529052734 -49,21.10666847229004 -50,20.3487606048584 -51,18.899169921875 -52,18.124744415283203 -53,17.819110870361328 -54,15.583757400512695 -55,16.737104415893555 -56,16.938495635986328 -57,19.409709930419922 -58,17.850128173828125 -59,18.02928352355957 -60,15.873028755187988 -61,12.745617866516113 -62,13.91303825378418 -63,15.116058349609375 -64,6.746414661407471 -65,7.895148277282715 -66,10.418258666992188 -67,13.4087553024292 -68,7.113725662231445 -69,6.456613540649414 -70,4.705975532531738 -71,3.897498607635498 -72,4.251119136810303 -73,4.992946147918701 -74,5.750645637512207 -75,4.689373016357422 -76,5.979013442993164 -77,4.0466837882995605 -78,4.175020694732666 -79,4.989347457885742 -80,4.317152500152588 -81,3.9948863983154297 -82,3.0241036415100098 -83,3.9796714782714844 -84,3.4252800941467285 -85,3.26950740814209 -86,3.929492473602295 -87,4.2252068519592285 -88,4.346107006072998 -89,4.388244152069092 -90,4.406655788421631 -91,4.410376071929932 -92,4.39508056640625 -93,4.364744186401367 -94,4.321280479431152 -95,4.263832092285156 -96,4.190514087677002 -97,4.099239349365234 -98,3.987848997116089 -99,3.8522069454193115 -100,3.6779897212982178 -101,3.4416418075561523 -102,3.143759250640869 -103,2.978123664855957 -104,3.8098835945129395 -105,3.7463934421539307 -106,3.6433799266815186 -107,3.5295588970184326 -108,3.4486725330352783 -109,3.3378548622131348 -110,3.267245054244995 -111,3.2154061794281006 -112,3.1307308673858643 -113,3.0305261611938477 -114,2.938687801361084 -115,2.859927177429199 -116,2.7921700477600098 -117,2.732673406600952 -118,2.679377555847168 -119,2.6308929920196533 -120,2.5863864421844482 -121,2.545243263244629 -122,2.506904363632202 -123,2.4709365367889404 -124,2.436758279800415 -125,2.403856039047241 -126,2.3713319301605225 -127,2.3379414081573486 -128,2.303518533706665 -129,2.2735595703125 -130,2.2558672428131104 -131,2.247945785522461 -132,2.2405412197113037 -133,2.2302589416503906 -134,2.217226982116699 -135,2.202008008956909 -136,2.1849191188812256 -137,2.1665773391723633 -138,2.1497085094451904 -139,2.1441943645477295 -140,2.160661458969116 -141,2.148453950881958 -142,2.127460479736328 -143,2.1282308101654053 -144,2.131624221801758 -145,2.129124164581299 -146,2.119939088821411 -147,2.1072139739990234 -148,2.0957446098327637 -149,2.0901217460632324 
-150,2.089956045150757 -151,2.0844273567199707 -152,2.072188138961792 -153,2.0641915798187256 -154,2.0612094402313232 -155,2.0592215061187744 -156,2.0555293560028076 -157,2.0499584674835205 -158,2.044827461242676 -159,2.043020486831665 -160,2.041663885116577 -161,2.0361859798431396 -162,2.031771421432495 -163,2.0295917987823486 -164,2.0274436473846436 -165,2.024301767349243 -166,2.0205485820770264 -167,2.017197847366333 -168,2.0147857666015625 -169,2.012322187423706 -170,2.0087814331054688 -171,2.0051662921905518 -172,2.0023937225341797 -173,1.999953269958496 -174,1.9971033334732056 -175,1.9937996864318848 -176,1.9905884265899658 -177,1.9877047538757324 -178,1.9845187664031982 -179,1.9807592630386353 -180,1.9770249128341675 -181,1.973507046699524 -182,1.969862937927246 -183,1.9659008979797363 -184,1.9617838859558105 -185,1.9577178955078125 -186,1.9536306858062744 -187,1.9492557048797607 -188,1.944664716720581 -189,1.940063238143921 -190,1.9353926181793213 -191,1.9304856061935425 -192,1.925342321395874 -193,1.9200797080993652 -194,1.9146885871887207 -195,1.9090461730957031 -196,1.9031856060028076 -197,1.8972370624542236 -198,1.8912160396575928 -199,1.8851014375686646 -200,1.878954529762268 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.040/training_config.txt deleted file mode 100644 index 76cb2c9..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 
3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.040/training_log.csv deleted file mode 100644 index 838b3a2..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,184.4945526123047 -2,40.99314880371094 -3,36.81889724731445 -4,25.53664207458496 -5,22.395376205444336 -6,16.683582305908203 -7,8.996954917907715 -8,6.523298263549805 -9,6.627187728881836 -10,4.636604309082031 -11,3.541154384613037 -12,2.1041903495788574 -13,2.815140962600708 -14,2.639333724975586 -15,3.1975390911102295 -16,3.594587802886963 -17,3.717209815979004 -18,3.6608927249908447 -19,3.516169309616089 -20,3.6225268840789795 -21,3.707904100418091 -22,4.0693678855896 -23,3.999884605407715 -24,4.556051254272461 -25,5.779964447021484 -26,5.715750217437744 -27,5.72322940826416 -28,5.269540309906006 -29,6.902668476104736 -30,5.3588032722473145 -31,4.4288411140441895 -32,4.3009233474731445 -33,3.530858039855957 -34,3.4910809993743896 -35,3.5708415508270264 -36,3.570883274078369 -37,3.1453628540039062 -38,3.0455920696258545 -39,2.891982316970825 -40,2.9036941528320312 -41,2.872880220413208 -42,2.842966079711914 -43,2.876221179962158 -44,2.886622428894043 -45,2.8979129791259766 -46,2.936894416809082 -47,2.953768730163574 -48,2.968714714050293 -49,2.972209930419922 -50,3.010324239730835 -51,3.0060336589813232 -52,2.958439588546753 -53,2.954328775405884 -54,2.9703612327575684 -55,2.9527294635772705 -56,2.9537558555603027 -57,2.953974485397339 -58,2.9471654891967773 -59,2.935218334197998 -60,2.92643141746521 -61,2.926689624786377 -62,2.914111614227295 -63,2.9040510654449463 -64,2.8990402221679688 -65,2.891732931137085 -66,2.88100266456604 -67,2.870347738265991 -68,2.864018678665161 -69,2.856830596923828 -70,2.846226215362549 -71,2.838721990585327 -72,2.832479476928711 -73,2.824962615966797 -74,2.816520929336548 -75,2.8092243671417236 -76,2.8035268783569336 -77,2.7967607975006104 -78,2.789280652999878 -79,2.782973527908325 -80,2.777144432067871 -81,2.7707297801971436 -82,2.7638745307922363 -83,2.7573914527893066 -84,2.7513482570648193 -85,2.744790554046631 -86,2.7376558780670166 -87,2.7305970191955566 -88,2.7234647274017334 -89,2.715726137161255 -90,2.7072293758392334 -91,2.6982429027557373 -92,2.6888883113861084 -93,2.679056406021118 -94,2.6695690155029297 -95,2.664942741394043 -96,2.6609814167022705 -97,2.642254114151001 -98,2.6364729404449463 -99,2.639662027359009 -100,2.591768980026245 -101,2.5894603729248047 -102,2.5898571014404297 -103,2.5891332626342773 -104,2.587174654006958 -105,2.585097551345825 -106,2.583836317062378 -107,2.582268476486206 -108,2.578951835632324 -109,2.5747406482696533 -110,2.5707011222839355 -111,2.5665080547332764 -112,2.5614657402038574 -113,2.5554802417755127 -114,2.5490305423736572 -115,2.5426368713378906 -116,2.5363292694091797 -117,2.52992582321167 -118,2.523662805557251 -119,2.5176026821136475 -120,2.511953592300415 -121,2.5064384937286377 -122,2.50077486038208 -123,2.4948856830596924 -124,2.488880157470703 -125,2.482815742492676 -126,2.476618528366089 -127,2.470189332962036 -128,2.4635021686553955 
-129,2.4565672874450684 -130,2.449398994445801 -131,2.441969156265259 -132,2.434319019317627 -133,2.4264583587646484 -134,2.4180967807769775 -135,2.4084794521331787 -136,2.391707181930542 -137,2.142209768295288 -138,1.5076797008514404 -139,1.4388808012008667 -140,1.3565713167190552 -141,1.3134146928787231 -142,1.2974475622177124 -143,1.2160288095474243 -144,1.1661103963851929 -145,0.9561031460762024 -146,0.7716641426086426 -147,0.7515533566474915 -148,0.7308793663978577 -149,0.7255648970603943 -150,0.6865513920783997 -151,0.6921244859695435 -152,0.7008768916130066 -153,0.702796220779419 -154,0.7074258923530579 -155,0.7313183546066284 -156,0.7367772459983826 -157,0.72609543800354 -158,0.7346982359886169 -159,0.7911201119422913 -160,0.7109082937240601 -161,0.6995245814323425 -162,0.700240433216095 -163,0.6869509220123291 -164,0.6760812997817993 -165,0.6710781455039978 -166,0.6595756411552429 -167,0.6466647982597351 -168,0.6401785612106323 -169,0.629640519618988 -170,0.6164250373840332 -171,0.6075484156608582 -172,0.598057210445404 -173,0.5867829322814941 -174,0.5772542357444763 -175,0.5707394480705261 -176,0.5625104308128357 -177,0.5547151565551758 -178,0.573661744594574 -179,0.5458137392997742 -180,0.5453504323959351 -181,0.5412657856941223 -182,0.5281438231468201 -183,0.5297583937644958 -184,0.538392186164856 -185,0.5188648104667664 -186,0.5242753624916077 -187,0.5288658142089844 -188,0.526138961315155 -189,0.5159410834312439 -190,0.5132560133934021 -191,0.5170423984527588 -192,0.5054225921630859 -193,0.5073118209838867 -194,0.5072013735771179 -195,0.5010586977005005 -196,0.4976551830768585 -197,0.5021039843559265 -198,0.498147189617157 -199,0.495728462934494 -200,0.49625492095947266 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.050/training_config.txt deleted file mode 100644 index 0330aee..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 
0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.050/training_log.csv deleted file mode 100644 index 5bbb144..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,164.5695037841797 -2,38.2790412902832 -3,31.030256271362305 -4,22.759918212890625 -5,14.208052635192871 -6,8.8912992477417 -7,6.705487251281738 -8,4.967263698577881 -9,3.0117883682250977 -10,1.8947104215621948 -11,1.7108246088027954 -12,1.162451148033142 -13,1.3045843839645386 -14,1.2451295852661133 -15,1.0068833827972412 -16,0.925822913646698 -17,0.9118883013725281 -18,0.9053067564964294 -19,0.9003437757492065 -20,0.896724283695221 -21,0.8924077749252319 -22,0.8837642073631287 -23,0.8658729791641235 -24,0.8462041616439819 -25,0.8332922458648682 -26,0.8207205533981323 -27,0.8571797013282776 -28,0.8067445158958435 -29,0.7797972559928894 -30,0.7464010119438171 -31,0.7102876901626587 -32,0.6928954124450684 -33,0.6886155009269714 -34,0.6837243437767029 -35,0.6761093139648438 -36,0.6670547127723694 -37,0.6573032140731812 -38,0.6472201943397522 -39,0.6370030045509338 -40,0.6267759203910828 -41,0.6166344881057739 -42,0.6066679954528809 -43,0.5969929099082947 -44,0.587671160697937 -45,0.578707754611969 -46,0.5700762271881104 -47,0.5617287158966064 -48,0.553634762763977 -49,0.5457731485366821 -50,0.5381256341934204 -51,0.5306792855262756 -52,0.5234231352806091 -53,0.5163490176200867 -54,0.5094636678695679 -55,0.502752423286438 -56,0.4961952269077301 -57,0.4897892475128174 -58,0.4835429787635803 -59,0.4774276912212372 -60,0.47141939401626587 -61,0.46549636125564575 -62,0.4596356451511383 -63,0.45380955934524536 -64,0.44799351692199707 -65,0.44215869903564453 -66,0.4362815022468567 -67,0.43033894896507263 -68,0.4243237376213074 -69,0.41821518540382385 -70,0.4119911193847656 -71,0.4056106209754944 -72,0.399014413356781 -73,0.3921118974685669 -74,0.384775310754776 -75,0.37680402398109436 -76,0.3678964078426361 -77,0.3575977087020874 -78,0.34522971510887146 -79,0.32997602224349976 -80,0.31178492307662964 -81,0.2939818799495697 -82,0.28335246443748474 -83,0.2819393575191498 -84,0.2853543162345886 -85,0.2895062267780304 -86,0.29267245531082153 -87,0.2944304049015045 -88,0.29483678936958313 -89,0.29404324293136597 -90,0.2921988070011139 -91,0.2894124984741211 -92,0.2857649326324463 -93,0.2813190519809723 -94,0.27617907524108887 -95,0.27051809430122375 -96,0.26465776562690735 -97,0.2590756118297577 -98,0.2543863356113434 -99,0.2509901523590088 -100,0.24838021397590637 -101,0.24404998123645782 -102,0.23189608752727509 -103,0.22846487164497375 -104,0.2026841938495636 
-105,0.20751242339611053 -106,0.2091798037290573 -107,0.2086942493915558 -108,0.20794451236724854 -109,0.2072431892156601 -110,0.20647700130939484 -111,0.2054491937160492 -112,0.20399150252342224 -113,0.2020070105791092 -114,0.19947412610054016 -115,0.19642780721187592 -116,0.1929420530796051 -117,0.18910273909568787 -118,0.18497776985168457 -119,0.18058902025222778 -120,0.1759074181318283 -121,0.17087025940418243 -122,0.16541524231433868 -123,0.1594998836517334 -124,0.15312545001506805 -125,0.1462741643190384 -126,0.1386265754699707 -127,0.13362064957618713 -128,0.14029276371002197 -129,0.13801419734954834 -130,0.12806940078735352 -131,0.12787331640720367 -132,0.12928403913974762 -133,0.12886318564414978 -134,0.12680457532405853 -135,0.12311584502458572 -136,0.11829452216625214 -137,0.1157720610499382 -138,0.11847535520792007 -139,0.11441915482282639 -140,0.11097409576177597 -141,0.11139126867055893 -142,0.11105038970708847 -143,0.10921113938093185 -144,0.10642502456903458 -145,0.10488927364349365 -146,0.1054653450846672 -147,0.10403746366500854 -148,0.10187540203332901 -149,0.1015620306134224 -150,0.10151810944080353 -151,0.10059815645217896 -152,0.09917911887168884 -153,0.09852523356676102 -154,0.09857220202684402 -155,0.09785812348127365 -156,0.09678434580564499 -157,0.09641057997941971 -158,0.0962962880730629 -159,0.09578712284564972 -160,0.09504879266023636 -161,0.09465547651052475 -162,0.09454219043254852 -163,0.09414087235927582 -164,0.09355120360851288 -165,0.09321659803390503 -166,0.09303215146064758 -167,0.09268340468406677 -168,0.09221319109201431 -169,0.0918843001127243 -170,0.091689333319664 -171,0.0913870558142662 -172,0.09099340438842773 -173,0.09070181846618652 -174,0.0904887393116951 -175,0.09020774066448212 -176,0.08986975997686386 -177,0.0895950123667717 -178,0.08938222378492355 -179,0.08912618458271027 -180,0.08883398771286011 -181,0.08859100192785263 -182,0.08838513493537903 -183,0.08815007656812668 -184,0.08789398521184921 -185,0.08767092972993851 -186,0.08747376501560211 -187,0.08725599199533463 -188,0.08702786266803741 -189,0.08682629466056824 -190,0.08663957566022873 -191,0.08643898367881775 -192,0.08623471111059189 -193,0.08604944497346878 -194,0.08587262779474258 -195,0.0856856182217598 -196,0.08549933135509491 -197,0.08532743901014328 -198,0.08515924960374832 -199,0.08498497307300568 -200,0.08481410145759583 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.080/training_config.txt deleted file mode 100644 index c020668..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: 
Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.080/training_log.csv deleted file mode 100644 index 6d12072..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,106.753173828125 -2,31.59357452392578 -3,7.9822258949279785 -4,4.6665520668029785 -5,3.162118673324585 -6,3.076265573501587 -7,2.581423282623291 -8,3.248969078063965 -9,3.3040947914123535 -10,3.2732503414154053 -11,3.192314624786377 -12,3.098344564437866 -13,2.7400436401367188 -14,2.593604803085327 -15,2.358102321624756 -16,2.012235164642334 -17,1.9438045024871826 -18,1.869865894317627 -19,1.80354905128479 -20,1.7438206672668457 -21,1.6881585121154785 -22,1.6368725299835205 -23,1.5887136459350586 -24,1.5430246591567993 -25,1.499674677848816 -26,1.457910180091858 -27,1.4175254106521606 -28,1.3780620098114014 -29,1.3393973112106323 -30,1.3013039827346802 -31,1.263556957244873 -32,1.2260072231292725 -33,1.1880712509155273 -34,1.1488633155822754 -35,1.1065807342529297 -36,1.0602097511291504 -37,1.0079247951507568 -38,0.9473084211349487 -39,0.8815397024154663 -40,0.8150752782821655 -41,0.7512197494506836 -42,0.6909998655319214 -43,0.6342948079109192 -44,0.583043098449707 -45,0.5380421876907349 -46,0.49863800406455994 -47,0.464353084564209 -48,0.43425607681274414 -49,0.40757909417152405 -50,0.3839525878429413 -51,0.3623599112033844 -52,0.34246164560317993 -53,0.3241642415523529 -54,0.3072264790534973 -55,0.2914313077926636 -56,0.2766331136226654 -57,0.262612909078598 -58,0.24910695850849152 -59,0.23588620126247406 -60,0.22259554266929626 -61,0.20849740505218506 -62,0.19151733815670013 -63,0.16376352310180664 -64,0.14187079668045044 -65,0.1392495483160019 -66,0.14662790298461914 -67,0.15814819931983948 -68,0.1638404279947281 -69,0.1643274873495102 -70,0.16066519916057587 -71,0.15312539041042328 -72,0.14146623015403748 -73,0.12738686800003052 -74,0.11643390357494354 -75,0.10956274718046188 
-76,0.10630741715431213 -77,0.10775396227836609 -78,0.1113983616232872 -79,0.11275544762611389 -80,0.11049936711788177 -81,0.1052522286772728 -82,0.09881973266601562 -83,0.09352830797433853 -84,0.09048490971326828 -85,0.08929670602083206 -86,0.08907071501016617 -87,0.08899426460266113 -88,0.08847997337579727 -89,0.08726931363344193 -90,0.0853600725531578 -91,0.08288320153951645 -92,0.08009771257638931 -93,0.07737119495868683 -94,0.07507046312093735 -95,0.07340700924396515 -96,0.07238847762346268 -97,0.07176847755908966 -98,0.07115422189235687 -99,0.07023367285728455 -100,0.06893893331289291 -101,0.0674348920583725 -102,0.06601139903068542 -103,0.06489919126033783 -104,0.06417179107666016 -105,0.06375853717327118 -106,0.06351196020841599 -107,0.06327935308218002 -108,0.06295687705278397 -109,0.06251239776611328 -110,0.0619775615632534 -111,0.06145607680082321 -112,0.0610562302172184 -113,0.06083270162343979 -114,0.060750555247068405 -115,0.06071492284536362 -116,0.06062038242816925 -117,0.06041334569454193 -118,0.06011601537466049 -119,0.05980115383863449 -120,0.059539150446653366 -121,0.059355735778808594 -122,0.05922486633062363 -123,0.059094782918691635 -124,0.05892292410135269 -125,0.05869660526514053 -126,0.058435019105672836 -127,0.05817471072077751 -128,0.05794723704457283 -129,0.05776040256023407 -130,0.05759535729885101 -131,0.05742279067635536 -132,0.05722449719905853 -133,0.05700491741299629 -134,0.056784775108098984 -135,0.05658409371972084 -136,0.056408826261758804 -137,0.05624997615814209 -138,0.056092530488967896 -139,0.055926501750946045 -140,0.05575234070420265 -141,0.05557882785797119 -142,0.055415742099285126 -143,0.05526711046695709 -144,0.055128857493400574 -145,0.05499282479286194 -146,0.054852887988090515 -147,0.0547090582549572 -148,0.05456610023975372 -149,0.0544293038547039 -150,0.05430002510547638 -151,0.054175492376089096 -152,0.054051514714956284 -153,0.05392545834183693 -154,0.053797852247953415 -155,0.05367129668593407 -156,0.053548216819763184 -157,0.05342893302440643 -158,0.053311899304389954 -159,0.05319490283727646 -160,0.053077105432748795 -161,0.0529591403901577 -162,0.05284257233142853 -163,0.05272836983203888 -164,0.05261648818850517 -165,0.05250585824251175 -166,0.052395567297935486 -167,0.052285388112068176 -168,0.05217593163251877 -169,0.05206799879670143 -170,0.05196192115545273 -171,0.05185729265213013 -172,0.05175360292196274 -173,0.051650356501340866 -174,0.051547929644584656 -175,0.051446493715047836 -176,0.05134640634059906 -177,0.05124766007065773 -178,0.051149919629096985 -179,0.051053017377853394 -180,0.05095681548118591 -181,0.05086138844490051 -182,0.05076700076460838 -183,0.050673726946115494 -184,0.0505814366042614 -185,0.05048993602395058 -186,0.050399135798215866 -187,0.050309065729379654 -188,0.05021986365318298 -189,0.050131574273109436 -190,0.05004426836967468 -191,0.049957819283008575 -192,0.04987205192446709 -193,0.04978691041469574 -194,0.04970243573188782 -195,0.049618612974882126 -196,0.04953543469309807 -197,0.049452897161245346 -198,0.049371011555194855 -199,0.04928964376449585 -200,0.04920883849263191 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.100/training_config.txt deleted file mode 100644 index 8eb19eb..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.100/training_log.csv deleted file mode 100644 index be4cd58..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,71.32837677001953 -2,124.1866683959961 -3,31.161479949951172 -4,6.362227439880371 -5,3.3169777393341064 -6,2.6082100868225098 -7,2.716627359390259 -8,1.7983362674713135 -9,1.7102015018463135 -10,1.6688698530197144 -11,1.5789989233016968 -12,1.504524827003479 -13,1.4385143518447876 -14,1.3734188079833984 -15,1.3080805540084839 -16,1.241276502609253 -17,1.167244553565979 -18,1.0963369607925415 -19,1.0125824213027954 -20,0.7069624662399292 -21,0.6844952702522278 -22,0.6417853236198425 -23,0.6024838089942932 -24,0.5686852931976318 -25,0.5385146737098694 -26,0.5112378597259521 -27,0.48638880252838135 -28,0.46345293521881104 -29,0.4419107735157013 -30,0.4209806025028229 -31,0.39977580308914185 -32,0.3765217363834381 -33,0.3489212691783905 -34,0.32347792387008667 -35,0.301837682723999 -36,0.28831571340560913 -37,0.2547037601470947 -38,0.27172571420669556 -39,0.2695416808128357 -40,0.26485109329223633 -41,0.2608095705509186 -42,0.25721776485443115 
-43,0.253814160823822 -44,0.25032946467399597 -45,0.24660421907901764 -46,0.24256399273872375 -47,0.23818190395832062 -48,0.2334589660167694 -49,0.22841419279575348 -50,0.2230859100818634 -51,0.21752767264842987 -52,0.21182191371917725 -53,0.2060430347919464 -54,0.20023956894874573 -55,0.19449187815189362 -56,0.18886303901672363 -57,0.1833663284778595 -58,0.17794901132583618 -59,0.17245188355445862 -60,0.16671550273895264 -61,0.16049200296401978 -62,0.15323978662490845 -63,0.14475956559181213 -64,0.1397828310728073 -65,0.1383485049009323 -66,0.13848091661930084 -67,0.1382594108581543 -68,0.13661135733127594 -69,0.1334999054670334 -70,0.12947607040405273 -71,0.1252973973751068 -72,0.12158853560686111 -73,0.1186121255159378 -74,0.1162906065583229 -75,0.11441295593976974 -76,0.11278296262025833 -77,0.11120990663766861 -78,0.10958357155323029 -79,0.10788983851671219 -80,0.10613585263490677 -81,0.10432814806699753 -82,0.10247740894556046 -83,0.10059875249862671 -84,0.09871132671833038 -85,0.09684068709611893 -86,0.09501197189092636 -87,0.09324061125516891 -88,0.09154081344604492 -89,0.0899239033460617 -90,0.08839309215545654 -91,0.08693647384643555 -92,0.08554070442914963 -93,0.08419496566057205 -94,0.08288849145174026 -95,0.08161207288503647 -96,0.08036063611507416 -97,0.07913289964199066 -98,0.07793182879686356 -99,0.0767643004655838 -100,0.07563850283622742 -101,0.07456054538488388 -102,0.0735359638929367 -103,0.07256778329610825 -104,0.07165729999542236 -105,0.07080373167991638 -106,0.07000511884689331 -107,0.0692550465464592 -108,0.06854798644781113 -109,0.06788060814142227 -110,0.06725136935710907 -111,0.0666588693857193 -112,0.06610282510519028 -113,0.06558419018983841 -114,0.06510093808174133 -115,0.06465169787406921 -116,0.06423615664243698 -117,0.06385345757007599 -118,0.0635019987821579 -119,0.06317949295043945 -120,0.06288227438926697 -121,0.06260666996240616 -122,0.06234993040561676 -123,0.06210947036743164 -124,0.06188298016786575 -125,0.0616687573492527 -126,0.0614655539393425 -127,0.061273302882909775 -128,0.06109131872653961 -129,0.06091834977269173 -130,0.06075361743569374 -131,0.0605962872505188 -132,0.060445450246334076 -133,0.060300152748823166 -134,0.06015952304005623 -135,0.060022592544555664 -136,0.05988886579871178 -137,0.059757769107818604 -138,0.05962926149368286 -139,0.05950310826301575 -140,0.059378962963819504 -141,0.05925695225596428 -142,0.05913702771067619 -143,0.05901913344860077 -144,0.05890325456857681 -145,0.05878915265202522 -146,0.058677125722169876 -147,0.05856730416417122 -148,0.058459606021642685 -149,0.058353882282972336 -150,0.058250442147254944 -151,0.05814919248223305 -152,0.05805005505681038 -153,0.05795308202505112 -154,0.057858165353536606 -155,0.057765182107686996 -156,0.05767401307821274 -157,0.05758453905582428 -158,0.05749654769897461 -159,0.057409994304180145 -160,0.05732479691505432 -161,0.05724100396037102 -162,0.05715842917561531 -163,0.057076964527368546 -164,0.056996580213308334 -165,0.056917279958724976 -166,0.05683886259794235 -167,0.05676136538386345 -168,0.05668472498655319 -169,0.05660893768072128 -170,0.05653407797217369 -171,0.056460000574588776 -172,0.056386448442935944 -173,0.05631337687373161 -174,0.05624070763587952 -175,0.05616854503750801 -176,0.05609682574868202 -177,0.05602549761533737 -178,0.055954545736312866 -179,0.05588401108980179 -180,0.055813830345869064 -181,0.0557439848780632 -182,0.05567444860935211 -183,0.055605269968509674 -184,0.05553649738430977 -185,0.05546806380152702 -186,0.05539996176958084 
-187,0.05533222109079361 -188,0.055264782160520554 -189,0.055197808891534805 -190,0.0551312081515789 -191,0.05506502836942673 -192,0.054999228566884995 -193,0.054933760315179825 -194,0.0548686645925045 -195,0.054803911596536636 -196,0.05473945289850235 -197,0.05467535927891731 -198,0.054611604660749435 -199,0.054548121988773346 -200,0.05448494106531143 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.125/training_config.txt deleted file mode 100644 index e4a60fa..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.125/training_log.csv deleted file mode 100644 index 54b855c..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,86.89329528808594 -2,9.79745864868164 -3,3.9576973915100098 -4,2.1131131649017334 -5,1.5236777067184448 -6,1.0975193977355957 -7,0.9484968781471252 -8,0.8514317274093628 -9,0.7588451504707336 
-10,0.6147339940071106 -11,0.4098510444164276 -12,0.33170732855796814 -13,0.27066418528556824 -14,0.22252729535102844 -15,0.1853306144475937 -16,0.15524829924106598 -17,0.11089783906936646 -18,0.10149765014648438 -19,0.15317672491073608 -20,0.1867757886648178 -21,0.20531196892261505 -22,0.2123117297887802 -23,0.208701953291893 -24,0.19555650651454926 -25,0.17431038618087769 -26,0.14668315649032593 -27,0.11665979027748108 -28,0.09350330382585526 -29,0.07964999973773956 -30,0.07383961975574493 -31,0.0736149474978447 -32,0.07590695470571518 -33,0.07829520106315613 -34,0.07970958203077316 -35,0.07993081212043762 -36,0.07901634275913239 -37,0.0771222934126854 -38,0.07442095130681992 -39,0.07104509323835373 -40,0.06714635342359543 -41,0.06295207142829895 -42,0.05875697359442711 -43,0.0548943467438221 -44,0.051754627376794815 -45,0.04963386058807373 -46,0.04864204674959183 -47,0.04866617172956467 -48,0.049419205635786057 -49,0.05053582787513733 -50,0.051668502390384674 -51,0.05255221202969551 -52,0.05302761122584343 -53,0.05303926765918732 -54,0.05261468514800072 -55,0.05186165124177933 -56,0.05090681090950966 -57,0.0498812273144722 -58,0.04890994355082512 -59,0.04809404909610748 -60,0.04749864712357521 -61,0.0471353642642498 -62,0.04698082432150841 -63,0.04698266088962555 -64,0.04707099497318268 -65,0.047179870307445526 -66,0.04725557193160057 -67,0.04726586490869522 -68,0.04719981923699379 -69,0.04706598445773125 -70,0.04688585177063942 -71,0.046687062829732895 -72,0.04649679362773895 -73,0.04633563756942749 -74,0.04621397703886032 -75,0.04613174870610237 -76,0.04608045145869255 -77,0.04604613035917282 -78,0.04601385444402695 -79,0.04597114026546478 -80,0.04591033235192299 -81,0.045829497277736664 -82,0.045732758939266205 -83,0.045627932995557785 -84,0.04552401602268219 -85,0.04542939364910126 -86,0.04534992575645447 -87,0.04528769105672836 -88,0.04524118825793266 -89,0.045206159353256226 -90,0.04517682269215584 -91,0.04514709860086441 -92,0.045112017542123795 -93,0.04506867378950119 -94,0.04501651972532272 -95,0.044956982135772705 -96,0.04489269480109215 -97,0.044827044010162354 -98,0.04476332664489746 -99,0.0447036512196064 -100,0.04464911296963692 -101,0.04459960758686066 -102,0.04455425590276718 -103,0.04451144114136696 -104,0.044469334185123444 -105,0.04442654550075531 -106,0.044382140040397644 -107,0.044335849583148956 -108,0.04428787901997566 -109,0.04423896223306656 -110,0.044189807027578354 -111,0.04414111748337746 -112,0.04409331828355789 -113,0.04404658079147339 -114,0.0440007820725441 -115,0.04395563155412674 -116,0.04391075670719147 -117,0.04386579617857933 -118,0.04382047802209854 -119,0.0437747985124588 -120,0.043728865683078766 -121,0.043682828545570374 -122,0.043636906892061234 -123,0.04359133541584015 -124,0.04354625195264816 -125,0.04350157454609871 -126,0.043457213789224625 -127,0.043413128703832626 -128,0.04336908087134361 -129,0.04332497715950012 -130,0.043280765414237976 -131,0.04323644936084747 -132,0.043191999197006226 -133,0.04314751550555229 -134,0.04310309886932373 -135,0.043058887124061584 -136,0.043014880269765854 -137,0.04297104850411415 -138,0.04292741417884827 -139,0.04288391023874283 -140,0.04284052550792694 -141,0.04279719293117523 -142,0.04275389760732651 -143,0.04271062836050987 -144,0.04266735538840294 -145,0.04262411966919899 -146,0.04258091747760773 -147,0.04253783077001572 -148,0.04249492287635803 -149,0.04245207458734512 -150,0.04240932688117027 -151,0.04236672818660736 -152,0.04232415929436684 -153,0.0422816202044487 -154,0.04223919287323952 
-155,0.0421968549489975 -156,0.04215452820062637 -157,0.042112261056900024 -158,0.042070094496011734 -159,0.0420280359685421 -160,0.04198610410094261 -161,0.041944246739149094 -162,0.04190248250961304 -163,0.04186078533530235 -164,0.041819144040346146 -165,0.0417775958776474 -166,0.041736140847206116 -167,0.0416947640478611 -168,0.04165344685316086 -169,0.04161219298839569 -170,0.041571054607629776 -171,0.04153003916144371 -172,0.04148907959461212 -173,0.0414482019841671 -174,0.04140743613243103 -175,0.04136671870946884 -176,0.04132606089115143 -177,0.041285544633865356 -178,0.04124512895941734 -179,0.041204776614904404 -180,0.04116448760032654 -181,0.04112434759736061 -182,0.041084349155426025 -183,0.0410444438457489 -184,0.04100463166832924 -185,0.04096490889787674 -186,0.040925249457359314 -187,0.040885716676712036 -188,0.040846291929483414 -189,0.04080692678689957 -190,0.040767621248960495 -191,0.0407283790409565 -192,0.04068929702043533 -193,0.040650345385074615 -194,0.04061145707964897 -195,0.0405726283788681 -196,0.04053390771150589 -197,0.04049526900053024 -198,0.04045673832297325 -199,0.04041825607419014 -200,0.04037985950708389 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.160/training_config.txt deleted file mode 100644 index 0a538aa..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] 
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.160/training_log.csv deleted file mode 100644 index 3a5bb45..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,89.62513732910156 -2,5.664984226226807 -3,6.2093095779418945 -4,6.394765853881836 -5,6.022562503814697 -6,5.711552619934082 -7,5.38148307800293 -8,5.061083793640137 -9,4.7132248878479 -10,4.253727436065674 -11,3.723489761352539 -12,3.4558725357055664 -13,3.248114585876465 -14,3.0654470920562744 -15,2.8969786167144775 -16,2.7359564304351807 -17,2.5861172676086426 -18,2.4394516944885254 -19,2.2925405502319336 -20,2.1444504261016846 -21,1.9962677955627441 -22,1.850273847579956 -23,1.7218137979507446 -24,1.6032675504684448 -25,1.4923263788223267 -26,1.3883330821990967 -27,1.2935726642608643 -28,1.2077882289886475 -29,1.131571888923645 -30,1.0639727115631104 -31,1.0022101402282715 -32,0.9457269310951233 -33,0.893385648727417 -34,0.8445428013801575 -35,0.7987157106399536 -36,0.755585253238678 -37,0.7147661447525024 -38,0.674936056137085 -39,0.6378732323646545 -40,0.6049517393112183 -41,0.5775244235992432 -42,0.5565212368965149 -43,0.5417281985282898 -44,0.5312022566795349 -45,0.5229249596595764 -46,0.5153945088386536 -47,0.5076151490211487 -48,0.4989480674266815 -49,0.48935115337371826 -50,0.47824662923812866 -51,0.4654814898967743 -52,0.45125457644462585 -53,0.4357924461364746 -54,0.4192277491092682 -55,0.4020943343639374 -56,0.3847631812095642 -57,0.3676261901855469 -58,0.35096311569213867 -59,0.33501580357551575 -60,0.32016927003860474 -61,0.30647212266921997 -62,0.29391300678253174 -63,0.2822948694229126 -64,0.27155953645706177 -65,0.2617792785167694 -66,0.25273969769477844 -67,0.24417726695537567 -68,0.23593120276927948 -69,0.2278144359588623 -70,0.21970537304878235 -71,0.21170355379581451 -72,0.20378686487674713 -73,0.19596797227859497 -74,0.18833312392234802 -75,0.18096129596233368 -76,0.17391696572303772 -77,0.1672365516424179 -78,0.16093234717845917 -79,0.15501759946346283 -80,0.14950841665267944 -81,0.1444101333618164 -82,0.13959668576717377 -83,0.13507916033267975 -84,0.1307959407567978 -85,0.1266242414712906 -86,0.12254370003938675 -87,0.1185314729809761 -88,0.11459564417600632 -89,0.11075391620397568 -90,0.1070220023393631 -91,0.10336882621049881 -92,0.09988793730735779 -93,0.09656620770692825 -94,0.09338446706533432 -95,0.09034010767936707 -96,0.08742165565490723 -97,0.08461792021989822 -98,0.08191199600696564 -99,0.07929030805826187 -100,0.07676614075899124 -101,0.07432635128498077 -102,0.07195426523685455 -103,0.06964191049337387 -104,0.06740645319223404 -105,0.06525813788175583 -106,0.06320573389530182 -107,0.06125587224960327 -108,0.059412553906440735 -109,0.05768195167183876 -110,0.05606253445148468 -111,0.054539505392313004 -112,0.05311260372400284 -113,0.051779236644506454 -114,0.05053800716996193 -115,0.049392759799957275 -116,0.048336949199438095 -117,0.0473640151321888 -118,0.04646988958120346 -119,0.045656222850084305 -120,0.04492570459842682 -121,0.04427206516265869 -122,0.04369935020804405 -123,0.04319705441594124 -124,0.04275970533490181 
-125,0.04239128157496452 -126,0.04207611083984375 -127,0.04181281849741936 -128,0.041586603969335556 -129,0.04139377921819687 -130,0.041230276226997375 -131,0.04109209403395653 -132,0.04097612202167511 -133,0.040878210216760635 -134,0.040795620530843735 -135,0.04072636365890503 -136,0.040668193250894547 -137,0.04061900079250336 -138,0.04057729244232178 -139,0.04054153338074684 -140,0.04051025211811066 -141,0.040482182055711746 -142,0.04045621678233147 -143,0.04043149948120117 -144,0.04040735587477684 -145,0.0403832271695137 -146,0.04035881161689758 -147,0.040333863347768784 -148,0.04030824825167656 -149,0.04028183966875076 -150,0.04025464877486229 -151,0.04022672772407532 -152,0.04019809141755104 -153,0.040168844163417816 -154,0.04013904184103012 -155,0.04010867699980736 -156,0.04007800668478012 -157,0.040047112852334976 -158,0.040016114711761475 -159,0.03998507559299469 -160,0.039954110980033875 -161,0.039923377335071564 -162,0.03989286720752716 -163,0.03986271098256111 -164,0.03983286768198013 -165,0.03980347886681557 -166,0.03977468982338905 -167,0.03974658623337746 -168,0.039719246327877045 -169,0.03969255089759827 -170,0.039666589349508286 -171,0.03964134305715561 -172,0.039616771042346954 -173,0.03959282860159874 -174,0.039569560438394547 -175,0.03954687342047691 -176,0.03952473774552345 -177,0.03950310871005058 -178,0.03948191553354263 -179,0.03946114704012871 -180,0.03944077342748642 -181,0.03942081704735756 -182,0.039401210844516754 -183,0.039381932467222214 -184,0.03936294838786125 -185,0.039344243705272675 -186,0.03932579606771469 -187,0.03930755704641342 -188,0.03928954899311066 -189,0.0392717570066452 -190,0.03925413638353348 -191,0.03923669457435608 -192,0.03921939805150032 -193,0.0392022468149662 -194,0.03918524831533432 -195,0.03916839137673378 -196,0.03915170952677727 -197,0.039135172963142395 -198,0.03911878168582916 -199,0.039102520793676376 -200,0.03908640518784523 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.200/training_config.txt deleted file mode 100644 index 5f98833..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] 
-[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.200/training_log.csv deleted file mode 100644 index a4702fa..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,57.48727798461914 -2,3.0171878337860107 -3,4.830185890197754 -4,3.9158661365509033 -5,3.7214670181274414 -6,3.509265661239624 -7,3.279031991958618 -8,3.017045497894287 -9,2.7050116062164307 -10,2.305279493331909 -11,1.9636037349700928 -12,1.7104434967041016 -13,1.5002297163009644 -14,1.3136521577835083 -15,1.1325345039367676 -16,0.9514293074607849 -17,0.8053052425384521 -18,0.6954002976417542 -19,0.6042900085449219 -20,0.5236718654632568 -21,0.4526040554046631 -22,0.39163047075271606 -23,0.3441469073295593 -24,0.3076304793357849 -25,0.2804439663887024 -26,0.2614082992076874 -27,0.24848859012126923 -28,0.24002207815647125 -29,0.23472285270690918 -30,0.231373131275177 -31,0.2288428694009781 -32,0.22640760242938995 -33,0.2235696017742157 -34,0.2199365198612213 -35,0.2152591198682785 -36,0.20942460000514984 -37,0.20245587825775146 -38,0.1944132000207901 -39,0.18550223112106323 -40,0.17598290741443634 -41,0.1663326919078827 -42,0.15681514143943787 -43,0.14744746685028076 -44,0.13839612901210785 -45,0.12974372506141663 -46,0.12162540853023529 -47,0.11413417756557465 -48,0.1073097288608551 -49,0.10116654634475708 -50,0.09581631422042847 -51,0.09113050997257233 -52,0.08704286068677902 -53,0.08345261216163635 -54,0.08023250848054886 -55,0.07738283276557922 -56,0.07484739273786545 -57,0.07256148010492325 -58,0.070480115711689 -59,0.06851516664028168 -60,0.0666910707950592 -61,0.06500189751386642 -62,0.06340008229017258 -63,0.06184918060898781 -64,0.060330431908369064 -65,0.058843083679676056 -66,0.05739414319396019 -67,0.05598275363445282 -68,0.05462421104311943 -69,0.05333610251545906 -70,0.05213678628206253 -71,0.05102022364735603 -72,0.049993712455034256 -73,0.0490620993077755 -74,0.048221614211797714 -75,0.047477588057518005 -76,0.04683000594377518 -77,0.046272195875644684 -78,0.04579763486981392 -79,0.045397307723760605 -80,0.04505949839949608 -81,0.044774968177080154 -82,0.04453421011567116 -83,0.0443279929459095 -84,0.044148027896881104 -85,0.04398725926876068 -86,0.04383963719010353 -87,0.04370125010609627 -88,0.04357273876667023 -89,0.04344925284385681 -90,0.04332832247018814 -91,0.043209776282310486 -92,0.04309383034706116 
-93,0.04298090934753418 -94,0.04287143051624298 -95,0.04276592656970024 -96,0.042664535343647 -97,0.04256719350814819 -98,0.0424736812710762 -99,0.042383577674627304 -100,0.04229636862874031 -101,0.04221144691109657 -102,0.0421280674636364 -103,0.042045559734106064 -104,0.04196466505527496 -105,0.04188404232263565 -106,0.04180275648832321 -107,0.04172047972679138 -108,0.04163707420229912 -109,0.04155267775058746 -110,0.04146752506494522 -111,0.041381996124982834 -112,0.04129653796553612 -113,0.0412115752696991 -114,0.041127562522888184 -115,0.0410449281334877 -116,0.04096399247646332 -117,0.04088495299220085 -118,0.040807973593473434 -119,0.04073305428028107 -120,0.04066016152501106 -121,0.04058920964598656 -122,0.04052005335688591 -123,0.04045254737138748 -124,0.04038657620549202 -125,0.04032205417752266 -126,0.04025891050696373 -127,0.040196970105171204 -128,0.04013613238930702 -129,0.040076322853565216 -130,0.04001753032207489 -131,0.03995973989367485 -132,0.0399029366672039 -133,0.03984713554382324 -134,0.03979232907295227 -135,0.0397384911775589 -136,0.039685655385255814 -137,0.03963376581668854 -138,0.03958280384540558 -139,0.03953273966908455 -140,0.03948352485895157 -141,0.03943514823913574 -142,0.03938755765557289 -143,0.039340708404779434 -144,0.03929457440972328 -145,0.03924914076924324 -146,0.039204373955726624 -147,0.039160240441560745 -148,0.0391167476773262 -149,0.0390738919377327 -150,0.039031654596328735 -151,0.03899003192782402 -152,0.03894903138279915 -153,0.03890866041183472 -154,0.03886890038847923 -155,0.03882977366447449 -156,0.03879127651453018 -157,0.03875338286161423 -158,0.038716092705726624 -159,0.03867930918931961 -160,0.03864298388361931 -161,0.038607191294431686 -162,0.03857193514704704 -163,0.0385371632874012 -164,0.038502901792526245 -165,0.03846916928887367 -166,0.038435932248830795 -167,0.03840320184826851 -168,0.03837094083428383 -169,0.038339171558618546 -170,0.038307882845401764 -171,0.03827705234289169 -172,0.038246672600507736 -173,0.0382167212665081 -174,0.03818722441792488 -175,0.03815816715359688 -176,0.03812951594591141 -177,0.03810128569602966 -178,0.03807344287633896 -179,0.0380459763109684 -180,0.038018908351659775 -181,0.03799222409725189 -182,0.03796590119600296 -183,0.03793992102146149 -184,0.03791429102420807 -185,0.0378890298306942 -186,0.03786410018801689 -187,0.03783947601914406 -188,0.037815168499946594 -189,0.03779115900397301 -190,0.037767451256513596 -191,0.03774404153227806 -192,0.03772091493010521 -193,0.03769806772470474 -194,0.03767552971839905 -195,0.037653304636478424 -196,0.03763134777545929 -197,0.03760965168476105 -198,0.037588201463222504 -199,0.037567004561424255 -200,0.03754603490233421 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.250/training_config.txt deleted file mode 100644 index 3495b0e..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, 
min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.250/training_log.csv deleted file mode 100644 index da7a7b8..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,78.86443328857422 -2,3061.630615234375 -3,3.448330879211426 -4,3.411478281021118 -5,3.3930227756500244 -6,3.391857862472534 -7,3.4600489139556885 -8,3.5519418716430664 -9,3.696221351623535 -10,3.892289400100708 -11,4.048620223999023 -12,4.201815128326416 -13,4.374793529510498 -14,4.400200843811035 -15,4.200795650482178 -16,3.989428997039795 -17,3.8143680095672607 -18,3.6485421657562256 -19,3.49236798286438 -20,3.3545072078704834 -21,3.2462964057922363 -22,3.1590423583984375 -23,3.083796262741089 -24,3.0161988735198975 -25,2.9540789127349854 -26,2.8963546752929688 -27,2.842355251312256 -28,2.791618824005127 -29,2.7436153888702393 -30,2.6980581283569336 -31,2.6545372009277344 -32,2.612743854522705 -33,2.572359085083008 -34,2.533336877822876 -35,2.49627947807312 -36,2.4602158069610596 -37,2.4249486923217773 -38,2.3905742168426514 -39,2.358980894088745 -40,2.3296077251434326 -41,2.3023569583892822 -42,2.275627374649048 -43,2.249802350997925 -44,2.22458815574646 -45,2.199737310409546 -46,2.175110340118408 -47,2.150665521621704 -48,2.126370668411255 -49,2.102222204208374 -50,2.0782101154327393 -51,2.054327964782715 -52,2.0305702686309814 -53,2.006934404373169 -54,1.9834223985671997 -55,1.9601324796676636 -56,1.937125563621521 -57,1.914269208908081 -58,1.891553282737732 -59,1.8689777851104736 -60,1.8465473651885986 -61,1.824281096458435 -62,1.8024325370788574 -63,1.7807592153549194 
-64,1.7592453956604004 -65,1.737904667854309 -66,1.7167428731918335 -67,1.6957571506500244 -68,1.674957513809204 -69,1.6543490886688232 -70,1.6339380741119385 -71,1.6138497591018677 -72,1.5940083265304565 -73,1.5743507146835327 -74,1.5549348592758179 -75,1.535834550857544 -76,1.5170255899429321 -77,1.498421311378479 -78,1.4800209999084473 -79,1.4618220329284668 -80,1.4438271522521973 -81,1.4260382652282715 -82,1.4084500074386597 -83,1.3910582065582275 -84,1.3738607168197632 -85,1.3568779230117798 -86,1.340097427368164 -87,1.323446273803711 -88,1.3067913055419922 -89,1.290298581123352 -90,1.2739924192428589 -91,1.257902979850769 -92,1.2419953346252441 -93,1.2260748147964478 -94,1.210244059562683 -95,1.194554328918457 -96,1.1790039539337158 -97,1.1636018753051758 -98,1.1483896970748901 -99,1.1333074569702148 -100,1.1183534860610962 -101,1.1035382747650146 -102,1.0888341665267944 -103,1.0742332935333252 -104,1.0597330331802368 -105,1.0453330278396606 -106,1.0310312509536743 -107,1.0168097019195557 -108,1.0025403499603271 -109,0.9882181882858276 -110,0.9739567041397095 -111,0.9597407579421997 -112,0.9455889463424683 -113,0.9314975738525391 -114,0.9174709916114807 -115,0.9035063982009888 -116,0.8896163702011108 -117,0.8757995367050171 -118,0.8620460629463196 -119,0.8483306765556335 -120,0.8344481587409973 -121,0.8204718828201294 -122,0.8065083622932434 -123,0.7925527095794678 -124,0.7784733772277832 -125,0.7641069889068604 -126,0.7491897940635681 -127,0.7337123155593872 -128,0.7178558707237244 -129,0.7018551826477051 -130,0.6857439279556274 -131,0.6695433259010315 -132,0.6532536149024963 -133,0.6352528929710388 -134,0.616062343120575 -135,0.5964937210083008 -136,0.576655924320221 -137,0.5566047430038452 -138,0.536402702331543 -139,0.5162279605865479 -140,0.4962668716907501 -141,0.47652629017829895 -142,0.4569410979747772 -143,0.43754926323890686 -144,0.4184027314186096 -145,0.39956414699554443 -146,0.3810919225215912 -147,0.3630392849445343 -148,0.345457524061203 -149,0.32835474610328674 -150,0.31177881360054016 -151,0.2957269251346588 -152,0.2801155149936676 -153,0.264997661113739 -154,0.2504197955131531 -155,0.23631824553012848 -156,0.22274018824100494 -157,0.2096986025571823 -158,0.19721153378486633 -159,0.1852836161851883 -160,0.17391040921211243 -161,0.1630762666463852 -162,0.15286396443843842 -163,0.14323192834854126 -164,0.1342708021402359 -165,0.1259361058473587 -166,0.11809985339641571 -167,0.11075543612241745 -168,0.10390489548444748 -169,0.09755533933639526 -170,0.09171852469444275 -171,0.08626481145620346 -172,0.08112622797489166 -173,0.07642009109258652 -174,0.07220201939344406 -175,0.06831430643796921 -176,0.06470092386007309 -177,0.06155185028910637 -178,0.058938510715961456 -179,0.05684282258152962 -180,0.05522890388965607 -181,0.054045580327510834 -182,0.053215205669403076 -183,0.05264895409345627 -184,0.052257735282182693 -185,0.05200246348977089 -186,0.05180337280035019 -187,0.05159885436296463 -188,0.05134677514433861 -189,0.05102115869522095 -190,0.050611112266778946 -191,0.05011774227023125 -192,0.049551840871572495 -193,0.04893098771572113 -194,0.048277005553245544 -195,0.047613002359867096 -196,0.04696133732795715 -197,0.04634184017777443 -198,0.04577089473605156 -199,0.04525833949446678 -200,0.04481124132871628 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.300/training_config.txt deleted file mode 100644 index 592417b..0000000 --- 
a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.300/training_log.csv deleted file mode 100644 index 62b8d6a..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.300/training_log.csv +++ /dev/null @@ -1,105 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,110.30671691894531 -2,10.267021179199219 -3,62122.28515625 -4,19.74958610534668 -5,18.197572708129883 -6,171.65945434570312 -7,43.887577056884766 -8,20.940507888793945 -9,17.429384231567383 -10,19.05335807800293 -11,21.563846588134766 -12,20.752017974853516 -13,18.272537231445312 -14,16.051387786865234 -15,11.929858207702637 -16,9.870332717895508 -17,8.979559898376465 -18,7.7474822998046875 -19,7.094003677368164 -20,6.580947399139404 -21,6.0790019035339355 -22,5.684493064880371 -23,5.3839192390441895 -24,5.1419596672058105 -25,4.937781810760498 -26,4.75927209854126 -27,4.598871231079102 -28,4.451286315917969 -29,4.3147196769714355 -30,4.190276622772217 -31,4.076364040374756 -32,3.9710419178009033 -33,3.8730499744415283 -34,3.781099319458008 -35,3.6941685676574707 -36,3.611438274383545 
-37,3.5326061248779297 -38,3.4573397636413574 -39,3.385338068008423 -40,3.3163628578186035 -41,3.249988317489624 -42,3.185936450958252 -43,3.123995065689087 -44,3.064474105834961 -45,3.006708860397339 -46,2.9503729343414307 -47,2.8953857421875 -48,2.841594934463501 -49,2.788810968399048 -50,2.736926555633545 -51,2.6858158111572266 -52,2.6353282928466797 -53,2.585559368133545 -54,2.5365378856658936 -55,2.4883010387420654 -56,2.440783739089966 -57,2.3939363956451416 -58,2.347754716873169 -59,2.302278518676758 -60,2.2575337886810303 -61,2.2135496139526367 -62,2.1703624725341797 -63,2.12800931930542 -64,2.0865206718444824 -65,2.0459234714508057 -66,2.0062386989593506 -67,1.9674880504608154 -68,1.929705262184143 -69,1.892978549003601 -70,1.8572494983673096 -71,1.8224847316741943 -72,1.7886501550674438 -73,1.755722999572754 -74,1.7236727476119995 -75,1.692488670349121 -76,2.847368001937866 -77,6.388359546661377 -78,19.414413452148438 -79,96.09228515625 -80,162.97181701660156 -81,59.82024383544922 -82,43.308536529541016 -83,33.715335845947266 -84,26.88729476928711 -85,22.9480037689209 -86,21.32719612121582 -87,20.4597225189209 -88,19.329849243164062 -89,18.395469665527344 -90,17.50013542175293 -91,16.56598663330078 -92,15.023205757141113 -93,14.888143539428711 -94,12.845963478088379 -95,12.845976829528809 -96,12.46943473815918 -97,12.049368858337402 -98,11.651838302612305 -99,12.850604057312012 -100,nan -101,nan -102,nan -103,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.400/training_config.txt deleted file mode 100644 index c1945c7..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 
3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.400/training_log.csv deleted file mode 100644 index 17e35a5..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.400/training_log.csv +++ /dev/null @@ -1,16 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,1126.791748046875 -2,4.326338291168213 -3,4.340819835662842 -4,4.1349592208862305 -5,8.00390625 -6,55969.29296875 -7,6.698582172393799 -8,2.3140876293182373 -9,2.180274486541748 -10,2.111131429672241 -11,nan -12,nan -13,nan -14,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.500/training_config.txt deleted file mode 100644 index 907b9b1..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, 
-3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.500/training_log.csv deleted file mode 100644 index ac5bbce..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.500/training_log.csv +++ /dev/null @@ -1,16 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,16351.451171875 -2,77387.75 -3,73185.09375 -4,32617.43359375 -5,10.602575302124023 -6,6.613097190856934 -7,6.6182122230529785 -8,6.7574543952941895 -9,6.877490997314453 -10,6.96396541595459 -11,nan -12,nan -13,nan -14,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.600/training_config.txt deleted file mode 100644 index 17bc2f6..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.600/training_log.csv 
b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.600/training_log.csv deleted file mode 100644 index 52517bc..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.600/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,40741.2890625 -2,78438.8515625 -3,78500.046875 -4,78500.046875 -5,78500.046875 -6,78500.046875 -7,78500.046875 -8,78500.046875 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.700/training_config.txt deleted file mode 100644 index 558edb1..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.700/training_log.csv deleted file mode 100644 index 64be42e..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,59869.23828125 -2,78492.6328125 -3,78500.046875 -4,78500.046875 -5,78500.046875 -6,78500.046875 -7,78500.046875 
-8,78500.046875 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.800/training_config.txt deleted file mode 100644 index f4dbeab..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.800/training_log.csv deleted file mode 100644 index ea68393..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,68674.3359375 -2,78500.046875 -3,78500.046875 -4,78500.046875 -5,78500.046875 -6,78500.046875 -7,78500.046875 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.900/training_config.txt deleted file mode 100644 index 5fdb720..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.900/training_log.csv deleted file mode 100644 index f152a66..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,74006.375 -2,78500.046875 -3,78500.046875 -4,78500.046875 -5,78500.046875 -6,78500.046875 -7,78500.046875 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_1.000/training_config.txt deleted file mode 100644 index 1443b20..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # 
Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_1.000/training_log.csv deleted file mode 100644 index dd85772..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,76239.328125 -2,78500.046875 -3,78500.046875 -4,78500.046875 -5,78500.046875 -6,78500.046875 -7,78500.046875 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_16.000/training_config.txt deleted file mode 100644 index 52e0ce9..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if 
isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_16.000/training_log.csv deleted file mode 100644 index 61f502b..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,78707.1328125 -2,78707.1328125 -3,78707.1328125 -4,78707.1328125 -5,78707.1328125 -6,78707.1328125 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_2.000/training_config.txt deleted file mode 100644 index 59927a2..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, 
-1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_2.000/training_log.csv deleted file mode 100644 index 749af72..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,78704.4140625 -2,1.5603140592575073 -3,nan -4,nan -5,nan -6,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_4.000/training_config.txt deleted file mode 100644 index 1230735..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 
1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_4.000/training_log.csv deleted file mode 100644 index 61f502b..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,78707.1328125 -2,78707.1328125 -3,78707.1328125 -4,78707.1328125 -5,78707.1328125 -6,78707.1328125 diff --git a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_8.000/training_config.txt deleted file mode 100644 index 4ef8d1f..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_cubed_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/3) - 1) * 1/t_max * (-t_span + t_max) + 1)**-3 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git 
a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_8.000/training_log.csv deleted file mode 100644 index 61f502b..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_cubed_mirrored/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,339.7125244140625 -1,78707.1328125 -2,78707.1328125 -3,78707.1328125 -4,78707.1328125 -5,78707.1328125 -6,78707.1328125 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.010/training_config.txt deleted file mode 100644 index a7f9663..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.010/training_log.csv deleted file mode 100644 index 6b7f5aa..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,95.03910064697266 -2,76.24960327148438 -3,66.39354705810547 -4,58.82444763183594 
-5,48.968055725097656 -6,33.73896408081055 -7,26.308801651000977 -8,25.326007843017578 -9,19.996511459350586 -10,19.34816551208496 -11,19.53236961364746 -12,16.39805793762207 -13,17.442279815673828 -14,16.76350212097168 -15,18.205509185791016 -16,19.01325798034668 -17,20.936372756958008 -18,20.873767852783203 -19,20.843563079833984 -20,20.753175735473633 -21,22.1287784576416 -22,21.3051815032959 -23,21.373605728149414 -24,20.65428352355957 -25,20.577116012573242 -26,17.964975357055664 -27,16.1373291015625 -28,14.08481216430664 -29,15.4234619140625 -30,12.993844985961914 -31,12.053935050964355 -32,11.578987121582031 -33,10.668536186218262 -34,10.66777229309082 -35,9.79392147064209 -36,9.803110122680664 -37,10.338434219360352 -38,10.307804107666016 -39,9.708623886108398 -40,9.4553804397583 -41,9.378254890441895 -42,9.587066650390625 -43,10.365020751953125 -44,10.195956230163574 -45,9.756787300109863 -46,9.504606246948242 -47,9.362282752990723 -48,9.294698715209961 -49,9.130123138427734 -50,9.36418628692627 -51,9.859098434448242 -52,9.327312469482422 -53,8.847362518310547 -54,8.755932807922363 -55,8.733234405517578 -56,9.109670639038086 -57,8.7381591796875 -58,8.811966896057129 -59,9.17891788482666 -60,8.69981575012207 -61,8.391565322875977 -62,8.320454597473145 -63,7.84580135345459 -64,7.894992828369141 -65,7.959505558013916 -66,7.592531204223633 -67,7.599440574645996 -68,7.714060306549072 -69,7.611579418182373 -70,7.495656967163086 -71,7.299350738525391 -72,7.0380964279174805 -73,6.735690116882324 -74,6.771193981170654 -75,6.66304349899292 -76,6.475670337677002 -77,6.408943176269531 -78,6.433596611022949 -79,6.389490604400635 -80,6.302536964416504 -81,6.224008560180664 -82,6.187747001647949 -83,6.182133674621582 -84,6.145622730255127 -85,6.537967205047607 -86,6.279247283935547 -87,6.462826728820801 -88,7.689733505249023 -89,7.384486675262451 -90,8.827401161193848 -91,8.880176544189453 -92,8.519051551818848 -93,10.392526626586914 -94,11.183920860290527 -95,11.006494522094727 -96,11.735664367675781 -97,11.935935020446777 -98,11.309928894042969 -99,11.677967071533203 -100,11.99633502960205 -101,11.791521072387695 -102,11.360182762145996 -103,10.89393138885498 -104,10.874415397644043 -105,10.929986000061035 -106,11.821320533752441 -107,10.258667945861816 -108,9.898942947387695 -109,9.76904296875 -110,9.745965957641602 -111,9.782800674438477 -112,9.81572151184082 -113,9.767435073852539 -114,9.667057991027832 -115,9.576626777648926 -116,9.505386352539062 -117,9.449451446533203 -118,9.405950546264648 -119,9.373318672180176 -120,9.347760200500488 -121,9.323936462402344 -122,9.298701286315918 -123,9.269039154052734 -124,9.23427677154541 -125,9.19677448272705 -126,9.159979820251465 -127,9.126553535461426 -128,9.09769058227539 -129,9.073280334472656 -130,9.052465438842773 -131,9.034177780151367 -132,9.017241477966309 -133,9.000571250915527 -134,8.98308277130127 -135,8.963935852050781 -136,8.942765235900879 -137,8.919689178466797 -138,8.895364761352539 -139,8.870532989501953 -140,8.845982551574707 -141,8.822343826293945 -142,8.799861907958984 -143,8.778589248657227 -144,8.758278846740723 -145,8.738434791564941 -146,8.718774795532227 -147,8.698891639709473 -148,8.678629875183105 -149,8.658025741577148 -150,8.637301445007324 -151,8.61645793914795 -152,8.595281600952148 -153,8.57435131072998 -154,8.554269790649414 -155,8.534825325012207 -156,8.515336990356445 -157,8.49524211883545 -158,8.47422981262207 -159,8.452363967895508 -160,8.429903030395508 -161,8.407171249389648 -162,8.384398460388184 
-163,8.361583709716797 -164,8.33854866027832 -165,8.31521224975586 -166,8.291471481323242 -167,8.2672700881958 -168,8.242545127868652 -169,8.217202186584473 -170,8.191106796264648 -171,8.16425895690918 -172,8.136584281921387 -173,8.108016967773438 -174,8.07856273651123 -175,8.04810905456543 -176,8.016641616821289 -177,7.984140396118164 -178,7.951086044311523 -179,7.918913841247559 -180,7.890426158905029 -181,7.870710849761963 -182,7.870548248291016 -183,7.855991363525391 -184,7.813508033752441 -185,7.796695232391357 -186,8.695302963256836 -187,8.218213081359863 -188,8.871312141418457 -189,8.79179859161377 -190,8.647163391113281 -191,7.689424514770508 -192,7.491130828857422 -193,7.484787464141846 -194,7.51449728012085 -195,7.55880880355835 -196,7.605967998504639 -197,7.648251533508301 -198,7.679788589477539 -199,7.695584297180176 -200,7.690452575683594 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.020/training_config.txt deleted file mode 100644 index 4ec24e3..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.020/training_log.csv 
b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.020/training_log.csv deleted file mode 100644 index 2148a65..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,82.43912506103516 -2,48.902740478515625 -3,22.918195724487305 -4,15.310898780822754 -5,14.459193229675293 -6,14.623432159423828 -7,13.746367454528809 -8,14.508721351623535 -9,19.639217376708984 -10,20.53241539001465 -11,21.6141300201416 -12,22.7136173248291 -13,25.51247787475586 -14,18.0202693939209 -15,16.323345184326172 -16,10.707250595092773 -17,10.470747947692871 -18,8.434075355529785 -19,5.27421760559082 -20,5.740842819213867 -21,4.877232551574707 -22,4.298964977264404 -23,3.4619908332824707 -24,3.2574658393859863 -25,3.3129570484161377 -26,3.307922840118408 -27,3.376842737197876 -28,3.712953567504883 -29,3.1219754219055176 -30,2.9214324951171875 -31,2.8629777431488037 -32,2.7044289112091064 -33,2.604201316833496 -34,2.540435314178467 -35,2.5014138221740723 -36,2.4718525409698486 -37,2.457754611968994 -38,2.4590084552764893 -39,2.475747585296631 -40,2.4946107864379883 -41,2.504319190979004 -42,2.42991042137146 -43,2.429490327835083 -44,2.4468584060668945 -45,2.4532992839813232 -46,2.3795971870422363 -47,2.3479199409484863 -48,2.296729326248169 -49,2.2774245738983154 -50,2.2627129554748535 -51,2.2718665599823 -52,2.1979072093963623 -53,2.2231454849243164 -54,2.224728584289551 -55,2.2160916328430176 -56,2.203197479248047 -57,2.1896417140960693 -58,2.179462432861328 -59,2.177290201187134 -60,2.1660141944885254 -61,2.1504013538360596 -62,2.139113664627075 -63,2.127985715866089 -64,2.115370273590088 -65,2.1010076999664307 -66,2.085151195526123 -67,2.0681376457214355 -68,2.050363063812256 -69,2.0322494506835938 -70,2.0142078399658203 -71,1.9966670274734497 -72,1.9796398878097534 -73,1.9622619152069092 -74,1.9436328411102295 -75,1.923998236656189 -76,1.903533935546875 -77,1.8809115886688232 -78,1.8540118932724 -79,1.828003168106079 -80,1.8480191230773926 -81,1.8131392002105713 -82,1.8266404867172241 -83,1.8344453573226929 -84,1.8342276811599731 -85,1.8289400339126587 -86,1.8200353384017944 -87,1.8056782484054565 -88,1.7755985260009766 -89,1.7205677032470703 -90,1.749650001525879 -91,1.8472850322723389 -92,1.801310420036316 -93,1.7715405225753784 -94,1.7854454517364502 -95,1.7718876600265503 -96,1.7510132789611816 -97,1.6277021169662476 -98,1.5749520063400269 -99,1.7674583196640015 -100,1.4798908233642578 -101,1.517238974571228 -102,1.4506319761276245 -103,1.4158647060394287 -104,1.3488818407058716 -105,1.3490287065505981 -106,1.3850162029266357 -107,1.392197847366333 -108,1.4851698875427246 -109,1.49537992477417 -110,1.5265541076660156 -111,1.5252680778503418 -112,1.5120633840560913 -113,1.4195387363433838 -114,1.4111931324005127 -115,1.4345804452896118 -116,1.4332951307296753 -117,1.4077868461608887 -118,1.3557342290878296 -119,1.3995683193206787 -120,1.4069609642028809 -121,1.4161968231201172 -122,1.4171679019927979 -123,1.4123456478118896 -124,1.4085177183151245 -125,1.4081388711929321 -126,1.4063493013381958 -127,1.4014840126037598 -128,1.3944549560546875 -129,1.3861383199691772 -130,1.3754149675369263 -131,1.3643512725830078 -132,1.356070876121521 -133,1.3475708961486816 -134,1.3364219665527344 -135,1.3233609199523926 -136,1.310833215713501 -137,1.3004862070083618 -138,1.2912787199020386 -139,1.2800486087799072 -140,1.26474928855896 -141,1.2475062608718872 -142,1.2288559675216675 
-143,1.2074202299118042 -144,1.2786189317703247 -145,1.2199618816375732 -146,1.2556278705596924 -147,1.2912285327911377 -148,1.3367886543273926 -149,1.477548599243164 -150,1.4614245891571045 -151,1.3517922163009644 -152,1.3627039194107056 -153,1.3933621644973755 -154,1.4161109924316406 -155,1.4277015924453735 -156,1.434300422668457 -157,1.438791275024414 -158,1.4411647319793701 -159,1.441308617591858 -160,1.4395076036453247 -161,1.4360485076904297 -162,1.4308974742889404 -163,1.4233964681625366 -164,1.4125455617904663 -165,1.3991366624832153 -166,1.3881348371505737 -167,1.3799316883087158 -168,1.3708442449569702 -169,1.359774112701416 -170,1.3476059436798096 -171,1.334333062171936 -172,1.3171591758728027 -173,1.2904539108276367 -174,1.341513752937317 -175,1.319312572479248 -176,1.3380199670791626 -177,1.3465924263000488 -178,1.3653695583343506 -179,1.38485848903656 -180,1.401158332824707 -181,1.4116811752319336 -182,1.4144139289855957 -183,1.4101330041885376 -184,1.4010119438171387 -185,1.3888156414031982 -186,1.3773565292358398 -187,1.3735347986221313 -188,1.3739221096038818 -189,1.3731274604797363 -190,1.370875597000122 -191,1.36915123462677 -192,1.3695424795150757 -193,1.3701107501983643 -194,1.3690026998519897 -195,1.366491436958313 -196,1.363605260848999 -197,1.3614426851272583 -198,1.3603384494781494 -199,1.359687089920044 -200,1.3588992357254028 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.040/training_config.txt deleted file mode 100644 index 4edcf79..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, 
-3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.040/training_log.csv deleted file mode 100644 index 1f1f513..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,61.06413650512695 -2,14.506061553955078 -3,12.179173469543457 -4,10.508133888244629 -5,7.781476020812988 -6,7.331408977508545 -7,4.046557426452637 -8,3.0861735343933105 -9,2.4210879802703857 -10,2.4559125900268555 -11,2.639932632446289 -12,2.745286703109741 -13,2.849179267883301 -14,3.000889301300049 -15,3.5021698474884033 -16,3.4893062114715576 -17,3.532759666442871 -18,4.366307735443115 -19,3.60078763961792 -20,3.2822318077087402 -21,3.1369388103485107 -22,3.1352953910827637 -23,3.2261879444122314 -24,3.1815624237060547 -25,3.1694958209991455 -26,3.667695999145508 -27,3.6177818775177 -28,3.678986072540283 -29,3.9404332637786865 -30,4.353290557861328 -31,8.901665687561035 -32,39.0648307800293 -33,60.26176834106445 -34,19.259321212768555 -35,9.963799476623535 -36,9.4312105178833 -37,7.336921691894531 -38,8.998162269592285 -39,9.367026329040527 -40,10.834895133972168 -41,12.795616149902344 -42,10.481525421142578 -43,7.4565019607543945 -44,7.8288373947143555 -45,6.26043176651001 -46,5.414919853210449 -47,4.771931171417236 -48,4.118546009063721 -49,3.597261428833008 -50,3.626758098602295 -51,3.134084463119507 -52,3.1453657150268555 -53,3.133932113647461 -54,3.0891783237457275 -55,2.9968793392181396 -56,2.9714908599853516 -57,2.8042821884155273 -58,2.6968319416046143 -59,2.5640764236450195 -60,2.524521589279175 -61,2.4952468872070312 -62,2.5776114463806152 -63,2.3506662845611572 -64,2.432927131652832 -65,2.4627013206481934 -66,2.4605350494384766 -67,2.417171001434326 -68,2.419071674346924 -69,2.348137855529785 -70,2.2632458209991455 -71,2.1524317264556885 -72,2.3938193321228027 -73,2.3716418743133545 -74,2.362083911895752 -75,2.351661205291748 -76,2.342134475708008 -77,2.338404655456543 -78,2.334430694580078 -79,2.3257734775543213 -80,2.315495729446411 -81,2.3149452209472656 -82,2.3085105419158936 -83,2.295214891433716 -84,2.283432960510254 -85,2.2775139808654785 -86,2.274329423904419 -87,2.266498327255249 -88,2.2581331729888916 -89,2.2535929679870605 -90,2.248863697052002 -91,2.2417356967926025 -92,2.2348334789276123 -93,2.2299935817718506 -94,2.224867582321167 -95,2.2180609703063965 -96,2.211881399154663 -97,2.2075977325439453 -98,2.203707218170166 -99,2.1977875232696533 -100,2.1913504600524902 -101,2.187664270401001 -102,2.187474012374878 -103,2.187849760055542 -104,2.1786015033721924 -105,2.1440494060516357 -106,2.1576850414276123 -107,2.157291889190674 -108,2.1540231704711914 -109,2.1505298614501953 -110,2.1466453075408936 -111,2.141815662384033 -112,2.1365532875061035 -113,2.12903094291687 -114,2.113351583480835 -115,2.0824904441833496 -116,2.0229110717773438 -117,1.9379749298095703 -118,1.9146860837936401 -119,1.9221832752227783 -120,1.9325714111328125 -121,1.9384090900421143 -122,1.938533067703247 
-123,1.9333913326263428 -124,1.9231047630310059 -125,1.906815528869629 -126,1.8829410076141357 -127,1.851495385169983 -128,1.817812204360962 -129,1.7875065803527832 -130,1.7622439861297607 -131,1.7426143884658813 -132,1.7293442487716675 -133,1.7207530736923218 -134,1.714156985282898 -135,1.708114743232727 -136,1.702040433883667 -137,1.6956510543823242 -138,1.6887671947479248 -139,1.681334137916565 -140,1.673524022102356 -141,1.6653507947921753 -142,1.656707525253296 -143,1.6474642753601074 -144,1.6375741958618164 -145,1.6270285844802856 -146,1.6159687042236328 -147,1.604935884475708 -148,1.5945160388946533 -149,1.585283875465393 -150,1.5775717496871948 -151,1.5714224576950073 -152,1.5666165351867676 -153,1.5628046989440918 -154,1.559523105621338 -155,1.5560808181762695 -156,1.5519733428955078 -157,1.5471307039260864 -158,1.5419666767120361 -159,1.5370664596557617 -160,1.5328487157821655 -161,1.5293824672698975 -162,1.5264350175857544 -163,1.5237088203430176 -164,1.5209369659423828 -165,1.5179405212402344 -166,1.514629602432251 -167,1.510979413986206 -168,1.50701904296875 -169,1.502808928489685 -170,1.4984350204467773 -171,1.49399995803833 -172,1.4896812438964844 -173,1.4857237339019775 -174,1.4824551343917847 -175,1.4805935621261597 -176,1.479503870010376 -177,1.4763412475585938 -178,1.4725016355514526 -179,1.469553828239441 -180,1.4667552709579468 -181,1.4637571573257446 -182,1.4606908559799194 -183,1.4576983451843262 -184,1.454737901687622 -185,1.4516297578811646 -186,1.4482265710830688 -187,1.444455862045288 -188,1.4403318166732788 -189,1.43609619140625 -190,1.4322320222854614 -191,1.4286742210388184 -192,1.4246952533721924 -193,1.4204034805297852 -194,1.4167214632034302 -195,1.4138966798782349 -196,1.4112567901611328 -197,1.4082573652267456 -198,1.4051650762557983 -199,1.4024430513381958 -200,1.3999470472335815 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.050/training_config.txt deleted file mode 100644 index d650ae9..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] 
-[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.050/training_log.csv deleted file mode 100644 index ff8a946..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,52.40354537963867 -2,10.393492698669434 -3,9.88778305053711 -4,9.30689525604248 -5,5.116946220397949 -6,3.204042911529541 -7,2.6865522861480713 -8,3.3314521312713623 -9,2.160701274871826 -10,2.0266976356506348 -11,1.9492961168289185 -12,1.827576756477356 -13,1.6974716186523438 -14,1.5505374670028687 -15,1.6173851490020752 -16,1.5877224206924438 -17,1.5664033889770508 -18,1.5433350801467896 -19,1.516239047050476 -20,1.491828441619873 -21,1.4611262083053589 -22,1.4239336252212524 -23,1.4012936353683472 -24,1.2891321182250977 -25,1.2659573554992676 -26,1.2296522855758667 -27,1.1222678422927856 -28,1.0850415229797363 -29,1.049329400062561 -30,1.0084943771362305 -31,0.8852363228797913 -32,0.7923242449760437 -33,0.5770782232284546 -34,0.5488627552986145 -35,0.5179731249809265 -36,0.5013468861579895 -37,0.49849945306777954 -38,0.492409348487854 -39,0.4856579899787903 -40,0.4774925410747528 -41,0.4717472493648529 -42,0.4669254720211029 -43,0.4623938798904419 -44,0.45842352509498596 -45,0.4562998116016388 -46,0.45057201385498047 -47,0.40048325061798096 -48,0.3511279821395874 -49,0.3459100127220154 -50,0.3397539258003235 -51,0.3341367244720459 -52,0.33047959208488464 -53,0.32903704047203064 -54,0.32904040813446045 -55,0.3273302912712097 -56,0.32330039143562317 -57,0.31822478771209717 -58,0.3127497732639313 -59,0.30821114778518677 -60,0.30508390069007874 -61,0.3028815686702728 -62,0.3010285794734955 -63,0.29927778244018555 -64,0.2975827753543854 -65,0.2959173619747162 -66,0.29427123069763184 -67,0.2926520109176636 -68,0.29107412695884705 -69,0.2895480990409851 -70,0.28808605670928955 -71,0.2866930365562439 -72,0.2853602170944214 -73,0.2840683162212372 -74,0.2828051745891571 -75,0.2815687954425812 -76,0.28033867478370667 -77,0.2790963053703308 -78,0.27781572937965393 -79,0.2764440178871155 -80,0.2749022841453552 -81,0.2729593515396118 -82,0.26933836936950684 -83,0.252530038356781 -84,0.2525174915790558 -85,0.25724443793296814 -86,0.2534225285053253 -87,0.2527257204055786 -88,0.24859729409217834 -89,0.2509954571723938 -90,0.24461762607097626 -91,0.2450493425130844 -92,0.24333909153938293 -93,0.2413758933544159 -94,0.24159933626651764 -95,0.23921391367912292 -96,0.23906508088111877 -97,0.23737379908561707 -98,0.2355109006166458 -99,0.2356773316860199 -100,0.2333143800497055 
-101,0.23308029770851135 -102,0.23222540318965912 -103,0.23052215576171875 -104,0.23024192452430725 -105,0.22849196195602417 -106,0.2276879847049713 -107,0.2269735485315323 -108,0.2256498783826828 -109,0.22459909319877625 -110,0.22389453649520874 -111,0.22248661518096924 -112,0.22162997722625732 -113,0.22060218453407288 -114,0.2192690223455429 -115,0.21817275881767273 -116,0.21695679426193237 -117,0.21533571183681488 -118,0.213530495762825 -119,0.21024905145168304 -120,0.20460321009159088 -121,0.2023165374994278 -122,0.20383669435977936 -123,0.2051309049129486 -124,0.20520856976509094 -125,0.20405182242393494 -126,0.20263776183128357 -127,0.2018342763185501 -128,0.20091085135936737 -129,0.19987572729587555 -130,0.19898304343223572 -131,0.19813261926174164 -132,0.19737038016319275 -133,0.19665460288524628 -134,0.19591981172561646 -135,0.19523794949054718 -136,0.1945207118988037 -137,0.1938389539718628 -138,0.1931551843881607 -139,0.1924886554479599 -140,0.1918627917766571 -141,0.19124741852283478 -142,0.19068412482738495 -143,0.1901109516620636 -144,0.18955042958259583 -145,0.18896262347698212 -146,0.18836699426174164 -147,0.18777193129062653 -148,0.18718023598194122 -149,0.1865960955619812 -150,0.1859586238861084 -151,0.18527154624462128 -152,0.18453601002693176 -153,0.1837938278913498 -154,0.1830178052186966 -155,0.18220354616641998 -156,0.18132928013801575 -157,0.1804036945104599 -158,0.1794290840625763 -159,0.17841582000255585 -160,0.17737777531147003 -161,0.17634397745132446 -162,0.17540861666202545 -163,0.174540176987648 -164,0.1737804412841797 -165,0.1731167733669281 -166,0.17251694202423096 -167,0.1718801110982895 -168,0.17097952961921692 -169,0.16975905001163483 -170,0.16959521174430847 -171,0.17020472884178162 -172,0.17018431425094604 -173,0.16927199065685272 -174,0.16809305548667908 -175,0.16754792630672455 -176,0.1677381843328476 -177,0.16728845238685608 -178,0.16686344146728516 -179,0.16589871048927307 -180,0.1656079739332199 -181,0.1652897298336029 -182,0.16529738903045654 -183,0.16502900421619415 -184,0.16458019614219666 -185,0.16422614455223083 -186,0.1638825535774231 -187,0.16377221047878265 -188,0.16354872286319733 -189,0.1633685827255249 -190,0.16304516792297363 -191,0.16277526319026947 -192,0.16253876686096191 -193,0.1623411625623703 -194,0.1621827632188797 -195,0.16192373633384705 -196,0.16166788339614868 -197,0.16139009594917297 -198,0.16114549338817596 -199,0.1609409749507904 -200,0.16070130467414856 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.080/training_config.txt deleted file mode 100644 index 05c199b..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 
2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.080/training_log.csv deleted file mode 100644 index cca2f85..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,25.028772354125977 -2,9.917669296264648 -3,4.885807514190674 -4,4.086370468139648 -5,3.4800972938537598 -6,5.663668155670166 -7,133.53518676757812 -8,2.9642841815948486 -9,1.6006953716278076 -10,0.9952401518821716 -11,0.6931933760643005 -12,0.6686489582061768 -13,0.7220417261123657 -14,0.682354211807251 -15,0.6706952452659607 -16,0.6724902987480164 -17,0.6486103534698486 -18,0.6462306380271912 -19,0.6291531324386597 -20,0.6223896145820618 -21,0.6182618141174316 -22,0.6138381361961365 -23,0.6089372634887695 -24,0.6033844351768494 -25,0.5970155596733093 -26,0.5900869369506836 -27,0.5834127068519592 -28,0.5774861574172974 -29,0.5721697211265564 -30,0.567245364189148 -31,0.5625745058059692 -32,0.5580479502677917 -33,0.553551197052002 -34,0.5489827990531921 -35,0.5442469120025635 -36,0.5392820239067078 -37,0.5340420603752136 -38,0.5284491181373596 -39,0.522429347038269 -40,0.5158870816230774 -41,0.5086984634399414 -42,0.5008510947227478 -43,0.4914970397949219 -44,0.47387659549713135 -45,0.42925164103507996 -46,0.41010838747024536 -47,0.4007713198661804 -48,0.3933660387992859 -49,0.38706067204475403 -50,0.38155099749565125 -51,0.3766348659992218 -52,0.3721492290496826 -53,0.36796656250953674 -54,0.36404016613960266 -55,0.360311359167099 -56,0.3566899001598358 -57,0.3531985878944397 -58,0.34983310103416443 -59,0.3465738892555237 -60,0.34339046478271484 -61,0.3402698338031769 -62,0.33721473813056946 -63,0.33421751856803894 -64,0.331266313791275 -65,0.32835471630096436 -66,0.3254776895046234 -67,0.32263368368148804 -68,0.3198133707046509 -69,0.31701168417930603 -70,0.31422337889671326 -71,0.31144341826438904 -72,0.3086645007133484 -73,0.3058815002441406 
-74,0.3030911982059479 -75,0.30029064416885376 -76,0.29747599363327026 -77,0.2946428954601288 -78,0.2917894124984741 -79,0.28890863060951233 -80,0.2859917879104614 -81,0.2830048203468323 -82,0.2798585891723633 -83,0.2763195335865021 -84,0.27205073833465576 -85,0.2683747410774231 -86,0.26664257049560547 -87,0.2651979625225067 -88,0.2635403573513031 -89,0.26168355345726013 -90,0.25967666506767273 -91,0.25760945677757263 -92,0.2556169331073761 -93,0.25381922721862793 -94,0.2521747648715973 -95,0.25059425830841064 -96,0.24896016716957092 -97,0.24719220399856567 -98,0.24524305760860443 -99,0.24307401478290558 -100,0.24063237011432648 -101,0.2378639429807663 -102,0.23470303416252136 -103,0.23105797171592712 -104,0.22682400047779083 -105,0.2219410091638565 -106,0.21631059050559998 -107,0.20968252420425415 -108,0.20205233991146088 -109,0.19337184727191925 -110,0.1845308095216751 -111,0.17655684053897858 -112,0.17024441063404083 -113,0.16526253521442413 -114,0.16115328669548035 -115,0.15762241184711456 -116,0.15455806255340576 -117,0.15188319981098175 -118,0.14964710175991058 -119,0.147788867354393 -120,0.1461019665002823 -121,0.14443211257457733 -122,0.14274956285953522 -123,0.14105911552906036 -124,0.13937006890773773 -125,0.1377231776714325 -126,0.13618656992912292 -127,0.13488462567329407 -128,0.13379797339439392 -129,0.13279160857200623 -130,0.13175112009048462 -131,0.1306418925523758 -132,0.12950050830841064 -133,0.12842415273189545 -134,0.12750551104545593 -135,0.12675142288208008 -136,0.1259859949350357 -137,0.1251278668642044 -138,0.12420538812875748 -139,0.12333652377128601 -140,0.1226176992058754 -141,0.12196572870016098 -142,0.12126678973436356 -143,0.12049883604049683 -144,0.11974086612462997 -145,0.11907815933227539 -146,0.11847880482673645 -147,0.11785104870796204 -148,0.11717690527439117 -149,0.1165090799331665 -150,0.11590603739023209 -151,0.11534184962511063 -152,0.11475371569395065 -153,0.11413281410932541 -154,0.11352697014808655 -155,0.11296186596155167 -156,0.11240610480308533 -157,0.11182796955108643 -158,0.11123669147491455 -159,0.11066359281539917 -160,0.11011230200529099 -161,0.10955685377120972 -162,0.10898397862911224 -163,0.10841115564107895 -164,0.10785388201475143 -165,0.1073034256696701 -166,0.10674326121807098 -167,0.10617487132549286 -168,0.10561070591211319 -169,0.10505405813455582 -170,0.10449350625276566 -171,0.10392110794782639 -172,0.10334485024213791 -173,0.10277370363473892 -174,0.10220255702733994 -175,0.10162464529275894 -176,0.10104263573884964 -177,0.10045763850212097 -178,0.09986809641122818 -179,0.09926868975162506 -180,0.0986611545085907 -181,0.0980522483587265 -182,0.09744485467672348 -183,0.09683268517255783 -184,0.0962139219045639 -185,0.0955970361828804 -186,0.0949811339378357 -187,0.09435894340276718 -188,0.09373454004526138 -189,0.0931142270565033 -190,0.09249059855937958 -191,0.09186358004808426 -192,0.09123943001031876 -193,0.0906132236123085 -194,0.08998418599367142 -195,0.0893574208021164 -196,0.08872932195663452 -197,0.08809869736433029 -198,0.08746951818466187 -199,0.08683864772319794 -200,0.08620581030845642 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.100/training_config.txt deleted file mode 100644 index fd733b0..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.100/training_log.csv deleted file mode 100644 index c6373a1..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,25.390798568725586 -2,157.78956604003906 -3,118.34703063964844 -4,28.166791915893555 -5,12.652923583984375 -6,5.909815788269043 -7,4.124309062957764 -8,3.5854766368865967 -9,3.1732726097106934 -10,3.053969383239746 -11,2.6255950927734375 -12,2.492331027984619 -13,2.5396101474761963 -14,2.4185588359832764 -15,2.375730037689209 -16,1.9249330759048462 -17,1.4928386211395264 -18,1.4896786212921143 -19,1.456915020942688 -20,1.4433144330978394 -21,1.570889949798584 -22,1.575574517250061 -23,1.56044602394104 -24,1.533485770225525 -25,1.5002634525299072 -26,1.4729909896850586 -27,1.4999525547027588 -28,1.4957298040390015 -29,1.3971298933029175 -30,1.0881057977676392 -31,1.19003427028656 -32,1.231261968612671 -33,1.2336654663085938 -34,1.2123888731002808 -35,1.171397089958191 -36,1.1162508726119995 -37,1.0622305870056152 -38,1.018932580947876 -39,0.9338076114654541 -40,0.5351735353469849 -41,0.4227641522884369 -42,0.4393269121646881 -43,0.4515998661518097 -44,0.4432448446750641 
-45,0.42872482538223267 -46,0.4138961434364319 -47,0.3980695903301239 -48,0.49446412920951843 -49,0.45678964257240295 -50,0.3853941261768341 -51,0.38662034273147583 -52,0.3853927552700043 -53,0.2573019564151764 -54,0.2222956269979477 -55,0.2057589441537857 -56,0.2081489861011505 -57,0.20149902999401093 -58,0.19035059213638306 -59,0.181462362408638 -60,0.2153782695531845 -61,0.21107150614261627 -62,0.20406083762645721 -63,0.18804028630256653 -64,0.19079875946044922 -65,0.17302364110946655 -66,0.1914525032043457 -67,0.19897103309631348 -68,0.1989433318376541 -69,0.19339273869991302 -70,0.17292886972427368 -71,0.1915132701396942 -72,0.18425777554512024 -73,0.1890411078929901 -74,0.19278748333454132 -75,0.19145141541957855 -76,0.1881604641675949 -77,0.1834065318107605 -78,0.17708662152290344 -79,0.16868185997009277 -80,0.15912532806396484 -81,0.16356034576892853 -82,0.16131047904491425 -83,0.15364208817481995 -84,0.15604490041732788 -85,0.15596477687358856 -86,0.1532631516456604 -87,0.14865656197071075 -88,0.14371991157531738 -89,0.14325757324695587 -90,0.1424807757139206 -91,0.13809247314929962 -92,0.13765212893486023 -93,0.13699857890605927 -94,0.13556711375713348 -95,0.13347585499286652 -96,0.13123922049999237 -97,0.12936678528785706 -98,0.12818734347820282 -99,0.12709946930408478 -100,0.12467410415410995 -101,0.12241216748952866 -102,0.12042950093746185 -103,0.11883393675088882 -104,0.1177101731300354 -105,0.11691610515117645 -106,0.11619928479194641 -107,0.11540081351995468 -108,0.11445561051368713 -109,0.11335514485836029 -110,0.11210671067237854 -111,0.11074480414390564 -112,0.10931376367807388 -113,0.10787321627140045 -114,0.10647628456354141 -115,0.10514692962169647 -116,0.1038864329457283 -117,0.10267601162195206 -118,0.10148332267999649 -119,0.1002967432141304 -120,0.09911172837018967 -121,0.09792376309633255 -122,0.09673666954040527 -123,0.09555856138467789 -124,0.09440179914236069 -125,0.09328626841306686 -126,0.09223934262990952 -127,0.09126546233892441 -128,0.09037497639656067 -129,0.08953900635242462 -130,0.0887397825717926 -131,0.0879671722650528 -132,0.08721894770860672 -133,0.08649276196956635 -134,0.08578528463840485 -135,0.0850951224565506 -136,0.08442143350839615 -137,0.08376691490411758 -138,0.08313426375389099 -139,0.08252406120300293 -140,0.0819355696439743 -141,0.08136671781539917 -142,0.08081547915935516 -143,0.08028054237365723 -144,0.07976005226373672 -145,0.07925209403038025 -146,0.07875537127256393 -147,0.07826849818229675 -148,0.07779085636138916 -149,0.07732199877500534 -150,0.07686176896095276 -151,0.07640960812568665 -152,0.07596532255411148 -153,0.07552865892648697 -154,0.07509969174861908 -155,0.07467822730541229 -156,0.07426375150680542 -157,0.07385552674531937 -158,0.07345318049192429 -159,0.07305619865655899 -160,0.0726645365357399 -161,0.07227791100740433 -162,0.07189593464136124 -163,0.07151899486780167 -164,0.07114678621292114 -165,0.07077931612730026 -166,0.07041634619235992 -167,0.07005783170461655 -168,0.06970401853322983 -169,0.06935471296310425 -170,0.06900941580533981 -171,0.06866779923439026 -172,0.0683293417096138 -173,0.06799325346946716 -174,0.06766027212142944 -175,0.06732998043298721 -176,0.06700241565704346 -177,0.06667765229940414 -178,0.06635577231645584 -179,0.06603676080703735 -180,0.06572066992521286 -181,0.0654076561331749 -182,0.06509773433208466 -183,0.0647912248969078 -184,0.06448785215616226 -185,0.06418775767087936 -186,0.06389085948467255 -187,0.06359735876321793 -188,0.06330706924200058 -189,0.06302009522914886 
-190,0.06273636221885681 -191,0.062455810606479645 -192,0.06217828392982483 -193,0.06190360337495804 -194,0.061631735414266586 -195,0.06136242672801018 -196,0.061095599085092545 -197,0.06083117797970772 -198,0.060569096356630325 -199,0.06030958518385887 -200,0.060052502900362015 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.125/training_config.txt deleted file mode 100644 index 3bb6636..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.125/training_log.csv deleted file mode 100644 index c8845e5..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,21.462425231933594 -2,4.2574687004089355 -3,1.0365502834320068 -4,0.4529518485069275 -5,0.32711562514305115 -6,0.2486903816461563 -7,0.19635263085365295 -8,0.1506468951702118 -9,0.11421635001897812 -10,0.0959155485033989 -11,0.08757182955741882 -12,0.10190972685813904 -13,0.10579250007867813 
-14,0.10948990285396576 -15,0.11206129938364029 -16,0.11349722743034363 -17,0.11386856436729431 -18,0.11322091519832611 -19,0.11159501224756241 -20,0.10901817679405212 -21,0.10556113719940186 -22,0.10120166093111038 -23,0.09590663015842438 -24,0.08976170420646667 -25,0.08289719372987747 -26,0.07580380886793137 -27,0.06996914744377136 -28,0.06475658714771271 -29,0.0607096403837204 -30,0.05815093591809273 -31,0.05687680467963219 -32,0.05638054758310318 -33,0.05603107810020447 -34,0.05551719665527344 -35,0.05480627343058586 -36,0.05392902344465256 -37,0.05291607603430748 -38,0.051800865679979324 -39,0.050604186952114105 -40,0.04936222732067108 -41,0.048099588602781296 -42,0.046843234449625015 -43,0.045661911368370056 -44,0.04462471231818199 -45,0.043803416192531586 -46,0.043275751173496246 -47,0.04307704418897629 -48,0.043158069252967834 -49,0.04342491179704666 -50,0.04377112537622452 -51,0.04410043731331825 -52,0.044345200061798096 -53,0.04446526616811752 -54,0.04444674775004387 -55,0.044294312596321106 -56,0.04402740299701691 -57,0.04367893934249878 -58,0.04328786954283714 -59,0.042893581092357635 -60,0.04252977296710014 -61,0.04222406446933746 -62,0.04199506342411041 -63,0.04184721037745476 -64,0.04177151620388031 -65,0.041751500219106674 -66,0.04176468774676323 -67,0.04178978502750397 -68,0.041809581220149994 -69,0.0418122336268425 -70,0.04179134964942932 -71,0.04174584895372391 -72,0.04167881980538368 -73,0.041596267372369766 -74,0.041505489498376846 -75,0.04141351208090782 -76,0.04132597893476486 -77,0.04124664142727852 -78,0.04117660969495773 -79,0.04111519455909729 -80,0.041060395538806915 -81,0.041009657084941864 -82,0.04096055403351784 -83,0.0409112311899662 -84,0.040860749781131744 -85,0.04080899432301521 -86,0.04075658693909645 -87,0.04070449247956276 -88,0.04065380245447159 -89,0.040605247020721436 -90,0.040559228509664536 -91,0.04051564633846283 -92,0.040473904460668564 -93,0.04043317586183548 -94,0.04039256274700165 -95,0.04035123065114021 -96,0.04030860215425491 -97,0.04026440531015396 -98,0.040218789130449295 -99,0.04017215222120285 -100,0.04012511298060417 -101,0.040078409016132355 -102,0.04003256931900978 -103,0.039987947791814804 -104,0.039944712072610855 -105,0.039902813732624054 -106,0.039862021803855896 -107,0.039822012186050415 -108,0.03978237137198448 -109,0.039742738008499146 -110,0.039702873677015305 -111,0.0396626740694046 -112,0.03962215781211853 -113,0.03958144411444664 -114,0.03954063728451729 -115,0.03949990123510361 -116,0.03945937752723694 -117,0.039419177919626236 -118,0.039379313588142395 -119,0.03933979198336601 -120,0.03930061310529709 -121,0.039261654019355774 -122,0.03922286257147789 -123,0.039184220135211945 -124,0.03914565593004227 -125,0.03910718485713005 -126,0.03906882926821709 -127,0.03903065621852875 -128,0.038992688059806824 -129,0.038954924792051315 -130,0.03891732171177864 -131,0.038879893720149994 -132,0.03884260728955269 -133,0.03880549967288971 -134,0.03876849636435509 -135,0.03873162716627121 -136,0.03869486227631569 -137,0.038658179342746735 -138,0.038621626794338226 -139,0.03858523815870285 -140,0.038548994809389114 -141,0.03851289302110672 -142,0.038476962596178055 -143,0.03844118118286133 -144,0.03840554505586624 -145,0.03837005794048309 -146,0.038334671407938004 -147,0.03829948231577873 -148,0.03826442360877991 -149,0.03822951763868332 -150,0.038194794207811356 -151,0.038160212337970734 -152,0.038125742226839066 -153,0.03809140995144844 -154,0.03805716335773468 -155,0.038023028522729874 -156,0.03798907622694969 
-157,0.037955235689878464 -158,0.037921514362096786 -159,0.03788790479302406 -160,0.0378543958067894 -161,0.03782098367810249 -162,0.037787701934576035 -163,0.037754520773887634 -164,0.0377214290201664 -165,0.037688467651605606 -166,0.037655606865882874 -167,0.03762282431125641 -168,0.03759019076824188 -169,0.0375576876103878 -170,0.03752525523304939 -171,0.037492942065000534 -172,0.037460725754499435 -173,0.03742862865328789 -174,0.03739666938781738 -175,0.037364814430475235 -176,0.037333108484745026 -177,0.037301503121852875 -178,0.037269968539476395 -179,0.03723854944109917 -180,0.037207212299108505 -181,0.03717596456408501 -182,0.03714480623602867 -183,0.037113722413778305 -184,0.037082746624946594 -185,0.037051890045404434 -186,0.037021100521087646 -187,0.03699037805199623 -188,0.03695974871516228 -189,0.03692922368645668 -190,0.03689882531762123 -191,0.03686850145459175 -192,0.03683822974562645 -193,0.03680804744362831 -194,0.036777958273887634 -195,0.036747969686985016 -196,0.03671805188059807 -197,0.03668821230530739 -198,0.036658432334661484 -199,0.03662871941924095 -200,0.03659914433956146 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.160/training_config.txt deleted file mode 100644 index 790b9ba..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] 
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.160/training_log.csv deleted file mode 100644 index e45a0bc..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,21.852094650268555 -2,1.8368357419967651 -3,1.908435583114624 -4,1.7913340330123901 -5,1.6858857870101929 -6,1.5924654006958008 -7,1.4805742502212524 -8,1.3430347442626953 -9,1.2112549543380737 -10,1.1039090156555176 -11,1.0088584423065186 -12,0.9160186648368835 -13,0.8217256665229797 -14,0.7235317230224609 -15,0.6162300109863281 -16,0.5233327150344849 -17,0.4509635865688324 -18,0.39320841431617737 -19,0.3456980288028717 -20,0.3053548336029053 -21,0.2718755006790161 -22,0.2457677721977234 -23,0.2278047353029251 -24,0.21568390727043152 -25,0.20711034536361694 -26,0.20001235604286194 -27,0.19354353845119476 -28,0.18701869249343872 -29,0.1798117607831955 -30,0.1719854474067688 -31,0.16350698471069336 -32,0.15453983843326569 -33,0.1451488435268402 -34,0.13549329340457916 -35,0.12597940862178802 -36,0.11666106432676315 -37,0.10769318044185638 -38,0.09911876171827316 -39,0.09097956866025925 -40,0.08352116495370865 -41,0.07684138417243958 -42,0.07095959782600403 -43,0.0659213662147522 -44,0.06170019507408142 -45,0.05821727588772774 -46,0.05538558587431908 -47,0.05310114100575447 -48,0.05124190077185631 -49,0.049720197916030884 -50,0.04839663580060005 -51,0.047207266092300415 -52,0.04612616449594498 -53,0.04508689045906067 -54,0.044093355536460876 -55,0.043103642761707306 -56,0.04211057722568512 -57,0.041140440851449966 -58,0.040226876735687256 -59,0.03937588632106781 -60,0.03860435262322426 -61,0.03793087601661682 -62,0.03736918047070503 -63,0.036926403641700745 -64,0.036603037267923355 -65,0.03639264404773712 -66,0.03628239780664444 -67,0.036255039274692535 -68,0.03629017248749733 -69,0.03636652231216431 -70,0.03646343573927879 -71,0.03656265139579773 -72,0.03664926812052727 -73,0.03671244531869888 -74,0.03674541786313057 -75,0.036745667457580566 -76,0.03671414405107498 -77,0.03665453940629959 -78,0.03657239302992821 -79,0.03647433966398239 -80,0.036367133259773254 -81,0.03625694662332535 -82,0.036149002611637115 -83,0.03604709729552269 -84,0.035953640937805176 -85,0.035869624465703964 -86,0.03579488769173622 -87,0.03572836145758629 -88,0.035668469965457916 -89,0.03561338782310486 -90,0.03556136041879654 -91,0.035510897636413574 -92,0.03546083718538284 -93,0.0354105681180954 -94,0.035359837114810944 -95,0.03530886396765709 -96,0.035258106887340546 -97,0.035208187997341156 -98,0.035159844905138016 -99,0.03511371836066246 -100,0.035070255398750305 -101,0.035029828548431396 -102,0.03499235212802887 -103,0.03495771437883377 -104,0.03492552787065506 -105,0.03489524871110916 -106,0.0348663292825222 -107,0.034838151186704636 -108,0.03481019288301468 -109,0.034782037138938904 -110,0.034753356128931046 -111,0.03472394496202469 -112,0.03469375520944595 -113,0.03466292470693588 -114,0.03463152423501015 -115,0.03459974005818367 -116,0.03456783667206764 -117,0.034535959362983704 -118,0.034504301846027374 -119,0.03447302058339119 -120,0.03444215655326843 -121,0.03441173583269119 -122,0.03438175842165947 -123,0.03435220196843147 -124,0.034322965890169144 -125,0.03429397940635681 
-126,0.034265223890542984 -127,0.03423662111163139 -128,0.03420811519026756 -129,0.034179769456386566 -130,0.03415154665708542 -131,0.03412341699004173 -132,0.03409544378519058 -133,0.03406761959195137 -134,0.034039951860904694 -135,0.034012362360954285 -136,0.033984847366809845 -137,0.03395745903253555 -138,0.03393019363284111 -139,0.03390301391482353 -140,0.033875878900289536 -141,0.03384881466627121 -142,0.03382180631160736 -143,0.03379476070404053 -144,0.033767733722925186 -145,0.033740684390068054 -146,0.03371357172727585 -147,0.03368641808629036 -148,0.03365910053253174 -149,0.03363164886832237 -150,0.03360409289598465 -151,0.03357640653848648 -152,0.033548664301633835 -153,0.033520832657814026 -154,0.033492881804704666 -155,0.03346488997340202 -156,0.033436812460422516 -157,0.03340863063931465 -158,0.03338043764233589 -159,0.033352166414260864 -160,0.03332383930683136 -161,0.03329547122120857 -162,0.03326701372861862 -163,0.03323858976364136 -164,0.033210087567567825 -165,0.033181607723236084 -166,0.033153142780065536 -167,0.0331246480345726 -168,0.03309626504778862 -169,0.03306792676448822 -170,0.03303966671228409 -171,0.03301149234175682 -172,0.03298337012529373 -173,0.03295539692044258 -174,0.032927438616752625 -175,0.03289958834648132 -176,0.032871801406145096 -177,0.03284405916929245 -178,0.03281649947166443 -179,0.03278891742229462 -180,0.032761506736278534 -181,0.03273412212729454 -182,0.0327068492770195 -183,0.03267962113022804 -184,0.03265244886279106 -185,0.032625507563352585 -186,0.03259861469268799 -187,0.032571908086538315 -188,0.0325453020632267 -189,0.032518744468688965 -190,0.032492365688085556 -191,0.03246599808335304 -192,0.03243986517190933 -193,0.0324137918651104 -194,0.03238782286643982 -195,0.0323619544506073 -196,0.03233620524406433 -197,0.03231063112616539 -198,0.03228512406349182 -199,0.03225989267230034 -200,0.032234687358140945 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.200/training_config.txt deleted file mode 100644 index 3c4d307..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 
0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.200/training_log.csv deleted file mode 100644 index 1a06c9e..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,13.713693618774414 -2,20960.087890625 -3,5053.802734375 -4,93.67684936523438 -5,2132.85400390625 -6,150.71658325195312 -7,109.25942993164062 -8,101.58708953857422 -9,84.15821075439453 -10,86.61813354492188 -11,95.40739440917969 -12,83.7750015258789 -13,77.71988677978516 -14,84.3980712890625 -15,75.2622299194336 -16,69.92220306396484 -17,69.09131622314453 -18,68.599853515625 -19,63.29630661010742 -20,63.3383903503418 -21,65.83534240722656 -22,68.52239990234375 -23,71.7425308227539 -24,62.54529571533203 -25,70.61063385009766 -26,78.35160064697266 -27,73.50505828857422 -28,90.96891021728516 -29,99.28433990478516 -30,98.0283432006836 -31,96.36274719238281 -32,92.19445037841797 -33,87.24057006835938 -34,80.65122985839844 -35,68.91799926757812 -36,65.0300521850586 -37,60.79045486450195 -38,56.62013244628906 -39,57.21597671508789 -40,61.39469909667969 -41,68.66880798339844 -42,64.04352569580078 -43,59.34780502319336 -44,58.00949478149414 -45,57.818660736083984 -46,57.77006912231445 -47,56.293033599853516 -48,53.67938232421875 -49,53.239097595214844 -50,53.74092483520508 -51,54.797672271728516 -52,54.947689056396484 -53,55.491580963134766 -54,54.90807342529297 -55,54.12422180175781 -56,52.94389343261719 -57,50.65598678588867 -58,49.428035736083984 -59,47.869041442871094 -60,43.61170959472656 -61,42.285316467285156 -62,40.016719818115234 -63,35.56639862060547 -64,31.735198974609375 -65,27.35207748413086 -66,14.855903625488281 -67,9.691596984863281 -68,4.302386283874512 -69,1.5847169160842896 -70,1.378108024597168 -71,0.9405443072319031 -72,0.7403144836425781 -73,0.6843240261077881 -74,0.8814423680305481 -75,1.280025601387024 -76,1.2994359731674194 -77,1.4566988945007324 -78,1.4705243110656738 -79,1.4560760259628296 -80,1.4436864852905273 -81,1.4378105401992798 -82,1.4318041801452637 -83,1.4214621782302856 -84,1.4044032096862793 -85,1.3672738075256348 -86,1.326311469078064 -87,1.1205247640609741 -88,1.347261667251587 -89,1.3666266202926636 -90,1.3641223907470703 -91,1.3586056232452393 -92,1.3590177297592163 -93,1.3642818927764893 -94,1.3699870109558105 -95,1.374159812927246 -96,1.376063585281372 -97,1.3756608963012695 -98,1.37315034866333 -99,1.368743658065796 -100,1.3478728532791138 -101,1.241611361503601 
-102,1.1429580450057983 -103,1.1684019565582275 -104,1.0391972064971924 -105,1.396305799484253 -106,1.4653269052505493 -107,1.56740140914917 -108,1.586012601852417 -109,1.6924999952316284 -110,1.5132521390914917 -111,1.4334018230438232 -112,2.177711248397827 -113,2.176687002182007 -114,2.191725969314575 -115,1.5619659423828125 -116,1.4454771280288696 -117,1.4483270645141602 -118,1.4560198783874512 -119,1.4630557298660278 -120,1.4733822345733643 -121,1.4933899641036987 -122,1.5069363117218018 -123,1.4847346544265747 -124,1.4713400602340698 -125,1.472054362297058 -126,1.4816303253173828 -127,1.4941779375076294 -128,1.500547170639038 -129,1.4948185682296753 -130,1.4827375411987305 -131,1.4714924097061157 -132,1.4637459516525269 -133,1.4595673084259033 -134,1.4582068920135498 -135,1.4583669900894165 -136,1.458783507347107 -137,1.4585009813308716 -138,1.4571027755737305 -139,1.4546735286712646 -140,1.4515581130981445 -141,1.4481366872787476 -142,1.4446462392807007 -143,1.441106915473938 -144,1.4378917217254639 -145,1.4352129697799683 -146,1.4330685138702393 -147,1.4313961267471313 -148,1.4300552606582642 -149,1.4288740158081055 -150,1.4277230501174927 -151,1.4265016317367554 -152,1.4251583814620972 -153,1.42368483543396 -154,1.4221042394638062 -155,1.420459508895874 -156,1.4187896251678467 -157,1.4171370267868042 -158,1.4155465364456177 -159,1.4140337705612183 -160,1.4126309156417847 -161,1.411338210105896 -162,1.410109519958496 -163,1.4089124202728271 -164,1.4077235460281372 -165,1.4065179824829102 -166,1.405287742614746 -167,1.4040266275405884 -168,1.4027312994003296 -169,1.401411533355713 -170,1.4000701904296875 -171,1.398717999458313 -172,1.3973621129989624 -173,1.396008014678955 -174,1.3946681022644043 -175,1.393336296081543 -176,1.3920092582702637 -177,1.3906865119934082 -178,1.3893623352050781 -179,1.3880339860916138 -180,1.3866947889328003 -181,1.3853497505187988 -182,1.3839980363845825 -183,1.3826442956924438 -184,1.3812973499298096 -185,1.3799668550491333 -186,1.3786678314208984 -187,1.3774046897888184 -188,1.376188039779663 -189,1.375028371810913 -190,1.3739324808120728 -191,1.3729044198989868 -192,1.3719435930252075 -193,1.3710445165634155 -194,1.3701945543289185 -195,1.369380235671997 -196,1.3685790300369263 -197,1.3677784204483032 -198,1.3669588565826416 -199,1.3661186695098877 -200,1.3652663230895996 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.250/training_config.txt deleted file mode 100644 index 1c7a2bc..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, 
min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.250/training_log.csv deleted file mode 100644 index c44eada..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,10.691899299621582 -2,2.4418036937713623 -3,2.481048345565796 -4,2.4857254028320312 -5,2.5042612552642822 -6,2.512164354324341 -7,2.5092597007751465 -8,2.493121385574341 -9,2.4665300846099854 -10,2.4330193996429443 -11,2.393085241317749 -12,2.348879337310791 -13,2.3022212982177734 -14,2.254781723022461 -15,2.206820487976074 -16,2.158102512359619 -17,2.1070103645324707 -18,2.0545549392700195 -19,2.0016682147979736 -20,1.9484305381774902 -21,1.8905669450759888 -22,1.8303568363189697 -23,1.7680453062057495 -24,1.702478289604187 -25,1.6351560354232788 -26,1.563744306564331 -27,1.311976671218872 -28,1.2545673847198486 -29,1.145090103149414 -30,1.0820577144622803 -31,1.0125797986984253 -32,0.9350470304489136 -33,0.924946665763855 -34,0.8297977447509766 -35,0.8011391162872314 -36,0.7561903595924377 -37,0.7088679075241089 -38,0.6680602431297302 -39,0.6411536931991577 -40,0.6559550166130066 -41,0.5997649431228638 -42,0.7730346322059631 -43,0.8280656337738037 -44,0.7797960638999939 -45,0.7091997265815735 -46,1.2607266902923584 -47,0.6945741176605225 -48,0.4585501253604889 -49,0.49077308177948 -50,0.5107300281524658 -51,0.5230181813240051 -52,0.5218517780303955 -53,0.5069609880447388 -54,0.48283469676971436 -55,0.4560159742832184 -56,0.43072327971458435 -57,0.408400297164917 -58,0.38977673649787903 -59,0.3749425709247589 -60,0.3633555471897125 -61,0.3544009029865265 -62,0.3473244309425354 -63,0.3415098190307617 -64,0.33649516105651855 -65,0.3316100537776947 -66,0.32641372084617615 -67,0.32075443863868713 -68,0.3146274983882904 -69,0.3344081938266754 -70,0.3594000041484833 -71,0.3855386972427368 -72,0.41210436820983887 -73,0.4378718137741089 -74,0.46450912952423096 -75,0.49073055386543274 -76,0.516434907913208 -77,0.5409712791442871 -78,0.5624517202377319 
-79,0.5786750316619873 -80,0.5908172130584717 -81,0.5999523997306824 -82,0.6063276529312134 -83,0.6099898219108582 -84,0.611060380935669 -85,0.6097903251647949 -86,0.6064915060997009 -87,0.6014800071716309 -88,0.5950443148612976 -89,0.587608277797699 -90,0.5796747803688049 -91,0.5710820555686951 -92,0.5620809197425842 -93,0.5526764988899231 -94,0.5429211854934692 -95,0.5328989028930664 -96,0.5226753354072571 -97,0.5123082399368286 -98,0.501945436000824 -99,0.49221071600914 -100,0.48303061723709106 -101,0.4741534888744354 -102,0.46576517820358276 -103,0.45765385031700134 -104,0.4496653378009796 -105,0.44180357456207275 -106,0.4340713322162628 -107,0.4264746606349945 -108,0.41901952028274536 -109,0.41171008348464966 -110,0.4045470654964447 -111,0.39753568172454834 -112,0.3906799554824829 -113,0.3839794993400574 -114,0.37744975090026855 -115,0.3710956275463104 -116,0.3649037182331085 -117,0.35888978838920593 -118,0.3530872166156769 -119,0.34746214747428894 -120,0.34195777773857117 -121,0.33656951785087585 -122,0.3313125967979431 -123,0.3261815011501312 -124,0.3211629390716553 -125,0.31624743342399597 -126,0.3114219009876251 -127,0.3066769242286682 -128,0.3020317852497101 -129,0.2974754869937897 -130,0.2929826080799103 -131,0.2885856032371521 -132,0.2843117117881775 -133,0.28018757700920105 -134,0.2762256860733032 -135,0.27231597900390625 -136,0.2684580683708191 -137,0.264652281999588 -138,0.26089778542518616 -139,0.2571813762187958 -140,0.253492146730423 -141,0.2498331218957901 -142,0.24619846045970917 -143,0.24260205030441284 -144,0.23906944692134857 -145,0.23558111488819122 -146,0.23212233185768127 -147,0.22869664430618286 -148,0.22531232237815857 -149,0.22199158370494843 -150,0.21875381469726562 -151,0.21556884050369263 -152,0.2124442309141159 -153,0.20939739048480988 -154,0.20643378794193268 -155,0.20352526009082794 -156,0.20060275495052338 -157,0.19766484200954437 -158,0.19471104443073273 -159,0.19174416363239288 -160,0.18876749277114868 -161,0.18579120934009552 -162,0.18282192945480347 -163,0.1798609346151352 -164,0.17690475285053253 -165,0.1739608198404312 -166,0.17104056477546692 -167,0.16813701391220093 -168,0.16524840891361237 -169,0.16237345337867737 -170,0.1595136523246765 -171,0.15667285025119781 -172,0.15385328233242035 -173,0.15105968713760376 -174,0.14829552173614502 -175,0.14556652307510376 -176,0.14287470281124115 -177,0.14021843671798706 -178,0.1375933140516281 -179,0.13499733805656433 -180,0.1324353665113449 -181,0.12990543246269226 -182,0.12740564346313477 -183,0.12494152039289474 -184,0.12251941859722137 -185,0.12014204263687134 -186,0.11780188977718353 -187,0.11549772322177887 -188,0.11322697252035141 -189,0.11098814755678177 -190,0.1087820827960968 -191,0.10661078244447708 -192,0.10447963327169418 -193,0.1023913249373436 -194,0.10034750401973724 -195,0.09834945201873779 -196,0.09639610350131989 -197,0.09448365122079849 -198,0.092609703540802 -199,0.09077819436788559 -200,0.08899012207984924 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.300/training_config.txt deleted file mode 100644 index 22f5680..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def 
loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.300/training_log.csv deleted file mode 100644 index d65e9fc..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.300/training_log.csv +++ /dev/null @@ -1,44 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,10.008012771606445 -2,1.5136646032333374 -3,1.963661551475525 -4,2.00846004486084 -5,2.3287556171417236 -6,2.8865911960601807 -7,1.5657306909561157 -8,1.3989850282669067 -9,1.1783978939056396 -10,0.9126049280166626 -11,0.7305407524108887 -12,0.5502794981002808 -13,0.4278123676776886 -14,0.35331448912620544 -15,0.3399680256843567 -16,0.36862725019454956 -17,0.35601118206977844 -18,0.2928250730037689 -19,0.24026460945606232 -20,0.20132431387901306 -21,0.1777777224779129 -22,0.1644756942987442 -23,0.1559000462293625 -24,0.14842535555362701 -25,0.14100223779678345 -26,0.13327759504318237 -27,0.12493190914392471 -28,0.11606127768754959 -29,0.10688768327236176 -30,0.09770044684410095 -31,0.08882470428943634 -32,0.08060001581907272 -33,0.07335210591554642 -34,0.06731893122196198 -35,0.0625244677066803 -36,0.0590079128742218 -37,0.05645587295293808 -38,0.08539224416017532 -39,nan -40,nan -41,nan -42,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.400/training_config.txt deleted file mode 100644 index c2bd30a..0000000 --- 
a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.400/training_log.csv deleted file mode 100644 index aa5c616..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.400/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,12.035469055175781 -2,28320.31640625 -3,22977.822265625 -4,22048.146484375 -5,25297.0703125 -6,9848.908203125 -7,4.015556335449219 -8,5.419307231903076 -9,12.20499324798584 -10,21.36324691772461 -11,33.572654724121094 -12,43.84205627441406 -13,39.69121170043945 -14,25.13047218322754 -15,12.763236999511719 -16,7.09976863861084 -17,5.247854232788086 -18,3.994004249572754 -19,3.2974741458892822 -20,2.9163386821746826 -21,2.693575620651245 -22,2.54392671585083 -23,2.4343209266662598 -24,2.3493449687957764 -25,2.2801432609558105 -26,2.2223119735717773 -27,2.1730363368988037 -28,2.130439519882202 -29,2.093127727508545 -30,2.060157299041748 -31,2.031458854675293 -32,2.006171941757202 -33,1.9842044115066528 -34,1.9645462036132812 -35,1.9470897912979126 -36,1.9314258098602295 -37,1.9172407388687134 -38,1.9042695760726929 
-39,1.8924119472503662 -40,1.8815839290618896 -41,1.8716026544570923 -42,1.8623570203781128 -43,1.853771448135376 -44,1.8457603454589844 -45,1.8382556438446045 -46,1.8311878442764282 -47,1.8245275020599365 -48,1.8182331323623657 -49,1.8122634887695312 -50,1.8065834045410156 -51,1.801156759262085 -52,1.795976161956787 -53,1.7910813093185425 -54,1.7863601446151733 -55,1.7818076610565186 -56,1.7773959636688232 -57,1.7731164693832397 -58,1.7689632177352905 -59,1.7649766206741333 -60,1.7610820531845093 -61,1.757279634475708 -62,1.7535390853881836 -63,1.749871850013733 -64,1.7462588548660278 -65,1.7427153587341309 -66,1.7392078638076782 -67,1.7357563972473145 -68,1.732330560684204 -69,1.7289432287216187 -70,1.7255885601043701 -71,1.7222665548324585 -72,1.718971610069275 -73,1.7156926393508911 -74,1.7124390602111816 -75,1.7091871500015259 -76,1.7059533596038818 -77,1.7027175426483154 -78,1.6995086669921875 -79,1.696295976638794 -80,1.6930981874465942 -81,1.6898961067199707 -82,1.6867002248764038 -83,1.6835004091262817 -84,1.6803042888641357 -85,1.677112340927124 -86,1.6739221811294556 -87,1.6707290410995483 -88,1.667537808418274 -89,1.6643428802490234 -90,1.6611486673355103 -91,1.657951831817627 -92,1.6547596454620361 -93,1.6515612602233887 -94,1.6483652591705322 -95,1.645157814025879 -96,1.641929030418396 -97,1.6386873722076416 -98,1.6354483366012573 -99,1.6322016716003418 -100,1.6289541721343994 -101,1.625699758529663 -102,1.622440218925476 -103,1.6191773414611816 -104,1.6159106492996216 -105,1.612646460533142 -106,1.609376072883606 -107,1.6061058044433594 -108,1.6028237342834473 -109,1.5995476245880127 -110,1.5962610244750977 -111,1.5929783582687378 -112,1.5896867513656616 -113,1.586397409439087 -114,1.583094596862793 -115,1.5797966718673706 -116,1.5764893293380737 -117,1.5731889009475708 -118,1.5698792934417725 -119,1.5665912628173828 -120,1.5632896423339844 -121,1.5599987506866455 -122,1.5566974878311157 -123,1.5534090995788574 -124,1.5501154661178589 -125,1.54682457447052 -126,1.543527364730835 -127,1.5402332544326782 -128,1.5369389057159424 -129,1.5336486101150513 -130,1.5303590297698975 -131,1.5270662307739258 -132,1.5237733125686646 -133,1.5204836130142212 -134,1.5171918869018555 -135,1.5139033794403076 -136,1.5106123685836792 -137,1.5073237419128418 -138,1.5040305852890015 -139,1.5007412433624268 -140,1.49745512008667 -141,1.494168996810913 -142,1.490889072418213 -143,1.4876108169555664 -144,1.4843382835388184 -145,1.4810733795166016 -146,1.477813482284546 -147,1.4745557308197021 -148,1.471303105354309 -149,1.468051791191101 -150,1.464803695678711 -151,1.4615570306777954 -152,1.4583163261413574 -153,1.4550766944885254 -154,1.4518426656723022 -155,1.4486114978790283 -156,1.4453850984573364 -157,1.4421610832214355 -158,1.438943862915039 -159,1.4357261657714844 -160,1.4325119256973267 -161,1.4293023347854614 -162,1.4260990619659424 -163,1.4229012727737427 -164,1.4197100400924683 -165,1.416520595550537 -166,1.4133491516113281 -167,1.4101884365081787 -168,1.4070374965667725 -169,1.4038972854614258 -170,1.4007219076156616 -171,1.397496223449707 -172,1.3942615985870361 -173,1.3910260200500488 -174,1.3877898454666138 -175,1.384560227394104 -176,1.3813393115997314 -177,1.378126859664917 -178,1.3749117851257324 -179,1.3717060089111328 -180,1.3685067892074585 -181,1.3653132915496826 -182,1.3621246814727783 -183,1.3589425086975098 -184,1.3557634353637695 -185,1.3525965213775635 -186,1.3494369983673096 -187,1.3462451696395874 -188,1.343032956123352 -189,1.3398205041885376 -190,1.3366085290908813 
-191,1.3334026336669922 -192,1.3302001953125 -193,1.3270058631896973 -194,1.3238215446472168 -195,1.3206442594528198 -196,1.3174751996994019 -197,1.3143184185028076 -198,1.3111613988876343 -199,1.3079962730407715 -200,1.3048354387283325 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.500/training_config.txt deleted file mode 100644 index 492e767..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.500/training_log.csv deleted file mode 100644 index c07bb2e..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.500/training_log.csv +++ /dev/null @@ -1,16 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,271.5838623046875 -2,1.8033311367034912 -3,1.9317882061004639 -4,1.894938588142395 -5,1.4872627258300781 -6,1.1583973169326782 -7,1.2511214017868042 -8,1.5288810729980469 -9,34.022972106933594 -10,19.399734497070312 -11,nan -12,nan -13,nan -14,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.600/training_config.txt 
b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.600/training_config.txt deleted file mode 100644 index 52399fe..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.600/training_log.csv deleted file mode 100644 index 9910762..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.600/training_log.csv +++ /dev/null @@ -1,14 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,6486.37255859375 -2,28375.791015625 -3,28376.5703125 -4,28202.478515625 -5,23100.681640625 -6,8428.63671875 -7,413.37872314453125 -8,1.3775529861450195 -9,nan -10,nan -11,nan -12,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.700/training_config.txt deleted file mode 100644 index cbef95c..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 
-Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.700/training_log.csv deleted file mode 100644 index ea07cd9..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,15202.1357421875 -2,28433.28515625 -3,28434.833984375 -4,28434.833984375 -5,28434.833984375 -6,28434.833984375 -7,28434.833984375 -8,28434.833984375 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.800/training_config.txt deleted file mode 100644 index 34337b5..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 
1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.800/training_log.csv deleted file mode 100644 index 1da7dff..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,21313.91015625 -2,28434.833984375 -3,28434.833984375 -4,28434.833984375 -5,28434.833984375 -6,28434.833984375 -7,28434.833984375 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.900/training_config.txt deleted file mode 100644 index 41fc52a..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 
1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.900/training_log.csv deleted file mode 100644 index 0151348..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,25620.671875 -2,28434.833984375 -3,28434.833984375 -4,28434.833984375 -5,28434.833984375 -6,28434.833984375 -7,28434.833984375 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_1.000/training_config.txt deleted file mode 100644 index a3fc571..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] 
-[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_1.000/training_log.csv deleted file mode 100644 index 91763a0..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,26963.833984375 -2,28434.833984375 -3,28434.833984375 -4,28434.833984375 -5,28434.833984375 -6,28434.833984375 -7,28434.833984375 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_16.000/training_config.txt deleted file mode 100644 index 93f04d0..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] 
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_16.000/training_log.csv deleted file mode 100644 index 9780ac9..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,28506.94921875 -2,28506.94921875 -3,28506.94921875 -4,28506.94921875 -5,28506.94921875 -6,28506.94921875 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_2.000/training_config.txt deleted file mode 100644 index b6eec5f..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_2.000/training_log.csv deleted file mode 100644 index 11f9804..0000000 --- 
a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,28467.70703125 -2,0.9552232027053833 -3,nan -4,nan -5,nan -6,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_4.000/training_config.txt deleted file mode 100644 index 96d3810..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_4.000/training_log.csv deleted file mode 100644 index 9780ac9..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,28506.94921875 -2,28506.94921875 -3,28506.94921875 -4,28506.94921875 -5,28506.94921875 -6,28506.94921875 diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_8.000/training_config.txt deleted file mode 100644 index b56af50..0000000 --- 
a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/1) - 1) * 1/t_max * (-t_span + t_max) + 1)**-1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_8.000/training_log.csv deleted file mode 100644 index 9780ac9..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_mirrored/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,118.98458862304688 -1,28506.94921875 -2,28506.94921875 -3,28506.94921875 -4,28506.94921875 -5,28506.94921875 -6,28506.94921875 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.010/training_config.txt deleted file mode 100644 index 5076d93..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight 
= 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.010/training_log.csv deleted file mode 100644 index f36d4a3..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,19.85808563232422 -2,17.379470825195312 -3,15.750399589538574 -4,15.051355361938477 -5,13.290031433105469 -6,12.187076568603516 -7,10.055891990661621 -8,9.403786659240723 -9,7.63273286819458 -10,7.38482666015625 -11,6.516764163970947 -12,6.144367218017578 -13,5.92551851272583 -14,5.64841890335083 -15,5.53433084487915 -16,5.235152721405029 -17,4.869664669036865 -18,4.600190162658691 -19,4.386261940002441 -20,4.084507942199707 -21,3.873014211654663 -22,3.6839871406555176 -23,3.522446393966675 -24,3.4066622257232666 -25,3.2669968605041504 -26,3.1672585010528564 -27,3.0822248458862305 -28,2.961632013320923 -29,2.8343207836151123 -30,2.711378335952759 -31,2.6556971073150635 -32,2.5728259086608887 -33,2.5270869731903076 -34,2.4669628143310547 -35,2.3825910091400146 -36,2.334088087081909 -37,2.2891478538513184 -38,2.2607784271240234 -39,2.201059341430664 -40,2.184824228286743 -41,2.1646902561187744 -42,2.1591808795928955 -43,2.139979124069214 -44,2.1233768463134766 -45,2.107274055480957 -46,2.090726137161255 -47,2.0644960403442383 -48,2.0449931621551514 -49,2.0261406898498535 -50,2.002485513687134 -51,1.9545843601226807 -52,1.929490566253662 -53,1.9113913774490356 -54,1.895596981048584 -55,1.8822860717773438 -56,1.8669270277023315 -57,1.8511989116668701 -58,1.838334321975708 -59,1.8269304037094116 -60,1.8158336877822876 
-61,1.8047574758529663 -62,1.7936228513717651 -63,1.7823107242584229 -64,1.7705187797546387 -65,1.757125735282898 -66,1.7435351610183716 -67,1.7334152460098267 -68,1.725490927696228 -69,1.7172086238861084 -70,1.708832025527954 -71,1.7002931833267212 -72,1.6916191577911377 -73,1.6839662790298462 -74,1.6766088008880615 -75,1.6686409711837769 -76,1.6601521968841553 -77,1.6467013359069824 -78,1.639936089515686 -79,1.633082389831543 -80,1.6262949705123901 -81,1.619619369506836 -82,1.6130493879318237 -83,1.6065738201141357 -84,1.6001782417297363 -85,1.5938471555709839 -86,1.587563157081604 -87,1.5813089609146118 -88,1.5750632286071777 -89,1.56880784034729 -90,1.5625191926956177 -91,1.5561708211898804 -92,1.5497429370880127 -93,1.5432158708572388 -94,1.536601185798645 -95,1.5299266576766968 -96,1.5231963396072388 -97,1.5162609815597534 -98,1.5083489418029785 -99,1.5037436485290527 -100,1.4983913898468018 -101,1.492939829826355 -102,1.4873669147491455 -103,1.4813299179077148 -104,1.4756896495819092 -105,1.47088623046875 -106,1.4672181606292725 -107,1.4639310836791992 -108,1.4609609842300415 -109,1.4578653573989868 -110,1.4544627666473389 -111,1.4508801698684692 -112,1.4476333856582642 -113,1.4445092678070068 -114,1.4410755634307861 -115,1.437735676765442 -116,1.4345732927322388 -117,1.431471347808838 -118,1.4283069372177124 -119,1.4243236780166626 -120,1.421122670173645 -121,1.4157768487930298 -122,1.4151263236999512 -123,1.4153257608413696 -124,1.4132072925567627 -125,1.4112919569015503 -126,1.4099277257919312 -127,1.4078691005706787 -128,1.4056203365325928 -129,1.40314781665802 -130,1.4010237455368042 -131,1.3979313373565674 -132,1.3955028057098389 -133,1.392837643623352 -134,1.3896598815917969 -135,1.3859736919403076 -136,1.3810385465621948 -137,1.380601406097412 -138,1.3796851634979248 -139,1.376988410949707 -140,1.3750779628753662 -141,1.375860571861267 -142,1.374847650527954 -143,1.3737318515777588 -144,1.373056411743164 -145,1.3707730770111084 -146,1.3685814142227173 -147,1.3652540445327759 -148,1.3632481098175049 -149,1.3624531030654907 -150,1.3613171577453613 -151,1.3601508140563965 -152,1.3587501049041748 -153,1.3570698499679565 -154,1.3550853729248047 -155,1.3530980348587036 -156,1.3517911434173584 -157,1.350175380706787 -158,1.3483737707138062 -159,1.3463202714920044 -160,1.3444156646728516 -161,1.3433018922805786 -162,1.3421134948730469 -163,1.3406649827957153 -164,1.3389911651611328 -165,1.3368808031082153 -166,1.3368786573410034 -167,1.3358670473098755 -168,1.334167242050171 -169,1.3334100246429443 -170,1.3326612710952759 -171,1.331364631652832 -172,1.3297756910324097 -173,1.3279670476913452 -174,1.3261874914169312 -175,1.3243361711502075 -176,1.3232604265213013 -177,1.3215861320495605 -178,1.319472074508667 -179,1.3183190822601318 -180,1.317492961883545 -181,1.316162109375 -182,1.3143301010131836 -183,1.3131884336471558 -184,1.3120367527008057 -185,1.3107049465179443 -186,1.3090239763259888 -187,1.3076767921447754 -188,1.3063932657241821 -189,1.3048959970474243 -190,1.3038780689239502 -191,1.3027527332305908 -192,1.3013596534729004 -193,1.3000562191009521 -194,1.2991145849227905 -195,1.2979929447174072 -196,1.2967708110809326 -197,1.295110821723938 -198,1.2945488691329956 -199,1.293691635131836 -200,1.2926716804504395 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.020/training_config.txt deleted file mode 100644 index c1ccf06..0000000 --- 
a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.020/training_log.csv deleted file mode 100644 index 99b70a3..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,18.664377212524414 -2,14.619477272033691 -3,10.95068359375 -4,8.222716331481934 -5,6.523622989654541 -6,5.985683917999268 -7,5.617551326751709 -8,5.108346462249756 -9,4.468939781188965 -10,4.071103572845459 -11,3.607121706008911 -12,3.363325595855713 -13,3.1282734870910645 -14,2.951246738433838 -15,2.7218079566955566 -16,2.5496673583984375 -17,2.302729606628418 -18,2.2094309329986572 -19,2.1611318588256836 -20,2.078108549118042 -21,1.9833781719207764 -22,1.9197534322738647 -23,1.8642421960830688 -24,1.8196296691894531 -25,1.7814797163009644 -26,1.7456839084625244 -27,1.7244369983673096 -28,1.7014950513839722 -29,1.6763468980789185 -30,1.6517633199691772 -31,1.6308199167251587 -32,1.608999252319336 -33,1.5868505239486694 -34,1.5651899576187134 -35,1.5495976209640503 -36,1.5338057279586792 -37,1.5179872512817383 -38,1.5023143291473389 
-39,1.4872210025787354 -40,1.4752758741378784 -41,1.4627472162246704 -42,1.450251579284668 -43,1.4389187097549438 -44,1.427922248840332 -45,1.4171912670135498 -46,1.406924843788147 -47,1.3971725702285767 -48,1.3879215717315674 -49,1.3791345357894897 -50,1.370806336402893 -51,1.362781047821045 -52,1.3551106452941895 -53,1.348101258277893 -54,1.3418102264404297 -55,1.3356130123138428 -56,1.3311872482299805 -57,1.3260899782180786 -58,1.320288062095642 -59,1.316841959953308 -60,1.3133689165115356 -61,1.3088141679763794 -62,1.3054851293563843 -63,1.3034287691116333 -64,1.3004122972488403 -65,1.2958996295928955 -66,1.292366623878479 -67,1.2895770072937012 -68,1.2870928049087524 -69,1.2850358486175537 -70,1.2828032970428467 -71,1.2805418968200684 -72,1.278157114982605 -73,1.2755482196807861 -74,1.2728077173233032 -75,1.2699213027954102 -76,1.2671846151351929 -77,1.2644270658493042 -78,1.2615846395492554 -79,1.2595468759536743 -80,1.257949709892273 -81,1.256246566772461 -82,1.2543084621429443 -83,1.2521964311599731 -84,1.2502590417861938 -85,1.2495324611663818 -86,1.2482664585113525 -87,1.2468311786651611 -88,1.2452523708343506 -89,1.2435612678527832 -90,1.2418208122253418 -91,1.2400940656661987 -92,1.2383594512939453 -93,1.2364263534545898 -94,1.235144853591919 -95,1.2337700128555298 -96,1.2322418689727783 -97,1.2308050394058228 -98,1.2294456958770752 -99,1.2278883457183838 -100,1.227183222770691 -101,1.2263624668121338 -102,1.225350022315979 -103,1.2242355346679688 -104,1.223046064376831 -105,1.2217849493026733 -106,1.2204055786132812 -107,1.2188079357147217 -108,1.2184267044067383 -109,1.2178369760513306 -110,1.2171831130981445 -111,1.216426968574524 -112,1.2155662775039673 -113,1.2145837545394897 -114,1.2136385440826416 -115,1.2133158445358276 -116,1.2126939296722412 -117,1.211817741394043 -118,1.2114284038543701 -119,1.2110176086425781 -120,1.2104510068893433 -121,1.2097350358963013 -122,1.2094030380249023 -123,1.2090423107147217 -124,1.2084604501724243 -125,1.2080657482147217 -126,1.207800030708313 -127,1.2073121070861816 -128,1.206944465637207 -129,1.206695318222046 -130,1.206282615661621 -131,1.2059091329574585 -132,1.2056756019592285 -133,1.2053182125091553 -134,1.2049928903579712 -135,1.2047638893127441 -136,1.204461932182312 -137,1.2041664123535156 -138,1.2039520740509033 -139,1.2036938667297363 -140,1.203426718711853 -141,1.2032209634780884 -142,1.2029938697814941 -143,1.2027502059936523 -144,1.2025507688522339 -145,1.2023496627807617 -146,1.2021300792694092 -147,1.2019357681274414 -148,1.2017531394958496 -149,1.2015550136566162 -150,1.2013667821884155 -151,1.2011953592300415 -152,1.2010176181793213 -153,1.2008386850357056 -154,1.2006738185882568 -155,1.2005128860473633 -156,1.2003475427627563 -157,1.2001882791519165 -158,1.200036644935608 -159,1.1998844146728516 -160,1.199731707572937 -161,1.1995837688446045 -162,1.1994401216506958 -163,1.1992967128753662 -164,1.199154257774353 -165,1.1990156173706055 -166,1.1988803148269653 -167,1.1987460851669312 -168,1.1986128091812134 -169,1.198482632637024 -170,1.198356032371521 -171,1.1982314586639404 -172,1.1981085538864136 -173,1.1979875564575195 -174,1.197869062423706 -175,1.1977524757385254 -176,1.1976372003555298 -177,1.1975233554840088 -178,1.1974115371704102 -179,1.1973015069961548 -180,1.197192907333374 -181,1.1970857381820679 -182,1.1969797611236572 -183,1.1968752145767212 -184,1.1967722177505493 -185,1.1966711282730103 -186,1.1965712308883667 -187,1.1964727640151978 -188,1.1963752508163452 -189,1.1962790489196777 
-190,1.1961843967437744 -191,1.1960911750793457 -192,1.1959993839263916 -193,1.1959086656570435 -194,1.1958187818527222 -195,1.1957300901412964 -196,1.195642113685608 -197,1.1955549716949463 -198,1.1954686641693115 -199,1.1953829526901245 -200,1.1952980756759644 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.040/training_config.txt deleted file mode 100644 index d8a39ff..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.040/training_log.csv deleted file mode 100644 index 57e6753..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,16.468137741088867 -2,9.177665710449219 -3,6.193994522094727 -4,5.110238552093506 -5,4.105052471160889 -6,3.846336603164673 -7,3.2850241661071777 -8,2.6850502490997314 -9,2.3773033618927 -10,2.065570116043091 -11,1.8989131450653076 -12,1.7763721942901611 -13,1.7096606492996216 -14,1.6529128551483154 -15,1.6048249006271362 -16,1.5523838996887207 
-17,1.5119719505310059 -18,1.4738705158233643 -19,1.4480409622192383 -20,1.4259369373321533 -21,1.405691385269165 -22,1.3875818252563477 -23,1.3709807395935059 -24,1.357700228691101 -25,1.347400426864624 -26,1.337282657623291 -27,1.3275161981582642 -28,1.3180397748947144 -29,1.308921217918396 -30,1.3003870248794556 -31,1.2924543619155884 -32,1.2848303318023682 -33,1.2774347066879272 -34,1.2702651023864746 -35,1.2634447813034058 -36,1.257041096687317 -37,1.2509509325027466 -38,1.245119333267212 -39,1.239537000656128 -40,1.2343688011169434 -41,1.229650616645813 -42,1.225394606590271 -43,1.2216029167175293 -44,1.2182756662368774 -45,1.215396761894226 -46,1.2129770517349243 -47,1.2112520933151245 -48,1.2100051641464233 -49,1.2088631391525269 -50,1.2078205347061157 -51,1.2074354887008667 -52,1.2077986001968384 -53,1.2084096670150757 -54,1.2088704109191895 -55,1.208432912826538 -56,1.2073032855987549 -57,1.2061381340026855 -58,1.2048498392105103 -59,1.2037749290466309 -60,1.2031033039093018 -61,1.2027584314346313 -62,1.202511191368103 -63,1.2022497653961182 -64,1.2019579410552979 -65,1.20164954662323 -66,1.2013450860977173 -67,1.2010760307312012 -68,1.2008609771728516 -69,1.2006796598434448 -70,1.2005043029785156 -71,1.2003179788589478 -72,1.2001090049743652 -73,1.1998738050460815 -74,1.199615716934204 -75,1.1993401050567627 -76,1.1990541219711304 -77,1.198765754699707 -78,1.1984862089157104 -79,1.198224663734436 -80,1.1979866027832031 -81,1.1977721452713013 -82,1.1975769996643066 -83,1.1973940134048462 -84,1.197218418121338 -85,1.1970480680465698 -86,1.1968841552734375 -87,1.1967283487319946 -88,1.196581244468689 -89,1.196441888809204 -90,1.1963083744049072 -91,1.1961768865585327 -92,1.1960444450378418 -93,1.1959091424942017 -94,1.1957709789276123 -95,1.1956318616867065 -96,1.1954951286315918 -97,1.1953617334365845 -98,1.1952325105667114 -99,1.1951091289520264 -100,1.1949909925460815 -101,1.194877028465271 -102,1.194767713546753 -103,1.1946622133255005 -104,1.1945596933364868 -105,1.1944588422775269 -106,1.194359540939331 -107,1.1942611932754517 -108,1.194163203239441 -109,1.1940656900405884 -110,1.1939691305160522 -111,1.193873643875122 -112,1.1937793493270874 -113,1.193686842918396 -114,1.1935958862304688 -115,1.1935073137283325 -116,1.1934207677841187 -117,1.1933367252349854 -118,1.1932551860809326 -119,1.193175196647644 -120,1.1930969953536987 -121,1.193020224571228 -122,1.1929450035095215 -123,1.19287109375 -124,1.192798376083374 -125,1.192726492881775 -126,1.1926559209823608 -127,1.19258713722229 -128,1.1925197839736938 -129,1.1924537420272827 -130,1.192388653755188 -131,1.1923245191574097 -132,1.1922610998153687 -133,1.1921982765197754 -134,1.1921360492706299 -135,1.1920744180679321 -136,1.1920132637023926 -137,1.1919528245925903 -138,1.191892385482788 -139,1.1918329000473022 -140,1.191773772239685 -141,1.1917152404785156 -142,1.191657304763794 -143,1.1915996074676514 -144,1.1915427446365356 -145,1.1914865970611572 -146,1.1914308071136475 -147,1.1913756132125854 -148,1.1913210153579712 -149,1.191266655921936 -150,1.1912126541137695 -151,1.1911591291427612 -152,1.191105842590332 -153,1.1910529136657715 -154,1.1910004615783691 -155,1.190948724746704 -156,1.1908971071243286 -157,1.1908458471298218 -158,1.1907950639724731 -159,1.1907447576522827 -160,1.190694808959961 -161,1.1906452178955078 -162,1.1905959844589233 -163,1.1905471086502075 -164,1.1904983520507812 -165,1.1904501914978027 -166,1.1904022693634033 -167,1.1903549432754517 -168,1.190307855606079 -169,1.1902613639831543 
-170,1.190214991569519 -171,1.190168857574463 -172,1.190123200416565 -173,1.190077781677246 -174,1.190032720565796 -175,1.1899882555007935 -176,1.1899440288543701 -177,1.1899001598358154 -178,1.1898565292358398 -179,1.1898133754730225 -180,1.1897704601287842 -181,1.189728021621704 -182,1.1896859407424927 -183,1.1896436214447021 -184,1.1896018981933594 -185,1.1895607709884644 -186,1.1895196437835693 -187,1.1894787549972534 -188,1.1894383430480957 -189,1.189397931098938 -190,1.1893579959869385 -191,1.1893181800842285 -192,1.1892787218093872 -193,1.189239501953125 -194,1.1892006397247314 -195,1.189161777496338 -196,1.189123272895813 -197,1.1890850067138672 -198,1.1890467405319214 -199,1.1890087127685547 -200,1.1889710426330566 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.050/training_config.txt deleted file mode 100644 index cf5a5ac..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.050/training_log.csv deleted file mode 100644 index 5834dc6..0000000 --- 
a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,15.553996086120605 -2,7.683303356170654 -3,5.591757297515869 -4,3.9254443645477295 -5,2.952569007873535 -6,2.4781839847564697 -7,2.083414077758789 -8,1.8678888082504272 -9,1.7397929430007935 -10,1.6574159860610962 -11,1.5862236022949219 -12,1.5313540697097778 -13,1.4868649244308472 -14,1.4586281776428223 -15,1.437097430229187 -16,1.4180573225021362 -17,1.400762915611267 -18,1.3849246501922607 -19,1.37017822265625 -20,1.3562932014465332 -21,1.343382477760315 -22,1.331230878829956 -23,1.319946527481079 -24,1.3097329139709473 -25,1.300407886505127 -26,1.291599988937378 -27,1.2830791473388672 -28,1.2747759819030762 -29,1.2666963338851929 -30,1.258841633796692 -31,1.2512539625167847 -32,1.2441962957382202 -33,1.2376867532730103 -34,1.2315939664840698 -35,1.2259615659713745 -36,1.2210214138031006 -37,1.2165907621383667 -38,1.2124102115631104 -39,1.2108720541000366 -40,1.2127211093902588 -41,1.2095236778259277 -42,1.2062005996704102 -43,1.2055737972259521 -44,1.2057305574417114 -45,1.2056323289871216 -46,1.2052922248840332 -47,1.2048068046569824 -48,1.204283356666565 -49,1.203819990158081 -50,1.2033140659332275 -51,1.202595829963684 -52,1.2016714811325073 -53,1.2006701231002808 -54,1.1997201442718506 -55,1.1988704204559326 -56,1.1981325149536133 -57,1.197489857673645 -58,1.196938395500183 -59,1.1964324712753296 -60,1.1959600448608398 -61,1.1955262422561646 -62,1.195174217224121 -63,1.1949875354766846 -64,1.1949903964996338 -65,1.1949979066848755 -66,1.1948014497756958 -67,1.194408655166626 -68,1.1939871311187744 -69,1.193662166595459 -70,1.1934322118759155 -71,1.193251132965088 -72,1.1930875778198242 -73,1.1929270029067993 -74,1.1927664279937744 -75,1.192608118057251 -76,1.1924532651901245 -77,1.1923065185546875 -78,1.192165493965149 -79,1.1920263767242432 -80,1.191886067390442 -81,1.1917402744293213 -82,1.1915879249572754 -83,1.1914303302764893 -84,1.1912705898284912 -85,1.1911123991012573 -86,1.1909594535827637 -87,1.190815806388855 -88,1.1906814575195312 -89,1.1905543804168701 -90,1.1904343366622925 -91,1.19031822681427 -92,1.1902034282684326 -93,1.1900886297225952 -94,1.1899722814559937 -95,1.189855694770813 -96,1.1897395849227905 -97,1.1896252632141113 -98,1.18951416015625 -99,1.189407229423523 -100,1.189305067062378 -101,1.1892067193984985 -102,1.1891123056411743 -103,1.1890219449996948 -104,1.1889344453811646 -105,1.1888495683670044 -106,1.1887677907943726 -107,1.1886882781982422 -108,1.1886104345321655 -109,1.1885340213775635 -110,1.1884585618972778 -111,1.1883848905563354 -112,1.188312292098999 -113,1.1882412433624268 -114,1.1881717443466187 -115,1.1881033182144165 -116,1.1880367994308472 -117,1.187971830368042 -118,1.1879081726074219 -119,1.187846064567566 -120,1.187785029411316 -121,1.1877249479293823 -122,1.1876654624938965 -123,1.187606692314148 -124,1.1875483989715576 -125,1.187490463256836 -126,1.1874334812164307 -127,1.187376856803894 -128,1.1873211860656738 -129,1.1872659921646118 -130,1.1872111558914185 -131,1.1871567964553833 -132,1.1871027946472168 -133,1.187049150466919 -134,1.1869957447052002 -135,1.1869423389434814 -136,1.1868892908096313 -137,1.1868367195129395 -138,1.1867847442626953 -139,1.186733365058899 -140,1.1866823434829712 -141,1.1866322755813599 -142,1.1865826845169067 -143,1.18653404712677 -144,1.186486005783081 -145,1.1864384412765503 -146,1.1863911151885986 -147,1.1863445043563843 -148,1.186298131942749 
-149,1.1862521171569824 -150,1.186206579208374 -151,1.1861612796783447 -152,1.1861164569854736 -153,1.1860722303390503 -154,1.186028003692627 -155,1.1859842538833618 -156,1.1859408617019653 -157,1.1858974695205688 -158,1.1858547925949097 -159,1.1858118772506714 -160,1.1857695579528809 -161,1.1857274770736694 -162,1.1856855154037476 -163,1.1856440305709839 -164,1.1856023073196411 -165,1.1855612993240356 -166,1.1855201721191406 -167,1.1854794025421143 -168,1.185438871383667 -169,1.1853984594345093 -170,1.1853585243225098 -171,1.1853185892105103 -172,1.1852787733078003 -173,1.185239315032959 -174,1.1851999759674072 -175,1.1851606369018555 -176,1.1851216554641724 -177,1.1850826740264893 -178,1.1850438117980957 -179,1.1850053071975708 -180,1.1849669218063354 -181,1.184929370880127 -182,1.184891700744629 -183,1.184854507446289 -184,1.1848175525665283 -185,1.1847805976867676 -186,1.1847440004348755 -187,1.1847078800201416 -188,1.1846716403961182 -189,1.1846356391906738 -190,1.1845996379852295 -191,1.1845638751983643 -192,1.1845283508300781 -193,1.184493064880371 -194,1.1844578981399536 -195,1.1844228506088257 -196,1.1843881607055664 -197,1.1843538284301758 -198,1.1843196153640747 -199,1.1842856407165527 -200,1.1842522621154785 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.080/training_config.txt deleted file mode 100644 index 4b8de44..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 
0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.080/training_log.csv deleted file mode 100644 index b897625..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,12.942340850830078 -2,5.729569435119629 -3,3.8225419521331787 -4,2.777601480484009 -5,2.2743945121765137 -6,1.9262100458145142 -7,1.7217192649841309 -8,1.5604474544525146 -9,1.4571447372436523 -10,1.400023341178894 -11,1.3560799360275269 -12,1.3226795196533203 -13,1.2952375411987305 -14,1.2732172012329102 -15,1.2598689794540405 -16,1.2491546869277954 -17,1.2402502298355103 -18,1.2337905168533325 -19,1.2293719053268433 -20,1.224892497062683 -21,1.222550392150879 -22,1.2263518571853638 -23,1.2247164249420166 -24,1.2205902338027954 -25,1.2157384157180786 -26,1.2109146118164062 -27,1.208504319190979 -28,1.2068085670471191 -29,1.2067192792892456 -30,1.207337498664856 -31,1.2078306674957275 -32,1.2075515985488892 -33,1.2063096761703491 -34,1.2043782472610474 -35,1.2022418975830078 -36,1.200255274772644 -37,1.1988509893417358 -38,1.1990231275558472 -39,1.1995069980621338 -40,1.1984750032424927 -41,1.1973075866699219 -42,1.1965385675430298 -43,1.195802092552185 -44,1.1949987411499023 -45,1.1942209005355835 -46,1.1934796571731567 -47,1.1927505731582642 -48,1.1920009851455688 -49,1.1912078857421875 -50,1.190374732017517 -51,1.1895986795425415 -52,1.1890380382537842 -53,1.1887775659561157 -54,1.188612937927246 -55,1.188256859779358 -56,1.1877230405807495 -57,1.187296986579895 -58,1.1870501041412354 -59,1.1868896484375 -60,1.186765432357788 -61,1.186655044555664 -62,1.1865495443344116 -63,1.1864464282989502 -64,1.1863445043563843 -65,1.1862437725067139 -66,1.1861464977264404 -67,1.1860530376434326 -68,1.185957431793213 -69,1.1858609914779663 -70,1.1857683658599854 -71,1.18567955493927 -72,1.1856005191802979 -73,1.1855289936065674 -74,1.1854618787765503 -75,1.1853965520858765 -76,1.1853296756744385 -77,1.185259222984314 -78,1.1851847171783447 -79,1.1851094961166382 -80,1.1850380897521973 -81,1.184971570968628 -82,1.1849111318588257 -83,1.1848546266555786 -84,1.1848008632659912 -85,1.1847485303878784 -86,1.184697151184082 -87,1.1846462488174438 -88,1.1845953464508057 -89,1.184545636177063 -90,1.1844968795776367 -91,1.1844487190246582 -92,1.1844021081924438 -93,1.1843568086624146 -94,1.1843132972717285 -95,1.1842716932296753 -96,1.1842334270477295 -97,1.1841983795166016 -98,1.1841647624969482 -99,1.1841317415237427 -100,1.1840991973876953 -101,1.1840665340423584 -102,1.1840341091156006 -103,1.184001088142395 -104,1.1839675903320312 -105,1.1839337348937988 -106,1.183899998664856 -107,1.183866262435913 -108,1.1838338375091553 -109,1.183801293373108 -110,1.18376886844635 -111,1.1837366819381714 -112,1.1837043762207031 -113,1.1836719512939453 -114,1.1836398839950562 -115,1.1836081743240356 -116,1.183577537536621 -117,1.1835466623306274 -118,1.183516025543213 -119,1.183485507965088 -120,1.183454990386963 -121,1.1834241151809692 -122,1.1833933591842651 -123,1.1833629608154297 -124,1.1833325624465942 -125,1.1833025217056274 -126,1.1832728385925293 -127,1.1832433938980103 -128,1.1832139492034912 
-129,1.1831846237182617 -130,1.1831556558609009 -131,1.1831269264221191 -132,1.1830986738204956 -133,1.183070182800293 -134,1.1830413341522217 -135,1.183012843132019 -136,1.1829842329025269 -137,1.1829562187194824 -138,1.1829279661178589 -139,1.1828995943069458 -140,1.1828714609146118 -141,1.1828434467315674 -142,1.1828153133392334 -143,1.1827871799468994 -144,1.1827589273452759 -145,1.1827309131622314 -146,1.1827030181884766 -147,1.1826751232147217 -148,1.1826475858688354 -149,1.1826204061508179 -150,1.1825937032699585 -151,1.1825674772262573 -152,1.1825416088104248 -153,1.1825162172317505 -154,1.1824908256530762 -155,1.1824655532836914 -156,1.182439923286438 -157,1.182414174079895 -158,1.1823883056640625 -159,1.182362675666809 -160,1.1823363304138184 -161,1.1823102235794067 -162,1.182283878326416 -163,1.1822576522827148 -164,1.1822313070297241 -165,1.1822048425674438 -166,1.1821783781051636 -167,1.1821520328521729 -168,1.1821256875991821 -169,1.1820992231369019 -170,1.1820727586746216 -171,1.1820459365844727 -172,1.1820193529129028 -173,1.1819928884506226 -174,1.1819661855697632 -175,1.181939721107483 -176,1.181913137435913 -177,1.1818864345550537 -178,1.1818598508834839 -179,1.1818335056304932 -180,1.1818081140518188 -181,1.181782603263855 -182,1.1817569732666016 -183,1.1817315816879272 -184,1.1817058324813843 -185,1.1816797256469727 -186,1.1816538572311401 -187,1.1816277503967285 -188,1.1816012859344482 -189,1.181574821472168 -190,1.1815485954284668 -191,1.1815223693847656 -192,1.1814957857131958 -193,1.1814682483673096 -194,1.1814393997192383 -195,1.181409239768982 -196,1.18137788772583 -197,1.181356430053711 -198,1.1813358068466187 -199,1.1813141107559204 -200,1.1812914609909058 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.100/training_config.txt deleted file mode 100644 index 5f572e4..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] 
-[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.100/training_log.csv deleted file mode 100644 index 85d5aa1..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,12.299866676330566 -2,6.844983100891113 -3,4.321502685546875 -4,2.5979416370391846 -5,1.9869745969772339 -6,1.756585955619812 -7,1.6589961051940918 -8,1.573098063468933 -9,1.5062566995620728 -10,1.4586665630340576 -11,1.4198827743530273 -12,1.387722373008728 -13,1.364918828010559 -14,1.3507171869277954 -15,1.3391441106796265 -16,1.3287914991378784 -17,1.3191725015640259 -18,1.3099843263626099 -19,1.3010238409042358 -20,1.2922487258911133 -21,1.2836819887161255 -22,1.275315761566162 -23,1.2671736478805542 -24,1.2591679096221924 -25,1.2512894868850708 -26,1.2436405420303345 -27,1.2362622022628784 -28,1.2293267250061035 -29,1.2228599786758423 -30,1.2168307304382324 -31,1.2112551927566528 -32,1.2063448429107666 -33,1.2022653818130493 -34,1.1990234851837158 -35,1.1963156461715698 -36,1.1939629316329956 -37,1.192138671875 -38,1.1909502744674683 -39,1.1909209489822388 -40,1.1925644874572754 -41,1.1935131549835205 -42,1.19228994846344 -43,1.1899322271347046 -44,1.1883738040924072 -45,1.1877529621124268 -46,1.1882712841033936 -47,1.189151406288147 -48,1.1899080276489258 -49,1.1903209686279297 -50,1.1903208494186401 -51,1.1899378299713135 -52,1.1892807483673096 -53,1.18848717212677 -54,1.1877055168151855 -55,1.1870602369308472 -56,1.186614751815796 -57,1.1864123344421387 -58,1.186505913734436 -59,1.1868654489517212 -60,1.1871751546859741 -61,1.1870667934417725 -62,1.1866321563720703 -63,1.186214804649353 -64,1.1859817504882812 -65,1.185892939567566 -66,1.1858874559402466 -67,1.1859050989151 -68,1.185921311378479 -69,1.1859186887741089 -70,1.1858835220336914 -71,1.1858139038085938 -72,1.1857203245162964 -73,1.1856164932250977 -74,1.1855151653289795 -75,1.1854280233383179 -76,1.185370683670044 -77,1.1853331327438354 -78,1.1853011846542358 -79,1.1852638721466064 -80,1.185214877128601 -81,1.1851650476455688 -82,1.1851234436035156 -83,1.1850851774215698 -84,1.1850552558898926 -85,1.1850277185440063 -86,1.1849972009658813 -87,1.1849617958068848 -88,1.1849220991134644 -89,1.1848775148391724 -90,1.1848297119140625 -91,1.1847821474075317 -92,1.1847351789474487 -93,1.1846925020217896 -94,1.1846553087234497 -95,1.1846208572387695 -96,1.184586763381958 -97,1.1845544576644897 -98,1.1845217943191528 -99,1.1844877004623413 -100,1.1844546794891357 -101,1.1844240427017212 -102,1.184396743774414 -103,1.1843703985214233 -104,1.184343695640564 -105,1.1843156814575195 -106,1.184287190437317 -107,1.184259295463562 -108,1.1842314004898071 
-109,1.184204339981079 -110,1.1841782331466675 -111,1.184152603149414 -112,1.1841260194778442 -113,1.1840986013412476 -114,1.1840715408325195 -115,1.1840448379516602 -116,1.184017539024353 -117,1.1839900016784668 -118,1.1839624643325806 -119,1.1839348077774048 -120,1.1839066743850708 -121,1.1838783025741577 -122,1.1838499307632446 -123,1.1838215589523315 -124,1.1837928295135498 -125,1.1837639808654785 -126,1.183734655380249 -127,1.18370521068573 -128,1.183674931526184 -129,1.183644413948059 -130,1.183613657951355 -131,1.1835825443267822 -132,1.183551549911499 -133,1.1835205554962158 -134,1.183489441871643 -135,1.1834585666656494 -136,1.1834276914596558 -137,1.1833966970443726 -138,1.1833659410476685 -139,1.1833351850509644 -140,1.183304786682129 -141,1.1832746267318726 -142,1.1832448244094849 -143,1.1832152605056763 -144,1.1831861734390259 -145,1.1831570863723755 -146,1.1831284761428833 -147,1.1831001043319702 -148,1.1830722093582153 -149,1.1830447912216187 -150,1.183017611503601 -151,1.1829909086227417 -152,1.1829644441604614 -153,1.1829383373260498 -154,1.1829125881195068 -155,1.182887077331543 -156,1.1828621625900269 -157,1.1828374862670898 -158,1.182813048362732 -159,1.1827887296676636 -160,1.1827646493911743 -161,1.1827409267425537 -162,1.1827173233032227 -163,1.1826941967010498 -164,1.1826709508895874 -165,1.1826475858688354 -166,1.1826239824295044 -167,1.1826006174087524 -168,1.1825778484344482 -169,1.1825555562973022 -170,1.1825332641601562 -171,1.182511329650879 -172,1.1824893951416016 -173,1.1824675798416138 -174,1.1824458837509155 -175,1.1824244260787964 -176,1.182403326034546 -177,1.1823822259902954 -178,1.182361364364624 -179,1.1823402643203735 -180,1.1823192834854126 -181,1.1822983026504517 -182,1.1822773218154907 -183,1.1822563409805298 -184,1.1822353601455688 -185,1.1822147369384766 -186,1.1821941137313843 -187,1.1821736097335815 -188,1.182153344154358 -189,1.1821328401565552 -190,1.1821125745773315 -191,1.1820921897888184 -192,1.1820716857910156 -193,1.182051658630371 -194,1.1820316314697266 -195,1.182011604309082 -196,1.181991457939148 -197,1.181971549987793 -198,1.1819515228271484 -199,1.1819308996200562 -200,1.1819103956222534 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.125/training_config.txt deleted file mode 100644 index f230045..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None 
else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.125/training_log.csv deleted file mode 100644 index 77246c8..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,12.185430526733398 -2,4.136042594909668 -3,2.136422634124756 -4,1.8181010484695435 -5,1.6860380172729492 -6,1.605135440826416 -7,1.5496128797531128 -8,1.5078991651535034 -9,1.4712241888046265 -10,1.437076210975647 -11,1.4033358097076416 -12,1.367519497871399 -13,1.3310716152191162 -14,1.3070029020309448 -15,1.2845218181610107 -16,1.267598032951355 -17,1.2495139837265015 -18,1.2432678937911987 -19,1.24173104763031 -20,1.234537959098816 -21,1.2227609157562256 -22,1.2123709917068481 -23,1.2082693576812744 -24,1.208201289176941 -25,1.2084705829620361 -26,1.2081557512283325 -27,1.2073079347610474 -28,1.206221103668213 -29,1.204985499382019 -30,1.2036815881729126 -31,1.2024186849594116 -32,1.2011915445327759 -33,1.2000683546066284 -34,1.1991034746170044 -35,1.1982733011245728 -36,1.1974730491638184 -37,1.1966627836227417 -38,1.1957850456237793 -39,1.1949055194854736 -40,1.1941052675247192 -41,1.1934317350387573 -42,1.19282066822052 -43,1.1922210454940796 -44,1.1916146278381348 -45,1.1909995079040527 -46,1.19038724899292 -47,1.1897966861724854 -48,1.189238429069519 -49,1.188713788986206 -50,1.1882203817367554 -51,1.187765121459961 -52,1.1873462200164795 -53,1.1869566440582275 -54,1.1865973472595215 -55,1.1862750053405762 -56,1.1860010623931885 -57,1.18578040599823 -58,1.18561589717865 -59,1.185511827468872 -60,1.1854345798492432 -61,1.185387134552002 -62,1.1853309869766235 -63,1.1852455139160156 -64,1.1851310729980469 -65,1.1850104331970215 -66,1.184918999671936 -67,1.1848535537719727 -68,1.1847976446151733 -69,1.1847482919692993 -70,1.1846935749053955 -71,1.1846369504928589 -72,1.1845781803131104 -73,1.1845139265060425 -74,1.1844515800476074 -75,1.1843938827514648 -76,1.184339165687561 -77,1.1842855215072632 -78,1.1842288970947266 -79,1.1841670274734497 -80,1.1841020584106445 -81,1.1840368509292603 -82,1.1839723587036133 -83,1.1839090585708618 -84,1.1838443279266357 -85,1.1837774515151978 -86,1.183706521987915 -87,1.1836391687393188 
-88,1.1835758686065674 -89,1.1835167407989502 -90,1.183471441268921 -91,1.1834356784820557 -92,1.1834019422531128 -93,1.1833678483963013 -94,1.1833287477493286 -95,1.183282494544983 -96,1.183234691619873 -97,1.1831849813461304 -98,1.1831365823745728 -99,1.1830928325653076 -100,1.1830538511276245 -101,1.1830193996429443 -102,1.1829849481582642 -103,1.182950496673584 -104,1.1829167604446411 -105,1.182883620262146 -106,1.1828505992889404 -107,1.1828181743621826 -108,1.182786226272583 -109,1.1827551126480103 -110,1.1827231645584106 -111,1.1826915740966797 -112,1.182660460472107 -113,1.1826319694519043 -114,1.182604432106018 -115,1.1825779676437378 -116,1.182552456855774 -117,1.1825268268585205 -118,1.1825008392333984 -119,1.1824750900268555 -120,1.1824493408203125 -121,1.1824238300323486 -122,1.1823985576629639 -123,1.182373285293579 -124,1.1823481321334839 -125,1.1823233366012573 -126,1.1822985410690308 -127,1.1822733879089355 -128,1.182248592376709 -129,1.1822242736816406 -130,1.18220055103302 -131,1.1821768283843994 -132,1.1821523904800415 -133,1.1821277141571045 -134,1.1821033954620361 -135,1.1820793151855469 -136,1.1820558309555054 -137,1.182032585144043 -138,1.1820087432861328 -139,1.1819852590560913 -140,1.1819617748260498 -141,1.1819384098052979 -142,1.181915283203125 -143,1.1818922758102417 -144,1.1818692684173584 -145,1.181846261024475 -146,1.1818231344223022 -147,1.181800127029419 -148,1.1817774772644043 -149,1.1817549467086792 -150,1.1817322969436646 -151,1.18170964717865 -152,1.1816871166229248 -153,1.1816648244857788 -154,1.1816425323486328 -155,1.1816202402114868 -156,1.1815983057022095 -157,1.1815762519836426 -158,1.1815543174743652 -159,1.181532621383667 -160,1.1815111637115479 -161,1.1814897060394287 -162,1.1814680099487305 -163,1.1814467906951904 -164,1.1814255714416504 -165,1.1814043521881104 -166,1.181383490562439 -167,1.1813626289367676 -168,1.1813417673110962 -169,1.1813210248947144 -170,1.1813002824783325 -171,1.1812796592712402 -172,1.181259274482727 -173,1.1812387704849243 -174,1.1812185049057007 -175,1.1811984777450562 -176,1.1811782121658325 -177,1.181158423423767 -178,1.1811388731002808 -179,1.181119441986084 -180,1.181099772453308 -181,1.1810804605484009 -182,1.1810611486434937 -183,1.181041955947876 -184,1.1810228824615479 -185,1.1810036897659302 -186,1.180984616279602 -187,1.180965781211853 -188,1.180946707725525 -189,1.1809278726577759 -190,1.180909276008606 -191,1.180890679359436 -192,1.1808724403381348 -193,1.1808539628982544 -194,1.1808357238769531 -195,1.1808176040649414 -196,1.1807994842529297 -197,1.1807818412780762 -198,1.180764079093933 -199,1.1807464361190796 -200,1.1807289123535156 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.160/training_config.txt deleted file mode 100644 index 19b234f..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or 
expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.160/training_log.csv deleted file mode 100644 index 564adfd..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,12.492166519165039 -2,2.9183480739593506 -3,2.3114655017852783 -4,2.0459823608398438 -5,1.6991477012634277 -6,1.562807559967041 -7,1.4794864654541016 -8,1.4119378328323364 -9,1.3550177812576294 -10,1.3234446048736572 -11,1.3023920059204102 -12,1.2869781255722046 -13,1.2747654914855957 -14,1.2653974294662476 -15,1.2579517364501953 -16,1.2530361413955688 -17,1.2492250204086304 -18,1.2447577714920044 -19,1.2389620542526245 -20,1.232204556465149 -21,1.2254621982574463 -22,1.2193845510482788 -23,1.2143090963363647 -24,1.210333228111267 -25,1.2075779438018799 -26,1.2057939767837524 -27,1.204635500907898 -28,1.203694462776184 -29,1.2025343179702759 -30,1.2012717723846436 -31,1.199929118156433 -32,1.1985195875167847 -33,1.1971304416656494 -34,1.1958863735198975 -35,1.1948527097702026 -36,1.1939966678619385 -37,1.1932872533798218 -38,1.1929646730422974 -39,1.1938999891281128 -40,1.195123553276062 -41,1.195219874382019 -42,1.1939510107040405 -43,1.1927205324172974 -44,1.1918818950653076 -45,1.191540002822876 -46,1.191346526145935 -47,1.1912046670913696 -48,1.1910593509674072 -49,1.1909089088439941 -50,1.1907234191894531 -51,1.1905001401901245 -52,1.1902605295181274 -53,1.190012812614441 -54,1.1897424459457397 -55,1.1894524097442627 -56,1.1891485452651978 -57,1.1888495683670044 -58,1.1885707378387451 -59,1.18831467628479 -60,1.18809175491333 -61,1.1879091262817383 -62,1.1877540349960327 -63,1.1876178979873657 -64,1.1875039339065552 -65,1.1874048709869385 -66,1.187304973602295 
-67,1.1872044801712036 -68,1.1870999336242676 -69,1.1869893074035645 -70,1.186877965927124 -71,1.1867680549621582 -72,1.1866651773452759 -73,1.1865746974945068 -74,1.186485767364502 -75,1.186397671699524 -76,1.186316728591919 -77,1.1862380504608154 -78,1.186165690422058 -79,1.1860960721969604 -80,1.186023473739624 -81,1.1859493255615234 -82,1.1858763694763184 -83,1.1858012676239014 -84,1.1857290267944336 -85,1.1856614351272583 -86,1.1855968236923218 -87,1.1855376958847046 -88,1.185486912727356 -89,1.1854355335235596 -90,1.1853805780410767 -91,1.1853210926055908 -92,1.1852576732635498 -93,1.1851933002471924 -94,1.1851283311843872 -95,1.185066819190979 -96,1.1850088834762573 -97,1.184955358505249 -98,1.18490731716156 -99,1.1848667860031128 -100,1.1848278045654297 -101,1.1847889423370361 -102,1.1847480535507202 -103,1.184706449508667 -104,1.1846657991409302 -105,1.1846238374710083 -106,1.184579849243164 -107,1.1845372915267944 -108,1.1844933032989502 -109,1.1844507455825806 -110,1.1844068765640259 -111,1.1843687295913696 -112,1.1843329668045044 -113,1.1842949390411377 -114,1.1842613220214844 -115,1.1842268705368042 -116,1.1841903924942017 -117,1.1841537952423096 -118,1.1841211318969727 -119,1.1840842962265015 -120,1.1840438842773438 -121,1.1840044260025024 -122,1.183968424797058 -123,1.1839293241500854 -124,1.1838926076889038 -125,1.18385648727417 -126,1.183820128440857 -127,1.1837831735610962 -128,1.18374502658844 -129,1.1837068796157837 -130,1.1836687326431274 -131,1.183631181716919 -132,1.183593511581421 -133,1.1835585832595825 -134,1.1835224628448486 -135,1.183487057685852 -136,1.18345308303833 -137,1.183421015739441 -138,1.1833900213241577 -139,1.1833585500717163 -140,1.1833277940750122 -141,1.1832976341247559 -142,1.1832683086395264 -143,1.1832401752471924 -144,1.183212399482727 -145,1.1831849813461304 -146,1.1831557750701904 -147,1.1831282377243042 -148,1.1830980777740479 -149,1.183068871498108 -150,1.1830391883850098 -151,1.1830089092254639 -152,1.1829787492752075 -153,1.1829489469528198 -154,1.1829195022583008 -155,1.1828912496566772 -156,1.1828621625900269 -157,1.1828347444534302 -158,1.1828070878982544 -159,1.1827787160873413 -160,1.182750940322876 -161,1.182723045349121 -162,1.1826963424682617 -163,1.1826672554016113 -164,1.1826406717300415 -165,1.1826130151748657 -166,1.1825835704803467 -167,1.182554841041565 -168,1.1825263500213623 -169,1.1824970245361328 -170,1.1824663877487183 -171,1.182436227798462 -172,1.1824063062667847 -173,1.1823753118515015 -174,1.1823443174362183 -175,1.1823140382766724 -176,1.182283878326416 -177,1.182254433631897 -178,1.1822254657745361 -179,1.1821975708007812 -180,1.1821707487106323 -181,1.1821461915969849 -182,1.1821235418319702 -183,1.1821023225784302 -184,1.182074785232544 -185,1.1820470094680786 -186,1.18202543258667 -187,1.1820083856582642 -188,1.1819854974746704 -189,1.1819581985473633 -190,1.1819318532943726 -191,1.1819075345993042 -192,1.1818846464157104 -193,1.1818612813949585 -194,1.1818335056304932 -195,1.1818068027496338 -196,1.1817824840545654 -197,1.181760311126709 -198,1.1817386150360107 -199,1.1817127466201782 -200,1.181687831878662 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.200/training_config.txt deleted file mode 100644 index 304353d..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.200/training_log.csv deleted file mode 100644 index a7e66da..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,13.708223342895508 -2,3.3870105743408203 -3,2.692207098007202 -4,2.2542009353637695 -5,2.0292673110961914 -6,1.748447060585022 -7,1.6027048826217651 -8,1.5067516565322876 -9,1.4313387870788574 -10,1.3729937076568604 -11,1.331534504890442 -12,1.2991636991500854 -13,1.273895263671875 -14,1.2556527853012085 -15,1.244903326034546 -16,1.241180181503296 -17,1.2400842905044556 -18,1.2387254238128662 -19,1.2352430820465088 -20,1.2304826974868774 -21,1.2267253398895264 -22,1.2243499755859375 -23,1.2234989404678345 -24,1.2236353158950806 -25,1.2238621711730957 -26,1.2233102321624756 -27,1.221786618232727 -28,1.2195143699645996 -29,1.2169922590255737 -30,1.214768648147583 -31,1.2130080461502075 -32,1.2116249799728394 -33,1.210370421409607 -34,1.2092012166976929 -35,1.207816243171692 -36,1.206142783164978 -37,1.2042394876480103 -38,1.2023016214370728 -39,1.2006350755691528 -40,1.1993041038513184 -41,1.1981664896011353 -42,1.1971666812896729 -43,1.1962242126464844 -44,1.1952929496765137 
-45,1.194345474243164 -46,1.1934187412261963 -47,1.192563533782959 -48,1.1917448043823242 -49,1.1910293102264404 -50,1.1904131174087524 -51,1.1899008750915527 -52,1.1895203590393066 -53,1.1892551183700562 -54,1.1890480518341064 -55,1.1888049840927124 -56,1.1884585618972778 -57,1.1880687475204468 -58,1.1877084970474243 -59,1.1874091625213623 -60,1.187173843383789 -61,1.1869715452194214 -62,1.186814785003662 -63,1.186668872833252 -64,1.1865204572677612 -65,1.1863709688186646 -66,1.186238408088684 -67,1.1861251592636108 -68,1.1860311031341553 -69,1.1859434843063354 -70,1.185847520828247 -71,1.185738444328308 -72,1.1856273412704468 -73,1.1855086088180542 -74,1.1854028701782227 -75,1.185303807258606 -76,1.1852480173110962 -77,1.1852129697799683 -78,1.1851521730422974 -79,1.1850738525390625 -80,1.1849974393844604 -81,1.1849209070205688 -82,1.1848536729812622 -83,1.1848019361495972 -84,1.1847401857376099 -85,1.1846803426742554 -86,1.1846266984939575 -87,1.1845852136611938 -88,1.1845422983169556 -89,1.1844912767410278 -90,1.184428095817566 -91,1.1843674182891846 -92,1.1843159198760986 -93,1.1842668056488037 -94,1.1842223405838013 -95,1.1841756105422974 -96,1.1841256618499756 -97,1.1840741634368896 -98,1.1840262413024902 -99,1.183975338935852 -100,1.1839275360107422 -101,1.1838796138763428 -102,1.183829665184021 -103,1.1837775707244873 -104,1.183724284172058 -105,1.1836707592010498 -106,1.1836180686950684 -107,1.1835675239562988 -108,1.183511734008789 -109,1.183449625968933 -110,1.183392882347107 -111,1.1833343505859375 -112,1.1832712888717651 -113,1.183211326599121 -114,1.183179497718811 -115,1.183144211769104 -116,1.1831088066101074 -117,1.1830687522888184 -118,1.1830239295959473 -119,1.1829769611358643 -120,1.182931661605835 -121,1.182883620262146 -122,1.1828298568725586 -123,1.182778239250183 -124,1.1827296018600464 -125,1.1826927661895752 -126,1.1826531887054443 -127,1.182612419128418 -128,1.1825649738311768 -129,1.1825165748596191 -130,1.1824700832366943 -131,1.182430624961853 -132,1.1823890209197998 -133,1.1823441982269287 -134,1.182295560836792 -135,1.1822458505630493 -136,1.1822055578231812 -137,1.1821614503860474 -138,1.18211829662323 -139,1.182081699371338 -140,1.1820400953292847 -141,1.181993007659912 -142,1.1819499731063843 -143,1.1819064617156982 -144,1.1818606853485107 -145,1.1818158626556396 -146,1.1817734241485596 -147,1.1817295551300049 -148,1.1816861629486084 -149,1.1816457509994507 -150,1.1816006898880005 -151,1.1815539598464966 -152,1.18150794506073 -153,1.1814582347869873 -154,1.1814067363739014 -155,1.1813504695892334 -156,1.1812856197357178 -157,1.181219458580017 -158,1.181154727935791 -159,1.1811027526855469 -160,1.1810476779937744 -161,1.1809844970703125 -162,1.180942177772522 -163,1.180925965309143 -164,1.1809028387069702 -165,1.1808701753616333 -166,1.1808271408081055 -167,1.1807740926742554 -168,1.1807143688201904 -169,1.1806495189666748 -170,1.1806036233901978 -171,1.1805821657180786 -172,1.1805552244186401 -173,1.1805163621902466 -174,1.1804637908935547 -175,1.1804057359695435 -176,1.1803605556488037 -177,1.1803237199783325 -178,1.1802899837493896 -179,1.1802560091018677 -180,1.1802167892456055 -181,1.1801704168319702 -182,1.1801238059997559 -183,1.180083155632019 -184,1.180046796798706 -185,1.180008888244629 -186,1.1799683570861816 -187,1.179925560951233 -188,1.1798856258392334 -189,1.1798449754714966 -190,1.1798043251037598 -191,1.1797668933868408 -192,1.1797287464141846 -193,1.1796895265579224 -194,1.1796534061431885 -195,1.1796140670776367 -196,1.1795711517333984 
-197,1.1795315742492676 -198,1.1794971227645874 -199,1.1794606447219849 -200,1.1794205904006958 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.250/training_config.txt deleted file mode 100644 index 1f0efde..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.250/training_log.csv deleted file mode 100644 index 4bc4001..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,18.699508666992188 -2,7.921181678771973 -3,4.985385417938232 -4,3.606903314590454 -5,3.0705313682556152 -6,2.7705533504486084 -7,2.5630171298980713 -8,2.403841733932495 -9,2.278005599975586 -10,2.172988176345825 -11,2.0812113285064697 -12,2.002127170562744 -13,1.9329179525375366 -14,1.8714561462402344 -15,1.8166054487228394 -16,1.7670681476593018 -17,1.7224096059799194 -18,1.6824917793273926 -19,1.64706289768219 -20,1.6156885623931885 -21,1.5879895687103271 -22,1.563458800315857 -23,1.541591763496399 
-24,1.5218169689178467 -25,1.5037840604782104 -26,1.4871015548706055 -27,1.4714221954345703 -28,1.4564505815505981 -29,1.4420125484466553 -30,1.427958369255066 -31,1.4141989946365356 -32,1.4010616540908813 -33,1.3882694244384766 -34,1.3757599592208862 -35,1.3635003566741943 -36,1.3518078327178955 -37,1.3410965204238892 -38,1.3306854963302612 -39,1.3206219673156738 -40,1.310916781425476 -41,1.3016357421875 -42,1.292813777923584 -43,1.2844966650009155 -44,1.2768685817718506 -45,1.2698354721069336 -46,1.2636137008666992 -47,1.2580924034118652 -48,1.2534133195877075 -49,1.2494653463363647 -50,1.2462817430496216 -51,1.2436107397079468 -52,1.241443157196045 -53,1.2396562099456787 -54,1.2381601333618164 -55,1.2368898391723633 -56,1.235800862312317 -57,1.234834909439087 -58,1.23396897315979 -59,1.2331703901290894 -60,1.2323999404907227 -61,1.2316327095031738 -62,1.2308515310287476 -63,1.2300701141357422 -64,1.2292722463607788 -65,1.2284525632858276 -66,1.2275824546813965 -67,1.2266680002212524 -68,1.2257204055786133 -69,1.22475004196167 -70,1.223748803138733 -71,1.222731351852417 -72,1.2216764688491821 -73,1.2206090688705444 -74,1.2195398807525635 -75,1.2184747457504272 -76,1.2174193859100342 -77,1.2163697481155396 -78,1.2153319120407104 -79,1.214308738708496 -80,1.2133013010025024 -81,1.2123013734817505 -82,1.2112997770309448 -83,1.2102973461151123 -84,1.2092938423156738 -85,1.2082910537719727 -86,1.2072886228561401 -87,1.206279993057251 -88,1.205237627029419 -89,1.20414137840271 -90,1.2029609680175781 -91,1.2017602920532227 -92,1.2005804777145386 -93,1.199440836906433 -94,1.198343276977539 -95,1.1973092555999756 -96,1.196346640586853 -97,1.195456624031067 -98,1.1946324110031128 -99,1.1939188241958618 -100,1.19336998462677 -101,1.1929872035980225 -102,1.1927149295806885 -103,1.1925314664840698 -104,1.192331314086914 -105,1.1921625137329102 -106,1.191925287246704 -107,1.1916279792785645 -108,1.1912962198257446 -109,1.1909432411193848 -110,1.1905741691589355 -111,1.190212607383728 -112,1.1898696422576904 -113,1.1895976066589355 -114,1.1893746852874756 -115,1.1892026662826538 -116,1.1890982389450073 -117,1.1890430450439453 -118,1.188997745513916 -119,1.1889554262161255 -120,1.1889125108718872 -121,1.1888612508773804 -122,1.188797950744629 -123,1.1887247562408447 -124,1.1886415481567383 -125,1.1885491609573364 -126,1.1884584426879883 -127,1.1883684396743774 -128,1.1882942914962769 -129,1.1882357597351074 -130,1.1881797313690186 -131,1.1881341934204102 -132,1.1880857944488525 -133,1.1880477666854858 -134,1.1880041360855103 -135,1.187958836555481 -136,1.187912940979004 -137,1.1878575086593628 -138,1.1877999305725098 -139,1.1877447366714478 -140,1.1876875162124634 -141,1.1876308917999268 -142,1.1875801086425781 -143,1.187532901763916 -144,1.1874849796295166 -145,1.1874371767044067 -146,1.187390923500061 -147,1.1873440742492676 -148,1.1872955560684204 -149,1.1872434616088867 -150,1.1871882677078247 -151,1.1871308088302612 -152,1.187074899673462 -153,1.1870185136795044 -154,1.1869689226150513 -155,1.1869317293167114 -156,1.1868951320648193 -157,1.1868599653244019 -158,1.1868237257003784 -159,1.18678617477417 -160,1.1867468357086182 -161,1.1867061853408813 -162,1.1866672039031982 -163,1.1866298913955688 -164,1.1865921020507812 -165,1.1865540742874146 -166,1.1865155696868896 -167,1.1864770650863647 -168,1.1864382028579712 -169,1.1863993406295776 -170,1.186360478401184 -171,1.1863218545913696 -172,1.1862837076187134 -173,1.1862456798553467 -174,1.1862075328826904 -175,1.186169147491455 
-176,1.1861306428909302 -177,1.1860920190811157 -178,1.18605375289917 -179,1.186015248298645 -180,1.1859767436981201 -181,1.1859387159347534 -182,1.1859010457992554 -183,1.1858633756637573 -184,1.1858261823654175 -185,1.1857892274856567 -186,1.1857521533966064 -187,1.1857160329818726 -188,1.1856848001480103 -189,1.1856541633605957 -190,1.1856225728988647 -191,1.1855897903442383 -192,1.1855560541152954 -193,1.1855213642120361 -194,1.185486912727356 -195,1.1854547262191772 -196,1.1854133605957031 -197,1.1853668689727783 -198,1.1853210926055908 -199,1.1852911710739136 -200,1.1852525472640991 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.300/training_config.txt deleted file mode 100644 index 7e0e1c3..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.300/training_log.csv deleted file mode 100644 index 6c65114..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.300/training_log.csv +++ /dev/null @@ -1,33 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,31.221738815307617 
-2,3.112255573272705 -3,2.900252103805542 -4,2.610888957977295 -5,2.387361764907837 -6,2.231126308441162 -7,2.1406047344207764 -8,2.1011104583740234 -9,2.069960832595825 -10,2.02765154838562 -11,1.9826548099517822 -12,1.9428565502166748 -13,1.9096951484680176 -14,1.8821051120758057 -15,1.8582180738449097 -16,1.836182713508606 -17,1.8147212266921997 -18,1.793265461921692 -19,1.7710232734680176 -20,1.748215913772583 -21,1.7254948616027832 -22,1.7031992673873901 -23,1.6815978288650513 -24,1.6609699726104736 -25,1.641611099243164 -26,1.6232105493545532 -27,1.6052838563919067 -28,nan -29,nan -30,nan -31,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.400/training_config.txt deleted file mode 100644 index 3505d63..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.400/training_log.csv deleted file mode 100644 index 7bf7484..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.400/training_log.csv +++ /dev/null @@ -1,18 +0,0 @@ -Epoch,Loss -0,22.095075607299805 
-1,288.4366760253906 -2,3.3472957611083984 -3,3.7400496006011963 -4,4.651937484741211 -5,2.741330146789551 -6,2.601053237915039 -7,2.5424985885620117 -8,2.500849485397339 -9,2.4642269611358643 -10,2.431540012359619 -11,2.401414632797241 -12,2.2983970642089844 -13,nan -14,nan -15,nan -16,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.500/training_config.txt deleted file mode 100644 index 16df7dd..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.500/training_log.csv deleted file mode 100644 index b08b8e8..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.500/training_log.csv +++ /dev/null @@ -1,17 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,1085.288330078125 -2,2798.412841796875 -3,2752.143798828125 -4,2073.35791015625 -5,905.2445678710938 -6,119.20957946777344 -7,9.75479507446289 -8,3.494941473007202 -9,2.6711666584014893 -10,2.4652559757232666 -11,2.3907082080841064 -12,nan -13,nan -14,nan -15,nan diff --git 
a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.600/training_config.txt deleted file mode 100644 index 510f2b6..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.600/training_log.csv deleted file mode 100644 index 27489dc..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.600/training_log.csv +++ /dev/null @@ -1,12 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,1866.0623779296875 -2,2856.182373046875 -3,2862.6337890625 -4,2863.06884765625 -5,2863.095947265625 -6,2863.095947265625 -7,2863.095947265625 -8,2863.095947265625 -9,2863.095947265625 -10,2863.095947265625 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.700/training_config.txt deleted file mode 100644 index 50f91ea..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.700/training_log.csv deleted file mode 100644 index 4d745e1..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,2322.204833984375 -2,2862.691162109375 -3,2863.095947265625 -4,2863.095947265625 -5,2863.095947265625 -6,2863.095947265625 -7,2863.095947265625 -8,2863.095947265625 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.800/training_config.txt deleted file mode 100644 index 1bd24ef..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, 
min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.800/training_log.csv deleted file mode 100644 index 957cae4..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,2542.299072265625 -2,2863.095947265625 -3,2863.095947265625 -4,2863.095947265625 -5,2863.095947265625 -6,2863.095947265625 -7,2863.095947265625 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.900/training_config.txt deleted file mode 100644 index 05c2935..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, 
torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.900/training_log.csv deleted file mode 100644 index 92a6af3..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,2700.2294921875 -2,2863.095947265625 -3,2863.095947265625 -4,2863.095947265625 -5,2863.095947265625 -6,2863.095947265625 -7,2863.095947265625 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_1.000/training_config.txt deleted file mode 100644 index 905fd9d..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] 
-[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_1.000/training_log.csv deleted file mode 100644 index bd8d9ae..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,2781.8369140625 -2,2863.095947265625 -3,2863.095947265625 -4,2863.095947265625 -5,2863.095947265625 -6,2863.095947265625 -7,2863.095947265625 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_16.000/training_config.txt deleted file mode 100644 index 74a507c..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 
3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_16.000/training_log.csv deleted file mode 100644 index f91f9e5..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,2876.520751953125 -2,2876.520751953125 -3,2876.520751953125 -4,2876.520751953125 -5,2876.520751953125 -6,2876.520751953125 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_2.000/training_config.txt deleted file mode 100644 index 0343461..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_2.000/training_log.csv 
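At these step sizes the inverse_squared sweep either diverges to NaN (lr 0.4 and 0.5 above; lr 2.0 in the log that follows) or freezes on a constant plateau (2863.0959... for lr 0.6 through 1.0, 2876.5207... for lr 4 through 16). A small triage helper one might use to classify each run from its log — the 'Epoch,Loss' layout is taken from the training_log.csv files in this diff, everything else is a hypothetical sketch:

    import csv
    import math

    def classify_run(log_path, plateau_window=4):
        """Label a sweep run: 'diverged' if the loss ever hits NaN,
        'plateaued' if the last few losses are bit-identical, else 'ok'."""
        with open(log_path) as f:
            losses = [float(row["Loss"]) for row in csv.DictReader(f)]
        if any(math.isnan(x) for x in losses):
            return "diverged"
        if len(set(losses[-plateau_window:])) == 1:
            return "plateaued"
        return "ok"

    # classify_run(".../inverse_squared/lr_0.600/training_log.csv") -> 'plateaued'
    # classify_run(".../inverse_squared/lr_0.500/training_log.csv") -> 'diverged'
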
b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_2.000/training_log.csv deleted file mode 100644 index 7b6d3c4..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,2876.136474609375 -2,863.2739868164062 -3,nan -4,nan -5,nan -6,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_4.000/training_config.txt deleted file mode 100644 index 302f618..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_4.000/training_log.csv deleted file mode 100644 index f91f9e5..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,2876.520751953125 -2,2876.520751953125 -3,2876.520751953125 -4,2876.520751953125 -5,2876.520751953125 -6,2876.520751953125 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_8.000/training_config.txt 
b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_8.000/training_config.txt deleted file mode 100644 index 7a4f5c6..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * t_span + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared/lr_8.000/training_log.csv deleted file mode 100644 index f91f9e5..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,22.095075607299805 -1,2876.520751953125 -2,2876.520751953125 -3,2876.520751953125 -4,2876.520751953125 -5,2876.520751953125 -6,2876.520751953125 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.010/training_config.txt deleted file mode 100644 index 94c86b8..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def 
loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.010/training_log.csv deleted file mode 100644 index 724c8c0..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,213.0167236328125 -2,173.0717010498047 -3,150.0225830078125 -4,123.00654602050781 -5,103.3897705078125 -6,81.95651245117188 -7,70.21539306640625 -8,45.73616027832031 -9,34.03105545043945 -10,32.353031158447266 -11,34.991615295410156 -12,25.09242057800293 -13,31.303335189819336 -14,30.01373291015625 -15,32.027557373046875 -16,29.324974060058594 -17,27.8333797454834 -18,24.257781982421875 -19,21.847837448120117 -20,21.382314682006836 -21,20.826990127563477 -22,21.27461814880371 -23,21.26218032836914 -24,20.41361427307129 -25,19.36992835998535 -26,17.719053268432617 -27,16.560991287231445 -28,15.72287654876709 -29,14.989616394042969 -30,14.795432090759277 -31,14.685379028320312 -32,15.029973030090332 -33,15.16539192199707 -34,14.205863952636719 -35,14.050883293151855 -36,14.129486083984375 -37,14.13491153717041 -38,13.965437889099121 -39,13.445045471191406 -40,13.098957061767578 -41,13.160545349121094 -42,12.912355422973633 -43,12.762398719787598 -44,12.640503883361816 -45,12.537394523620605 -46,12.445642471313477 -47,12.353056907653809 -48,12.246087074279785 -49,12.11122989654541 -50,11.934914588928223 
-51,11.704938888549805 -52,11.428611755371094 -53,11.14273452758789 -54,10.86915397644043 -55,10.614587783813477 -56,10.38335132598877 -57,10.17342472076416 -58,9.978919982910156 -59,9.795684814453125 -60,9.62311840057373 -61,9.464280128479004 -62,9.32486629486084 -63,9.207917213439941 -64,9.102298736572266 -65,8.984081268310547 -66,8.843945503234863 -67,8.418068885803223 -68,8.361529350280762 -69,8.275358200073242 -70,8.160774230957031 -71,8.0152587890625 -72,7.860365867614746 -73,7.642137050628662 -74,7.90536642074585 -75,7.668700218200684 -76,7.281579494476318 -77,8.045768737792969 -78,10.119131088256836 -79,10.436074256896973 -80,11.802755355834961 -81,11.665087699890137 -82,12.999747276306152 -83,18.134878158569336 -84,18.46754264831543 -85,16.81949806213379 -86,12.467874526977539 -87,12.315753936767578 -88,12.329687118530273 -89,13.338238716125488 -90,13.061301231384277 -91,13.015604019165039 -92,13.251863479614258 -93,12.250191688537598 -94,10.525947570800781 -95,9.884092330932617 -96,9.519999504089355 -97,8.902472496032715 -98,9.772724151611328 -99,9.567292213439941 -100,7.697810173034668 -101,7.766295433044434 -102,8.149250984191895 -103,7.011802673339844 -104,7.056440830230713 -105,6.439101696014404 -106,6.24491548538208 -107,6.066121578216553 -108,6.027917385101318 -109,6.345337867736816 -110,6.340590953826904 -111,6.275694370269775 -112,6.178025722503662 -113,6.063338279724121 -114,5.94260311126709 -115,5.823153018951416 -116,5.711262226104736 -117,5.611966133117676 -118,5.525136470794678 -119,5.443105697631836 -120,5.351928234100342 -121,5.237190246582031 -122,5.081452369689941 -123,4.833459854125977 -124,4.837883949279785 -125,4.798029899597168 -126,4.833836555480957 -127,4.891109943389893 -128,4.750506401062012 -129,4.429678440093994 -130,4.25378942489624 -131,3.9005579948425293 -132,3.948626756668091 -133,4.035829544067383 -134,4.100869655609131 -135,4.077837944030762 -136,3.975583791732788 -137,3.8539047241210938 -138,3.739694595336914 -139,3.632659912109375 -140,3.5335540771484375 -141,3.5506064891815186 -142,3.440891981124878 -143,3.4342875480651855 -144,3.4235293865203857 -145,3.3993618488311768 -146,3.361377239227295 -147,3.3133482933044434 -148,3.260606527328491 -149,3.2112271785736084 -150,3.1734912395477295 -151,3.1466569900512695 -152,3.1239125728607178 -153,3.100747585296631 -154,3.0749738216400146 -155,3.044341564178467 -156,3.004425525665283 -157,2.96313738822937 -158,2.9483065605163574 -159,2.9366977214813232 -160,2.9093575477600098 -161,2.865081787109375 -162,2.843977451324463 -163,2.842419147491455 -164,2.824957847595215 -165,2.789283275604248 -166,2.764240026473999 -167,2.7651872634887695 -168,2.733755111694336 -169,2.7224698066711426 -170,2.7218897342681885 -171,2.693995714187622 -172,2.6939947605133057 -173,2.6805553436279297 -174,2.667632818222046 -175,2.6669881343841553 -176,2.6425235271453857 -177,2.6538124084472656 -178,2.630579948425293 -179,2.6363189220428467 -180,2.62045955657959 -181,2.623035430908203 -182,2.6216108798980713 -183,2.6094415187835693 -184,2.622180223464966 -185,2.6053121089935303 -186,2.6048660278320312 -187,2.612370729446411 -188,2.598564863204956 -189,2.5956101417541504 -190,2.6034271717071533 -191,2.599590301513672 -192,2.5886611938476562 -193,2.590484619140625 -194,2.600768566131592 -195,2.585747718811035 -196,2.580857992172241 -197,2.583522081375122 -198,2.5870816707611084 -199,2.5842535495758057 -200,2.575119733810425 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.020/training_config.txt 
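The sweep switches here from inverse_squared to inverse_squared_mirrored. Both weight functions map t in [0, t_max] onto [min_val, 1], just in opposite directions: inverse_squared starts at w(0) = 1 and decays to w(t_max) = min_val, emphasizing the early transient, while the mirrored variant starts at min_val and rises to 1, emphasizing late-time tracking. A quick endpoint check, restating the definitions recorded in the configs:

    import torch

    def inverse_squared(t, t_max, min_val=0.01):
        a = (1 / min_val) ** 0.5 - 1  # = 9 for min_val = 0.01
        return (a / t_max * t + 1) ** -2

    def inverse_squared_mirrored(t, t_max, min_val=0.01):
        a = (1 / min_val) ** 0.5 - 1
        return (a / t_max * (t_max - t) + 1) ** -2

    t = torch.tensor([0.0, 10.0])
    print(inverse_squared(t, t_max=10.0))           # tensor([1.0000, 0.0100])
    print(inverse_squared_mirrored(t, t_max=10.0))  # tensor([0.0100, 1.0000])
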
b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.020/training_config.txt deleted file mode 100644 index f6a414b..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.020/training_log.csv deleted file mode 100644 index 133568d..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,185.5879364013672 -2,110.94385528564453 -3,52.18143081665039 -4,33.220664978027344 -5,33.58713912963867 -6,33.1390266418457 -7,38.21333694458008 -8,38.89930725097656 -9,39.86369705200195 -10,42.71205520629883 -11,43.85108184814453 -12,44.687538146972656 -13,43.32851028442383 -14,40.78075408935547 -15,37.504066467285156 -16,33.49193572998047 -17,31.353397369384766 -18,27.741838455200195 -19,25.196306228637695 -20,23.734025955200195 -21,22.79034423828125 -22,22.80961799621582 -23,21.35194969177246 -24,23.421783447265625 -25,23.187015533447266 -26,21.981245040893555 -27,20.152185440063477 -28,20.340496063232422 -29,18.882814407348633 
-30,20.48117446899414 -31,20.085641860961914 -32,19.34621238708496 -33,18.150182723999023 -34,16.408864974975586 -35,10.48577880859375 -36,10.407658576965332 -37,10.495985984802246 -38,10.353586196899414 -39,12.81568717956543 -40,12.245251655578613 -41,11.301149368286133 -42,10.762737274169922 -43,10.391121864318848 -44,10.640470504760742 -45,10.76949691772461 -46,10.587235450744629 -47,10.092933654785156 -48,9.939040184020996 -49,8.76399040222168 -50,7.895619869232178 -51,7.6070427894592285 -52,7.318936824798584 -53,7.0979838371276855 -54,6.720556735992432 -55,6.11873722076416 -56,5.634496212005615 -57,5.3384504318237305 -58,5.132089614868164 -59,4.751035213470459 -60,4.531846523284912 -61,4.479679107666016 -62,4.526905536651611 -63,4.372461318969727 -64,4.166091442108154 -65,4.1586480140686035 -66,4.175627708435059 -67,4.191760540008545 -68,4.189549922943115 -69,4.149818420410156 -70,3.9193153381347656 -71,3.839151620864868 -72,3.8974502086639404 -73,3.8688693046569824 -74,3.7302258014678955 -75,3.6300408840179443 -76,3.5604889392852783 -77,3.4789786338806152 -78,3.375596523284912 -79,3.261117935180664 -80,3.149874210357666 -81,3.0431973934173584 -82,2.9429829120635986 -83,2.8599464893341064 -84,2.813875436782837 -85,2.834529399871826 -86,2.8210041522979736 -87,2.7604403495788574 -88,2.743260383605957 -89,2.7392780780792236 -90,2.735095262527466 -91,2.724555492401123 -92,2.705213785171509 -93,2.6780436038970947 -94,2.6463003158569336 -95,2.6140878200531006 -96,2.5851690769195557 -97,2.562614679336548 -98,2.5478806495666504 -99,2.5358939170837402 -100,2.5170345306396484 -101,2.4990925788879395 -102,2.4889028072357178 -103,2.483924627304077 -104,2.4804739952087402 -105,2.4762661457061768 -106,2.4704036712646484 -107,2.4630956649780273 -108,2.455345869064331 -109,2.448641061782837 -110,2.4437906742095947 -111,2.438871145248413 -112,2.430758237838745 -113,2.4213223457336426 -114,2.4132089614868164 -115,2.4062626361846924 -116,2.3995630741119385 -117,2.392629384994507 -118,2.3855206966400146 -119,2.3786349296569824 -120,2.3728976249694824 -121,2.3687257766723633 -122,2.365020513534546 -123,2.3602569103240967 -124,2.354160785675049 -125,2.3476853370666504 -126,2.34173321723938 -127,2.336421251296997 -128,2.3313934803009033 -129,2.326321601867676 -130,2.3210880756378174 -131,2.315793752670288 -132,2.310715436935425 -133,2.305893659591675 -134,2.3016810417175293 -135,2.2977352142333984 -136,2.2935516834259033 -137,2.2889411449432373 -138,2.284630060195923 -139,2.280787706375122 -140,2.276839256286621 -141,2.27229905128479 -142,2.267137050628662 -143,2.26218843460083 -144,2.2581512928009033 -145,2.2538509368896484 -146,2.248718023300171 -147,2.2439827919006348 -148,2.2397677898406982 -149,2.235356569290161 -150,2.2305853366851807 -151,2.225888252258301 -152,2.2214558124542236 -153,2.2164738178253174 -154,2.2111101150512695 -155,2.2059524059295654 -156,2.2006189823150635 -157,2.194944381713867 -158,2.1893928050994873 -159,2.183906316757202 -160,2.178032636642456 -161,2.1721384525299072 -162,2.166303873062134 -163,2.1603260040283203 -164,2.1548266410827637 -165,2.149712085723877 -166,2.145151376724243 -167,2.141054630279541 -168,2.1369781494140625 -169,2.1333365440368652 -170,2.1296472549438477 -171,2.126105785369873 -172,2.1227829456329346 -173,2.119652032852173 -174,2.116579294204712 -175,2.1133174896240234 -176,2.109867572784424 -177,2.106128692626953 -178,2.1022095680236816 -179,2.0981860160827637 -180,2.094148874282837 -181,2.090148687362671 -182,2.086181640625 -183,2.0822527408599854 
-184,2.0783238410949707 -185,2.074477195739746 -186,2.0707194805145264 -187,2.0670742988586426 -188,2.0635464191436768 -189,2.0601272583007812 -190,2.0568039417266846 -191,2.0535073280334473 -192,2.050243616104126 -193,2.0470166206359863 -194,2.0437967777252197 -195,2.0405521392822266 -196,2.037259340286255 -197,2.0339322090148926 -198,2.0305745601654053 -199,2.0271778106689453 -200,2.0237209796905518 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.040/training_config.txt deleted file mode 100644 index 779c42e..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.040/training_log.csv deleted file mode 100644 index 5158932..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,138.1700439453125 -2,31.85932731628418 -3,28.14820671081543 -4,24.21721649169922 -5,20.4753360748291 -6,16.758150100708008 
-7,9.709321022033691 -8,7.395853042602539 -9,6.698759078979492 -10,6.67933464050293 -11,7.142555236816406 -12,6.1398773193359375 -13,6.587876319885254 -14,6.719339370727539 -15,4.876566410064697 -16,5.251307010650635 -17,4.918770790100098 -18,4.494187355041504 -19,3.526106834411621 -20,3.1759722232818604 -21,3.440809965133667 -22,3.5106289386749268 -23,3.484010934829712 -24,3.404935359954834 -25,3.4303340911865234 -26,3.3487112522125244 -27,3.2902581691741943 -28,3.2515475749969482 -29,3.225813150405884 -30,3.162708044052124 -31,3.4411721229553223 -32,3.386814594268799 -33,3.373488187789917 -34,3.362851142883301 -35,3.3541483879089355 -36,3.3471484184265137 -37,3.3412067890167236 -38,3.3357131481170654 -39,3.3300418853759766 -40,3.324134588241577 -41,3.31793212890625 -42,3.3116118907928467 -43,3.305363893508911 -44,3.2993693351745605 -45,3.293684959411621 -46,3.2882986068725586 -47,3.2830617427825928 -48,3.277782917022705 -49,3.2722949981689453 -50,3.266468048095703 -51,3.2603650093078613 -52,3.2541863918304443 -53,3.2481369972229004 -54,3.242386817932129 -55,3.2370195388793945 -56,3.2320446968078613 -57,3.227357864379883 -58,3.22292160987854 -59,3.2187435626983643 -60,3.2148611545562744 -61,3.2112791538238525 -62,3.207932233810425 -63,3.2046847343444824 -64,3.201364278793335 -65,3.197843074798584 -66,3.1940646171569824 -67,3.1900649070739746 -68,3.1859538555145264 -69,3.181874990463257 -70,3.1779630184173584 -71,3.174309015274048 -72,3.170938491821289 -73,3.1678504943847656 -74,3.1650071144104004 -75,3.1623761653900146 -76,3.159909725189209 -77,3.157557249069214 -78,3.155250310897827 -79,3.152914524078369 -80,3.1504974365234375 -81,3.1479804515838623 -82,3.1453778743743896 -83,3.142716884613037 -84,3.1400299072265625 -85,3.1373348236083984 -86,3.13464093208313 -87,3.1319425106048584 -88,3.1292226314544678 -89,3.126458168029785 -90,3.1236391067504883 -91,3.120778799057007 -92,3.1178979873657227 -93,3.1150238513946533 -94,3.112177848815918 -95,3.1093599796295166 -96,3.1065497398376465 -97,3.1037418842315674 -98,3.100931406021118 -99,3.098106861114502 -100,3.0952672958374023 -101,3.092404842376709 -102,3.0895092487335205 -103,3.0865695476531982 -104,3.083583116531372 -105,3.080561876296997 -106,3.0775184631347656 -107,3.07444429397583 -108,3.0713303089141846 -109,3.068176507949829 -110,3.0649847984313965 -111,3.06176495552063 -112,3.0585269927978516 -113,3.0552680492401123 -114,3.0519893169403076 -115,3.048696994781494 -116,3.045395612716675 -117,3.0420820713043213 -118,3.0387637615203857 -119,3.035433530807495 -120,3.0320873260498047 -121,3.0287222862243652 -122,3.025327682495117 -123,3.0218942165374756 -124,3.0184242725372314 -125,3.0149147510528564 -126,3.011353015899658 -127,3.007737874984741 -128,3.004070281982422 -129,3.00034236907959 -130,2.9965574741363525 -131,2.992706060409546 -132,2.9887869358062744 -133,2.984797239303589 -134,2.980738878250122 -135,2.9766225814819336 -136,2.972454786300659 -137,2.9682326316833496 -138,2.9639694690704346 -139,2.9596710205078125 -140,2.9553678035736084 -141,2.951075792312622 -142,2.9467976093292236 -143,2.9425160884857178 -144,2.938220262527466 -145,2.933900833129883 -146,2.9295520782470703 -147,2.925185441970825 -148,2.9207839965820312 -149,2.9163289070129395 -150,2.911823272705078 -151,2.907238245010376 -152,2.902548313140869 -153,2.8977465629577637 -154,2.8928515911102295 -155,2.8878471851348877 -156,2.882735013961792 -157,2.8775203227996826 -158,2.8722198009490967 -159,2.866853952407837 -160,2.8613932132720947 -161,2.855839729309082 
-162,2.8501832485198975 -163,2.844379425048828 -164,2.8381969928741455 -165,2.8305747509002686 -166,2.8149166107177734 -167,2.6723744869232178 -168,2.2528107166290283 -169,3.246459484100342 -170,4.358674049377441 -171,5.03744649887085 -172,25.614858627319336 -173,6345.89453125 -174,24.77815818786621 -175,5.007219314575195 -176,3.6331682205200195 -177,2.970029592514038 -178,5.487204074859619 -179,3.2445483207702637 -180,4.557656288146973 -181,3.5707294940948486 -182,3.593806743621826 -183,3.56911039352417 -184,3.503084897994995 -185,3.2923338413238525 -186,3.1865689754486084 -187,3.986565351486206 -188,3.832707405090332 -189,3.292820692062378 -190,3.36645245552063 -191,3.8097386360168457 -192,3.827634334564209 -193,3.8276097774505615 -194,3.819244623184204 -195,3.804995059967041 -196,3.7860336303710938 -197,3.7631101608276367 -198,3.7371978759765625 -199,3.7090134620666504 -200,3.678191661834717 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.050/training_config.txt deleted file mode 100644 index 44c451e..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git 
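Each lr_* directory pairs a training_config.txt with a training_log.csv, so picking the best rate for a weighting scheme reduces to comparing the best finite loss each run reaches. A sketch of that comparison — the glob pattern mirrors the directory layout in this diff, but the helper itself is hypothetical (per the lr_0.080 log later in the diff, that run ends near 0.046 and would win among the mirrored runs shown):

    import csv
    import glob
    import math
    import os

    def best_loss(log_path):
        # Best (minimum) finite loss reached in one run's log.
        with open(log_path) as f:
            losses = [float(row["Loss"]) for row in csv.DictReader(f)]
        return min((x for x in losses if not math.isnan(x)), default=math.inf)

    sweep = "training/time_weighting_learning_rate_sweep/inverse_squared_mirrored"
    results = {
        os.path.basename(os.path.dirname(p)): best_loss(p)
        for p in glob.glob(f"{sweep}/lr_*/training_log.csv")
    }
    best = min(results, key=results.get)
    print(best, results[best])
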
a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.050/training_log.csv deleted file mode 100644 index c672090..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,120.92838287353516 -2,22.39174461364746 -3,27.287761688232422 -4,22.73724365234375 -5,14.580426216125488 -6,10.159234046936035 -7,8.784314155578613 -8,8.181129455566406 -9,6.884852886199951 -10,6.235615253448486 -11,5.695441246032715 -12,5.62746000289917 -13,5.0358734130859375 -14,5.827089309692383 -15,3.9459495544433594 -16,3.9459922313690186 -17,5.075603485107422 -18,6.25885534286499 -19,5.292871475219727 -20,4.323019027709961 -21,3.0346221923828125 -22,2.2557456493377686 -23,1.760027289390564 -24,1.5062123537063599 -25,1.55298912525177 -26,1.2757114171981812 -27,1.2378697395324707 -28,1.3850092887878418 -29,1.3342832326889038 -30,1.3090388774871826 -31,1.3016009330749512 -32,1.317691445350647 -33,1.3582819700241089 -34,1.2281241416931152 -35,0.9942561984062195 -36,0.7731239199638367 -37,0.7544995546340942 -38,0.7478087544441223 -39,0.7386417388916016 -40,0.7281888127326965 -41,0.7168199419975281 -42,0.7051247954368591 -43,0.6936726570129395 -44,0.6823412179946899 -45,0.6708379983901978 -46,0.6592437624931335 -47,0.647618293762207 -48,0.6357808709144592 -49,0.6231976747512817 -50,0.6089606285095215 -51,0.5916551947593689 -52,0.5694841146469116 -53,0.542880654335022 -54,0.5217435359954834 -55,0.5201078653335571 -56,0.522031843662262 -57,0.5011444687843323 -58,0.519596517086029 -59,0.5251798629760742 -60,0.5284807085990906 -61,0.5304818749427795 -62,0.5317606329917908 -63,0.532404363155365 -64,0.5320563912391663 -65,0.5292194485664368 -66,0.518706202507019 -67,0.4943285584449768 -68,0.4768742024898529 -69,0.4501584768295288 -70,0.4244542717933655 -71,0.43090102076530457 -72,0.43351930379867554 -73,0.4342056214809418 -74,0.4340212643146515 -75,0.43312233686447144 -76,0.4315855801105499 -77,0.429492712020874 -78,0.4268731474876404 -79,0.42374685406684875 -80,0.42020076513290405 -81,0.4163409471511841 -82,0.41230714321136475 -83,0.40830859541893005 -84,0.4045688509941101 -85,0.401185005903244 -86,0.3980962336063385 -87,0.39309224486351013 -88,0.40395304560661316 -89,0.38696715235710144 -90,0.3878851532936096 -91,0.3859594166278839 -92,0.3823562562465668 -93,0.37577977776527405 -94,0.38943251967430115 -95,0.3772876560688019 -96,0.37751448154449463 -97,0.3765529692173004 -98,0.3752483129501343 -99,0.37363654375076294 -100,0.3716139793395996 -101,0.36911216378211975 -102,0.3660910427570343 -103,0.3625675141811371 -104,0.35852813720703125 -105,0.35393035411834717 -106,0.3487720787525177 -107,0.3430223762989044 -108,0.3370775580406189 -109,0.33318185806274414 -110,0.3340010941028595 -111,0.33455827832221985 -112,0.32834118604660034 -113,0.31966930627822876 -114,0.31383630633354187 -115,0.3097049295902252 -116,0.30617639422416687 -117,0.3040423095226288 -118,0.3043895959854126 -119,0.3059890568256378 -120,0.30722564458847046 -121,0.3082719147205353 -122,0.30887576937675476 -123,0.30691689252853394 -124,0.30536022782325745 -125,0.30485647916793823 -126,0.3041212558746338 -127,0.3024110794067383 -128,0.2997046113014221 -129,0.29695916175842285 -130,0.29551663994789124 -131,0.29638415575027466 -132,0.2973634600639343 -133,0.2973629832267761 -134,0.2977921664714813 -135,0.29761451482772827 
-136,0.29673734307289124 -137,0.29541435837745667 -138,0.2940469980239868 -139,0.29324445128440857 -140,0.2929973304271698 -141,0.29232192039489746 -142,0.29214680194854736 -143,0.292157381772995 -144,0.2919422686100006 -145,0.2914758622646332 -146,0.2910228967666626 -147,0.2905917167663574 -148,0.28981077671051025 -149,0.2891702950000763 -150,0.2886946499347687 -151,0.2882445454597473 -152,0.28781723976135254 -153,0.287462443113327 -154,0.28716254234313965 -155,0.2867050766944885 -156,0.2860771715641022 -157,0.28544196486473083 -158,0.2848013937473297 -159,0.284140408039093 -160,0.2835162580013275 -161,0.2829793095588684 -162,0.2823690176010132 -163,0.2817227244377136 -164,0.28106969594955444 -165,0.2803134620189667 -166,0.27952703833580017 -167,0.27874308824539185 -168,0.2778869867324829 -169,0.27707210183143616 -170,0.2762000262737274 -171,0.27533966302871704 -172,0.27445441484451294 -173,0.27356722950935364 -174,0.27266809344291687 -175,0.271776020526886 -176,0.2708909213542938 -177,0.27005627751350403 -178,0.2692279517650604 -179,0.26846352219581604 -180,0.2677040696144104 -181,0.26695963740348816 -182,0.2662481665611267 -183,0.2655040919780731 -184,0.26474490761756897 -185,0.26401400566101074 -186,0.26325148344039917 -187,0.26242271065711975 -188,0.26160767674446106 -189,0.26072874665260315 -190,0.25966209173202515 -191,0.25852715969085693 -192,0.2572006583213806 -193,0.2554534375667572 -194,0.2534649670124054 -195,0.2513061761856079 -196,0.24914215505123138 -197,0.24807581305503845 -198,0.24766942858695984 -199,0.24624469876289368 -200,0.24524915218353271 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.080/training_config.txt deleted file mode 100644 index 089c9ea..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] 
-[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.080/training_log.csv deleted file mode 100644 index 73d4ff0..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,61.52596664428711 -2,22.764110565185547 -3,6.703132152557373 -4,2.589465856552124 -5,2.7013466358184814 -6,1.668764591217041 -7,2.3199400901794434 -8,2.3782291412353516 -9,2.335559129714966 -10,2.3052256107330322 -11,2.269305944442749 -12,2.2260138988494873 -13,2.1988980770111084 -14,2.172624349594116 -15,2.1475281715393066 -16,2.1193692684173584 -17,2.0905075073242188 -18,2.0625405311584473 -19,2.036118507385254 -20,2.011460542678833 -21,1.9876267910003662 -22,1.9639360904693604 -23,1.9402403831481934 -24,1.9160587787628174 -25,1.8909785747528076 -26,1.8648842573165894 -27,1.837504267692566 -28,1.8087139129638672 -29,1.7783236503601074 -30,1.7461475133895874 -31,1.712072491645813 -32,1.67542564868927 -33,1.6361050605773926 -34,1.593774676322937 -35,1.5487868785858154 -36,1.5004470348358154 -37,1.447962760925293 -38,1.3916804790496826 -39,1.3299998044967651 -40,1.2596863508224487 -41,1.1799845695495605 -42,1.095406413078308 -43,1.0195168256759644 -44,0.9576516151428223 -45,0.9084706902503967 -46,0.8657496571540833 -47,0.8277012705802917 -48,0.793319582939148 -49,0.762039840221405 -50,0.733460009098053 -51,0.707076907157898 -52,0.6825439929962158 -53,0.6596240997314453 -54,0.6380472779273987 -55,0.6175651550292969 -56,0.5979951620101929 -57,0.5792019963264465 -58,0.5610573291778564 -59,0.5434157848358154 -60,0.5261768698692322 -61,0.509189784526825 -62,0.4923275113105774 -63,0.4754403531551361 -64,0.4581710696220398 -65,0.43979302048683167 -66,0.41866254806518555 -67,0.39019814133644104 -68,0.3490469455718994 -69,0.29835397005081177 -70,0.25751596689224243 -71,0.23070427775382996 -72,0.21086949110031128 -73,0.19455209374427795 -74,0.18039603531360626 -75,0.16784432530403137 -76,0.15677282214164734 -77,0.14689041674137115 -78,0.13793078064918518 -79,0.1298980861902237 -80,0.12278736382722855 -81,0.11656759679317474 -82,0.11124038696289062 -83,0.10679837316274643 -84,0.10324519127607346 -85,0.10043329745531082 -86,0.09809752553701401 -87,0.0959826186299324 -88,0.09385544806718826 -89,0.09153390675783157 -90,0.08889865130186081 -91,0.08592947572469711 -92,0.08264906704425812 -93,0.07911546528339386 -94,0.07541044801473618 -95,0.07171531766653061 -96,0.06806320697069168 -97,0.06452292203903198 -98,0.0611555241048336 -99,0.058003395795822144 -100,0.05516909062862396 -101,0.052831411361694336 -102,0.051165495067834854 -103,0.050227317959070206 -104,0.049945347011089325 -105,0.050152022391557693 -106,0.050659146159887314 -107,0.05129791423678398 
-108,0.05193229392170906 -109,0.052480872720479965 -110,0.05290624499320984 -111,0.05318722128868103 -112,0.05331982299685478 -113,0.05331094190478325 -114,0.05317319929599762 -115,0.05292253941297531 -116,0.05257726088166237 -117,0.052157919853925705 -118,0.05168795585632324 -119,0.05119172856211662 -120,0.05069538578391075 -121,0.050220075994729996 -122,0.04978525638580322 -123,0.04940890893340111 -124,0.04910255968570709 -125,0.04887392744421959 -126,0.04872320964932442 -127,0.0486423559486866 -128,0.04861881211400032 -129,0.04863647371530533 -130,0.04867773503065109 -131,0.04872583970427513 -132,0.04876655712723732 -133,0.04878932237625122 -134,0.048788223415613174 -135,0.04876118525862694 -136,0.04870950058102608 -137,0.048636894673109055 -138,0.048548951745033264 -139,0.048451900482177734 -140,0.04835200682282448 -141,0.04825468361377716 -142,0.048164427280426025 -143,0.048083946108818054 -144,0.04801461473107338 -145,0.04795655980706215 -146,0.0479087308049202 -147,0.04786929115653038 -148,0.047836121171712875 -149,0.04780695214867592 -150,0.047779619693756104 -151,0.04775227978825569 -152,0.047723520547151566 -153,0.0476924367249012 -154,0.04765857011079788 -155,0.04762193188071251 -156,0.04758276790380478 -157,0.04754162207245827 -158,0.04749913513660431 -159,0.04745602235198021 -160,0.047412898391485214 -161,0.04737038537859917 -162,0.047328781336545944 -163,0.047288428992033005 -164,0.047249387949705124 -165,0.04721160978078842 -166,0.04717499390244484 -167,0.04713929817080498 -168,0.047104302793741226 -169,0.0470697395503521 -170,0.04703535512089729 -171,0.04700098931789398 -172,0.046966493129730225 -173,0.04693174734711647 -174,0.046896688640117645 -175,0.04686138406395912 -176,0.046825870871543884 -177,0.046790190041065216 -178,0.04675442725419998 -179,0.046718668192625046 -180,0.04668298736214638 -181,0.04664740711450577 -182,0.04661191254854202 -183,0.0465766042470932 -184,0.046541422605514526 -185,0.04650640860199928 -186,0.04647144675254822 -187,0.04643651098012924 -188,0.046401601284742355 -189,0.046366628259420395 -190,0.046331580728292465 -191,0.046296458691358566 -192,0.04626122862100601 -193,0.04622587561607361 -194,0.04619041830301285 -195,0.04615488648414612 -196,0.04611924663186073 -197,0.04608352109789848 -198,0.04604770243167877 -199,0.04601183906197548 -200,0.045975908637046814 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.100/training_config.txt deleted file mode 100644 index d2d913f..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], 
t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.100/training_log.csv deleted file mode 100644 index a36dad3..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,56.395870208740234 -2,618.2738037109375 -3,289.96063232421875 -4,126.99728393554688 -5,24.187952041625977 -6,11.72338581085205 -7,6.406065940856934 -8,3.7768335342407227 -9,2.5021162033081055 -10,1.9397401809692383 -11,1.5421391725540161 -12,1.6344754695892334 -13,1.1872730255126953 -14,0.9844869375228882 -15,0.7966924905776978 -16,0.8278066515922546 -17,1.0199671983718872 -18,0.8955531716346741 -19,0.8492088317871094 -20,0.8644806742668152 -21,0.9000710248947144 -22,0.9166257381439209 -23,0.9222947359085083 -24,0.9210724234580994 -25,0.9098783731460571 -26,0.8890942931175232 -27,0.8625909686088562 -28,0.8355246782302856 -29,0.7885390520095825 -30,0.7450405955314636 -31,0.7499000430107117 -32,0.7375315427780151 -33,0.720058798789978 -34,0.7043435573577881 -35,0.6906453967094421 -36,0.6784504055976868 -37,0.6673413515090942 -38,0.6571304798126221 -39,0.6476917862892151 -40,0.6389560699462891 -41,0.6308486461639404 -42,0.6232163310050964 -43,0.6158862113952637 -44,0.6087031960487366 -45,0.6014907956123352 -46,0.5941907167434692 -47,0.5868128538131714 -48,0.5793898105621338 -49,0.5719520449638367 -50,0.5645440816879272 -51,0.5572339296340942 -52,0.5501165390014648 -53,0.5432504415512085 -54,0.5367034673690796 -55,0.530501663684845 -56,0.5246551632881165 -57,0.5191664695739746 -58,0.5139918327331543 -59,0.5090926885604858 -60,0.5044441223144531 -61,0.5000134706497192 -62,0.4957675337791443 -63,0.4916827976703644 -64,0.48773953318595886 -65,0.48392945528030396 -66,0.4802376627922058 -67,0.47665393352508545 -68,0.4731660485267639 -69,0.46976372599601746 -70,0.4664333760738373 -71,0.4631723165512085 -72,0.4599761664867401 -73,0.4568444490432739 -74,0.45378366112709045 -75,0.4507859945297241 -76,0.4478500485420227 
-77,0.44497567415237427 -78,0.44216015934944153 -79,0.43939951062202454 -80,0.4366942346096039 -81,0.4340400695800781 -82,0.43140697479248047 -83,0.428818941116333 -84,0.4262886941432953 -85,0.42381685972213745 -86,0.4214092791080475 -87,0.4190581142902374 -88,0.4167529344558716 -89,0.4144871234893799 -90,0.4122602939605713 -91,0.41006991267204285 -92,0.4079093933105469 -93,0.4057753384113312 -94,0.4036651849746704 -95,0.4015755355358124 -96,0.3995037376880646 -97,0.39744722843170166 -98,0.3954141139984131 -99,0.3934043049812317 -100,0.3914113938808441 -101,0.389431893825531 -102,0.38746178150177 -103,0.38549959659576416 -104,0.3835453689098358 -105,0.38159340620040894 -106,0.379637748003006 -107,0.3776780068874359 -108,0.3757116496562958 -109,0.3737354874610901 -110,0.37174269556999207 -111,0.3697328567504883 -112,0.36769771575927734 -113,0.36563968658447266 -114,0.36355987191200256 -115,0.3614700734615326 -116,0.35936659574508667 -117,0.35724884271621704 -118,0.35512295365333557 -119,0.35296934843063354 -120,0.3507831394672394 -121,0.3485584259033203 -122,0.3462953567504883 -123,0.3440031111240387 -124,0.34166666865348816 -125,0.33924105763435364 -126,0.3367442190647125 -127,0.3341634273529053 -128,0.33148694038391113 -129,0.32866576313972473 -130,0.32571518421173096 -131,0.32261988520622253 -132,0.3190818130970001 -133,0.31434208154678345 -134,0.30607515573501587 -135,0.29300355911254883 -136,0.2648164629936218 -137,0.2715768814086914 -138,0.2722551226615906 -139,0.2700127959251404 -140,0.26610061526298523 -141,0.2595905065536499 -142,0.2557962238788605 -143,0.25382959842681885 -144,0.2531299293041229 -145,0.25372645258903503 -146,0.2517792582511902 -147,0.24796774983406067 -148,0.24508105218410492 -149,0.24711741507053375 -150,0.24201881885528564 -151,0.24218659102916718 -152,0.2412446141242981 -153,0.2394208163022995 -154,0.2370244860649109 -155,0.23489928245544434 -156,0.2345024198293686 -157,0.23282983899116516 -158,0.23059046268463135 -159,0.22930100560188293 -160,0.22782306373119354 -161,0.22589564323425293 -162,0.2230158895254135 -163,0.21685904264450073 -164,0.20693515241146088 -165,0.20639409124851227 -166,0.20755653083324432 -167,0.2074602246284485 -168,0.20634497702121735 -169,0.2044283151626587 -170,0.20185451209545135 -171,0.19874398410320282 -172,0.1952340453863144 -173,0.1920078843832016 -174,0.19039419293403625 -175,0.19036124646663666 -176,0.19010977447032928 -177,0.18838591873645782 -178,0.1857794225215912 -179,0.1835392564535141 -180,0.18218757212162018 -181,0.18131963908672333 -182,0.1805165857076645 -183,0.17958314716815948 -184,0.17847250401973724 -185,0.17722013592720032 -186,0.17590565979480743 -187,0.17462158203125 -188,0.17344598472118378 -189,0.17242076992988586 -190,0.17153491079807281 -191,0.1707308292388916 -192,0.16993440687656403 -193,0.16908974945545197 -194,0.16818071901798248 -195,0.16722939908504486 -196,0.1662745475769043 -197,0.16534730792045593 -198,0.16446372866630554 -199,0.16362150013446808 -200,0.16280865669250488 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.125/training_config.txt deleted file mode 100644 index ab7e95c..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 
-Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.125/training_log.csv deleted file mode 100644 index 8321a52..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,49.774288177490234 -2,9.73676586151123 -3,7.006316661834717 -4,1.283062219619751 -5,2.0849897861480713 -6,1.9444055557250977 -7,1.1815639734268188 -8,0.9941245317459106 -9,0.9141437411308289 -10,0.8504834771156311 -11,0.7917806506156921 -12,0.7368951439857483 -13,0.6829706430435181 -14,0.6297546625137329 -15,0.5801012516021729 -16,0.5358855128288269 -17,0.49699732661247253 -18,0.46299561858177185 -19,0.43339231610298157 -20,0.4070543348789215 -21,0.38295814394950867 -22,0.36117106676101685 -23,0.3413770794868469 -24,0.32320931553840637 -25,0.30645251274108887 -26,0.29101914167404175 -27,0.27649593353271484 -28,0.2628023028373718 -29,0.24988096952438354 -30,0.237640380859375 -31,0.22597835958003998 -32,0.21495485305786133 -33,0.20460884273052216 -34,0.19476033747196198 -35,0.18542975187301636 -36,0.1765775829553604 -37,0.1680707186460495 -38,0.15994460880756378 -39,0.15218086540699005 -40,0.1448083221912384 -41,0.1377766728401184 -42,0.13104979693889618 -43,0.1246229037642479 -44,0.11852587759494781 -45,0.11274787783622742 -46,0.1072716936469078 
-47,0.10206127911806107 -48,0.09714000672101974 -49,0.09248779714107513 -50,0.0881735235452652 -51,0.0842360183596611 -52,0.08065453916788101 -53,0.07738180458545685 -54,0.07439485192298889 -55,0.07167768478393555 -56,0.06916040182113647 -57,0.06680484861135483 -58,0.06467273086309433 -59,0.06276915222406387 -60,0.06107901781797409 -61,0.05958772823214531 -62,0.05829408019781113 -63,0.05716126412153244 -64,0.056164707988500595 -65,0.0552859790623188 -66,0.05450969934463501 -67,0.05381269007921219 -68,0.05317693576216698 -69,0.05258812755346298 -70,0.05203445255756378 -71,0.05150655284523964 -72,0.05099670961499214 -73,0.050499435514211655 -74,0.05001110956072807 -75,0.049529481679201126 -76,0.04905384033918381 -77,0.04858429357409477 -78,0.04812213033437729 -79,0.04767318442463875 -80,0.0472370944917202 -81,0.04681728407740593 -82,0.04641784727573395 -83,0.046042267233133316 -84,0.04569559916853905 -85,0.04536781832575798 -86,0.04505974054336548 -87,0.044772062450647354 -88,0.044505324214696884 -89,0.04425979405641556 -90,0.0440356619656086 -91,0.04383230581879616 -92,0.04364880919456482 -93,0.04348520562052727 -94,0.04334035888314247 -95,0.043212924152612686 -96,0.04310173913836479 -97,0.04300529137253761 -98,0.042922236025333405 -99,0.04285125434398651 -100,0.042790964245796204 -101,0.042739998549222946 -102,0.042697127908468246 -103,0.042661186307668686 -104,0.04263138025999069 -105,0.042606767266988754 -106,0.04258563369512558 -107,0.042567119002342224 -108,0.042550649493932724 -109,0.042535748332738876 -110,0.04252220690250397 -111,0.042509447783231735 -112,0.04249722138047218 -113,0.04248535633087158 -114,0.042473722249269485 -115,0.04246227443218231 -116,0.042450979351997375 -117,0.04243980720639229 -118,0.04242880642414093 -119,0.042417947202920914 -120,0.04240725561976433 -121,0.04239674657583237 -122,0.04238643869757652 -123,0.042376331984996796 -124,0.04236641526222229 -125,0.0423566959798336 -126,0.04234714061021805 -127,0.042337749153375626 -128,0.04232848435640335 -129,0.04231932386755943 -130,0.042310260236263275 -131,0.04230126366019249 -132,0.0422922819852829 -133,0.042283300310373306 -134,0.04227432608604431 -135,0.04226529225707054 -136,0.04225621744990349 -137,0.04224708303809166 -138,0.042237866669893265 -139,0.04222859814763069 -140,0.042219262570142746 -141,0.04220985993742943 -142,0.04220038279891014 -143,0.04219084978103638 -144,0.04218127951025963 -145,0.0421716645359993 -146,0.04216201975941658 -147,0.04215236008167267 -148,0.04214266687631607 -149,0.042132969945669174 -150,0.04212327301502228 -151,0.04211357980966568 -152,0.04210389032959938 -153,0.04209420457482338 -154,0.04208454117178917 -155,0.04207489639520645 -156,0.04206528142094612 -157,0.04205568507313728 -158,0.042046114802360535 -159,0.04203655570745468 -160,0.042027030140161514 -161,0.04201750457286835 -162,0.042007993906736374 -163,0.04199850186705589 -164,0.04198900982737541 -165,0.04197952151298523 -166,0.04197005555033684 -167,0.04196058586239815 -168,0.04195113852620125 -169,0.041941672563552856 -170,0.04193222150206566 -171,0.041922759264707565 -172,0.041913311928510666 -173,0.041903868317604065 -174,0.04189441353082657 -175,0.04188496991991997 -176,0.04187552258372307 -177,0.04186607897281647 -178,0.041856635361909866 -179,0.041847191751003265 -180,0.04183774068951607 -181,0.04182827100157738 -182,0.04181879758834839 -183,0.0418093167245388 -184,0.041799817234277725 -185,0.04179031774401665 -186,0.04178079962730408 -187,0.04177127406001091 -188,0.04176173731684685 -189,0.04175218567252159 
-190,0.041742630302906036 -191,0.041733063757419586 -192,0.041723500937223434 -193,0.04171392694115639 -194,0.04170435667037964 -195,0.041694775223731995 -196,0.04168517887592316 -197,0.04167557135224342 -198,0.041665952652692795 -199,0.04165632650256157 -200,0.04164668917655945 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.160/training_config.txt deleted file mode 100644 index 9d10882..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.160/training_log.csv deleted file mode 100644 index be245a8..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,50.97512435913086 -2,4.150564670562744 -3,1.573170781135559 -4,2.2186548709869385 -5,2.1028523445129395 -6,1.9942821264266968 -7,1.9899870157241821 -8,1.9943923950195312 -9,1.9612926244735718 -10,1.9116706848144531 -11,1.8563153743743896 -12,1.797938585281372 
-13,1.7352420091629028 -14,1.6643284559249878 -15,1.5817813873291016 -16,1.4766803979873657 -17,1.3137236833572388 -18,1.2095017433166504 -19,1.1236647367477417 -20,1.0247161388397217 -21,1.149553656578064 -22,1.1198699474334717 -23,1.0585869550704956 -24,1.0007152557373047 -25,0.900468111038208 -26,0.8550513386726379 -27,0.8794893622398376 -28,0.8710265159606934 -29,0.8654524087905884 -30,0.8582326173782349 -31,0.8211129307746887 -32,0.7482754588127136 -33,0.8228527903556824 -34,0.8154683113098145 -35,0.7088714241981506 -36,0.7468860745429993 -37,0.7380490303039551 -38,0.7006487250328064 -39,0.6515026688575745 -40,0.5682522058486938 -41,0.4629102349281311 -42,0.36636480689048767 -43,0.31873348355293274 -44,0.31545037031173706 -45,0.2655588388442993 -46,0.24397696554660797 -47,0.23559574782848358 -48,0.21647781133651733 -49,0.19784735143184662 -50,0.18477991223335266 -51,0.17297285795211792 -52,0.16139435768127441 -53,0.1506444811820984 -54,0.14073161780834198 -55,0.1314309537410736 -56,0.12258841097354889 -57,0.11422092467546463 -58,0.10626064240932465 -59,0.09879821538925171 -60,0.09180519729852676 -61,0.08471552282571793 -62,0.07804499566555023 -63,0.07177918404340744 -64,0.06641246378421783 -65,0.06171282008290291 -66,0.05764498561620712 -67,0.054362304508686066 -68,0.051680441945791245 -69,0.049550797790288925 -70,0.04802807793021202 -71,0.04702502489089966 -72,0.04642371088266373 -73,0.046127237379550934 -74,0.0460142083466053 -75,0.046024080365896225 -76,0.04609648510813713 -77,0.046181533485651016 -78,0.04624361917376518 -79,0.04625821113586426 -80,0.04621148854494095 -81,0.046100374311208725 -82,0.0459272526204586 -83,0.04570017382502556 -84,0.04542960971593857 -85,0.0451231449842453 -86,0.04478643834590912 -87,0.04442102462053299 -88,0.04404764622449875 -89,0.04367741197347641 -90,0.04331802576780319 -91,0.042976364493370056 -92,0.04265676438808441 -93,0.042361654341220856 -94,0.04209185019135475 -95,0.041846904903650284 -96,0.04162599891424179 -97,0.04142603650689125 -98,0.04124560207128525 -99,0.041082125157117844 -100,0.040932487696409225 -101,0.04079392924904823 -102,0.04066383093595505 -103,0.040539879351854324 -104,0.04041999951004982 -105,0.04030259698629379 -106,0.040186528116464615 -107,0.0400710254907608 -108,0.03995569795370102 -109,0.039840541779994965 -110,0.03972584754228592 -111,0.03961160033941269 -112,0.039498165249824524 -113,0.03938592970371246 -114,0.039275363087654114 -115,0.03916686028242111 -116,0.0390608124434948 -117,0.038957323879003525 -118,0.03885640203952789 -119,0.0387580581009388 -120,0.03866206482052803 -121,0.03856848552823067 -122,0.03847717121243477 -123,0.03838792443275452 -124,0.038300588726997375 -125,0.03821486979722977 -126,0.03813047334551811 -127,0.0380471870303154 -128,0.037964895367622375 -129,0.03788337484002113 -130,0.03780252858996391 -131,0.03772223740816116 -132,0.037642404437065125 -133,0.037562984973192215 -134,0.03748394921422005 -135,0.03740530088543892 -136,0.03732707351446152 -137,0.03724921494722366 -138,0.03717190772294998 -139,0.037095360457897186 -140,0.03701970726251602 -141,0.03694499656558037 -142,0.03687133640050888 -143,0.03679877892136574 -144,0.03672749549150467 -145,0.03665729984641075 -146,0.0365881584584713 -147,0.03652004897594452 -148,0.03645294904708862 -149,0.03638676553964615 -150,0.03632141649723053 -151,0.036256853491067886 -152,0.036193039268255234 -153,0.0361299067735672 -154,0.0360674224793911 -155,0.03600555285811424 -156,0.03594430163502693 -157,0.03588365763425827 -158,0.03582361340522766 
-159,0.03576415404677391 -160,0.03570530563592911 -161,0.03564709797501564 -162,0.035589512437582016 -163,0.035532549023628235 -164,0.03547626733779907 -165,0.03542064502835274 -166,0.03536565974354744 -167,0.03531135246157646 -168,0.03525770083069801 -169,0.035204701125621796 -170,0.03515235334634781 -171,0.03510065749287605 -172,0.03504960983991623 -173,0.03499927744269371 -174,0.034949593245983124 -175,0.0349004864692688 -176,0.03485195338726044 -177,0.03480397164821625 -178,0.03475654497742653 -179,0.03470967710018158 -180,0.0346633680164814 -181,0.034617599099874496 -182,0.03457239270210266 -183,0.034527797251939774 -184,0.034483734518289566 -185,0.034440189599990845 -186,0.0343971848487854 -187,0.03435470908880234 -188,0.034312739968299866 -189,0.03427126631140709 -190,0.03423028811812401 -191,0.034189797937870026 -192,0.03414978086948395 -193,0.03411022946238518 -194,0.03407115116715431 -195,0.034032516181468964 -196,0.03399433568120003 -197,0.033956609666347504 -198,0.0339193157851696 -199,0.033882442861795425 -200,0.033845994621515274 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.200/training_config.txt deleted file mode 100644 index 44dd7e1..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, 
-3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.200/training_log.csv deleted file mode 100644 index 6223f4d..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,26.976224899291992 -2,45966.55859375 -3,9838.3310546875 -4,171.7367706298828 -5,2932.744384765625 -6,241.39785766601562 -7,214.7980194091797 -8,57.01173400878906 -9,61.522762298583984 -10,86.56292724609375 -11,28.98691749572754 -12,10.780486106872559 -13,5.945277214050293 -14,3.7590582370758057 -15,4.645999431610107 -16,3.6453328132629395 -17,2.536205768585205 -18,2.128023862838745 -19,1.8588348627090454 -20,1.7977408170700073 -21,1.7768949270248413 -22,1.7605458498001099 -23,1.743664026260376 -24,1.7275618314743042 -25,1.7131434679031372 -26,1.696273922920227 -27,1.6613413095474243 -28,1.614498257637024 -29,1.5995469093322754 -30,1.5854136943817139 -31,1.5721124410629272 -32,1.5592386722564697 -33,1.5465997457504272 -34,1.533888339996338 -35,1.520957589149475 -36,1.5079256296157837 -37,1.4946924448013306 -38,1.4813872575759888 -39,1.4680287837982178 -40,1.4546769857406616 -41,1.4412546157836914 -42,1.4285082817077637 -43,1.417437195777893 -44,1.4063979387283325 -45,1.3952583074569702 -46,1.3839343786239624 -47,1.3723622560501099 -48,1.3604755401611328 -49,1.3482539653778076 -50,1.3358200788497925 -51,1.3228048086166382 -52,1.308919072151184 -53,1.2935079336166382 -54,1.2746201753616333 -55,1.244884729385376 -56,1.1740107536315918 -57,1.1123347282409668 -58,1.0950132608413696 -59,1.0823445320129395 -60,1.071495532989502 -61,1.0618867874145508 -62,1.0531373023986816 -63,1.045055627822876 -64,1.037683129310608 -65,1.030853509902954 -66,1.0245150327682495 -67,1.0186156034469604 -68,1.0130175352096558 -69,1.00766921043396 -70,1.00253164768219 -71,0.9975680112838745 -72,0.9927396774291992 -73,0.9879984855651855 -74,0.9832776188850403 -75,0.9784784317016602 -76,0.9734547138214111 -77,0.9678614139556885 -78,0.9610490202903748 -79,0.9521101117134094 -80,0.9447373151779175 -81,0.9403135776519775 -82,0.9360491633415222 -83,0.9319099187850952 -84,0.9278814792633057 -85,0.9239541888237 -86,0.9201199412345886 -87,0.9163720607757568 -88,0.9127057194709778 -89,0.9091201424598694 -90,0.90561443567276 -91,0.9021690487861633 -92,0.8987789154052734 -93,0.8954403400421143 -94,0.8921488523483276 -95,0.8889015316963196 -96,0.8856948614120483 -97,0.8825260996818542 -98,0.8793925642967224 -99,0.8762917518615723 -100,0.8732209801673889 -101,0.8701781034469604 -102,0.8671618700027466 -103,0.864170253276825 -104,0.8612020611763 -105,0.8582553863525391 -106,0.8553285002708435 -107,0.8524205684661865 -108,0.8495301008224487 -109,0.8466557264328003 -110,0.843796968460083 -111,0.8409527540206909 -112,0.8381271958351135 -113,0.8353170156478882 -114,0.832518458366394 -115,0.829730749130249 -116,0.8269528746604919 -117,0.8241834044456482 -118,0.8214211463928223 -119,0.8186655044555664 -120,0.8159154653549194 -121,0.8131701946258545 -122,0.8104287981987 -123,0.8076906800270081 -124,0.804954469203949 -125,0.8022196292877197 -126,0.7994860410690308 -127,0.7967531681060791 -128,0.7940211892127991 -129,0.7912887334823608 -130,0.7885551452636719 -131,0.7858197093009949 -132,0.7830816507339478 
-133,0.7803404927253723 -134,0.7775951623916626 -135,0.7748451828956604 -136,0.7720896601676941 -137,0.7693280577659607 -138,0.7665596604347229 -139,0.7637841701507568 -140,0.7610008120536804 -141,0.7582090497016907 -142,0.7554084062576294 -143,0.7525977492332458 -144,0.7497768402099609 -145,0.7469460368156433 -146,0.7441047430038452 -147,0.7412528395652771 -148,0.7383905053138733 -149,0.7355170845985413 -150,0.7326324582099915 -151,0.729737401008606 -152,0.7268338799476624 -153,0.7239239811897278 -154,0.7210097908973694 -155,0.7180967330932617 -156,0.715191125869751 -157,0.7123010754585266 -158,0.7094341516494751 -159,0.7065975069999695 -160,0.7037882804870605 -161,0.7009916305541992 -162,0.6981883645057678 -163,0.6953526735305786 -164,0.6924483180046082 -165,0.6894177198410034 -166,0.6861595511436462 -167,0.6825014352798462 -168,0.678192138671875 -169,0.6730921268463135 -170,0.6676711440086365 -171,0.6628520488739014 -172,0.6588314175605774 -173,0.655146598815918 -174,0.651824414730072 -175,0.6489582061767578 -176,0.6519948244094849 -177,0.6493121385574341 -178,0.6413381099700928 -179,0.63895183801651 -180,0.6368067264556885 -181,0.6348159909248352 -182,0.632853627204895 -183,0.6307599544525146 -184,0.628511369228363 -185,0.6261082291603088 -186,0.6235643029212952 -187,0.6209037899971008 -188,0.618156909942627 -189,0.6153567433357239 -190,0.6125358939170837 -191,0.6097277402877808 -192,0.6069765090942383 -193,0.6043291687965393 -194,0.6018623113632202 -195,0.6003893613815308 -196,0.5967395901679993 -197,0.5942767262458801 -198,0.5919531583786011 -199,0.589608907699585 -200,0.5871949195861816 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.250/training_config.txt deleted file mode 100644 index 61bf28e..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 
0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.250/training_log.csv deleted file mode 100644 index 50a2f4e..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,29.751766204833984 -2,1.4609711170196533 -3,2.26060152053833 -4,2.858231782913208 -5,1.2541658878326416 -6,0.7703808546066284 -7,0.45820146799087524 -8,0.3098980188369751 -9,0.21600283682346344 -10,0.11988547444343567 -11,0.21258866786956787 -12,0.37112483382225037 -13,0.24592484533786774 -14,0.15025712549686432 -15,0.20581267774105072 -16,0.2557287812232971 -17,0.28047746419906616 -18,0.28567829728126526 -19,0.27629855275154114 -20,0.25647446513175964 -21,0.22869773209095 -22,0.19502387940883636 -23,0.1583365648984909 -24,0.1336923986673355 -25,0.1391267478466034 -26,0.15621830523014069 -27,0.16046971082687378 -28,0.1455787718296051 -29,0.12160169333219528 -30,0.10398796945810318 -31,0.10013359040021896 -32,0.1037333682179451 -33,0.10689253360033035 -34,0.10612405836582184 -35,0.10091238468885422 -36,0.09243309497833252 -37,0.08305191993713379 -38,0.07584509253501892 -39,0.07290683686733246 -40,0.07371775060892105 -41,0.07525410503149033 -42,0.07462555170059204 -43,0.07115214318037033 -44,0.06638393551111221 -45,0.06264759600162506 -46,0.0609155036509037 -47,0.060989439487457275 -48,0.061774346977472305 -49,0.062141917645931244 -50,0.06157441809773445 -51,0.060185231268405914 -52,0.05852069705724716 -53,0.05727943032979965 -54,0.056878142058849335 -55,0.05719618871808052 -56,0.057717468589544296 -57,0.057911381125450134 -58,0.057551901787519455 -59,0.05679260194301605 -60,0.05600325018167496 -61,0.05550360679626465 -62,0.055372726172208786 -63,0.0554458349943161 -64,0.055463168770074844 -65,0.05524663254618645 -66,0.05478598177433014 -67,0.054212845861911774 -68,0.053704388439655304 -69,0.05336984619498253 -70,0.053194623440504074 -71,0.05306989699602127 -72,0.052875250577926636 -73,0.05256371572613716 -74,0.05217689275741577 -75,0.05180344358086586 -76,0.0515144057571888 -77,0.05132053419947624 -78,0.05117705836892128 -79,0.05102280527353287 -80,0.05082172155380249 -81,0.05058082193136215 -82,0.050337716937065125 -83,0.050130609422922134 -84,0.049972984939813614 -85,0.049848273396492004 -86,0.04972519353032112 -87,0.049580540508031845 -88,0.04941302910447121 -89,0.049239568412303925 -90,0.049080200493335724 -91,0.04894388094544411 -92,0.04882410913705826 -93,0.04870607703924179 -94,0.048577770590782166 -95,0.048437464982271194 -96,0.04829299822449684 -97,0.048154789954423904 -98,0.04802793636918068 -99,0.04791008308529854 -100,0.047794099897146225 -101,0.04767392948269844 -102,0.047548629343509674 -103,0.04742221161723137 
-104,0.047299716621637344 -105,0.04718383401632309 -106,0.04707295820116997 -107,0.046963732689619064 -108,0.04685315489768982 -109,0.04674072936177254 -110,0.04662851244211197 -111,0.04651910811662674 -112,0.04641351476311684 -113,0.04631098359823227 -114,0.04620979353785515 -115,0.04610849916934967 -116,0.04600678011775017 -117,0.04590580239892006 -118,0.045806627720594406 -119,0.04570969566702843 -120,0.04561445116996765 -121,0.04551999643445015 -122,0.04542548581957817 -123,0.045330941677093506 -124,0.045237116515636444 -125,0.04514429718255997 -126,0.04505274072289467 -127,0.04496202990412712 -128,0.04487169533967972 -129,0.04478205367922783 -130,0.044693682342767715 -131,0.04460644721984863 -132,0.04452074319124222 -133,0.04443654417991638 -134,0.04435364156961441 -135,0.04427133873105049 -136,0.04418905824422836 -137,0.044106341898441315 -138,0.0440232940018177 -139,0.04394002631306648 -140,0.04385640472173691 -141,0.04377269372344017 -142,0.043690554797649384 -143,0.04361231252551079 -144,0.04353584721684456 -145,0.04346061497926712 -146,0.04338568076491356 -147,0.04331166297197342 -148,0.043238721787929535 -149,0.043166521936655045 -150,0.04309454932808876 -151,0.0430227592587471 -152,0.04295115917921066 -153,0.04287999868392944 -154,0.04280959069728851 -155,0.04273984581232071 -156,0.04267054796218872 -157,0.04260152950882912 -158,0.04253281652927399 -159,0.04246436804533005 -160,0.04239632934331894 -161,0.04232892021536827 -162,0.042261894792318344 -163,0.042195286601781845 -164,0.04212907329201698 -165,0.04206317290663719 -166,0.04199778288602829 -167,0.04193278029561043 -168,0.041868168860673904 -169,0.04180380329489708 -170,0.04173985868692398 -171,0.04167620465159416 -172,0.041612811386585236 -173,0.04154988378286362 -174,0.04148733615875244 -175,0.04142507165670395 -176,0.041363075375556946 -177,0.04130129516124725 -178,0.041239719837903976 -179,0.041178468614816666 -180,0.04111746698617935 -181,0.04105670005083084 -182,0.04099620506167412 -183,0.04093598574399948 -184,0.040875956416130066 -185,0.04081619903445244 -186,0.04075663909316063 -187,0.04069722443819046 -188,0.040638282895088196 -189,0.04057951644062996 -190,0.040520910173654556 -191,0.04046252369880676 -192,0.04040426015853882 -193,0.040346287190914154 -194,0.04028869792819023 -195,0.04023147001862526 -196,0.04017467796802521 -197,0.04011816531419754 -198,0.040061935782432556 -199,0.04000609368085861 -200,0.03995047137141228 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.300/training_config.txt deleted file mode 100644 index 6a41412..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) 
** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.300/training_log.csv deleted file mode 100644 index 91811a4..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.300/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,24.403804779052734 -2,5.302168846130371 -3,6.417696952819824 -4,6.663599967956543 -5,11.640973091125488 -6,142.82875061035156 -7,113.84545135498047 -8,62.3697395324707 -9,16.519977569580078 -10,7.949100971221924 -11,5.860877990722656 -12,4.949398994445801 -13,4.419576168060303 -14,4.029250621795654 -15,3.723994255065918 -16,3.469531297683716 -17,3.2510628700256348 -18,3.0624468326568604 -19,2.8986306190490723 -20,2.756695032119751 -21,2.633244752883911 -22,2.5253117084503174 -23,2.429532527923584 -24,2.348453998565674 -25,2.279642343521118 -26,2.218674421310425 -27,2.163984537124634 -28,2.1132795810699463 -29,2.0662624835968018 -30,1.726731538772583 -31,1.3960983753204346 -32,1.1001335382461548 -33,0.8328579664230347 -34,0.6273350119590759 -35,0.5304766297340393 -36,0.6160061955451965 -37,0.8005059361457825 -38,1.2019392251968384 -39,1.8657622337341309 -40,2.605424642562866 -41,3.6771388053894043 -42,4.901352882385254 -43,6.906239986419678 -44,9.240731239318848 -45,10.96189022064209 -46,12.211825370788574 -47,13.790351867675781 -48,14.575552940368652 -49,15.82691478729248 -50,16.886411666870117 -51,16.702396392822266 -52,17.762815475463867 -53,16.195615768432617 -54,14.999692916870117 -55,14.335841178894043 -56,15.605047225952148 -57,13.110873222351074 -58,13.206561088562012 -59,11.58869743347168 -60,9.78322696685791 -61,9.55672550201416 -62,10.893397331237793 -63,13.508940696716309 -64,27.169878005981445 -65,155.79678344726562 -66,19.736474990844727 -67,13.121679306030273 -68,7.6054511070251465 -69,7.319561958312988 -70,6.961833953857422 -71,5.9064178466796875 -72,7.083558082580566 -73,6.5854291915893555 
-74,6.397174835205078 -75,6.519264221191406 -76,5.7696332931518555 -77,6.311639785766602 -78,5.825382232666016 -79,5.782466411590576 -80,5.499042987823486 -81,5.356941223144531 -82,5.428700923919678 -83,5.390044212341309 -84,5.366820812225342 -85,5.3355560302734375 -86,5.397894859313965 -87,5.644278526306152 -88,5.774752616882324 -89,5.795128345489502 -90,5.8000168800354 -91,5.8044023513793945 -92,5.802270412445068 -93,5.795446872711182 -94,5.789263725280762 -95,5.781038284301758 -96,5.769467830657959 -97,5.756453990936279 -98,5.742919921875 -99,5.728841781616211 -100,5.713850498199463 -101,5.697965621948242 -102,5.681582927703857 -103,5.663326740264893 -104,5.644327640533447 -105,5.624281883239746 -106,5.605315208435059 -107,5.5869975090026855 -108,5.569972991943359 -109,5.613217830657959 -110,5.6477861404418945 -111,5.680514335632324 -112,5.705422401428223 -113,5.724052429199219 -114,5.737934589385986 -115,5.748281478881836 -116,5.753823280334473 -117,5.755988121032715 -118,5.756014823913574 -119,5.751565456390381 -120,5.7441205978393555 -121,5.734851360321045 -122,5.724320888519287 -123,5.712479591369629 -124,5.698854923248291 -125,5.683082580566406 -126,5.666414737701416 -127,5.649035930633545 -128,5.6311235427856445 -129,5.613024711608887 -130,5.594449996948242 -131,5.575385093688965 -132,5.555997371673584 -133,5.536275386810303 -134,5.516432762145996 -135,5.49648380279541 -136,5.476499557495117 -137,5.456540107727051 -138,5.4365692138671875 -139,5.416644096374512 -140,5.396719455718994 -141,5.376810073852539 -142,5.356957912445068 -143,5.337177753448486 -144,5.317762851715088 -145,5.298376560211182 -146,5.279025077819824 -147,5.259649753570557 -148,5.240499019622803 -149,5.221240997314453 -150,5.202176570892334 -151,5.182945251464844 -152,5.163786888122559 -153,5.144608497619629 -154,5.125521183013916 -155,5.106405258178711 -156,5.08725643157959 -157,5.068120956420898 -158,5.0489115715026855 -159,5.02988862991333 -160,5.010739326477051 -161,4.991707801818848 -162,4.972446918487549 -163,4.953149795532227 -164,4.934071063995361 -165,4.91486930847168 -166,4.895778656005859 -167,4.87645959854126 -168,4.857062339782715 -169,4.837740898132324 -170,4.818342208862305 -171,4.7990288734436035 -172,4.779512882232666 -173,4.759884834289551 -174,4.74039888381958 -175,4.7208428382873535 -176,4.70135498046875 -177,4.681641101837158 -178,4.6616597175598145 -179,4.641908645629883 -180,4.6219892501831055 -181,4.602001190185547 -182,4.581830024719238 -183,4.561444282531738 -184,4.541308403015137 -185,4.521088123321533 -186,4.500600814819336 -187,4.479837417602539 -188,4.458885669708252 -189,4.437826156616211 -190,4.416824817657471 -191,4.395745754241943 -192,4.3741455078125 -193,4.353198528289795 -194,4.332273483276367 -195,4.311269760131836 -196,4.290205478668213 -197,4.269473552703857 -198,4.248467922210693 -199,4.22817850112915 -200,4.207895755767822 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.400/training_config.txt deleted file mode 100644 index 34b43ec..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: 
[batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.400/training_log.csv deleted file mode 100644 index 90c3db7..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.400/training_log.csv +++ /dev/null @@ -1,15 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,43.634342193603516 -2,43.87071990966797 -3,0.9744042158126831 -4,2.0090606212615967 -5,2.9039292335510254 -6,1.1930429935455322 -7,1.2102395296096802 -8,1.404444694519043 -9,1.4429371356964111 -10,nan -11,nan -12,nan -13,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.500/training_config.txt deleted file mode 100644 index 38628c5..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = 
weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.500/training_log.csv deleted file mode 100644 index 61ab901..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.500/training_log.csv +++ /dev/null @@ -1,14 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,1076.67529296875 -2,5.949836730957031 -3,47366.4453125 -4,6.035224914550781 -5,7.525753498077393 -6,8.557132720947266 -7,9.672903060913086 -8,11.375807762145996 -9,nan -10,nan -11,nan -12,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.600/training_config.txt deleted file mode 100644 index e1cd02a..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if 
isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.600/training_log.csv deleted file mode 100644 index ca87726..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.600/training_log.csv +++ /dev/null @@ -1,22 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,15860.9775390625 -2,62325.515625 -3,62379.81640625 -4,62310.23046875 -5,61819.40234375 -6,58379.16015625 -7,44791.0390625 -8,29133.533203125 -9,26100.62890625 -10,26191.78515625 -11,26191.7578125 -12,26643.279296875 -13,30401.509765625 -14,33642.29296875 -15,34379.43359375 -16,32445.1875 -17,nan -18,nan -19,nan -20,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.700/training_config.txt deleted file mode 100644 index 6d11523..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, 
omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.700/training_log.csv deleted file mode 100644 index 4b1116c..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,33734.078125 -2,62436.953125 -3,62439.21875 -4,62439.21875 -5,62439.21875 -6,62439.21875 -7,62439.21875 -8,62439.21875 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.800/training_config.txt deleted file mode 100644 index ed206a6..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 
1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.800/training_log.csv deleted file mode 100644 index 8a771bf..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,46707.47265625 -2,62439.21875 -3,62439.21875 -4,62439.21875 -5,62439.21875 -6,62439.21875 -7,62439.21875 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.900/training_config.txt deleted file mode 100644 index 90486e6..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 
6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.900/training_log.csv deleted file mode 100644 index e46b66f..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,55912.42578125 -2,62439.21875 -3,62439.21875 -4,62439.21875 -5,62439.21875 -6,62439.21875 -7,62439.21875 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_1.000/training_config.txt deleted file mode 100644 index 51606a2..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_1.000/training_log.csv deleted 
file mode 100644 index 717414b..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,59203.26953125 -2,62439.21875 -3,62439.21875 -4,62439.21875 -5,62439.21875 -6,62439.21875 -7,62439.21875 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_16.000/training_config.txt deleted file mode 100644 index 3c1250b..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_16.000/training_log.csv deleted file mode 100644 index cfb5ab9..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,62600.4609375 -2,62600.4609375 -3,62600.4609375 -4,62600.4609375 -5,62600.4609375 -6,62600.4609375 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_2.000/training_config.txt 
b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_2.000/training_config.txt deleted file mode 100644 index 2965b57..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_2.000/training_log.csv deleted file mode 100644 index 83926e1..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,62544.9296875 -2,1.8998955488204956 -3,nan -4,nan -5,nan -6,nan diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_4.000/training_config.txt deleted file mode 100644 index 59d25d7..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def 
loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_4.000/training_log.csv deleted file mode 100644 index cfb5ab9..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,62600.4609375 -2,62600.4609375 -3,62600.4609375 -4,62600.4609375 -5,62600.4609375 -6,62600.4609375 diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_8.000/training_config.txt deleted file mode 100644 index 1b20c5d..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the 
weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return (((1/min_val)**(1/2) - 1) * 1/t_max * (-t_span + t_max) + 1)**-2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_8.000/training_log.csv deleted file mode 100644 index cfb5ab9..0000000 --- a/training/time_weighting_learning_rate_sweep/inverse_squared_mirrored/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,265.3622131347656 -1,62600.4609375 -2,62600.4609375 -3,62600.4609375 -4,62600.4609375 -5,62600.4609375 -6,62600.4609375 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.010/training_config.txt deleted file mode 100644 index 78a2210..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] 
-[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.010/training_log.csv deleted file mode 100644 index 495595f..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,714.6048583984375 -2,579.4140625 -3,575.1455078125 -4,532.2797241210938 -5,463.6742858886719 -6,437.1304626464844 -7,364.9020690917969 -8,323.7353820800781 -9,293.74853515625 -10,232.21356201171875 -11,214.13485717773438 -12,172.71873474121094 -13,151.7352294921875 -14,141.43789672851562 -15,123.35475158691406 -16,119.86355590820312 -17,124.89099884033203 -18,118.97671508789062 -19,112.73799896240234 -20,122.03052520751953 -21,110.14960479736328 -22,108.66156005859375 -23,105.18172454833984 -24,102.26327514648438 -25,97.96698760986328 -26,95.25424194335938 -27,92.12311553955078 -28,90.68693542480469 -29,89.16133880615234 -30,85.14186096191406 -31,79.66160583496094 -32,77.0797348022461 -33,75.5138168334961 -34,72.2160873413086 -35,66.61758422851562 -36,65.8427734375 -37,65.57572937011719 -38,65.60968017578125 -39,65.81647491455078 -40,65.43336486816406 -41,64.92277526855469 -42,62.744415283203125 -43,59.78923797607422 -44,56.21464920043945 -45,52.4990234375 -46,49.56650161743164 -47,48.11113739013672 -48,46.90332794189453 -49,45.91790771484375 -50,45.15633010864258 -51,44.18566131591797 -52,41.89727783203125 -53,41.4548454284668 -54,41.017112731933594 -55,39.45220184326172 -56,37.775306701660156 -57,36.878787994384766 -58,36.10622024536133 -59,37.28950881958008 -60,37.508846282958984 -61,37.05479431152344 -62,36.46586990356445 -63,36.18096160888672 -64,35.27386474609375 -65,35.0272102355957 -66,34.71477127075195 -67,34.28932571411133 -68,33.6326789855957 -69,31.92080307006836 -70,35.27244186401367 -71,33.98189926147461 -72,33.44644546508789 -73,32.74383544921875 -74,33.34574890136719 -75,32.653160095214844 -76,32.043006896972656 -77,31.541818618774414 -78,31.07904052734375 -79,30.82646369934082 -80,33.20808410644531 -81,32.72014617919922 -82,32.016380310058594 -83,31.32929801940918 -84,31.010784149169922 -85,30.672340393066406 -86,31.21363639831543 -87,30.99217414855957 -88,30.84580421447754 -89,30.729087829589844 -90,30.602596282958984 -91,30.460556030273438 -92,30.306432723999023 -93,30.1444034576416 -94,29.979402542114258 -95,29.815073013305664 -96,29.654067993164062 -97,29.497331619262695 
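Both weight schedules recorded in these configs share one contract: they map the time grid t in [0, t_max] to weights that climb monotonically from min_val at t = 0 up to 1 at t = t_max, so tracking error late in the trajectory dominates the loss while the unavoidable initial transient is nearly ignored. A standalone sketch of the two schedules, re-implemented from the configs above (only torch is required; the endpoint check is illustrative):

from typing import List, Union

import torch

def inverse_squared_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
    t_max = t_max if t_max is not None else t_span[-1]
    # The inner term is (1/min_val)**0.5 at t = 0 and 1 at t = t_max,
    # so its squared reciprocal runs from min_val up to 1.
    return (((1 / min_val) ** 0.5 - 1) * (t_max - t_span) / t_max + 1) ** -2

def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
    t_max = t_max if t_max is not None else t_span[-1]
    return min_val + (1 - min_val) / t_max * t_span

t = torch.linspace(0, 10, 1000)  # matches "Time Span: 0 to 10, Points: 1000"
for fn in (inverse_squared_mirrored, linear):
    w = fn(t)
    print(fn.__name__, round(float(w[0]), 4), round(float(w[-1]), 4))  # ~0.01 ... 1.0

The difference between the two is where the weight mass sits: the linear ramp spreads it evenly (weight ~0.5 at mid-horizon), while inverse_squared_mirrored stays near 0.03 at mid-horizon and rises sharply only close to t_max.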
-98,29.343477249145508 -99,29.189556121826172 -100,29.032962799072266 -101,28.87960433959961 -102,28.808874130249023 -103,31.92880630493164 -104,29.599184036254883 -105,28.60799217224121 -106,28.558412551879883 -107,29.081783294677734 -108,28.708251953125 -109,28.889665603637695 -110,29.022218704223633 -111,29.115938186645508 -112,30.249662399291992 -113,29.90050506591797 -114,29.863218307495117 -115,29.919424057006836 -116,30.534366607666016 -117,30.26666831970215 -118,30.28335189819336 -119,30.41321563720703 -120,30.533838272094727 -121,30.628007888793945 -122,30.69277572631836 -123,30.729732513427734 -124,30.74176788330078 -125,30.731460571289062 -126,30.701391220092773 -127,30.654050827026367 -128,30.592269897460938 -129,30.51949691772461 -130,30.440837860107422 -131,30.36627197265625 -132,30.317527770996094 -133,30.326929092407227 -134,30.242076873779297 -135,30.125869750976562 -136,30.05098533630371 -137,29.984874725341797 -138,29.911590576171875 -139,29.824851989746094 -140,29.725130081176758 -141,29.637651443481445 -142,29.64766502380371 -143,29.489408493041992 -144,29.477596282958984 -145,29.446971893310547 -146,29.381731033325195 -147,29.27618980407715 -148,29.128076553344727 -149,28.939863204956055 -150,28.81536293029785 -151,28.66120147705078 -152,28.487306594848633 -153,28.332317352294922 -154,28.06852912902832 -155,27.62640380859375 -156,26.73139190673828 -157,27.301401138305664 -158,27.957942962646484 -159,27.741792678833008 -160,28.28934669494629 -161,28.375587463378906 -162,28.701400756835938 -163,28.958267211914062 -164,28.992534637451172 -165,28.85804557800293 -166,28.637653350830078 -167,28.387672424316406 -168,28.138731002807617 -169,27.910858154296875 -170,27.723718643188477 -171,27.60329818725586 -172,27.56740379333496 -173,27.56722068786621 -174,27.565746307373047 -175,27.552337646484375 -176,27.523012161254883 -177,27.47589111328125 -178,27.41059112548828 -179,27.328439712524414 -180,27.23200035095215 -181,27.12486457824707 -182,27.01094627380371 -183,26.89389419555664 -184,26.776994705200195 -185,26.662721633911133 -186,26.553457260131836 -187,26.451000213623047 -188,26.357133865356445 -189,26.2746524810791 -190,26.208925247192383 -191,26.167104721069336 -192,26.153125762939453 -193,26.16265106201172 -194,26.178991317749023 -195,26.167509078979492 -196,26.119300842285156 -197,26.06168556213379 -198,26.01095962524414 -199,25.970462799072266 -200,25.939260482788086 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.020/training_config.txt deleted file mode 100644 index 43b5da4..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, 
List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.020/training_log.csv deleted file mode 100644 index 2140d7b..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,644.5232543945312 -2,432.45135498046875 -3,246.39772033691406 -4,143.67214965820312 -5,146.42625427246094 -6,125.95569610595703 -7,123.57411193847656 -8,138.83303833007812 -9,125.50469207763672 -10,108.40174865722656 -11,83.42002868652344 -12,76.78449249267578 -13,68.7812728881836 -14,57.76449966430664 -15,56.2603874206543 -16,49.126853942871094 -17,44.249114990234375 -18,41.3490104675293 -19,41.7054328918457 -20,38.931312561035156 -21,37.040103912353516 -22,33.5203971862793 -23,31.531551361083984 -24,30.4565486907959 -25,29.40399742126465 -26,28.154945373535156 -27,27.327219009399414 -28,25.034164428710938 -29,24.366701126098633 -30,24.051090240478516 -31,23.84930992126465 -32,23.876127243041992 -33,24.02357292175293 -34,24.505666732788086 -35,24.846723556518555 -36,24.304401397705078 -37,24.000503540039062 -38,22.731884002685547 -39,22.444942474365234 -40,22.33985137939453 -41,21.778657913208008 -42,21.790668487548828 -43,21.87508201599121 -44,20.941659927368164 -45,19.98122215270996 -46,19.67579460144043 -47,18.61096954345703 -48,18.237741470336914 -49,18.051528930664062 -50,17.968360900878906 -51,17.826557159423828 -52,17.463218688964844 -53,17.391155242919922 -54,17.322154998779297 -55,17.249099731445312 -56,17.203142166137695 -57,17.177061080932617 -58,17.156999588012695 -59,17.13202476501465 -60,17.09617805480957 -61,17.047901153564453 -62,16.988624572753906 -63,16.92217254638672 -64,16.85390853881836 -65,16.790945053100586 -66,16.741199493408203 -67,16.70546531677246 -68,16.676738739013672 -69,16.656858444213867 -70,16.632986068725586 -71,16.589929580688477 -72,16.531740188598633 -73,16.476024627685547 -74,16.434349060058594 -75,16.40691375732422 -76,16.387786865234375 -77,16.369760513305664 -78,16.347824096679688 -79,16.319435119628906 -80,16.284263610839844 
-81,16.242755889892578 -82,16.190977096557617 -83,16.10414695739746 -84,16.295429229736328 -85,16.23824691772461 -86,16.095855712890625 -87,16.14350128173828 -88,16.12541961669922 -89,16.050771713256836 -90,16.026506423950195 -91,15.952654838562012 -92,15.932204246520996 -93,15.909126281738281 -94,15.902645111083984 -95,15.876679420471191 -96,15.899537086486816 -97,15.892345428466797 -98,15.941157341003418 -99,15.94996166229248 -100,15.920232772827148 -101,15.868099212646484 -102,15.820965766906738 -103,15.816763877868652 -104,15.802373886108398 -105,15.748451232910156 -106,15.746114730834961 -107,15.731457710266113 -108,15.869869232177734 -109,15.86750602722168 -110,15.767420768737793 -111,15.72426700592041 -112,15.736519813537598 -113,15.758694648742676 -114,15.761442184448242 -115,15.718195915222168 -116,15.642991065979004 -117,15.587371826171875 -118,15.606826782226562 -119,15.614462852478027 -120,15.571768760681152 -121,15.50148868560791 -122,15.482123374938965 -123,15.440286636352539 -124,15.53645133972168 -125,15.442999839782715 -126,15.551623344421387 -127,15.592742919921875 -128,15.539392471313477 -129,15.481951713562012 -130,15.44398307800293 -131,15.489453315734863 -132,15.508950233459473 -133,15.453888893127441 -134,15.379463195800781 -135,15.371776580810547 -136,15.367036819458008 -137,15.341314315795898 -138,15.281627655029297 -139,15.27054500579834 -140,15.271455764770508 -141,15.262412071228027 -142,15.240700721740723 -143,15.211735725402832 -144,15.187156677246094 -145,15.162375450134277 -146,15.089510917663574 -147,15.259542465209961 -148,15.007633209228516 -149,14.998392105102539 -150,14.944832801818848 -151,14.95765495300293 -152,14.966155052185059 -153,14.902303695678711 -154,15.197295188903809 -155,15.329123497009277 -156,15.437874794006348 -157,15.27371883392334 -158,15.129127502441406 -159,15.101563453674316 -160,15.21584415435791 -161,15.298949241638184 -162,15.343788146972656 -163,15.359737396240234 -164,15.355294227600098 -165,15.339466094970703 -166,15.318048477172852 -167,15.285005569458008 -168,15.238525390625 -169,15.198894500732422 -170,15.183269500732422 -171,15.174066543579102 -172,15.138912200927734 -173,15.103294372558594 -174,15.074346542358398 -175,15.046134948730469 -176,15.014641761779785 -177,14.966297149658203 -178,14.968417167663574 -179,14.987019538879395 -180,14.958913803100586 -181,14.89310359954834 -182,14.866168022155762 -183,14.85333251953125 -184,14.781657218933105 -185,14.766127586364746 -186,14.784412384033203 -187,14.815872192382812 -188,14.86849308013916 -189,14.879559516906738 -190,14.903319358825684 -191,14.887011528015137 -192,14.82071590423584 -193,14.809667587280273 -194,14.825650215148926 -195,14.81659984588623 -196,14.786943435668945 -197,14.736711502075195 -198,14.690184593200684 -199,14.7051420211792 -200,14.696343421936035 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.040/training_config.txt deleted file mode 100644 index 0d44ea6..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 
0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.040/training_log.csv deleted file mode 100644 index afc451d..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,515.2356567382812 -2,158.76222229003906 -3,107.61880493164062 -4,81.59687042236328 -5,57.54480743408203 -6,37.0859260559082 -7,31.395763397216797 -8,28.18935775756836 -9,24.76126480102539 -10,20.633028030395508 -11,22.062469482421875 -12,22.085235595703125 -13,23.193111419677734 -14,24.520139694213867 -15,24.7772274017334 -16,23.741788864135742 -17,19.050811767578125 -18,11.206725120544434 -19,10.079811096191406 -20,9.478598594665527 -21,9.034078598022461 -22,8.691415786743164 -23,8.432714462280273 -24,8.159854888916016 -25,7.853833198547363 -26,8.623556137084961 -27,7.092395782470703 -28,5.832382678985596 -29,5.519031047821045 -30,5.472887992858887 -31,5.414363861083984 -32,5.340923309326172 -33,5.193665504455566 -34,5.156504154205322 -35,5.077095985412598 -36,4.991715431213379 -37,4.90402364730835 -38,4.811176300048828 -39,4.709787845611572 -40,4.49556303024292 -41,4.43505859375 -42,4.375931739807129 -43,4.319035053253174 -44,4.261437892913818 -45,4.19934606552124 -46,4.124954700469971 -47,4.022601127624512 -48,3.851543664932251 -49,2.866476058959961 -50,2.7723782062530518 -51,2.7122297286987305 -52,2.668445110321045 -53,2.633880615234375 -54,2.604548215866089 -55,2.5778005123138428 -56,2.552496910095215 -57,2.528331756591797 -58,2.505108594894409 -59,2.4827520847320557 -60,2.460965871810913 -61,2.4397599697113037 -62,2.419118881225586 -63,2.3991494178771973 
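Three outcomes recur across the logs in this sweep: steady descent at small steps (the linear runs at lr <= 0.1), overflow to nan (inverse_squared_mirrored at lr 0.4-0.6 and 2.0), and an exactly repeated plateau (62439.21875 or 62600.4609375 every epoch at lr >= 0.7), which is consistent with the controller saturating so that each epoch replays the same trajectory. A hypothetical triage helper over this directory layout (lr_*/training_log.csv with Epoch,Loss columns, as in the deleted files; the plateau test is an illustrative heuristic):

import csv
import math
from pathlib import Path

def classify(log_path: Path) -> str:
    with log_path.open() as f:
        losses = [float(row["Loss"]) for row in csv.DictReader(f)]
    if any(math.isnan(x) for x in losses):
        return "diverged (nan)"
    # Several identical trailing losses -> optimizer stuck on a plateau.
    if len(losses) >= 3 and len(set(losses[-3:])) == 1:
        return "plateaued"
    return "improving" if losses[-1] < losses[0] else "worse than start"

sweep = Path("training/time_weighting_learning_rate_sweep")
for log in sorted(sweep.glob("*/lr_*/training_log.csv")):
    print(log.parent.parent.name, log.parent.name, classify(log))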
-64,2.3796865940093994 -65,2.360602617263794 -66,2.3418028354644775 -67,2.323221206665039 -68,2.3047902584075928 -69,2.2864797115325928 -70,2.2682607173919678 -71,2.2500863075256348 -72,2.2319228649139404 -73,2.2137386798858643 -74,2.1955113410949707 -75,2.1772241592407227 -76,2.158827781677246 -77,2.140284776687622 -78,2.1215174198150635 -79,2.1024832725524902 -80,2.083136796951294 -81,2.063410758972168 -82,2.0432353019714355 -83,2.0225448608398438 -84,2.001249074935913 -85,1.9792252779006958 -86,1.9563013315200806 -87,1.9322552680969238 -88,1.9068682193756104 -89,1.8798130750656128 -90,1.8508108854293823 -91,1.820331335067749 -92,1.7906063795089722 -93,1.769997000694275 -94,1.7632648944854736 -95,1.7627975940704346 -96,1.7639026641845703 -97,1.7649849653244019 -98,1.7638072967529297 -99,1.758379578590393 -100,1.7479490041732788 -101,1.7330200672149658 -102,1.7149790525436401 -103,1.6954898834228516 -104,1.6758089065551758 -105,1.6569490432739258 -106,1.6408288478851318 -107,1.6294597387313843 -108,1.6209453344345093 -109,1.610831618309021 -110,1.5958671569824219 -111,1.574457049369812 -112,1.5492125749588013 -113,1.5271379947662354 -114,1.5061638355255127 -115,1.4852441549301147 -116,1.465854525566101 -117,1.4507229328155518 -118,1.4422855377197266 -119,1.4363948106765747 -120,1.4285622835159302 -121,1.4176558256149292 -122,1.404306173324585 -123,1.3903326988220215 -124,1.3773859739303589 -125,1.364529013633728 -126,1.3411210775375366 -127,1.2755751609802246 -128,1.286085605621338 -129,1.2874464988708496 -130,1.2850823402404785 -131,1.2812745571136475 -132,1.2711412906646729 -133,1.225295901298523 -134,1.2287336587905884 -135,1.2459962368011475 -136,1.250019907951355 -137,1.247069239616394 -138,1.2402704954147339 -139,1.230010986328125 -140,1.214671015739441 -141,1.19306480884552 -142,1.1762839555740356 -143,1.1889687776565552 -144,1.1883511543273926 -145,1.1645481586456299 -146,1.1529866456985474 -147,1.1585556268692017 -148,1.1597636938095093 -149,1.1510549783706665 -150,1.1370739936828613 -151,1.1259849071502686 -152,1.123328685760498 -153,1.1253968477249146 -154,1.11690354347229 -155,1.1046383380889893 -156,1.0985342264175415 -157,1.0954899787902832 -158,1.091748595237732 -159,1.084695816040039 -160,1.0752232074737549 -161,1.0669336318969727 -162,1.0623160600662231 -163,1.0579501390457153 -164,1.0501227378845215 -165,1.0415986776351929 -166,1.035154938697815 -167,1.0300227403640747 -168,1.0247077941894531 -169,1.0184022188186646 -170,1.0115145444869995 -171,1.0051194429397583 -172,0.999808669090271 -173,0.994986355304718 -174,0.9895349740982056 -175,0.9834290742874146 -176,0.977564811706543 -177,0.9722945690155029 -178,0.9671614170074463 -179,0.9617620706558228 -180,0.9561649560928345 -181,0.9505959749221802 -182,0.9451307654380798 -183,0.9397275447845459 -184,0.9342739582061768 -185,0.9287945032119751 -186,0.9233459830284119 -187,0.9178799986839294 -188,0.9122968912124634 -189,0.9066307544708252 -190,0.9010379314422607 -191,0.895557701587677 -192,0.890086829662323 -193,0.8845425248146057 -194,0.8789696097373962 -195,0.8734771609306335 -196,0.8681140542030334 -197,0.8628330230712891 -198,0.8575904369354248 -199,0.8524259328842163 -200,0.847390353679657 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.050/training_config.txt deleted file mode 100644 index 305e6f5..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 
@@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.050/training_log.csv deleted file mode 100644 index 33afacf..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,467.0709533691406 -2,141.0096435546875 -3,80.21915435791016 -4,62.238319396972656 -5,33.127586364746094 -6,24.453948974609375 -7,22.974878311157227 -8,18.442729949951172 -9,12.116485595703125 -10,9.59331226348877 -11,11.168278694152832 -12,12.907947540283203 -13,15.441995620727539 -14,15.75467300415039 -15,15.958130836486816 -16,16.28812599182129 -17,18.145294189453125 -18,18.013530731201172 -19,17.932374954223633 -20,17.708175659179688 -21,16.53782081604004 -22,14.101917266845703 -23,13.081148147583008 -24,12.348359107971191 -25,11.671486854553223 -26,11.287075996398926 -27,10.856710433959961 -28,10.528544425964355 -29,10.261336326599121 -30,10.114738464355469 -31,10.000204086303711 -32,9.849510192871094 -33,9.796695709228516 -34,9.678946495056152 -35,9.540303230285645 -36,9.384657859802246 -37,9.210855484008789 -38,9.00443172454834 -39,8.37180233001709 -40,7.5140814781188965 -41,5.999229907989502 -42,5.210753440856934 -43,4.779099464416504 -44,4.547909259796143 -45,4.327592849731445 
-46,4.168142795562744 -47,4.01237154006958 -48,3.7459161281585693 -49,3.6934382915496826 -50,3.6129744052886963 -51,3.5265369415283203 -52,3.436947822570801 -53,3.3332624435424805 -54,3.1480109691619873 -55,2.7184929847717285 -56,2.4664793014526367 -57,2.368072032928467 -58,2.3024120330810547 -59,2.2477588653564453 -60,2.1994872093200684 -61,2.155435562133789 -62,2.11452054977417 -63,2.0760509967803955 -64,2.0391845703125 -65,2.0033860206604004 -66,1.968421220779419 -67,1.934100866317749 -68,1.9003478288650513 -69,1.8671573400497437 -70,1.8343504667282104 -71,1.8016293048858643 -72,1.7684557437896729 -73,1.7335323095321655 -74,1.6934453248977661 -75,1.636460781097412 -76,1.5582153797149658 -77,1.5159642696380615 -78,1.4848518371582031 -79,1.4559129476547241 -80,1.4281798601150513 -81,1.401394248008728 -82,1.3752102851867676 -83,1.349375605583191 -84,1.3237608671188354 -85,1.2979273796081543 -86,1.271578311920166 -87,1.244394302368164 -88,1.2160297632217407 -89,1.186200737953186 -90,1.1548237800598145 -91,1.1225130558013916 -92,1.0911872386932373 -93,1.0633208751678467 -94,1.0402809381484985 -95,1.0208388566970825 -96,1.002177357673645 -97,0.9811755418777466 -98,0.955925703048706 -99,0.9257803559303284 -100,0.890788733959198 -101,0.8538690805435181 -102,0.823354959487915 -103,0.7996653318405151 -104,0.7782007455825806 -105,0.7563647031784058 -106,0.7319372296333313 -107,0.7031870484352112 -108,0.6693129539489746 -109,0.6371129155158997 -110,0.6208493113517761 -111,0.6101266145706177 -112,0.5968101620674133 -113,0.5832498669624329 -114,0.5713470578193665 -115,0.5623881220817566 -116,0.5558032393455505 -117,0.5504727959632874 -118,0.544829249382019 -119,0.5377687215805054 -120,0.5288453698158264 -121,0.5183308124542236 -122,0.507094144821167 -123,0.4963189661502838 -124,0.48710787296295166 -125,0.48009002208709717 -126,0.4752463102340698 -127,0.4720882475376129 -128,0.46996310353279114 -129,0.4682832956314087 -130,0.46658602356910706 -131,0.4645591378211975 -132,0.4619867205619812 -133,0.45879408717155457 -134,0.45505914092063904 -135,0.45099467039108276 -136,0.4469047486782074 -137,0.44310009479522705 -138,0.43979910016059875 -139,0.43706846237182617 -140,0.4348384141921997 -141,0.4329591989517212 -142,0.4312680959701538 -143,0.42962509393692017 -144,0.4279289245605469 -145,0.42612332105636597 -146,0.42419353127479553 -147,0.4221544861793518 -148,0.42004695534706116 -149,0.417923241853714 -150,0.41583943367004395 -151,0.4138461947441101 -152,0.41198205947875977 -153,0.4102642834186554 -154,0.40868425369262695 -155,0.4072115123271942 -156,0.40580135583877563 -157,0.40440642833709717 -158,0.4029877185821533 -159,0.40152955055236816 -160,0.4000336825847626 -161,0.3985176682472229 -162,0.397006094455719 -163,0.395520955324173 -164,0.39408057928085327 -165,0.3926945626735687 -166,0.3913637399673462 -167,0.3900793790817261 -168,0.3888246715068817 -169,0.38758283853530884 -170,0.38634204864501953 -171,0.38509562611579895 -172,0.3838447630405426 -173,0.38259565830230713 -174,0.3813559114933014 -175,0.3801310062408447 -176,0.3789268434047699 -177,0.37774577736854553 -178,0.376587450504303 -179,0.37545090913772583 -180,0.37433069944381714 -181,0.3732208013534546 -182,0.37211620807647705 -183,0.37101438641548157 -184,0.36991602182388306 -185,0.3688261806964874 -186,0.3677462339401245 -187,0.3666793406009674 -188,0.3656255006790161 -189,0.3645850419998169 -190,0.36355581879615784 -191,0.3625369668006897 -192,0.36152634024620056 -193,0.36052247881889343 -194,0.3595251142978668 -195,0.35853350162506104 
-196,0.35754939913749695 -197,0.35657355189323425 -198,0.3556077778339386 -199,0.35465186834335327 -200,0.35370588302612305 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.080/training_config.txt deleted file mode 100644 index e91751e..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.080/training_log.csv deleted file mode 100644 index 64d57eb..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,328.2253723144531 -2,98.93622589111328 -3,80.56061553955078 -4,26.90008544921875 -5,16.48835563659668 -6,11.3106689453125 -7,9.280985832214355 -8,7.837968826293945 -9,5.0850701332092285 -10,4.754053592681885 -11,4.336775302886963 -12,3.706871509552002 -13,2.9151158332824707 -14,2.5085701942443848 -15,2.2845041751861572 -16,2.0855987071990967 -17,1.932998776435852 -18,1.7965223789215088 -19,1.6679306030273438 -20,1.5394158363342285 -21,1.3724098205566406 -22,1.2629584074020386 -23,1.1290388107299805 -24,1.0304137468338013 -25,0.966759204864502 
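Every config in this sweep trains on the same 22 cases, and each case doubles as the initial ODE state: desired_theta rides along as a constant fourth state component, which is why the loss can read the target back out of state_traj[:, :, 3]. The listed values are float32 renderings of simple pi fractions (0.5235987901687622 is pi/6, 2.094395160675049 is 2*pi/3, 6.2831854820251465 is 2*pi, and so on). A sketch rebuilding a few rows of that batch (the stacking itself is an assumption about the training script, which is not part of this diff):

import math

import torch

cases = torch.tensor([
    [math.pi / 6, 0.0, 0.0, 0.0],   # release from 30 deg, drive theta back to 0
    [-math.pi / 6, 0.0, 0.0, 0.0],
    [0.0, 2 * math.pi, 0.0, 0.0],   # arrest an initial spin of 2*pi rad/s
    [0.0, 0.0, 0.0, 2 * math.pi],   # command a full-turn setpoint from rest
])  # shape [batch_size, 4]: [theta0, omega0, alpha0, desired_theta] per row

print(torch.allclose(cases[0, 0], torch.tensor(0.5235987901687622)))  # True

One shape note: weights.view(-1, 1) is [t_points, 1], and since batch_size = 22 and t_points = 1000 differ, that only broadcasts against theta if state_traj is laid out [t_points, batch_size, 4] (the layout torchdiffeq-style solvers return), so the [batch_size, t_points] annotations in these configs read as transposed.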
-26,0.9060852527618408 -27,0.8497287631034851 -28,0.8013557195663452 -29,0.7648795247077942 -30,0.7407246828079224 -31,0.7253081798553467 -32,0.7142833471298218 -33,0.7044973373413086 -34,0.694345235824585 -35,0.682824432849884 -36,0.669634222984314 -37,0.6548628211021423 -38,0.6382023692131042 -39,0.6199303269386292 -40,0.6004860997200012 -41,0.5802639126777649 -42,0.5599892139434814 -43,0.5407334566116333 -44,0.5237502455711365 -45,0.5100423693656921 -46,0.5002378225326538 -47,0.4940362274646759 -48,0.49028515815734863 -49,0.487657755613327 -50,0.48513901233673096 -51,0.4820377826690674 -52,0.4779145419597626 -53,0.47259387373924255 -54,0.46623098850250244 -55,0.4594181478023529 -56,0.4531157314777374 -57,0.44829314947128296 -58,0.44518107175827026 -59,0.4434128999710083 -60,0.4424061179161072 -61,0.4416024386882782 -62,0.4406009316444397 -63,0.43917685747146606 -64,0.4372396171092987 -65,0.4347861409187317 -66,0.4318794310092926 -67,0.4286292493343353 -68,0.4252021610736847 -69,0.4217913746833801 -70,0.41861504316329956 -71,0.4158715307712555 -72,0.41368386149406433 -73,0.4120495319366455 -74,0.4108322560787201 -75,0.4098057448863983 -76,0.40873274207115173 -77,0.4074447453022003 -78,0.405885249376297 -79,0.40410634875297546 -80,0.40223121643066406 -81,0.4003974497318268 -82,0.39870890974998474 -83,0.3971964716911316 -84,0.39582714438438416 -85,0.39452850818634033 -86,0.39322584867477417 -87,0.39186957478523254 -88,0.39044442772865295 -89,0.38896697759628296 -90,0.3874801993370056 -91,0.3860328495502472 -92,0.3846619427204132 -93,0.38338491320610046 -94,0.38219282031059265 -95,0.38105592131614685 -96,0.37993407249450684 -97,0.37879273295402527 -98,0.377614825963974 -99,0.3764019012451172 -100,0.37517213821411133 -101,0.3739490807056427 -102,0.3727515935897827 -103,0.3715904951095581 -104,0.37046173214912415 -105,0.36935797333717346 -106,0.3682645559310913 -107,0.36717236042022705 -108,0.36607614159584045 -109,0.3649764955043793 -110,0.3638780117034912 -111,0.36278876662254333 -112,0.3617141842842102 -113,0.3606555163860321 -114,0.3596130609512329 -115,0.3585801124572754 -116,0.35755202174186707 -117,0.3565238416194916 -118,0.3554941415786743 -119,0.35446426272392273 -120,0.3534383475780487 -121,0.3524191975593567 -122,0.35140952467918396 -123,0.35040947794914246 -124,0.349417120218277 -125,0.3484324514865875 -126,0.3474528193473816 -127,0.3464769423007965 -128,0.34550610184669495 -129,0.3445393741130829 -130,0.3435782790184021 -131,0.342623233795166 -132,0.34167519211769104 -133,0.3407343327999115 -134,0.3397994637489319 -135,0.33887001872062683 -136,0.33794543147087097 -137,0.33702516555786133 -138,0.33610907196998596 -139,0.3351990878582001 -140,0.33429402112960815 -141,0.3333960473537445 -142,0.33250322937965393 -143,0.3316167891025543 -144,0.3307357728481293 -145,0.3298594653606415 -146,0.32898885011672974 -147,0.32812219858169556 -148,0.32726117968559265 -149,0.32640406489372253 -150,0.32555174827575684 -151,0.3247039318084717 -152,0.32386043667793274 -153,0.32302114367485046 -154,0.32218554615974426 -155,0.32135432958602905 -156,0.3205272853374481 -157,0.3197043538093567 -158,0.3188854157924652 -159,0.3180709779262543 -160,0.3172609508037567 -161,0.3164548873901367 -162,0.31565481424331665 -163,0.3148590326309204 -164,0.3140682280063629 -165,0.3132818043231964 -166,0.3125007450580597 -167,0.31172433495521545 -168,0.3109533488750458 -169,0.3101876974105835 -170,0.30942684412002563 -171,0.308671772480011 -172,0.3079211115837097 -173,0.3071761727333069 -174,0.30643564462661743 
-175,0.30570074915885925 -176,0.3049699664115906 -177,0.30424395203590393 -178,0.30352240800857544 -179,0.302805095911026 -180,0.3020928204059601 -181,0.3013843894004822 -182,0.300680935382843 -183,0.29998138546943665 -184,0.2992863059043884 -185,0.2985955476760864 -186,0.2979087829589844 -187,0.2972266376018524 -188,0.296548455953598 -189,0.29587534070014954 -190,0.29520535469055176 -191,0.29454031586647034 -192,0.2938784658908844 -193,0.29321929812431335 -194,0.2925621271133423 -195,0.29190781712532043 -196,0.2912561297416687 -197,0.29060685634613037 -198,0.2899602949619293 -199,0.28931522369384766 -200,0.28867313265800476 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.100/training_config.txt deleted file mode 100644 index 22c2851..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.100/training_log.csv deleted file mode 100644 index d15dc45..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,277.01910400390625 -2,180.86083984375 
-3,46.946468353271484 -4,21.17747688293457 -5,14.27820873260498 -6,15.071479797363281 -7,13.948244094848633 -8,12.787303924560547 -9,12.44766616821289 -10,12.156209945678711 -11,11.871718406677246 -12,11.587263107299805 -13,11.242308616638184 -14,10.817015647888184 -15,10.276090621948242 -16,9.541985511779785 -17,8.457273483276367 -18,7.116146087646484 -19,5.97390079498291 -20,5.074666500091553 -21,4.52492094039917 -22,4.071018218994141 -23,3.662090301513672 -24,3.2765681743621826 -25,2.8823940753936768 -26,2.4517223834991455 -27,2.0517637729644775 -28,1.7540299892425537 -29,1.5191168785095215 -30,1.3138501644134521 -31,1.1262394189834595 -32,0.9600816965103149 -33,0.8379637002944946 -34,0.7723661065101624 -35,0.7380403280258179 -36,0.7148099541664124 -37,0.6961870789527893 -38,0.6818462014198303 -39,0.6736164093017578 -40,0.6726133227348328 -41,0.6774006485939026 -42,0.6848753094673157 -43,0.6921392679214478 -44,0.697174072265625 -45,0.6988070011138916 -46,0.6962583661079407 -47,0.6894923448562622 -48,0.6787523031234741 -49,0.66464763879776 -50,0.647799015045166 -51,0.6287952661514282 -52,0.6084194779396057 -53,0.5875419974327087 -54,0.5671365857124329 -55,0.5482311844825745 -56,0.5317052602767944 -57,0.5185105204582214 -58,0.509325385093689 -59,0.5041906833648682 -60,0.5024359226226807 -61,0.5028066635131836 -62,0.5038737654685974 -63,0.5044435858726501 -64,0.503728985786438 -65,0.501323401927948 -66,0.4971376955509186 -67,0.4914279580116272 -68,0.48459067940711975 -69,0.4771796464920044 -70,0.4699033498764038 -71,0.4633561372756958 -72,0.45802924036979675 -73,0.4541427493095398 -74,0.4516357481479645 -75,0.45032230019569397 -76,0.4498180150985718 -77,0.4497513771057129 -78,0.4497816562652588 -79,0.4496413767337799 -80,0.4491315186023712 -81,0.4481547474861145 -82,0.4466977119445801 -83,0.4448140859603882 -84,0.4426043629646301 -85,0.4401990473270416 -86,0.4377382695674896 -87,0.43535223603248596 -88,0.4331452548503876 -89,0.431175172328949 -90,0.42945396900177 -91,0.4279484748840332 -92,0.4265981912612915 -93,0.4253290891647339 -94,0.42407119274139404 -95,0.42276886105537415 -96,0.42139101028442383 -97,0.4199311435222626 -98,0.4184076189994812 -99,0.41685324907302856 -100,0.4153052568435669 -101,0.41380006074905396 -102,0.41235867142677307 -103,0.41098904609680176 -104,0.40968582034111023 -105,0.40842950344085693 -106,0.40719810128211975 -107,0.4059658944606781 -108,0.40471479296684265 -109,0.40343424677848816 -110,0.40212011337280273 -111,0.4007790684700012 -112,0.39942455291748047 -113,0.398070752620697 -114,0.3967323303222656 -115,0.39542004466056824 -116,0.3941406309604645 -117,0.3928946554660797 -118,0.3916759490966797 -119,0.39047715067863464 -120,0.38929101824760437 -121,0.38810983300209045 -122,0.3869295120239258 -123,0.3857499063014984 -124,0.3845733404159546 -125,0.38340482115745544 -126,0.38225236535072327 -127,0.38111260533332825 -128,0.3799848258495331 -129,0.37886926531791687 -130,0.37776562571525574 -131,0.3766706883907318 -132,0.3755834400653839 -133,0.37450283765792847 -134,0.37342581152915955 -135,0.37235310673713684 -136,0.3712857961654663 -137,0.37022337317466736 -138,0.36916765570640564 -139,0.36811941862106323 -140,0.3670785129070282 -141,0.36604517698287964 -142,0.3650183081626892 -143,0.363997220993042 -144,0.36298081278800964 -145,0.361968457698822 -146,0.3609600365161896 -147,0.35995519161224365 -148,0.3589547574520111 -149,0.3579595685005188 -150,0.3569692373275757 -151,0.35598519444465637 -152,0.3550071120262146 -153,0.3540344834327698 -154,0.3530677258968353 
-155,0.35210612416267395 -156,0.35115039348602295 -157,0.3502004146575928 -158,0.3492548167705536 -159,0.3483150601387024 -160,0.34738093614578247 -161,0.34645214676856995 -162,0.34552890062332153 -163,0.34461119771003723 -164,0.3436993956565857 -165,0.34279313683509827 -166,0.34189271926879883 -167,0.34099724888801575 -168,0.3401075005531311 -169,0.33922308683395386 -170,0.3383437395095825 -171,0.3374699056148529 -172,0.33660081028938293 -173,0.3357371389865875 -174,0.33487918972969055 -175,0.3340260982513428 -176,0.3331786096096039 -177,0.3323364555835724 -178,0.3314986228942871 -179,0.33066654205322266 -180,0.3298390507698059 -181,0.3290161192417145 -182,0.32819876074790955 -183,0.32738593220710754 -184,0.32657724618911743 -185,0.3257744014263153 -186,0.3249759078025818 -187,0.32418179512023926 -188,0.323393315076828 -189,0.3226090967655182 -190,0.32182902097702026 -191,0.32105445861816406 -192,0.3202838897705078 -193,0.319517582654953 -194,0.3187568783760071 -195,0.3179998993873596 -196,0.31724730134010315 -197,0.31649982929229736 -198,0.31575632095336914 -199,0.31501755118370056 -200,0.3142836093902588 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.125/training_config.txt deleted file mode 100644 index 34e7aa8..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 
12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.125/training_log.csv deleted file mode 100644 index 8ae6d83..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,267.4814453125 -2,57.83902359008789 -3,252.9473114013672 -4,12.946844100952148 -5,11.913137435913086 -6,13.949143409729004 -7,12.0337553024292 -8,9.561687469482422 -9,8.745820999145508 -10,8.153474807739258 -11,7.8320207595825195 -12,7.539123058319092 -13,7.254340171813965 -14,6.987481594085693 -15,6.734109401702881 -16,6.493349552154541 -17,6.2522406578063965 -18,6.011004447937012 -19,5.76345157623291 -20,5.504536151885986 -21,5.233809947967529 -22,4.95391845703125 -23,4.681879043579102 -24,4.4416279792785645 -25,4.224401950836182 -26,4.025761127471924 -27,3.8405532836914062 -28,3.6659860610961914 -29,3.4996449947357178 -30,3.3399102687835693 -31,3.185404062271118 -32,3.033684253692627 -33,2.883964776992798 -34,2.736629009246826 -35,2.591517448425293 -36,2.4504435062408447 -37,2.3172500133514404 -38,2.1983258724212646 -39,2.0958786010742188 -40,2.0048820972442627 -41,1.9221938848495483 -42,1.8462400436401367 -43,1.7760820388793945 -44,1.7106459140777588 -45,1.6487377882003784 -46,1.5896329879760742 -47,1.5326951742172241 -48,1.4771908521652222 -49,1.421705961227417 -50,1.364041805267334 -51,1.3059622049331665 -52,1.2468427419662476 -53,1.1861006021499634 -54,1.1245862245559692 -55,1.0631715059280396 -56,1.002364993095398 -57,0.9436951279640198 -58,0.8880720734596252 -59,0.8362874388694763 -60,0.7891390323638916 -61,0.747318685054779 -62,0.7112205624580383 -63,0.6806492209434509 -64,0.6546660661697388 -65,0.6321441531181335 -66,0.6118949055671692 -67,0.5928587317466736 -68,0.5744817852973938 -69,0.5565423369407654 -70,0.5390700697898865 -71,0.5222626328468323 -72,0.5065317153930664 -73,0.49229004979133606 -74,0.479591429233551 -75,0.46868035197257996 -76,0.45976096391677856 -77,0.4529275894165039 -78,0.4481281638145447 -79,0.44514816999435425 -80,0.44362935423851013 -81,0.4431321322917938 -82,0.44319066405296326 -83,0.44338133931159973 -84,0.4433561861515045 -85,0.44287100434303284 -86,0.4417940378189087 -87,0.4400920271873474 -88,0.4378122389316559 -89,0.4350588917732239 -90,0.431966632604599 -91,0.4286746382713318 -92,0.42530500888824463 -93,0.4219514727592468 -94,0.41867566108703613 -95,0.41550424695014954 -96,0.4124416410923004 -97,0.4094792306423187 -98,0.40660160779953003 -99,0.40378621220588684 -100,0.4010186791419983 -101,0.39831241965293884 -102,0.3956887125968933 -103,0.39318060874938965 -104,0.39081257581710815 -105,0.38858914375305176 -106,0.38651177287101746 -107,0.384573370218277 -108,0.3827604353427887 -109,0.38105180859565735 -110,0.37942180037498474 -111,0.3778438866138458 -112,0.37629464268684387 -113,0.374754399061203 -114,0.3732098340988159 -115,0.37165388464927673 -116,0.3700859546661377 -117,0.36850816011428833 -118,0.36692726612091064 -119,0.3653510808944702 -120,0.3637862801551819 -121,0.3622395694255829 -122,0.36071521043777466 -123,0.35921454429626465 -124,0.3577384650707245 -125,0.3562864363193512 -126,0.35485783219337463 -127,0.353451669216156 -128,0.3520672917366028 -129,0.3507064878940582 -130,0.3493700325489044 -131,0.34805911779403687 -132,0.3467726707458496 -133,0.3455108106136322 -134,0.34427401423454285 
-135,0.3430602252483368 -136,0.34186869859695435 -137,0.340696781873703 -138,0.3395419716835022 -139,0.33840444684028625 -140,0.33727917075157166 -141,0.3361656963825226 -142,0.33506593108177185 -143,0.33397814631462097 -144,0.3329032361507416 -145,0.3318406343460083 -146,0.3307892978191376 -147,0.3297520875930786 -148,0.3287273049354553 -149,0.327714741230011 -150,0.3267153203487396 -151,0.3257286548614502 -152,0.32475391030311584 -153,0.3237913250923157 -154,0.32283806800842285 -155,0.3218977153301239 -156,0.32096901535987854 -157,0.3200516104698181 -158,0.3191477358341217 -159,0.31825488805770874 -160,0.3173741102218628 -161,0.3165043890476227 -162,0.3156435489654541 -163,0.3147934675216675 -164,0.31395161151885986 -165,0.3131192922592163 -166,0.31229621171951294 -167,0.3114790916442871 -168,0.31067201495170593 -169,0.30987298488616943 -170,0.30908191204071045 -171,0.3082997500896454 -172,0.30752450227737427 -173,0.3067580759525299 -174,0.3059995174407959 -175,0.3052459955215454 -176,0.30450108647346497 -177,0.30376240611076355 -178,0.30303019285202026 -179,0.3023044764995575 -180,0.3015848398208618 -181,0.30087366700172424 -182,0.30016815662384033 -183,0.29946860671043396 -184,0.29877641797065735 -185,0.2980893850326538 -186,0.2974081039428711 -187,0.29673317074775696 -188,0.29606306552886963 -189,0.29539912939071655 -190,0.2947389483451843 -191,0.2940840721130371 -192,0.29343482851982117 -193,0.29278865456581116 -194,0.2921478748321533 -195,0.29151254892349243 -196,0.290880024433136 -197,0.2902514636516571 -198,0.28962647914886475 -199,0.2890037000179291 -200,0.2883843183517456 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.160/training_config.txt deleted file mode 100644 index 7e595b2..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 
1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.160/training_log.csv deleted file mode 100644 index 02a675e..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,282.5343933105469 -2,81936.40625 -3,704.7223510742188 -4,75.1961898803711 -5,195.85736083984375 -6,146.37332153320312 -7,141.4935302734375 -8,60.59278869628906 -9,30.31032371520996 -10,18.547534942626953 -11,15.132040023803711 -12,14.008065223693848 -13,13.254258155822754 -14,12.711091995239258 -15,12.22641658782959 -16,11.754427909851074 -17,11.316543579101562 -18,10.919061660766602 -19,10.56264591217041 -20,10.210345268249512 -21,9.900569915771484 -22,9.640413284301758 -23,9.394006729125977 -24,9.161917686462402 -25,8.931525230407715 -26,8.710038185119629 -27,8.500833511352539 -28,8.301114082336426 -29,8.107853889465332 -30,7.925414562225342 -31,7.7518110275268555 -32,7.578880786895752 -33,7.401764869689941 -34,7.223874092102051 -35,7.048198699951172 -36,6.870311260223389 -37,6.685948848724365 -38,6.493518352508545 -39,6.292201042175293 -40,6.083435535430908 -41,5.863582134246826 -42,5.632739543914795 -43,5.389942646026611 -44,5.131537437438965 -45,4.879828929901123 -46,4.648841857910156 -47,4.434505939483643 -48,4.236936569213867 -49,4.065516948699951 -50,3.914595127105713 -51,3.78149151802063 -52,3.6625075340270996 -53,3.554588794708252 -54,3.45548152923584 -55,3.363426446914673 -56,3.277376174926758 -57,3.196664333343506 -58,3.1204276084899902 -59,3.048051118850708 -60,2.9791085720062256 -61,2.9135308265686035 -62,2.8505759239196777 -63,2.7896971702575684 -64,2.730461359024048 -65,2.6724672317504883 -66,2.615272283554077 -67,2.5589654445648193 -68,2.5028467178344727 -69,2.4464988708496094 -70,2.3890938758850098 -71,2.3296666145324707 -72,2.267533302307129 -73,2.2016632556915283 -74,2.1296722888946533 -75,2.0498433113098145 -76,1.9610942602157593 -77,1.865779995918274 -78,1.770551085472107 -79,1.6807149648666382 -80,1.600847601890564 -81,1.5303136110305786 -82,1.4681917428970337 -83,1.4134773015975952 -84,1.3648968935012817 -85,1.3213130235671997 -86,1.2828843593597412 -87,1.2488080263137817 -88,1.2178987264633179 -89,1.187025785446167 -90,1.1572011709213257 -91,1.1293063163757324 -92,1.1032581329345703 -93,1.078959345817566 -94,1.0562694072723389 -95,1.0350526571273804 -96,1.0151242017745972 -97,0.996297299861908 -98,0.9784253239631653 -99,0.9614090919494629 -100,0.9451614022254944 -101,0.9296005964279175 -102,0.9146380424499512 -103,0.9001818299293518 -104,0.8861470818519592 -105,0.8725488185882568 -106,0.8593700528144836 -107,0.8465539813041687 -108,0.8339667916297913 -109,0.8215185403823853 -110,0.8093109130859375 -111,0.7974091172218323 -112,0.7858025431632996 -113,0.7744677066802979 -114,0.7633665800094604 -115,0.7524880170822144 -116,0.7418389916419983 
-117,0.7314122319221497 -118,0.7212250828742981 -119,0.7112820148468018 -120,0.701591432094574 -121,0.6921640634536743 -122,0.6830231547355652 -123,0.6741977334022522 -124,0.6657254099845886 -125,0.6576201319694519 -126,0.649903416633606 -127,0.6426231265068054 -128,0.6358056664466858 -129,0.629462480545044 -130,0.6235944032669067 -131,0.6182011365890503 -132,0.613273024559021 -133,0.6087920665740967 -134,0.6047348976135254 -135,0.601076066493988 -136,0.5977679491043091 -137,0.5947710275650024 -138,0.5920525789260864 -139,0.5895588994026184 -140,0.5872387886047363 -141,0.585040807723999 -142,0.5829035639762878 -143,0.5807514190673828 -144,0.5785495042800903 -145,0.5762696266174316 -146,0.573889970779419 -147,0.5714107155799866 -148,0.5688331723213196 -149,0.5661591291427612 -150,0.5633971095085144 -151,0.5605589747428894 -152,0.5576565861701965 -153,0.554696798324585 -154,0.5516954660415649 -155,0.5486614108085632 -156,0.5456073880195618 -157,0.5425439476966858 -158,0.5394729375839233 -159,0.5363909602165222 -160,0.5333336591720581 -161,0.5303324460983276 -162,0.5273727178573608 -163,0.5244660973548889 -164,0.5215974450111389 -165,0.518760621547699 -166,0.5159709453582764 -167,0.5133069753646851 -168,0.5109410881996155 -169,0.5086806416511536 -170,0.5062718987464905 -171,0.5035780072212219 -172,0.500204861164093 -173,0.495699405670166 -174,0.493967205286026 -175,0.4921104311943054 -176,0.4901411533355713 -177,0.48810285329818726 -178,0.4860207438468933 -179,0.4844428300857544 -180,0.4840221405029297 -181,0.483368843793869 -182,0.4816538989543915 -183,0.47908586263656616 -184,0.4763879179954529 -185,0.4742881655693054 -186,0.47297799587249756 -187,0.47182008624076843 -188,0.4705120325088501 -189,0.4690743088722229 -190,0.46749556064605713 -191,0.4657318592071533 -192,0.4637012183666229 -193,0.4618721604347229 -194,0.4604718089103699 -195,0.4591950476169586 -196,0.4579789638519287 -197,0.4567034840583801 -198,0.45528143644332886 -199,0.4537661671638489 -200,0.4522865414619446 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.200/training_config.txt deleted file mode 100644 index 228babe..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] 
-[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.200/training_log.csv deleted file mode 100644 index fa3b33a..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,241.42697143554688 -2,31.105365753173828 -3,44.58345031738281 -4,906.2789306640625 -5,16.477094650268555 -6,12.544605255126953 -7,12.428547859191895 -8,13.041348457336426 -9,12.97156810760498 -10,12.556950569152832 -11,11.979304313659668 -12,11.292213439941406 -13,10.554747581481934 -14,9.808772087097168 -15,9.110712051391602 -16,8.487351417541504 -17,7.944581031799316 -18,7.467973709106445 -19,7.005588054656982 -20,6.572394371032715 -21,6.166067600250244 -22,5.781716346740723 -23,5.411543369293213 -24,5.055382251739502 -25,4.709681510925293 -26,4.3810625076293945 -27,4.071853160858154 -28,3.7815723419189453 -29,3.5095624923706055 -30,3.255587339401245 -31,3.020904064178467 -32,2.8011860847473145 -33,2.595519542694092 -34,2.4032905101776123 -35,2.2239577770233154 -36,2.0572237968444824 -37,1.9027714729309082 -38,1.760204553604126 -39,1.628983974456787 -40,1.5086711645126343 -41,1.3989187479019165 -42,1.2987855672836304 -43,1.2076818943023682 -44,1.1248973608016968 -45,1.0501514673233032 -46,0.9828110933303833 -47,0.922360360622406 -48,0.8681997656822205 -49,0.8199650049209595 -50,0.7775169610977173 -51,0.7403843998908997 -52,0.708111047744751 -53,0.6802512407302856 -54,0.6563639640808105 -55,0.63603675365448 -56,0.6188451647758484 -57,0.604389488697052 -58,0.5922591686248779 -59,0.5822196006774902 -60,0.5738810896873474 -61,0.566902756690979 -62,0.5609488487243652 -63,0.5557751059532166 -64,0.551203191280365 -65,0.5470618009567261 -66,0.5431945323944092 -67,0.53951495885849 -68,0.5359542965888977 -69,0.5324420928955078 -70,0.528904914855957 -71,0.5253573656082153 -72,0.5218283534049988 -73,0.5183495879173279 -74,0.5149560570716858 -75,0.5116909742355347 -76,0.5085984468460083 -77,0.5057011842727661 -78,0.5030196905136108 -79,0.5005726218223572 -80,0.49836400151252747 -81,0.49638351798057556 -82,0.49462056159973145 -83,0.4930602014064789 -84,0.4916839599609375 -85,0.4904719591140747 -86,0.4894024431705475 -87,0.4884505569934845 -88,0.4875901937484741 -89,0.4867984354496002 -90,0.48605453968048096 -91,0.48533952236175537 -92,0.4846373498439789 -93,0.48393476009368896 -94,0.4832214117050171 -95,0.4824894666671753 -96,0.4817345142364502 -97,0.48095330595970154 -98,0.4801454246044159 
-99,0.479311466217041 -100,0.4784542918205261 -101,0.4775773286819458 -102,0.47668540477752686 -103,0.47578272223472595 -104,0.47487494349479675 -105,0.47396591305732727 -106,0.47306010127067566 -107,0.4721614122390747 -108,0.4712725579738617 -109,0.470397025346756 -110,0.4695364832878113 -111,0.4686923027038574 -112,0.46786460280418396 -113,0.46705344319343567 -114,0.4662591814994812 -115,0.46548065543174744 -116,0.46471622586250305 -117,0.46396404504776 -118,0.4632231891155243 -119,0.4624921381473541 -120,0.46176788210868835 -121,0.46104979515075684 -122,0.4603363275527954 -123,0.45962607860565186 -124,0.4589182734489441 -125,0.45821085572242737 -126,0.4575030207633972 -127,0.4567948281764984 -128,0.4560858905315399 -129,0.45537614822387695 -130,0.45466503500938416 -131,0.4539535641670227 -132,0.4532412588596344 -133,0.45252880454063416 -134,0.45181652903556824 -135,0.45110446214675903 -136,0.4503937363624573 -137,0.44968363642692566 -138,0.4489744305610657 -139,0.4482669234275818 -140,0.4475611746311188 -141,0.4468573331832886 -142,0.4461556375026703 -143,0.44545623660087585 -144,0.44475871324539185 -145,0.44406330585479736 -146,0.4433700740337372 -147,0.4426785409450531 -148,0.44198930263519287 -149,0.44130170345306396 -150,0.44061607122421265 -151,0.43993160128593445 -152,0.43924880027770996 -153,0.4385676383972168 -154,0.4378880560398102 -155,0.43721017241477966 -156,0.4365331530570984 -157,0.4358573853969574 -158,0.43518301844596863 -159,0.4345102608203888 -160,0.43383917212486267 -161,0.43316933512687683 -162,0.4325008988380432 -163,0.4318346083164215 -164,0.4311702847480774 -165,0.430507630109787 -166,0.42984738945961 -167,0.42918872833251953 -168,0.4285317063331604 -169,0.42787623405456543 -170,0.4272218942642212 -171,0.42656973004341125 -172,0.4259191155433655 -173,0.42526987195014954 -174,0.42462193965911865 -175,0.4239753186702728 -176,0.4233306050300598 -177,0.42268723249435425 -178,0.4220452904701233 -179,0.42140451073646545 -180,0.4207651913166046 -181,0.42012739181518555 -182,0.41949164867401123 -183,0.41885724663734436 -184,0.4182240664958954 -185,0.41759228706359863 -186,0.4169619083404541 -187,0.41633322834968567 -188,0.415706068277359 -189,0.4150806665420532 -190,0.4144562780857086 -191,0.413833349943161 -192,0.4132118821144104 -193,0.41259250044822693 -194,0.4119745194911957 -195,0.4113577902317047 -196,0.4107424020767212 -197,0.41012805700302124 -198,0.40951550006866455 -199,0.4089045524597168 -200,0.40829482674598694 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.250/training_config.txt deleted file mode 100644 index de95581..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - 
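Note: every deleted config in this sweep shares the same weighted loss and linear ramp shown above. As a standalone reference (a minimal sketch, not a file from this repo), the snippet below reproduces that scheme under the assumption of a [batch_size, t_points, state] trajectory layout; the configs' own weights.view(-1, 1) reshape would instead be consistent with an odeint-style [t_points, batch, state] layout, despite the "[batch_size, t_points]" size comments in the dumps.

    import torch

    def linear_weights(t_span, t_max=None, min_val=0.01):
        # Linear ramp from the configs: min_val at t = 0, rising to 1.0 at t = t_max.
        t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
        t_max = t_max if t_max is not None else t_span[-1]
        return min_val + ((1.0 - min_val) / t_max) * t_span

    def weighted_theta_loss(state_traj, t_span):
        # Assumes state_traj is [batch_size, t_points, state]; channel 0 is theta
        # and channel 3 is desired_theta, matching the configs in this sweep.
        theta = state_traj[:, :, 0]
        desired_theta = state_traj[:, :, 3]
        w = linear_weights(t_span).view(1, -1)  # [1, t_points]; broadcasts over the batch
        return torch.mean(w * (theta - desired_theta) ** 2)

    # Smoke test: 2 trajectories, 5 time points, 4 state channels, constant 1 rad error.
    t_span = torch.linspace(0.0, 10.0, 5)
    traj = torch.zeros(2, 5, 4)
    traj[:, :, 0] = 1.0
    print(weighted_theta_loss(traj, t_span))  # ~0.505, the mean of the ramp weights

Because late-time errors are weighted up to 100x more than early-time errors, this loss tolerates a slow transient but penalizes steady-state offset, which is consistent with the sweep's logs: moderate learning rates settle to a small plateau, while rates of 0.25 and above overshoot and diverge to nan or the saturation value 166574.5.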
-Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.250/training_log.csv deleted file mode 100644 index 25b141b..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.250/training_log.csv +++ /dev/null @@ -1,58 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,292.2798156738281 -2,18.3397216796875 -3,19.458616256713867 -4,17.356569290161133 -5,16.059816360473633 -6,14.426283836364746 -7,12.212666511535645 -8,10.338387489318848 -9,8.988585472106934 -10,7.937220096588135 -11,7.315408229827881 -12,6.946380138397217 -13,6.646531105041504 -14,6.2454657554626465 -15,5.704675197601318 -16,5.099422454833984 -17,4.533779621124268 -18,4.046952247619629 -19,3.643604040145874 -20,3.290754556655884 -21,2.987203598022461 -22,2.7202653884887695 -23,2.4763214588165283 -24,2.240776300430298 -25,2.0144262313842773 -26,1.7974112033843994 -27,1.5917688608169556 -28,1.4026336669921875 -29,1.2382076978683472 -30,1.0937665700912476 -31,0.9645845293998718 -32,0.8514353632926941 -33,0.7539744973182678 -34,0.6697495579719543 -35,0.5974939465522766 -36,0.5368427038192749 -37,0.48736146092414856 -38,0.4483185410499573 -39,0.4191522002220154 -40,0.3988633155822754 -41,0.3859093487262726 -42,0.37941157817840576 -43,0.3787138760089874 -44,0.37590670585632324 -45,0.3739044666290283 -46,0.5653774738311768 -47,0.7424663305282593 -48,14.425512313842773 -49,190.6630401611328 -50,1066.1842041015625 -51,229.60406494140625 -52,36.52690887451172 -53,nan -54,nan -55,nan -56,nan diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.300/training_config.txt deleted file mode 100644 index 1969f94..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def 
loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.300/training_log.csv deleted file mode 100644 index e01c3b6..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.300/training_log.csv +++ /dev/null @@ -1,26 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,352.04669189453125 -2,20.37812042236328 -3,22.809329986572266 -4,23.577417373657227 -5,23.361003875732422 -6,22.81104850769043 -7,22.046096801757812 -8,21.20140838623047 -9,20.28021812438965 -10,19.286937713623047 -11,18.248350143432617 -12,17.288070678710938 -13,16.395339965820312 -14,15.574531555175781 -15,14.808667182922363 -16,11.599696159362793 -17,11.070761680603027 -18,2104.847900390625 -19,166574.5 -20,166574.5 -21,166574.5 -22,166574.5 -23,166574.5 -24,166574.5 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.400/training_config.txt deleted file mode 100644 index 8d0d8a0..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range 
[min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.400/training_log.csv deleted file mode 100644 index 7b74172..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.400/training_log.csv +++ /dev/null @@ -1,17 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,1922.4442138671875 -2,20.176345825195312 -3,19.76601791381836 -4,18.43532943725586 -5,15.609777450561523 -6,18.27033233642578 -7,16.567707061767578 -8,15.500636100769043 -9,13.74911117553711 -10,12.890634536743164 -11,12.645033836364746 -12,nan -13,nan -14,nan -15,nan diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.500/training_config.txt deleted file mode 100644 index d3048fa..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, 
min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.500/training_log.csv deleted file mode 100644 index 6836390..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.500/training_log.csv +++ /dev/null @@ -1,14 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,29826.1875 -2,162240.03125 -3,136354.84375 -4,46661.79296875 -5,1070.3817138671875 -6,70.7809066772461 -7,41.843894958496094 -8,36.959983825683594 -9,nan -10,nan -11,nan -12,nan diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.600/training_config.txt deleted file mode 100644 index 31aaa72..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, 
-1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.600/training_log.csv deleted file mode 100644 index 9d16482..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.600/training_log.csv +++ /dev/null @@ -1,11 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,84248.1796875 -2,166294.21875 -3,166571.734375 -4,166574.5 -5,166574.5 -6,166574.5 -7,166574.5 -8,166574.5 -9,166574.5 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.700/training_config.txt deleted file mode 100644 index 00d6c58..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, 
-1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.700/training_log.csv deleted file mode 100644 index 8041b41..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,126080.2109375 -2,166556.671875 -3,166574.5 -4,166574.5 -5,166574.5 -6,166574.5 -7,166574.5 -8,166574.5 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.800/training_config.txt deleted file mode 100644 index 5cdafbb..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.800/training_log.csv deleted file mode 100644 index 5accfbe..0000000 --- 
a/training/time_weighting_learning_rate_sweep/linear/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,143977.234375 -2,166574.5 -3,166574.5 -4,166574.5 -5,166574.5 -6,166574.5 -7,166574.5 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_0.900/training_config.txt deleted file mode 100644 index 5a56fec..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_0.900/training_log.csv deleted file mode 100644 index eecdfed..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,155680.65625 -2,166574.5 -3,166574.5 -4,166574.5 -5,166574.5 -6,166574.5 -7,166574.5 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_1.000/training_config.txt deleted file mode 100644 index 4810c1f..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_1.000/training_log.csv deleted file mode 100644 index 9faa1fa..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,160731.71875 -2,166574.5 -3,166574.5 -4,166574.5 -5,166574.5 -6,166574.5 -7,166574.5 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_16.000/training_config.txt deleted file mode 100644 index d0551ee..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, 
t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_16.000/training_log.csv deleted file mode 100644 index 622506a..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,167107.078125 -2,167107.078125 -3,167107.078125 -4,167107.078125 -5,167107.078125 -6,167107.078125 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_2.000/training_config.txt deleted file mode 100644 index c21ffd7..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] 
-[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_2.000/training_log.csv deleted file mode 100644 index bfe36a2..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,167078.453125 -2,4.431153297424316 -3,nan -4,nan -5,nan -6,nan diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_4.000/training_config.txt deleted file mode 100644 index 3282f28..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] 
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_4.000/training_log.csv deleted file mode 100644 index 622506a..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,167107.078125 -2,167107.078125 -3,167107.078125 -4,167107.078125 -5,167107.078125 -6,167107.078125 diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/linear/lr_8.000/training_config.txt deleted file mode 100644 index c3f1f1e..0000000 --- a/training/time_weighting_learning_rate_sweep/linear/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * t_span**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/linear/lr_8.000/training_log.csv deleted file mode 100644 index 622506a..0000000 --- 
a/training/time_weighting_learning_rate_sweep/linear/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,848.658935546875 -1,167107.078125 -2,167107.078125 -3,167107.078125 -4,167107.078125 -5,167107.078125 -6,167107.078125 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.010/training_config.txt deleted file mode 100644 index 268bd09..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.010/training_log.csv deleted file mode 100644 index 28386b4..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,226.0415496826172 -2,197.2387237548828 -3,178.2758026123047 -4,173.6763153076172 -5,158.02955627441406 -6,139.63682556152344 -7,125.52545166015625 -8,108.300048828125 -9,93.97494506835938 -10,78.72343444824219 -11,73.76118469238281 -12,60.26469421386719 -13,53.04522705078125 -14,49.697265625 -15,43.435523986816406 -16,39.3493766784668 -17,35.1627311706543 
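The two weighting schemes swept above differ only in the direction of the time ramp: linear weights late trajectory points most heavily, while linear_mirrored (whose runs begin here) weights early points most heavily. Below is a minimal standalone sketch of both ramps, equivalent to the functions recorded in the configs but with the type annotations and the redundant **1 exponents dropped; torch is the only dependency, and the demo tensor mirrors the configs' "Time Span: 0 to 10, Points: 1000".

import torch

def linear(t_span, t_max=None, min_val=0.01):
    # Ramp from min_val at t = 0 up to 1.0 at t = t_max.
    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
    t_max = t_max if t_max is not None else t_span[-1]
    return min_val + ((1 - min_val) / t_max) * t_span

def linear_mirrored(t_span, t_max=None, min_val=0.01):
    # The same ramp reflected in time: 1.0 at t = 0 down to min_val at t = t_max,
    # i.e. linear_mirrored(t) == linear(t_max - t).
    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
    t_max = t_max if t_max is not None else t_span[-1]
    return min_val + ((1 - min_val) / t_max) * (t_max - t_span)

t = torch.linspace(0, 10, 1000)
w_late, w_early = linear(t), linear_mirrored(t)
# The two ramps are mirror images, so pointwise they sum to 1 + min_val.
assert torch.allclose(w_late + w_early, torch.full_like(t, 1.01))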
-18,32.707794189453125 -19,29.73851203918457 -20,27.852968215942383 -21,25.85087776184082 -22,23.811603546142578 -23,19.843202590942383 -24,18.469219207763672 -25,17.30427360534668 -26,16.52859115600586 -27,14.801562309265137 -28,14.091103553771973 -29,13.436502456665039 -30,12.533130645751953 -31,11.755898475646973 -32,11.324661254882812 -33,11.00821590423584 -34,10.816542625427246 -35,10.560376167297363 -36,10.338624000549316 -37,10.204343795776367 -38,9.820556640625 -39,9.4232177734375 -40,9.205160140991211 -41,9.054852485656738 -42,8.93899917602539 -43,8.845490455627441 -44,8.698594093322754 -45,8.625725746154785 -46,8.554396629333496 -47,8.487860679626465 -48,8.425700187683105 -49,8.367027282714844 -50,8.313651084899902 -51,8.270990371704102 -52,8.24176025390625 -53,8.219162940979004 -54,8.181413650512695 -55,8.125813484191895 -56,8.071531295776367 -57,8.017513275146484 -58,7.964171886444092 -59,7.9278130531311035 -60,7.892172813415527 -61,7.853733539581299 -62,7.8161540031433105 -63,7.786566257476807 -64,7.75718879699707 -65,7.7256293296813965 -66,7.688238143920898 -67,7.64783239364624 -68,7.636955738067627 -69,7.6161980628967285 -70,7.5903706550598145 -71,7.557775497436523 -72,7.533744812011719 -73,7.523024082183838 -74,7.506453037261963 -75,7.487668514251709 -76,7.468721389770508 -77,7.451624393463135 -78,7.434808254241943 -79,7.415309429168701 -80,7.392673492431641 -81,7.3651933670043945 -82,7.324393272399902 -83,7.3014936447143555 -84,7.3194169998168945 -85,7.305680274963379 -86,7.285172462463379 -87,7.254552364349365 -88,7.220450401306152 -89,7.228973865509033 -90,7.250252723693848 -91,7.267425537109375 -92,7.254531383514404 -93,7.24343729019165 -94,7.225031852722168 -95,7.182167053222656 -96,7.132328987121582 -97,7.131843090057373 -98,7.121731758117676 -99,7.106603145599365 -100,7.088785648345947 -101,7.088797569274902 -102,7.08595085144043 -103,7.0839738845825195 -104,7.069638729095459 -105,7.062694549560547 -106,7.049798488616943 -107,7.0174126625061035 -108,7.019355773925781 -109,7.00481653213501 -110,6.982387065887451 -111,6.936489582061768 -112,6.960066795349121 -113,6.95684814453125 -114,6.947279930114746 -115,6.93328857421875 -116,6.915172576904297 -117,6.891733169555664 -118,6.854081630706787 -119,6.84824275970459 -120,6.830296039581299 -121,6.80463981628418 -122,6.773972511291504 -123,6.726198196411133 -124,6.726227760314941 -125,6.696313858032227 -126,6.66127347946167 -127,6.644524097442627 -128,6.6316752433776855 -129,6.591607093811035 -130,6.54453706741333 -131,6.515999794006348 -132,6.500110626220703 -133,6.474952220916748 -134,6.445173263549805 -135,6.419588088989258 -136,6.403942584991455 -137,6.384757995605469 -138,6.351598739624023 -139,6.311211109161377 -140,6.280110836029053 -141,6.24654483795166 -142,6.223101615905762 -143,6.196127891540527 -144,6.164536952972412 -145,6.12553071975708 -146,6.075126647949219 -147,6.008592128753662 -148,5.919406890869141 -149,5.827272415161133 -150,5.749283313751221 -151,5.718448638916016 -152,5.6668267250061035 -153,5.607542514801025 -154,5.543971061706543 -155,5.555872917175293 -156,5.523488998413086 -157,5.484347343444824 -158,5.457149982452393 -159,5.413623809814453 -160,5.358579635620117 -161,5.430988311767578 -162,5.428046226501465 -163,5.403169631958008 -164,5.358337879180908 -165,5.317689418792725 -166,5.280011177062988 -167,5.201272487640381 -168,5.173569679260254 -169,5.1147613525390625 -170,5.0577569007873535 -171,5.02606725692749 -172,4.965428352355957 -173,4.91507625579834 -174,4.8895392417907715 
-175,4.831740379333496 -176,4.745660781860352 -177,4.671154499053955 -178,4.626039981842041 -179,4.601766109466553 -180,4.593398571014404 -181,4.582977294921875 -182,4.559873580932617 -183,4.52700662612915 -184,4.496830463409424 -185,4.451750755310059 -186,4.406407833099365 -187,4.4066362380981445 -188,4.391368389129639 -189,4.370344638824463 -190,4.3429036140441895 -191,4.312570571899414 -192,4.296023845672607 -193,4.277052879333496 -194,4.254538536071777 -195,4.234200477600098 -196,4.211484909057617 -197,4.185166358947754 -198,4.214677333831787 -199,4.216820240020752 -200,4.208740234375 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.020/training_config.txt deleted file mode 100644 index b447067..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.020/training_log.csv deleted file mode 100644 index 094decf..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,250.16981506347656 
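One detail of the recorded loss_fn is easy to misread: its comments label theta as [batch_size, t_points], yet weights.view(-1, 1) yields a [t_points, 1] column, which broadcasts against theta only if the trajectory is stored time-major. The sketch below assumes state_traj has shape [t_points, batch_size, state_dim], the layout torchdiffeq's odeint returns (whether this repo integrates with torchdiffeq is an assumption, and the shapes and names here are illustrative). Under that layout each time step gets one weight, shared across the batch:

import torch

t_points, batch_size, state_dim = 1000, 22, 4  # 22 matches the training-case count
t_span = torch.linspace(0, 10, t_points)
state_traj = torch.randn(t_points, batch_size, state_dim)  # placeholder trajectory

theta = state_traj[:, :, 0]          # [t_points, batch_size]
desired_theta = state_traj[:, :, 3]  # [t_points, batch_size]

weights = 0.01 + (0.99 / 10.0) * (10.0 - t_span)  # the linear_mirrored ramp, [t_points]
weights = weights.view(-1, 1)                     # [t_points, 1]: one weight per time step

# [t_points, 1] * [t_points, batch_size] broadcasts along the batch dimension,
# so every trajectory in the batch sees the same time weighting.
loss = torch.mean(weights * (theta - desired_theta) ** 2)
assert loss.dim() == 0  # scalar, as the optimizer expects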
-1,213.2602081298828 -2,165.03121948242188 -3,122.03816986083984 -4,85.23294067382812 -5,59.68912887573242 -6,52.037654876708984 -7,45.42677688598633 -8,38.5277214050293 -9,29.579437255859375 -10,24.394468307495117 -11,20.293716430664062 -12,17.041677474975586 -13,15.915730476379395 -14,14.806083679199219 -15,14.001134872436523 -16,12.49098014831543 -17,11.341317176818848 -18,9.848942756652832 -19,9.188645362854004 -20,8.603843688964844 -21,8.016204833984375 -22,7.541904926300049 -23,7.439103603363037 -24,7.252079010009766 -25,7.092203140258789 -26,6.856754302978516 -27,6.2946553230285645 -28,6.148245811462402 -29,6.016469955444336 -30,5.895207405090332 -31,5.780801296234131 -32,5.670100212097168 -33,5.560122013092041 -34,5.443406105041504 -35,5.295340061187744 -36,5.201289653778076 -37,5.107529640197754 -38,5.011250019073486 -39,4.905561923980713 -40,4.751293182373047 -41,4.666775703430176 -42,4.5824174880981445 -43,4.495879173278809 -44,4.414167404174805 -45,4.3402910232543945 -46,4.2826337814331055 -47,4.258297443389893 -48,4.226583480834961 -49,4.185098171234131 -50,4.137001037597656 -51,4.082268238067627 -52,4.01715612411499 -53,3.9509389400482178 -54,3.906024932861328 -55,3.8931620121002197 -56,3.864523410797119 -57,3.853398323059082 -58,3.8350913524627686 -59,3.80031418800354 -60,3.7808003425598145 -61,3.7454535961151123 -62,3.7422025203704834 -63,3.715747833251953 -64,3.675211191177368 -65,3.616716146469116 -66,3.570237874984741 -67,3.5409908294677734 -68,3.5142405033111572 -69,3.503190279006958 -70,3.4793572425842285 -71,3.4546892642974854 -72,3.4286139011383057 -73,3.4176039695739746 -74,3.3944432735443115 -75,3.361698865890503 -76,3.339509963989258 -77,3.3160364627838135 -78,3.2967872619628906 -79,3.267470121383667 -80,3.2460830211639404 -81,3.23118257522583 -82,3.212801456451416 -83,3.187382698059082 -84,3.1707053184509277 -85,3.1515696048736572 -86,3.1346583366394043 -87,3.1109619140625 -88,3.0914413928985596 -89,3.0712482929229736 -90,3.055351972579956 -91,3.037804365158081 -92,3.016878604888916 -93,2.995676040649414 -94,2.976057767868042 -95,2.9544153213500977 -96,2.931025981903076 -97,2.9080920219421387 -98,2.8906643390655518 -99,2.873629331588745 -100,2.8569700717926025 -101,2.837078332901001 -102,2.819228410720825 -103,2.8033993244171143 -104,2.788250684738159 -105,2.7728137969970703 -106,2.7569663524627686 -107,2.741225242614746 -108,2.7256979942321777 -109,2.7108967304229736 -110,2.6991024017333984 -111,2.6878044605255127 -112,2.6764702796936035 -113,2.6648144721984863 -114,2.6546082496643066 -115,2.645092487335205 -116,2.635842800140381 -117,2.6268482208251953 -118,2.61807918548584 -119,2.6094915866851807 -120,2.6014111042022705 -121,2.593950033187866 -122,2.586794853210449 -123,2.5800392627716064 -124,2.5733513832092285 -125,2.56697154045105 -126,2.5611252784729004 -127,2.555795192718506 -128,2.551081657409668 -129,2.546800374984741 -130,2.5428643226623535 -131,2.5392801761627197 -132,2.5359504222869873 -133,2.532853126525879 -134,2.5300261974334717 -135,2.5273962020874023 -136,2.5249416828155518 -137,2.522676944732666 -138,2.5205321311950684 -139,2.5185012817382812 -140,2.5165951251983643 -141,2.5147738456726074 -142,2.5130457878112793 -143,2.511413097381592 -144,2.509850025177002 -145,2.5083649158477783 -146,2.506953477859497 -147,2.5055954456329346 -148,2.50429368019104 -149,2.503045082092285 -150,2.501833915710449 -151,2.5006628036499023 -152,2.499533176422119 -153,2.498436450958252 -154,2.4973738193511963 -155,2.4963457584381104 -156,2.4953453540802 
-157,2.4943721294403076 -158,2.493427276611328 -159,2.4925050735473633 -160,2.491603374481201 -161,2.4907233715057373 -162,2.489863395690918 -163,2.4890189170837402 -164,2.4881932735443115 -165,2.4873850345611572 -166,2.4865922927856445 -167,2.4858148097991943 -168,2.4850523471832275 -169,2.484304189682007 -170,2.4835667610168457 -171,2.482842206954956 -172,2.4821298122406006 -173,2.481428861618042 -174,2.480739116668701 -175,2.48006010055542 -176,2.479391098022461 -177,2.478731155395508 -178,2.4780802726745605 -179,2.477438449859619 -180,2.4768049716949463 -181,2.4761810302734375 -182,2.4755661487579346 -183,2.4749596118927 -184,2.47436261177063 -185,2.47377347946167 -186,2.4731922149658203 -187,2.472618341445923 -188,2.4720518589019775 -189,2.4714934825897217 -190,2.4709410667419434 -191,2.4703946113586426 -192,2.4698545932769775 -193,2.469320774078369 -194,2.4687933921813965 -195,2.468271017074585 -196,2.4677538871765137 -197,2.4672422409057617 -198,2.466735601425171 -199,2.4662351608276367 -200,2.4657399654388428 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.040/training_config.txt deleted file mode 100644 index 08cb717..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 
3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.040/training_log.csv deleted file mode 100644 index 1caae35..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,188.4126434326172 -2,100.1407699584961 -3,53.49800109863281 -4,39.872188568115234 -5,26.986865997314453 -6,19.623342514038086 -7,16.20302963256836 -8,13.249744415283203 -9,11.64409065246582 -10,10.230192184448242 -11,9.6668701171875 -12,9.67353630065918 -13,9.439249992370605 -14,7.897523403167725 -15,7.060725688934326 -16,6.561639308929443 -17,6.161434650421143 -18,5.815489292144775 -19,5.4752068519592285 -20,4.9327192306518555 -21,4.734984874725342 -22,4.554983139038086 -23,4.339447975158691 -24,4.207964897155762 -25,4.090985298156738 -26,3.9815547466278076 -27,3.877100944519043 -28,3.775721311569214 -29,3.676292896270752 -30,3.576792001724243 -31,3.4732606410980225 -32,3.3926339149475098 -33,3.3300304412841797 -34,3.2762179374694824 -35,3.2209866046905518 -36,3.172370672225952 -37,3.134460926055908 -38,3.101698160171509 -39,3.0676259994506836 -40,3.032180070877075 -41,2.998706340789795 -42,2.9671056270599365 -43,2.9347119331359863 -44,2.900970458984375 -45,2.865792989730835 -46,2.8294126987457275 -47,2.7927298545837402 -48,2.7574150562286377 -49,2.7233500480651855 -50,2.6940085887908936 -51,2.672377824783325 -52,2.655463933944702 -53,2.6417033672332764 -54,2.629901647567749 -55,2.6201374530792236 -56,2.6121890544891357 -57,2.6057944297790527 -58,2.6001105308532715 -59,2.5944149494171143 -60,2.5883829593658447 -61,2.582052230834961 -62,2.5756869316101074 -63,2.5695507526397705 -64,2.5638203620910645 -65,2.558509588241577 -66,2.553586959838867 -67,2.5489509105682373 -68,2.5444791316986084 -69,2.5400729179382324 -70,2.535695791244507 -71,2.5313563346862793 -72,2.5270378589630127 -73,2.5227243900299072 -74,2.518423318862915 -75,2.5142152309417725 -76,2.5101277828216553 -77,2.5062403678894043 -78,2.502612352371216 -79,2.4992470741271973 -80,2.496171474456787 -81,2.4933836460113525 -82,2.4908435344696045 -83,2.488499879837036 -84,2.4863150119781494 -85,2.4842827320098877 -86,2.482415199279785 -87,2.4806971549987793 -88,2.4791319370269775 -89,2.477696657180786 -90,2.4763505458831787 -91,2.4750499725341797 -92,2.4737558364868164 -93,2.4724514484405518 -94,2.471139907836914 -95,2.4698362350463867 -96,2.4685542583465576 -97,2.4673051834106445 -98,2.4660909175872803 -99,2.464911699295044 -100,2.4637680053710938 -101,2.4626567363739014 -102,2.461580514907837 -103,2.460545301437378 -104,2.4595534801483154 -105,2.4586069583892822 -106,2.45770001411438 -107,2.4568326473236084 -108,2.456000328063965 -109,2.4551985263824463 -110,2.454416275024414 -111,2.453651189804077 -112,2.45290207862854 -113,2.4521677494049072 -114,2.4514482021331787 -115,2.4507436752319336 -116,2.450054168701172 -117,2.4493789672851562 -118,2.4487156867980957 -119,2.448065757751465 -120,2.447427988052368 -121,2.4468014240264893 -122,2.4461867809295654 -123,2.4455931186676025 -124,2.4450161457061768 -125,2.444453716278076 -126,2.4439046382904053 -127,2.443366289138794 -128,2.4428391456604004 -129,2.4423224925994873 -130,2.44181489944458 -131,2.441317558288574 -132,2.440828561782837 -133,2.4403483867645264 -134,2.4398772716522217 -135,2.4394125938415527 -136,2.4389541149139404 -137,2.4385011196136475 
-138,2.4380533695220947 -139,2.4376115798950195 -140,2.4371745586395264 -141,2.43674373626709 -142,2.436319351196289 -143,2.435901165008545 -144,2.435488224029541 -145,2.435082197189331 -146,2.4346816539764404 -147,2.4342854022979736 -148,2.4338948726654053 -149,2.43350887298584 -150,2.433129072189331 -151,2.4327552318573 -152,2.4323878288269043 -153,2.4320271015167236 -154,2.431671142578125 -155,2.4313204288482666 -156,2.430974006652832 -157,2.430631637573242 -158,2.4302921295166016 -159,2.4299561977386475 -160,2.429624557495117 -161,2.429295539855957 -162,2.4289703369140625 -163,2.428647756576538 -164,2.428327798843384 -165,2.428010940551758 -166,2.427696943283081 -167,2.4273862838745117 -168,2.4270787239074707 -169,2.426774263381958 -170,2.4264721870422363 -171,2.4261724948883057 -172,2.4258761405944824 -173,2.425581932067871 -174,2.4252893924713135 -175,2.424999713897705 -176,2.4247124195098877 -177,2.4244272708892822 -178,2.4241442680358887 -179,2.423863172531128 -180,2.423583745956421 -181,2.423306703567505 -182,2.4230313301086426 -183,2.422757863998413 -184,2.4224860668182373 -185,2.4222164154052734 -186,2.421947956085205 -187,2.4216818809509277 -188,2.4214186668395996 -189,2.421157121658325 -190,2.420898199081421 -191,2.4206414222717285 -192,2.4203875064849854 -193,2.420135974884033 -194,2.4198856353759766 -195,2.419637441635132 -196,2.4193906784057617 -197,2.4191458225250244 -198,2.4189021587371826 -199,2.4186601638793945 -200,2.418419599533081 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.050/training_config.txt deleted file mode 100644 index 1bbbdae..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 
3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.050/training_log.csv deleted file mode 100644 index e9d8d9f..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,177.5484161376953 -2,75.67129516601562 -3,48.331581115722656 -4,29.840576171875 -5,18.89854621887207 -6,14.566293716430664 -7,10.937389373779297 -8,8.684791564941406 -9,7.207244873046875 -10,6.439456462860107 -11,5.952755451202393 -12,5.598975658416748 -13,5.22377347946167 -14,4.950442314147949 -15,4.713745594024658 -16,4.4962158203125 -17,4.282252311706543 -18,4.13154411315918 -19,3.9876415729522705 -20,3.8472652435302734 -21,3.7536253929138184 -22,3.6826882362365723 -23,3.6103572845458984 -24,3.535839557647705 -25,3.4601712226867676 -26,3.383955717086792 -27,3.307387113571167 -28,3.231977939605713 -29,3.1606335639953613 -30,3.100270986557007 -31,3.0602850914001465 -32,3.0345489978790283 -33,3.0125160217285156 -34,2.989884614944458 -35,2.965646743774414 -36,2.939624786376953 -37,2.9119629859924316 -38,2.8829026222229004 -39,2.852640151977539 -40,2.8214008808135986 -41,2.789710283279419 -42,2.7582740783691406 -43,2.728090524673462 -44,2.7002971172332764 -45,2.6761512756347656 -46,2.656055450439453 -47,2.639045238494873 -48,2.623781681060791 -49,2.6090381145477295 -50,2.594036817550659 -51,2.5785975456237793 -52,2.5635297298431396 -53,2.5490708351135254 -54,2.5352895259857178 -55,2.522486448287964 -56,2.5110459327697754 -57,2.5013787746429443 -58,2.4937074184417725 -59,2.4881396293640137 -60,2.484534502029419 -61,2.482591390609741 -62,2.481966018676758 -63,2.4821887016296387 -64,2.4828269481658936 -65,2.4834835529327393 -66,2.483833074569702 -67,2.4837048053741455 -68,2.483063220977783 -69,2.481938362121582 -70,2.48038387298584 -71,2.478485107421875 -72,2.476341724395752 -73,2.474055290222168 -74,2.471695899963379 -75,2.4693150520324707 -76,2.466928482055664 -77,2.4645836353302 -78,2.462329626083374 -79,2.4601662158966064 -80,2.4581496715545654 -81,2.4563148021698 -82,2.4546825885772705 -83,2.4532806873321533 -84,2.452070474624634 -85,2.4510204792022705 -86,2.4500794410705566 -87,2.449208974838257 -88,2.4483768939971924 -89,2.4475672245025635 -90,2.446777820587158 -91,2.4460041522979736 -92,2.4452481269836426 -93,2.444514036178589 -94,2.443798065185547 -95,2.443091869354248 -96,2.4423882961273193 -97,2.441682815551758 -98,2.440972089767456 -99,2.440255880355835 -100,2.4395370483398438 -101,2.4388184547424316 -102,2.4381067752838135 -103,2.437407970428467 -104,2.4367289543151855 -105,2.4360761642456055 -106,2.435453414916992 -107,2.4348669052124023 -108,2.434316396713257 -109,2.4337947368621826 -110,2.4332988262176514 -111,2.432826042175293 -112,2.4323649406433105 -113,2.431910991668701 -114,2.431459665298462 -115,2.4310054779052734 -116,2.4305477142333984 -117,2.430086374282837 -118,2.429622173309326 
-119,2.4291586875915527 -120,2.4286978244781494 -121,2.4282405376434326 -122,2.427791118621826 -123,2.427351474761963 -124,2.42692232131958 -125,2.4265031814575195 -126,2.4260926246643066 -127,2.425692558288574 -128,2.425299882888794 -129,2.4249134063720703 -130,2.424530506134033 -131,2.4241511821746826 -132,2.423774242401123 -133,2.423400402069092 -134,2.423027992248535 -135,2.4226579666137695 -136,2.4222898483276367 -137,2.4219253063201904 -138,2.421562433242798 -139,2.421204090118408 -140,2.420849323272705 -141,2.4204981327056885 -142,2.4201507568359375 -143,2.419807195663452 -144,2.4194672107696533 -145,2.419130563735962 -146,2.418797254562378 -147,2.418466091156006 -148,2.418137311935425 -149,2.4178106784820557 -150,2.4174864292144775 -151,2.4171643257141113 -152,2.416849374771118 -153,2.41654109954834 -154,2.4162373542785645 -155,2.4159374237060547 -156,2.4156410694122314 -157,2.4153478145599365 -158,2.4150569438934326 -159,2.414768934249878 -160,2.414482831954956 -161,2.414200782775879 -162,2.4139232635498047 -163,2.413649559020996 -164,2.413379192352295 -165,2.413111448287964 -166,2.4128470420837402 -167,2.4125852584838867 -168,2.4123260974884033 -169,2.412071704864502 -170,2.4118213653564453 -171,2.4115753173828125 -172,2.4113333225250244 -173,2.411095380783081 -174,2.4108598232269287 -175,2.410628080368042 -176,2.4103994369506836 -177,2.4101741313934326 -178,2.4099531173706055 -179,2.4097349643707275 -180,2.4095191955566406 -181,2.4093053340911865 -182,2.4090940952301025 -183,2.4088852405548096 -184,2.408679246902466 -185,2.4084746837615967 -186,2.4082727432250977 -187,2.4080727100372314 -188,2.407874345779419 -189,2.407677173614502 -190,2.4074816703796387 -191,2.4072868824005127 -192,2.4070940017700195 -193,2.4069013595581055 -194,2.406710147857666 -195,2.4065206050872803 -196,2.406332492828369 -197,2.4061458110809326 -198,2.40596079826355 -199,2.4057769775390625 -200,2.4055938720703125 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.080/training_config.txt deleted file mode 100644 index e15e368..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 
0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.080/training_log.csv deleted file mode 100644 index 8d04701..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,146.41900634765625 -2,49.06715774536133 -3,29.240238189697266 -4,16.19744300842285 -5,9.637324333190918 -6,7.662399768829346 -7,6.516317367553711 -8,5.9663543701171875 -9,5.46151876449585 -10,4.964850425720215 -11,4.610069751739502 -12,4.248111724853516 -13,3.990828037261963 -14,3.847820281982422 -15,3.7224812507629395 -16,3.603868007659912 -17,3.4938817024230957 -18,3.391737222671509 -19,3.296656847000122 -20,3.2079861164093018 -21,3.1249642372131348 -22,3.0466275215148926 -23,2.9824297428131104 -24,2.937706232070923 -25,2.895122766494751 -26,2.8527567386627197 -27,2.8119757175445557 -28,2.7742631435394287 -29,2.7407312393188477 -30,2.710843563079834 -31,2.6837172508239746 -32,2.6584506034851074 -33,2.633770227432251 -34,2.6077191829681396 -35,2.5874030590057373 -36,2.5793068408966064 -37,2.5781800746917725 -38,2.5774929523468018 -39,2.5656282901763916 -40,2.542858362197876 -41,2.519730567932129 -42,2.5054235458374023 -43,2.5035059452056885 -44,2.5018320083618164 -45,2.49882173538208 -46,2.4948558807373047 -47,2.490097999572754 -48,2.4845287799835205 -49,2.4781529903411865 -50,2.471174716949463 -51,2.4637749195098877 -52,2.456200122833252 -53,2.4489874839782715 -54,2.442863941192627 -55,2.4388458728790283 -56,2.4372940063476562 -57,2.437526226043701 -58,2.438184976577759 -59,2.4382543563842773 -60,2.4375112056732178 -61,2.436089038848877 -62,2.4342896938323975 -63,2.432469129562378 -64,2.430898904800415 -65,2.4296844005584717 -66,2.4287359714508057 -67,2.4279818534851074 -68,2.427286386489868 -69,2.4265644550323486 -70,2.4257755279541016 -71,2.424922466278076 -72,2.4240026473999023 -73,2.4229958057403564 -74,2.421905040740967 -75,2.420715808868408 -76,2.4195303916931152 -77,2.4185104370117188 -78,2.4177143573760986 -79,2.4171316623687744 -80,2.4166929721832275 -81,2.4163153171539307 -82,2.4159088134765625 -83,2.4154212474823 -84,2.414870500564575 -85,2.4142813682556152 -86,2.413696765899658 -87,2.4131667613983154 -88,2.412684917449951 -89,2.4122495651245117 -90,2.411832809448242 -91,2.411405563354492 -92,2.410953998565674 -93,2.410482406616211 -94,2.4099996089935303 -95,2.4095299243927 -96,2.4090890884399414 -97,2.408682107925415 -98,2.4083139896392822 -99,2.4079768657684326 
-100,2.407654047012329 -101,2.407336950302124 -102,2.407012462615967 -103,2.4066781997680664 -104,2.40633487701416 -105,2.4059860706329346 -106,2.405643939971924 -107,2.4053151607513428 -108,2.404999017715454 -109,2.4046988487243652 -110,2.4044110774993896 -111,2.4041292667388916 -112,2.403850555419922 -113,2.403568744659424 -114,2.403282880783081 -115,2.40299654006958 -116,2.402714729309082 -117,2.402440071105957 -118,2.402175188064575 -119,2.4019205570220947 -120,2.401675224304199 -121,2.4014370441436768 -122,2.4012045860290527 -123,2.400972843170166 -124,2.400742292404175 -125,2.4005134105682373 -126,2.4002890586853027 -127,2.400071382522583 -128,2.3998589515686035 -129,2.3996505737304688 -130,2.399444818496704 -131,2.399240732192993 -132,2.399038076400757 -133,2.3988358974456787 -134,2.3986363410949707 -135,2.3984384536743164 -136,2.3982443809509277 -137,2.3980538845062256 -138,2.3978660106658936 -139,2.3976802825927734 -140,2.397495985031128 -141,2.3973138332366943 -142,2.3971340656280518 -143,2.396955966949463 -144,2.3967795372009277 -145,2.3966052532196045 -146,2.396432876586914 -147,2.3962628841400146 -148,2.3960952758789062 -149,2.395930290222168 -150,2.3957669734954834 -151,2.3956058025360107 -152,2.395446300506592 -153,2.3952882289886475 -154,2.395132541656494 -155,2.394979238510132 -156,2.3948280811309814 -157,2.3946783542633057 -158,2.3945295810699463 -159,2.3943827152252197 -160,2.3942365646362305 -161,2.3940927982330322 -162,2.393949508666992 -163,2.3938074111938477 -164,2.3936655521392822 -165,2.3935253620147705 -166,2.393386125564575 -167,2.393247604370117 -168,2.3931100368499756 -169,2.3929736614227295 -170,2.3928377628326416 -171,2.3927018642425537 -172,2.392568826675415 -173,2.392437219619751 -174,2.3923065662384033 -175,2.392180919647217 -176,2.392056941986084 -177,2.391932487487793 -178,2.3918070793151855 -179,2.391680955886841 -180,2.3915562629699707 -181,2.3914337158203125 -182,2.391312599182129 -183,2.3911917209625244 -184,2.3910703659057617 -185,2.390949249267578 -186,2.3908278942108154 -187,2.390706777572632 -188,2.3905863761901855 -189,2.390467643737793 -190,2.390349864959717 -191,2.3902316093444824 -192,2.39011287689209 -193,2.3899953365325928 -194,2.389878511428833 -195,2.389763355255127 -196,2.3896477222442627 -197,2.3895325660705566 -198,2.3894176483154297 -199,2.3893039226531982 -200,2.3891918659210205 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.100/training_config.txt deleted file mode 100644 index 02df252..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: 
float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.100/training_log.csv deleted file mode 100644 index 585a0d9..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,135.07530212402344 -2,64.079833984375 -3,36.47005081176758 -4,16.68683433532715 -5,9.118081092834473 -6,7.173774719238281 -7,6.583879470825195 -8,6.110447406768799 -9,5.642327308654785 -10,5.18532133102417 -11,4.729133129119873 -12,4.338897228240967 -13,4.045412063598633 -14,3.952036142349243 -15,3.8766775131225586 -16,3.801008462905884 -17,3.728581428527832 -18,3.6592981815338135 -19,3.59110164642334 -20,3.5224552154541016 -21,3.4532909393310547 -22,3.3845958709716797 -23,3.316838026046753 -24,3.2507688999176025 -25,3.188037395477295 -26,3.130146026611328 -27,3.078106164932251 -28,3.032888889312744 -29,2.995119333267212 -30,2.965100049972534 -31,2.942678213119507 -32,2.9256062507629395 -33,2.909449815750122 -34,2.891047477722168 -35,2.8693432807922363 -36,2.8445305824279785 -37,2.8172404766082764 -38,2.788121461868286 -39,2.7586724758148193 -40,2.730480194091797 -41,2.7049834728240967 -42,2.6826112270355225 -43,2.6627347469329834 -44,2.6446588039398193 -45,2.627593517303467 -46,2.610851764678955 -47,2.593876600265503 -48,2.5764009952545166 -49,2.5587332248687744 -50,2.5410408973693848 -51,2.52347469329834 -52,2.50620698928833 -53,2.4894676208496094 -54,2.473910093307495 -55,2.460052013397217 -56,2.4482133388519287 -57,2.438420057296753 -58,2.430889844894409 -59,2.425494432449341 -60,2.4220755100250244 -61,2.42048716545105 -62,2.4203851222991943 -63,2.421201467514038 -64,2.42232084274292 -65,2.423234462738037 -66,2.4239847660064697 -67,2.424823522567749 -68,2.425804615020752 -69,2.426640033721924 -70,2.4266929626464844 -71,2.42557430267334 -72,2.4235830307006836 -73,2.4213383197784424 -74,2.4192259311676025 -75,2.417339563369751 -76,2.415595531463623 -77,2.413931369781494 -78,2.4123103618621826 -79,2.4107069969177246 -80,2.4092767238616943 
-81,2.4080491065979004 -82,2.4070558547973633 -83,2.4063198566436768 -84,2.405810832977295 -85,2.405505418777466 -86,2.405334949493408 -87,2.405240774154663 -88,2.4051673412323 -89,2.4050703048706055 -90,2.4049272537231445 -91,2.404719352722168 -92,2.404447555541992 -93,2.4041149616241455 -94,2.4037272930145264 -95,2.403296947479248 -96,2.4028379917144775 -97,2.402366876602173 -98,2.401900291442871 -99,2.401439905166626 -100,2.401003837585449 -101,2.400606870651245 -102,2.4002482891082764 -103,2.3999345302581787 -104,2.3996613025665283 -105,2.3994193077087402 -106,2.399198532104492 -107,2.398988723754883 -108,2.398776054382324 -109,2.3985626697540283 -110,2.398343086242676 -111,2.3981173038482666 -112,2.3978846073150635 -113,2.3976492881774902 -114,2.397413969039917 -115,2.3971786499023438 -116,2.396941661834717 -117,2.3967056274414062 -118,2.396475076675415 -119,2.3962507247924805 -120,2.3960304260253906 -121,2.395814895629883 -122,2.395603656768799 -123,2.3953967094421387 -124,2.395195484161377 -125,2.3949992656707764 -126,2.3948066234588623 -127,2.3946170806884766 -128,2.3944294452667236 -129,2.394242763519287 -130,2.394057035446167 -131,2.3938710689544678 -132,2.3936843872070312 -133,2.3934967517852783 -134,2.3933093547821045 -135,2.3931217193603516 -136,2.3929343223571777 -137,2.392746686935425 -138,2.3925602436065674 -139,2.392376184463501 -140,2.392200231552124 -141,2.3920304775238037 -142,2.391866445541382 -143,2.3917129039764404 -144,2.3915646076202393 -145,2.3914222717285156 -146,2.391284704208374 -147,2.39115309715271 -148,2.3910255432128906 -149,2.390897274017334 -150,2.390768051147461 -151,2.3906376361846924 -152,2.390507221221924 -153,2.3903756141662598 -154,2.3902435302734375 -155,2.390110969543457 -156,2.389979362487793 -157,2.389847755432129 -158,2.389716625213623 -159,2.3895859718322754 -160,2.3894641399383545 -161,2.3893446922302246 -162,2.389225959777832 -163,2.3891100883483887 -164,2.3889973163604736 -165,2.3888864517211914 -166,2.3887763023376465 -167,2.3886666297912598 -168,2.388558864593506 -169,2.388451337814331 -170,2.3883447647094727 -171,2.3882384300231934 -172,2.3881325721740723 -173,2.3880276679992676 -174,2.387922525405884 -175,2.3878180980682373 -176,2.387714385986328 -177,2.387610673904419 -178,2.387507915496826 -179,2.3874056339263916 -180,2.3873040676116943 -181,2.387202739715576 -182,2.3871021270751953 -183,2.3870012760162354 -184,2.3869006633758545 -185,2.3868002891540527 -186,2.38670015335083 -187,2.3866004943847656 -188,2.386500597000122 -189,2.3864011764526367 -190,2.3863024711608887 -191,2.3862037658691406 -192,2.3861050605773926 -193,2.3860068321228027 -194,2.3859081268310547 -195,2.385810613632202 -196,2.385714054107666 -197,2.385617971420288 -198,2.385521411895752 -199,2.3854246139526367 -200,2.3853280544281006 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.125/training_config.txt deleted file mode 100644 index 83e065a..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 
0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.125/training_log.csv deleted file mode 100644 index 0e6e9b4..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,133.23406982421875 -2,32.82052993774414 -3,15.794758796691895 -4,7.625123500823975 -5,5.433840274810791 -6,4.788426876068115 -7,4.466541290283203 -8,4.203093528747559 -9,3.991551399230957 -10,3.7985196113586426 -11,3.6121151447296143 -12,3.435615301132202 -13,3.272524118423462 -14,3.140265941619873 -15,3.075312614440918 -16,3.040175437927246 -17,3.0068984031677246 -18,2.9731545448303223 -19,2.9384829998016357 -20,2.9028069972991943 -21,2.8665640354156494 -22,2.8289599418640137 -23,2.7905397415161133 -24,2.7518303394317627 -25,2.712588310241699 -26,2.673597574234009 -27,2.634491205215454 -28,2.595644950866699 -29,2.562822103500366 -30,2.5426554679870605 -31,2.5417239665985107 -32,2.556324005126953 -33,2.5665934085845947 -34,2.55672550201416 -35,2.5317978858947754 -36,2.506650447845459 -37,2.4903721809387207 -38,2.4836721420288086 -39,2.482271909713745 -40,2.4823317527770996 -41,2.4816367626190186 -42,2.479339361190796 -43,2.4753761291503906 -44,2.4703369140625 -45,2.465003490447998 -46,2.46016263961792 -47,2.456467866897583 -48,2.4543027877807617 -49,2.452878475189209 -50,2.451493263244629 -51,2.4498484134674072 -52,2.4480957984924316 -53,2.4464457035064697 -54,2.4448657035827637 -55,2.44331955909729 -56,2.441788673400879 -57,2.4401371479034424 -58,2.438398838043213 -59,2.4366965293884277 -60,2.435120105743408 
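
The weight function recorded in each of these configs is a plain linear ramp dressed up with no-op **1 exponents, likely a template whose exponent was varied for the other weighting shapes in this sweep. It evaluates to w(t) = min_val + (1 - min_val) * (t_max - t) / t_max, i.e. weight 1 at t = 0 falling to min_val at t = t_max, so tracking error early in the trajectory is penalized most. A self-contained, algebraically identical sketch:

import torch
from typing import List, Optional, Union

def linear_mirrored(t_span: Union[torch.Tensor, List[float]],
                    t_max: Optional[float] = None,
                    min_val: float = 0.01) -> torch.Tensor:
    # Same ramp as the deleted configs, with the redundant **1 removed.
    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
    t_max = t_max if t_max is not None else t_span[-1]
    return min_val + (1.0 - min_val) * (t_max - t_span) / t_max

# On the sweep's 0-10 s, 1000-point span: w(0) == 1.0 and w(t_max) == min_val.
w = linear_mirrored(torch.linspace(0, 10, 1000))
assert torch.isclose(w[0], torch.tensor(1.0)) and torch.isclose(w[-1], torch.tensor(0.01))
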
-61,2.4336564540863037 -62,2.4322314262390137 -63,2.4308321475982666 -64,2.429415225982666 -65,2.4279677867889404 -66,2.42649245262146 -67,2.4249374866485596 -68,2.4232945442199707 -69,2.4215567111968994 -70,2.4198179244995117 -71,2.418208122253418 -72,2.416806936264038 -73,2.415736198425293 -74,2.414914608001709 -75,2.414207696914673 -76,2.413490056991577 -77,2.4126768112182617 -78,2.4117348194122314 -79,2.410723924636841 -80,2.4097185134887695 -81,2.4088053703308105 -82,2.408027410507202 -83,2.4073753356933594 -84,2.4068095684051514 -85,2.4062747955322266 -86,2.4057183265686035 -87,2.405123233795166 -88,2.4044899940490723 -89,2.4038407802581787 -90,2.403205633163452 -91,2.402611494064331 -92,2.402080535888672 -93,2.4016027450561523 -94,2.4011456966400146 -95,2.4006948471069336 -96,2.4002346992492676 -97,2.39976167678833 -98,2.399287462234497 -99,2.3988215923309326 -100,2.3983771800994873 -101,2.397955894470215 -102,2.3975412845611572 -103,2.3971400260925293 -104,2.396742582321167 -105,2.3963546752929688 -106,2.395967483520508 -107,2.395583391189575 -108,2.3952064514160156 -109,2.39483904838562 -110,2.3944852352142334 -111,2.3941428661346436 -112,2.3938047885894775 -113,2.393470287322998 -114,2.393134832382202 -115,2.3928017616271973 -116,2.3924741744995117 -117,2.3921523094177246 -118,2.391832113265991 -119,2.3915114402770996 -120,2.3911936283111572 -121,2.390876054763794 -122,2.390559673309326 -123,2.3902511596679688 -124,2.389950752258301 -125,2.389660358428955 -126,2.389378309249878 -127,2.3891043663024902 -128,2.3888394832611084 -129,2.3885817527770996 -130,2.3883349895477295 -131,2.3880958557128906 -132,2.3878602981567383 -133,2.3876283168792725 -134,2.387399196624756 -135,2.3871734142303467 -136,2.386949300765991 -137,2.386732339859009 -138,2.3865184783935547 -139,2.3863072395324707 -140,2.386096954345703 -141,2.3858883380889893 -142,2.385681390762329 -143,2.3854782581329346 -144,2.385277032852173 -145,2.3850810527801514 -146,2.384889841079712 -147,2.3846993446350098 -148,2.384510040283203 -149,2.3843207359313965 -150,2.38413667678833 -151,2.3839597702026367 -152,2.3837857246398926 -153,2.3836143016815186 -154,2.383445978164673 -155,2.3832802772521973 -156,2.383115530014038 -157,2.3829524517059326 -158,2.3827922344207764 -159,2.382634162902832 -160,2.3824782371520996 -161,2.3823232650756836 -162,2.3821706771850586 -163,2.382019281387329 -164,2.3818695545196533 -165,2.381721258163452 -166,2.3815760612487793 -167,2.3814332485198975 -168,2.3812930583953857 -169,2.3811540603637695 -170,2.3810176849365234 -171,2.3808810710906982 -172,2.380746364593506 -173,2.38061261177063 -174,2.380478620529175 -175,2.3803465366363525 -176,2.3802151679992676 -177,2.3800837993621826 -178,2.3799524307250977 -179,2.3798210620880127 -180,2.3796887397766113 -181,2.3795557022094727 -182,2.3794238567352295 -183,2.3792917728424072 -184,2.379159688949585 -185,2.3790266513824463 -186,2.3788933753967285 -187,2.37876033782959 -188,2.3786263465881348 -189,2.378492832183838 -190,2.3783586025238037 -191,2.378223419189453 -192,2.3780879974365234 -193,2.377952814102173 -194,2.377819538116455 -195,2.377687454223633 -196,2.3775577545166016 -197,2.3774311542510986 -198,2.3773088455200195 -199,2.3771908283233643 -200,2.3770766258239746 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.160/training_config.txt deleted file mode 100644 index 385425c..0000000 --- 
a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.160/training_log.csv deleted file mode 100644 index a899933..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,134.66172790527344 -2,20.256500244140625 -3,14.929855346679688 -4,14.109514236450195 -5,11.147294998168945 -6,10.15562629699707 -7,9.145364761352539 -8,8.049457550048828 -9,6.751832485198975 -10,5.756121635437012 -11,5.238220691680908 -12,4.870668888092041 -13,4.575987815856934 -14,4.319539546966553 -15,4.0870561599731445 -16,3.8714606761932373 -17,3.677786111831665 -18,3.5137717723846436 -19,3.3712379932403564 -20,3.247286558151245 -21,3.1406164169311523 -22,3.048325300216675 -23,2.9682552814483643 -24,2.899580240249634 -25,2.8412024974823 -26,2.792192220687866 -27,2.751798629760742 -28,2.7193779945373535 -29,2.6943156719207764 -30,2.6757280826568604 -31,2.6617722511291504 -32,2.650561809539795 -33,2.6404848098754883 -34,2.630681276321411 -35,2.620598077774048 -36,2.6102347373962402 -37,2.5999786853790283 -38,2.5901355743408203 
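
One caveat on the loss function that every config in this sweep shares: the comments label theta as [batch_size, t_points], but weights.view(-1, 1) yields shape [t_points, 1], which only broadcasts against a trajectory whose leading axis is time. If the simulator returns trajectories time-first, [t_points, batch_size, state], as torchdiffeq-style ODE solvers do (our assumption, not stated in the configs), the arithmetic is correct and only the shape comments are transposed; under the layout the comments claim, the multiply would raise a size mismatch. A quick demonstration with the sweep's 1000 time points and 22 training cases:

import torch

t_points, batch_size = 1000, 22
weights = torch.rand(t_points).view(-1, 1)            # [t_points, 1]

theta_time_first = torch.zeros(t_points, batch_size)  # time-first layout
weighted = weights * theta_time_first                 # broadcasts to [t_points, batch_size]

theta_batch_first = torch.zeros(batch_size, t_points) # layout the comments claim
try:
    weights * theta_batch_first                       # [1000, 1] vs [22, 1000]
except RuntimeError as err:
    print("size mismatch, as the commented layout would produce:", err)
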
-39,2.5808053016662598 -40,2.5724213123321533 -41,2.5649006366729736 -42,2.558319330215454 -43,2.5526139736175537 -44,2.547544479370117 -45,2.5431923866271973 -46,2.5393450260162354 -47,2.535717725753784 -48,2.532139301300049 -49,2.528486728668213 -50,2.524782419204712 -51,2.520977735519409 -52,2.517258882522583 -53,2.5135507583618164 -54,2.509875774383545 -55,2.5064594745635986 -56,2.5032529830932617 -57,2.500171184539795 -58,2.49714732170105 -59,2.4942145347595215 -60,2.491166830062866 -61,2.4879233837127686 -62,2.4842844009399414 -63,2.4803519248962402 -64,2.476306438446045 -65,2.4723286628723145 -66,2.4681966304779053 -67,2.463794469833374 -68,2.4594082832336426 -69,2.455152988433838 -70,2.4509713649749756 -71,2.446776866912842 -72,2.442690134048462 -73,2.438704252243042 -74,2.434796094894409 -75,2.4310760498046875 -76,2.4274699687957764 -77,2.424072027206421 -78,2.4209818840026855 -79,2.418269157409668 -80,2.415947437286377 -81,2.414093017578125 -82,2.4127001762390137 -83,2.4116408824920654 -84,2.410914659500122 -85,2.4104645252227783 -86,2.410179376602173 -87,2.4098830223083496 -88,2.409609317779541 -89,2.4093148708343506 -90,2.408992290496826 -91,2.4086618423461914 -92,2.408296823501587 -93,2.4079418182373047 -94,2.4076054096221924 -95,2.407301664352417 -96,2.406952142715454 -97,2.4066224098205566 -98,2.406316041946411 -99,2.4059932231903076 -100,2.405648708343506 -101,2.4052658081054688 -102,2.404843330383301 -103,2.4043874740600586 -104,2.4039289951324463 -105,2.403480052947998 -106,2.403055191040039 -107,2.402634382247925 -108,2.4022269248962402 -109,2.40183162689209 -110,2.4014523029327393 -111,2.4010865688323975 -112,2.4007720947265625 -113,2.4004807472229004 -114,2.400216579437256 -115,2.3999741077423096 -116,2.3997528553009033 -117,2.3995440006256104 -118,2.3993306159973145 -119,2.399106025695801 -120,2.3988735675811768 -121,2.3986339569091797 -122,2.3983893394470215 -123,2.3981409072875977 -124,2.397890329360962 -125,2.397637367248535 -126,2.397386074066162 -127,2.3971359729766846 -128,2.39688777923584 -129,2.3966476917266846 -130,2.3964149951934814 -131,2.396190881729126 -132,2.3959686756134033 -133,2.395752191543579 -134,2.3955395221710205 -135,2.3953394889831543 -136,2.3951449394226074 -137,2.3949553966522217 -138,2.3947789669036865 -139,2.394599676132202 -140,2.3944125175476074 -141,2.3942172527313232 -142,2.3940181732177734 -143,2.393829107284546 -144,2.3936500549316406 -145,2.393476724624634 -146,2.3933091163635254 -147,2.393141746520996 -148,2.3929708003997803 -149,2.3928003311157227 -150,2.392631769180298 -151,2.3924636840820312 -152,2.392298698425293 -153,2.3921382427215576 -154,2.3919870853424072 -155,2.3918378353118896 -156,2.391685962677002 -157,2.39153790473938 -158,2.391392469406128 -159,2.391249418258667 -160,2.391106605529785 -161,2.390965700149536 -162,2.3908276557922363 -163,2.3906898498535156 -164,2.390554428100586 -165,2.390423059463501 -166,2.3902931213378906 -167,2.390160322189331 -168,2.3900299072265625 -169,2.3899011611938477 -170,2.389775037765503 -171,2.3896501064300537 -172,2.3895249366760254 -173,2.389399528503418 -174,2.389275550842285 -175,2.3891525268554688 -176,2.389029026031494 -177,2.3889074325561523 -178,2.388786792755127 -179,2.3886666297912598 -180,2.388547897338867 -181,2.38843035697937 -182,2.388312816619873 -183,2.388195753097534 -184,2.388078451156616 -185,2.3879616260528564 -186,2.3878438472747803 -187,2.387725591659546 -188,2.387608051300049 -189,2.3874900341033936 -190,2.3873729705810547 -191,2.387256145477295 
-192,2.3871381282806396 -193,2.3870229721069336 -194,2.3869080543518066 -195,2.386794328689575 -196,2.3866801261901855 -197,2.386566162109375 -198,2.3864517211914062 -199,2.386338710784912 -200,2.386225938796997 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.200/training_config.txt deleted file mode 100644 index 92211d9..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.200/training_log.csv deleted file mode 100644 index cc2c5b2..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,147.2079315185547 -2,42.75838088989258 -3,16.391845703125 -4,12.609832763671875 -5,9.867650985717773 -6,7.844890594482422 -7,6.155681610107422 -8,5.368279933929443 -9,4.89227819442749 -10,4.525618076324463 -11,4.2084736824035645 -12,3.9558444023132324 -13,3.7646982669830322 -14,3.6113057136535645 -15,3.480771064758301 -16,3.3642067909240723 -17,3.258565664291382 -18,3.164124011993408 
-19,3.0808615684509277 -20,3.0086019039154053 -21,2.9471065998077393 -22,2.895554304122925 -23,2.8531265258789062 -24,2.8185994625091553 -25,2.7899599075317383 -26,2.766226291656494 -27,2.7463932037353516 -28,2.7294888496398926 -29,2.714923858642578 -30,2.701977014541626 -31,2.6899542808532715 -32,2.678525686264038 -33,2.6670379638671875 -34,2.6553008556365967 -35,2.644001007080078 -36,2.6336376667022705 -37,2.624398708343506 -38,2.616346836090088 -39,2.609473705291748 -40,2.604989528656006 -41,2.6032330989837646 -42,2.6022908687591553 -43,2.6007697582244873 -44,2.5980329513549805 -45,2.5938518047332764 -46,2.5881094932556152 -47,2.5810770988464355 -48,2.5734751224517822 -49,2.5653700828552246 -50,2.557429075241089 -51,2.5502216815948486 -52,2.5445032119750977 -53,2.540097713470459 -54,2.5363965034484863 -55,2.5329391956329346 -56,2.529336929321289 -57,2.525404930114746 -58,2.521146774291992 -59,2.516601085662842 -60,2.511910915374756 -61,2.50723934173584 -62,2.50264310836792 -63,2.4983115196228027 -64,2.4942712783813477 -65,2.490461587905884 -66,2.4867842197418213 -67,2.482998847961426 -68,2.478928327560425 -69,2.4744482040405273 -70,2.4696779251098633 -71,2.464921474456787 -72,2.4601376056671143 -73,2.4556381702423096 -74,2.4512619972229004 -75,2.4471752643585205 -76,2.4432575702667236 -77,2.4395663738250732 -78,2.436034917831421 -79,2.432717800140381 -80,2.429556131362915 -81,2.4266276359558105 -82,2.423955202102661 -83,2.421696424484253 -84,2.4197559356689453 -85,2.418097972869873 -86,2.4167661666870117 -87,2.4157638549804688 -88,2.415015697479248 -89,2.414469003677368 -90,2.414095640182495 -91,2.413783311843872 -92,2.4134936332702637 -93,2.413201332092285 -94,2.412900686264038 -95,2.4125773906707764 -96,2.412219285964966 -97,2.411802291870117 -98,2.411322832107544 -99,2.4108047485351562 -100,2.410285234451294 -101,2.4097955226898193 -102,2.409323215484619 -103,2.408902406692505 -104,2.4085171222686768 -105,2.4081356525421143 -106,2.407771110534668 -107,2.4074418544769287 -108,2.40714693069458 -109,2.406863212585449 -110,2.406587600708008 -111,2.4063143730163574 -112,2.406041145324707 -113,2.405778408050537 -114,2.405522584915161 -115,2.4052741527557373 -116,2.405013084411621 -117,2.4047436714172363 -118,2.404482364654541 -119,2.4042208194732666 -120,2.403959035873413 -121,2.4037024974823 -122,2.4034533500671387 -123,2.4032108783721924 -124,2.40297794342041 -125,2.402754545211792 -126,2.4025356769561768 -127,2.4023215770721436 -128,2.402106761932373 -129,2.4019062519073486 -130,2.4017138481140137 -131,2.4015300273895264 -132,2.4013495445251465 -133,2.4011788368225098 -134,2.4010159969329834 -135,2.400852918624878 -136,2.4006845951080322 -137,2.4005181789398193 -138,2.4003567695617676 -139,2.4001948833465576 -140,2.40002703666687 -141,2.3998570442199707 -142,2.399693250656128 -143,2.3995304107666016 -144,2.3993704319000244 -145,2.3992137908935547 -146,2.3990695476531982 -147,2.398930072784424 -148,2.3987863063812256 -149,2.398641586303711 -150,2.3984949588775635 -151,2.3983452320098877 -152,2.398195505142212 -153,2.398054838180542 -154,2.3979172706604004 -155,2.3977792263031006 -156,2.3976407051086426 -157,2.3975026607513428 -158,2.397364377975464 -159,2.3972249031066895 -160,2.3970885276794434 -161,2.396953582763672 -162,2.3968186378479004 -163,2.3966846466064453 -164,2.3965516090393066 -165,2.396421432495117 -166,2.396296977996826 -167,2.3961756229400635 -168,2.3960514068603516 -169,2.395923376083374 -170,2.395796537399292 -171,2.395674228668213 -172,2.395552635192871 
-173,2.395430564880371 -174,2.395308494567871 -175,2.395186185836792 -176,2.395064353942871 -177,2.394943952560425 -178,2.3948230743408203 -179,2.3947012424468994 -180,2.3945796489715576 -181,2.3944616317749023 -182,2.3943448066711426 -183,2.3942272663116455 -184,2.394108772277832 -185,2.393991470336914 -186,2.3938770294189453 -187,2.3937630653381348 -188,2.3936495780944824 -189,2.393536329269409 -190,2.393423318862915 -191,2.393310308456421 -192,2.393197774887085 -193,2.3930861949920654 -194,2.3929741382598877 -195,2.392864942550659 -196,2.3927547931671143 -197,2.3926432132720947 -198,2.3925325870513916 -199,2.392423391342163 -200,2.392315149307251 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.250/training_config.txt deleted file mode 100644 index f2a6635..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.250/training_log.csv deleted file mode 100644 index acb7323..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.250/training_log.csv +++ /dev/null @@ 
-1,202 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,202.6292724609375 -2,68.19859313964844 -3,21.106904983520508 -4,16.68050765991211 -5,13.912202835083008 -6,12.218961715698242 -7,10.978754043579102 -8,10.019168853759766 -9,9.258337020874023 -10,8.629636764526367 -11,8.11741828918457 -12,7.709742069244385 -13,7.393415451049805 -14,7.141142845153809 -15,6.932973861694336 -16,6.755463123321533 -17,6.595218658447266 -18,6.4408392906188965 -19,6.28521728515625 -20,6.124085426330566 -21,5.955416679382324 -22,5.7806196212768555 -23,5.602560520172119 -24,5.421933174133301 -25,5.239066123962402 -26,5.060652732849121 -27,4.891718864440918 -28,4.733704090118408 -29,4.5857343673706055 -30,4.4481587409973145 -31,4.32133150100708 -32,4.204382419586182 -33,4.096190929412842 -34,3.9958572387695312 -35,3.902536153793335 -36,3.8153419494628906 -37,3.7336699962615967 -38,3.656641721725464 -39,3.583937406539917 -40,3.515064001083374 -41,3.4495396614074707 -42,3.3871476650238037 -43,3.3280632495880127 -44,3.2728657722473145 -45,3.2219443321228027 -46,3.175410509109497 -47,3.1334574222564697 -48,3.096165418624878 -49,3.0637197494506836 -50,3.036102294921875 -51,3.013094902038574 -52,2.9943270683288574 -53,2.9792520999908447 -54,2.9672610759735107 -55,2.9577555656433105 -56,2.950103282928467 -57,2.943782329559326 -58,2.938283681869507 -59,2.9332163333892822 -60,2.9282479286193848 -61,2.923139810562134 -62,2.9177308082580566 -63,2.911940097808838 -64,2.905756711959839 -65,2.8992042541503906 -66,2.8923139572143555 -67,2.8851115703582764 -68,2.877685070037842 -69,2.870124578475952 -70,2.86246657371521 -71,2.854846477508545 -72,2.847249746322632 -73,2.8397250175476074 -74,2.832294464111328 -75,2.8249359130859375 -76,2.817671537399292 -77,2.8105106353759766 -78,2.803478956222534 -79,2.7965657711029053 -80,2.7898008823394775 -81,2.783191442489624 -82,2.7767395973205566 -83,2.770493984222412 -84,2.764411687850952 -85,2.758514165878296 -86,2.7527976036071777 -87,2.7472267150878906 -88,2.741804599761963 -89,2.7365024089813232 -90,2.731334686279297 -91,2.7262890338897705 -92,2.721442937850952 -93,2.716885566711426 -94,2.712584972381592 -95,2.7084176540374756 -96,2.7043585777282715 -97,2.7003605365753174 -98,2.6964564323425293 -99,2.6926450729370117 -100,2.6889076232910156 -101,2.6852974891662598 -102,2.6818315982818604 -103,2.6784555912017822 -104,2.6751530170440674 -105,2.671957492828369 -106,2.668884515762329 -107,2.665889263153076 -108,2.662963390350342 -109,2.660121202468872 -110,2.657414674758911 -111,2.654815435409546 -112,2.652268886566162 -113,2.649825096130371 -114,2.647498846054077 -115,2.64528751373291 -116,2.643259286880493 -117,2.6412947177886963 -118,2.6394424438476562 -119,2.637669801712036 -120,2.6359288692474365 -121,2.6342084407806396 -122,2.632488965988159 -123,2.63075590133667 -124,2.628997325897217 -125,2.6272199153900146 -126,2.625430107116699 -127,2.6236374378204346 -128,2.621842384338379 -129,2.620046854019165 -130,2.618258237838745 -131,2.6164746284484863 -132,2.614684581756592 -133,2.6129016876220703 -134,2.6111254692077637 -135,2.609358072280884 -136,2.6075961589813232 -137,2.6058459281921387 -138,2.6040871143341064 -139,2.602342367172241 -140,2.600619316101074 -141,2.598921775817871 -142,2.5972378253936768 -143,2.5955638885498047 -144,2.593902587890625 -145,2.5922553539276123 -146,2.5906240940093994 -147,2.589007616043091 -148,2.58740234375 -149,2.585811138153076 -150,2.584228038787842 -151,2.582656145095825 -152,2.581088066101074 -153,2.57952880859375 -154,2.5779712200164795 
-155,2.5764124393463135 -156,2.5748589038848877 -157,2.5733089447021484 -158,2.571765661239624 -159,2.570228099822998 -160,2.568697690963745 -161,2.567176342010498 -162,2.565662145614624 -163,2.5641469955444336 -164,2.5626296997070312 -165,2.561105728149414 -166,2.5595879554748535 -167,2.558082103729248 -168,2.5565807819366455 -169,2.555079936981201 -170,2.553567886352539 -171,2.5520551204681396 -172,2.5505456924438477 -173,2.549044370651245 -174,2.5475499629974365 -175,2.546058416366577 -176,2.544570207595825 -177,2.543107509613037 -178,2.5416927337646484 -179,2.5403590202331543 -180,2.5390424728393555 -181,2.53773832321167 -182,2.5364463329315186 -183,2.5351691246032715 -184,2.5339009761810303 -185,2.5326344966888428 -186,2.531365394592285 -187,2.5301053524017334 -188,2.5288565158843994 -189,2.527617931365967 -190,2.5263891220092773 -191,2.5251688957214355 -192,2.523958921432495 -193,2.5227532386779785 -194,2.5215539932250977 -195,2.5203604698181152 -196,2.519176959991455 -197,2.5180039405822754 -198,2.516845703125 -199,2.5156984329223633 -200,2.514561891555786 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.300/training_config.txt deleted file mode 100644 index 24c6c13..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 
12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.300/training_log.csv deleted file mode 100644 index 4909d74..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.300/training_log.csv +++ /dev/null @@ -1,34 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,331.31170654296875 -2,21.584110260009766 -3,15.593328475952148 -4,12.629968643188477 -5,11.080791473388672 -6,10.451873779296875 -7,10.187116622924805 -8,9.874967575073242 -9,9.508050918579102 -10,9.198205947875977 -11,8.969419479370117 -12,8.793705940246582 -13,8.639370918273926 -14,8.498120307922363 -15,8.364075660705566 -16,8.231746673583984 -17,8.100773811340332 -18,7.970752716064453 -19,7.842519760131836 -20,7.7166290283203125 -21,7.5929951667785645 -22,7.472010612487793 -23,7.354116439819336 -24,7.240175247192383 -25,7.129673957824707 -26,7.020423412322998 -27,6.911076068878174 -28,6.8023786544799805 -29,nan -30,nan -31,nan -32,nan diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.400/training_config.txt deleted file mode 100644 index 0117152..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] 
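
The lr_0.300 log above is the first casualty of the sweep: the loss is still easing down through epoch 28, then goes nan from epoch 29 onward and the run stops at epoch 32 instead of completing 200 epochs; the lr_2.000 log later in this diff dies the same way after four nan epochs. That is consistent with a runner that aborts once the loss has been non-finite for a few consecutive epochs; a sketch of such a guard (train_step and its signature are hypothetical stand-ins, not the repo's actual API):

import math

def run_with_nan_guard(train_step, max_epochs=200, patience=4):
    # train_step() is assumed to run one optimization epoch and return
    # the scalar loss as a Python float.
    bad = 0
    for epoch in range(max_epochs + 1):
        loss = train_step()
        if math.isfinite(loss):
            bad = 0
        else:
            bad += 1
            if bad >= patience:
                print(f"stopping at epoch {epoch}: loss non-finite for {bad} epochs")
                break
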
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.400/training_log.csv deleted file mode 100644 index a106dd9..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.400/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,3124.86865234375 -2,21.73473358154297 -3,36.142234802246094 -4,16.741289138793945 -5,14.686822891235352 -6,13.793562889099121 -7,13.17497730255127 -8,12.730881690979004 -9,12.420994758605957 -10,12.208780288696289 -11,12.045336723327637 -12,11.901365280151367 -13,11.765559196472168 -14,22.290908813476562 -15,283.6553039550781 -16,14530.3740234375 -17,31576.236328125 -18,31760.20703125 -19,31762.892578125 -20,31762.892578125 -21,31762.892578125 -22,31762.17578125 -23,31745.6640625 -24,30550.61328125 -25,33275.25390625 -26,30771.58203125 -27,13904.7314453125 -28,1828.003662109375 -29,136.54258728027344 -30,33.61140441894531 -31,22.317628860473633 -32,20.7526912689209 -33,20.822362899780273 -34,31.098190307617188 -35,48.035797119140625 -36,40.06608963012695 -37,37.87067413330078 -38,36.47787857055664 -39,34.0139274597168 -40,30.87177848815918 -41,48.31074523925781 -42,58.87482452392578 -43,71.9961166381836 -44,86.86912536621094 -45,100.6619873046875 -46,113.90214538574219 -47,127.12590789794922 -48,138.77223205566406 -49,148.4546661376953 -50,160.9103240966797 -51,173.76275634765625 -52,187.5423126220703 -53,202.37657165527344 -54,217.55531311035156 -55,233.84222412109375 -56,250.3270263671875 -57,266.6524963378906 -58,282.3331604003906 -59,296.5003967285156 -60,309.6226501464844 -61,321.5458068847656 -62,332.2322998046875 -63,341.7755126953125 -64,350.2632141113281 -65,357.731689453125 -66,371.7218933105469 -67,383.6397399902344 -68,394.2464599609375 -69,404.0794677734375 -70,413.1476745605469 -71,421.6086730957031 -72,429.3204040527344 -73,436.252685546875 -74,442.5220031738281 -75,448.1352844238281 -76,453.21990966796875 -77,457.7698669433594 -78,461.77337646484375 -79,465.260009765625 -80,468.1886901855469 -81,470.9059143066406 -82,474.00146484375 -83,476.9258117675781 -84,479.0191345214844 -85,480.4475402832031 -86,482.0821838378906 -87,483.86419677734375 -88,485.3718566894531 -89,486.4767150878906 -90,487.2880859375 -91,487.8635559082031 -92,488.6519470214844 -93,489.5352783203125 -94,489.8951416015625 -95,490.0465393066406 -96,490.40673828125 -97,490.7680969238281 -98,490.9375305175781 -99,490.9213562011719 -100,490.86004638671875 -101,490.9049072265625 -102,490.9129638671875 -103,490.7272644042969 -104,490.4853210449219 -105,490.30859375 -106,490.1407165527344 -107,489.89923095703125 -108,489.5737609863281 -109,489.2200012207031 -110,488.9140930175781 -111,488.60491943359375 -112,488.23077392578125 -113,487.822265625 -114,487.4359130859375 -115,487.0559997558594 -116,486.6448059082031 -117,486.2120361328125 -118,485.7940979003906 -119,485.3811340332031 -120,484.94873046875 -121,484.50396728515625 -122,484.0586853027344 -123,483.6123046875 -124,483.1635437011719 -125,482.7045593261719 -126,482.25390625 -127,481.8076477050781 -128,481.3614196777344 -129,480.9148254394531 -130,480.4734191894531 -131,480.03363037109375 -132,479.5919189453125 -133,479.14935302734375 -134,478.7041931152344 -135,478.2627868652344 -136,477.8260498046875 -137,477.3946228027344 
-138,476.96136474609375 -139,476.5303955078125 -140,476.0997314453125 -141,475.66680908203125 -142,475.23004150390625 -143,474.796142578125 -144,474.3642578125 -145,473.93853759765625 -146,473.51654052734375 -147,473.10186767578125 -148,472.6866455078125 -149,472.2686462402344 -150,471.85595703125 -151,471.45263671875 -152,471.0603942871094 -153,470.67645263671875 -154,470.2958068847656 -155,469.9223327636719 -156,469.5496826171875 -157,469.18682861328125 -158,468.8241882324219 -159,468.4742736816406 -160,468.12994384765625 -161,467.7981872558594 -162,467.46795654296875 -163,467.14471435546875 -164,466.8291320800781 -165,466.52685546875 -166,466.2366943359375 -167,465.9458312988281 -168,465.6319580078125 -169,465.3495788574219 -170,465.0767822265625 -171,464.79327392578125 -172,464.5273132324219 -173,464.2740478515625 -174,464.0060119628906 -175,463.7519226074219 -176,463.52294921875 -177,463.2738952636719 -178,463.0224609375 -179,462.78021240234375 -180,462.55169677734375 -181,462.3262634277344 -182,462.1062316894531 -183,461.8888244628906 -184,461.6778259277344 -185,461.4699401855469 -186,461.26763916015625 -187,461.0682373046875 -188,460.8714599609375 -189,460.6760559082031 -190,460.47662353515625 -191,460.2848205566406 -192,460.09649658203125 -193,459.9153747558594 -194,459.7404479980469 -195,459.56890869140625 -196,459.395263671875 -197,459.2215576171875 -198,459.0485534667969 -199,458.87799072265625 -200,458.70794677734375 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.500/training_config.txt deleted file mode 100644 index e7bfb68..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] 
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.500/training_log.csv deleted file mode 100644 index 4d5cec7..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.500/training_log.csv +++ /dev/null @@ -1,21 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,11499.3359375 -2,31049.5234375 -3,30958.478515625 -4,26935.70703125 -5,21758.06640625 -6,13401.3857421875 -7,4981.80126953125 -8,881.1480102539062 -9,13.013978958129883 -10,31.232181549072266 -11,1419.423828125 -12,31919.84375 -13,31925.283203125 -14,31925.90625 -15,31925.90625 -16,31925.90625 -17,31925.90625 -18,31925.90625 -19,31925.90625 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.600/training_config.txt deleted file mode 100644 index a9ab18a..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] 
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.600/training_log.csv deleted file mode 100644 index 70b75b8..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.600/training_log.csv +++ /dev/null @@ -1,12 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,20212.619140625 -2,31684.55859375 -3,31757.576171875 -4,31762.533203125 -5,31762.892578125 -6,31762.892578125 -7,31762.892578125 -8,31762.892578125 -9,31762.892578125 -10,31762.892578125 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.700/training_config.txt deleted file mode 100644 index 008f7dc..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.700/training_log.csv deleted file mode 100644 
index 6586827..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,25531.16796875 -2,31757.841796875 -3,31762.892578125 -4,31762.892578125 -5,31762.892578125 -6,31762.892578125 -7,31762.892578125 -8,31762.892578125 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.800/training_config.txt deleted file mode 100644 index 6da4a37..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.800/training_log.csv deleted file mode 100644 index a9c9909..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,27906.0546875 -2,31762.892578125 -3,31762.892578125 -4,31762.892578125 -5,31762.892578125 -6,31762.892578125 -7,31762.892578125 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.900/training_config.txt 
b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.900/training_config.txt deleted file mode 100644 index 93f2029..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.900/training_log.csv deleted file mode 100644 index 88e0a4a..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,29724.357421875 -2,31762.892578125 -3,31762.892578125 -4,31762.892578125 -5,31762.892578125 -6,31762.892578125 -7,31762.892578125 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_1.000/training_config.txt deleted file mode 100644 index 39e4afb..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def 
loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_1.000/training_log.csv deleted file mode 100644 index 00952ff..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,30709.283203125 -2,31762.892578125 -3,31762.892578125 -4,31762.892578125 -5,31762.892578125 -6,31762.892578125 -7,31762.892578125 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_16.000/training_config.txt deleted file mode 100644 index 783e794..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return 
torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_16.000/training_log.csv deleted file mode 100644 index 1b03eee..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,31925.90625 -2,31925.90625 -3,31925.90625 -4,31925.90625 -5,31925.90625 -6,31925.90625 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_2.000/training_config.txt deleted file mode 100644 index 4466f88..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] 
-[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_2.000/training_log.csv deleted file mode 100644 index 91350af..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,31922.240234375 -2,3758.1083984375 -3,nan -4,nan -5,nan -6,nan diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_4.000/training_config.txt deleted file mode 100644 index 9efccce..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] 
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_4.000/training_log.csv deleted file mode 100644 index 1b03eee..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,31925.90625 -2,31925.90625 -3,31925.90625 -4,31925.90625 -5,31925.90625 -6,31925.90625 diff --git a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_8.000/training_config.txt deleted file mode 100644 index 1636424..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def linear_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**1) * (-t_span + t_max)**1 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git 
a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_8.000/training_log.csv deleted file mode 100644 index 1b03eee..0000000 --- a/training/time_weighting_learning_rate_sweep/linear_mirrored/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,250.16981506347656 -1,31925.90625 -2,31925.90625 -3,31925.90625 -4,31925.90625 -5,31925.90625 -6,31925.90625 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.010/training_config.txt deleted file mode 100644 index 4d19dc0..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.010/training_log.csv deleted file mode 100644 index 2214cab..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,581.7706298828125 -2,468.0298156738281 -3,469.007080078125 -4,441.0478515625 -5,390.5971374511719 -6,377.1073913574219 -7,320.7305603027344 -8,291.64117431640625 -9,257.2821960449219 
-10,236.177978515625 -11,188.59217834472656 -12,168.631591796875 -13,158.88900756835938 -14,125.61302185058594 -15,116.99671936035156 -16,105.37611389160156 -17,85.10494232177734 -18,92.34785461425781 -19,80.1203384399414 -20,73.8039779663086 -21,71.3592300415039 -22,66.07292938232422 -23,63.71216583251953 -24,60.80020523071289 -25,56.94185256958008 -26,55.365779876708984 -27,55.2053108215332 -28,55.305328369140625 -29,54.92189025878906 -30,54.867183685302734 -31,55.44391632080078 -32,55.07365798950195 -33,54.52503967285156 -34,52.19340896606445 -35,51.061187744140625 -36,50.09609603881836 -37,50.630714416503906 -38,48.24248123168945 -39,46.526615142822266 -40,45.18681716918945 -41,43.969520568847656 -42,42.71685791015625 -43,41.34776306152344 -44,39.84278106689453 -45,38.37215805053711 -46,37.22016906738281 -47,36.12746047973633 -48,37.35497283935547 -49,39.37238693237305 -50,37.65263748168945 -51,38.60310745239258 -52,38.26078414916992 -53,37.37583923339844 -54,28.284252166748047 -55,27.603029251098633 -56,24.083263397216797 -57,23.45296859741211 -58,23.50531768798828 -59,24.07870101928711 -60,22.06142807006836 -61,21.127094268798828 -62,19.596139907836914 -63,19.089017868041992 -64,18.343780517578125 -65,18.20134162902832 -66,17.95937728881836 -67,17.76194953918457 -68,17.74785614013672 -69,17.674169540405273 -70,17.61143684387207 -71,17.721593856811523 -72,17.099611282348633 -73,16.267263412475586 -74,15.887069702148438 -75,15.698832511901855 -76,15.58225154876709 -77,15.50716781616211 -78,15.460354804992676 -79,15.432363510131836 -80,15.411523818969727 -81,15.369704246520996 -82,15.049057960510254 -83,14.430950164794922 -84,14.374759674072266 -85,14.354023933410645 -86,14.34207820892334 -87,14.327835083007812 -88,14.305949211120605 -89,14.274096488952637 -90,14.23151683807373 -91,14.178555488586426 -92,14.116066932678223 -93,14.045594215393066 -94,13.969057083129883 -95,13.888701438903809 -96,13.8069429397583 -97,13.72602367401123 -98,13.648045539855957 -99,13.574447631835938 -100,13.50616455078125 -101,13.443608283996582 -102,13.386941909790039 -103,13.336005210876465 -104,13.290421485900879 -105,13.24963665008545 -106,13.213125228881836 -107,13.180224418640137 -108,13.150297164916992 -109,13.12266731262207 -110,13.096719741821289 -111,13.071844100952148 -112,13.04747486114502 -113,13.02316665649414 -114,12.998566627502441 -115,12.97342586517334 -116,12.947585105895996 -117,12.921022415161133 -118,12.893843650817871 -119,12.866177558898926 -120,12.8382568359375 -121,12.810392379760742 -122,12.782877922058105 -123,12.755934715270996 -124,12.729741096496582 -125,12.70441722869873 -126,12.68005084991455 -127,12.656671524047852 -128,12.634231567382812 -129,12.612651824951172 -130,12.591832160949707 -131,12.571612358093262 -132,12.551867485046387 -133,12.532462120056152 -134,12.513262748718262 -135,12.494150161743164 -136,12.475028038024902 -137,12.455829620361328 -138,12.436532974243164 -139,12.417177200317383 -140,12.397788047790527 -141,12.378355026245117 -142,12.3589506149292 -143,12.339597702026367 -144,12.320383071899414 -145,12.30134391784668 -146,12.282533645629883 -147,12.26398754119873 -148,12.245734214782715 -149,12.227776527404785 -150,12.210081100463867 -151,12.192609786987305 -152,12.175333976745605 -153,12.158190727233887 -154,12.141161918640137 -155,12.12419605255127 -156,12.107274055480957 -157,12.090373992919922 -158,12.073487281799316 -159,12.05663776397705 -160,12.03984546661377 -161,12.023124694824219 -162,12.006511688232422 -163,11.990013122558594 
-164,11.973662376403809 -165,11.957443237304688 -166,11.941360473632812 -167,11.925374984741211 -168,11.909476280212402 -169,11.893655776977539 -170,11.877880096435547 -171,11.862139701843262 -172,11.846423149108887 -173,11.830751419067383 -174,11.815116882324219 -175,11.799516677856445 -176,11.783975601196289 -177,11.768503189086914 -178,11.753117561340332 -179,11.737807273864746 -180,11.722573280334473 -181,11.707402229309082 -182,11.692267417907715 -183,11.677176475524902 -184,11.662109375 -185,11.647078514099121 -186,11.632075309753418 -187,11.617114067077637 -188,11.602178573608398 -189,11.587257385253906 -190,11.572362899780273 -191,11.557453155517578 -192,11.542553901672363 -193,11.527619361877441 -194,11.512664794921875 -195,11.497668266296387 -196,11.482609748840332 -197,11.467482566833496 -198,11.452293395996094 -199,11.437017440795898 -200,11.421662330627441 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.020/training_config.txt deleted file mode 100644 index c4d9299..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.020/training_log.csv 
b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.020/training_log.csv deleted file mode 100644 index 0b7260b..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,520.7737426757812 -2,340.912353515625 -3,181.20138549804688 -4,109.0204849243164 -5,103.49079895019531 -6,82.41400146484375 -7,81.9650650024414 -8,78.24170684814453 -9,72.3687973022461 -10,63.24696731567383 -11,62.061683654785156 -12,66.67284393310547 -13,65.0934066772461 -14,63.63484191894531 -15,55.01188659667969 -16,52.24326705932617 -17,50.224578857421875 -18,48.17206954956055 -19,46.309112548828125 -20,44.26233673095703 -21,39.98934555053711 -22,37.72742462158203 -23,36.89242935180664 -24,36.66564178466797 -25,34.78876876831055 -26,33.99452590942383 -27,34.462486267089844 -28,35.10823059082031 -29,33.75347900390625 -30,33.7851448059082 -31,31.820951461791992 -32,31.221418380737305 -33,31.113361358642578 -34,30.841306686401367 -35,30.1240177154541 -36,29.4637508392334 -37,29.009523391723633 -38,28.69603729248047 -39,28.496244430541992 -40,28.335084915161133 -41,28.139307022094727 -42,27.907915115356445 -43,27.654415130615234 -44,27.370792388916016 -45,27.0428466796875 -46,26.674259185791016 -47,26.289630889892578 -48,25.89352798461914 -49,25.457651138305664 -50,26.520111083984375 -51,26.343372344970703 -52,25.6407470703125 -53,25.322864532470703 -54,25.101470947265625 -55,24.864042282104492 -56,24.096227645874023 -57,23.801774978637695 -58,23.72123146057129 -59,24.230247497558594 -60,24.697832107543945 -61,23.69922637939453 -62,23.723480224609375 -63,24.201122283935547 -64,23.745323181152344 -65,23.512136459350586 -66,23.363529205322266 -67,23.384599685668945 -68,23.37881851196289 -69,23.126367568969727 -70,22.93842887878418 -71,23.393400192260742 -72,22.94647216796875 -73,22.91169548034668 -74,23.02279281616211 -75,22.98670768737793 -76,22.826385498046875 -77,22.632251739501953 -78,22.465003967285156 -79,23.719257354736328 -80,22.895612716674805 -81,23.040973663330078 -82,23.22614860534668 -83,22.63787841796875 -84,22.698354721069336 -85,23.19750213623047 -86,22.768156051635742 -87,25.363744735717773 -88,24.384506225585938 -89,25.809680938720703 -90,19.58843421936035 -91,21.98828887939453 -92,20.6356201171875 -93,20.562620162963867 -94,22.760114669799805 -95,21.404109954833984 -96,25.522432327270508 -97,26.105985641479492 -98,25.92936134338379 -99,32.3346061706543 -100,31.617664337158203 -101,32.9183235168457 -102,27.858003616333008 -103,35.7903938293457 -104,36.70928955078125 -105,33.73346710205078 -106,32.49525833129883 -107,34.336299896240234 -108,36.75007247924805 -109,35.68425750732422 -110,31.876840591430664 -111,30.82003402709961 -112,30.37769889831543 -113,30.54852294921875 -114,29.13556480407715 -115,27.691801071166992 -116,27.65964698791504 -117,27.559690475463867 -118,27.6021671295166 -119,27.94300651550293 -120,28.764110565185547 -121,28.88528060913086 -122,29.1801815032959 -123,29.438241958618164 -124,29.349878311157227 -125,29.045076370239258 -126,28.913440704345703 -127,28.997886657714844 -128,28.36014747619629 -129,28.21868133544922 -130,26.912338256835938 -131,26.639419555664062 -132,25.70199966430664 -133,24.036409378051758 -134,23.847993850708008 -135,23.61438751220703 -136,23.941280364990234 -137,23.56439208984375 -138,22.470951080322266 -139,21.976625442504883 -140,22.93682098388672 -141,28.42702293395996 -142,27.997249603271484 -143,26.43709373474121 -144,23.448177337646484 
-145,19.735597610473633 -146,28.680511474609375 -147,24.054445266723633 -148,23.906625747680664 -149,21.3740234375 -150,26.378803253173828 -151,18.42880630493164 -152,17.601337432861328 -153,16.304479598999023 -154,14.12382698059082 -155,13.432028770446777 -156,12.875873565673828 -157,12.456687927246094 -158,11.127025604248047 -159,13.40563678741455 -160,11.138041496276855 -161,11.087270736694336 -162,11.172639846801758 -163,11.201703071594238 -164,11.189099311828613 -165,11.143587112426758 -166,11.069830894470215 -167,10.974747657775879 -168,10.908577919006348 -169,10.84056282043457 -170,10.711273193359375 -171,10.519832611083984 -172,10.198266983032227 -173,9.829547882080078 -174,9.735250473022461 -175,9.74103832244873 -176,9.644810676574707 -177,9.500774383544922 -178,9.39627456665039 -179,9.321648597717285 -180,9.259865760803223 -181,9.20258903503418 -182,9.147822380065918 -183,9.09595775604248 -184,9.047322273254395 -185,9.003206253051758 -186,8.964994430541992 -187,8.933485984802246 -188,8.908997535705566 -189,8.89124584197998 -190,8.878981590270996 -191,8.869183540344238 -192,8.857627868652344 -193,8.840396881103516 -194,8.815982818603516 -195,8.786016464233398 -196,8.754426956176758 -197,8.724762916564941 -198,8.698836326599121 -199,8.676846504211426 -200,8.657912254333496 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.040/training_config.txt deleted file mode 100644 index 25049be..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] 
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.040/training_log.csv deleted file mode 100644 index fa0cd56..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,410.368896484375 -2,113.18529510498047 -3,84.77518463134766 -4,59.872005462646484 -5,44.36603546142578 -6,31.51449966430664 -7,24.807832717895508 -8,22.75552749633789 -9,21.89188575744629 -10,24.79701042175293 -11,25.324289321899414 -12,29.470909118652344 -13,26.31184959411621 -14,26.43239402770996 -15,33.78657913208008 -16,29.014957427978516 -17,30.40915298461914 -18,33.02522277832031 -19,31.30943489074707 -20,26.84795379638672 -21,32.32634353637695 -22,21.10965919494629 -23,21.604219436645508 -24,25.191452026367188 -25,21.06332015991211 -26,17.772104263305664 -27,17.394811630249023 -28,19.43138313293457 -29,18.704317092895508 -30,20.003664016723633 -31,19.41387176513672 -32,18.961986541748047 -33,18.76471710205078 -34,18.67190933227539 -35,18.67093276977539 -36,18.804662704467773 -37,18.79817771911621 -38,18.68048095703125 -39,18.426685333251953 -40,17.894227981567383 -41,17.353805541992188 -42,17.128747940063477 -43,17.06204605102539 -44,16.992036819458008 -45,16.799427032470703 -46,16.39339256286621 -47,16.116046905517578 -48,15.703583717346191 -49,15.614557266235352 -50,15.686274528503418 -51,15.755769729614258 -52,15.770317077636719 -53,15.73369312286377 -54,15.665719985961914 -55,15.581155776977539 -56,15.492616653442383 -57,15.426603317260742 -58,15.384469032287598 -59,15.3358154296875 -60,15.271660804748535 -61,15.195670127868652 -62,15.112595558166504 -63,15.024920463562012 -64,14.93224048614502 -65,14.83311653137207 -66,14.749201774597168 -67,14.758663177490234 -68,14.721831321716309 -69,14.700638771057129 -70,14.653159141540527 -71,14.583651542663574 -72,14.51943588256836 -73,14.47624683380127 -74,14.447315216064453 -75,14.424970626831055 -76,14.404275894165039 -77,14.37685489654541 -78,14.337363243103027 -79,14.29471492767334 -80,14.263190269470215 -81,14.246050834655762 -82,14.241560935974121 -83,14.239540100097656 -84,14.22447681427002 -85,14.193379402160645 -86,14.159337997436523 -87,14.135546684265137 -88,14.121784210205078 -89,14.110355377197266 -90,14.094185829162598 -91,14.073676109313965 -92,14.051698684692383 -93,14.029726028442383 -94,14.008625030517578 -95,13.988798141479492 -96,13.977386474609375 -97,13.96567440032959 -98,13.948260307312012 -99,13.927279472351074 -100,13.908082008361816 -101,13.89370346069336 -102,13.877907752990723 -103,13.862129211425781 -104,13.84601879119873 -105,13.82486343383789 -106,13.804795265197754 -107,13.787774085998535 -108,13.768007278442383 -109,13.751520156860352 -110,13.735626220703125 -111,13.716300010681152 -112,13.699606895446777 -113,13.683011054992676 -114,13.669960021972656 -115,13.655876159667969 -116,13.641371726989746 -117,13.62720775604248 -118,13.614686012268066 -119,13.60168743133545 -120,13.590032577514648 -121,13.575486183166504 -122,13.561928749084473 -123,13.548994064331055 -124,13.535585403442383 -125,13.522146224975586 -126,13.508752822875977 -127,13.495174407958984 
-128,13.48065185546875 -129,13.46711254119873 -130,13.453911781311035 -131,13.440312385559082 -132,13.427899360656738 -133,13.413697242736816 -134,13.4017972946167 -135,13.386423110961914 -136,13.375391960144043 -137,13.358508110046387 -138,13.346576690673828 -139,13.329976081848145 -140,13.317486763000488 -141,13.302874565124512 -142,13.295974731445312 -143,13.273179054260254 -144,13.257844924926758 -145,13.243656158447266 -146,13.233498573303223 -147,13.221948623657227 -148,13.211332321166992 -149,13.18460750579834 -150,13.17369556427002 -151,13.186066627502441 -152,13.148213386535645 -153,13.180322647094727 -154,13.146881103515625 -155,13.12772274017334 -156,13.089703559875488 -157,13.124689102172852 -158,13.078454971313477 -159,13.086999893188477 -160,13.0311861038208 -161,13.086277961730957 -162,13.003351211547852 -163,13.077146530151367 -164,12.992915153503418 -165,13.041984558105469 -166,12.964427947998047 -167,13.003296852111816 -168,12.898677825927734 -169,12.989709854125977 -170,12.879809379577637 -171,12.932262420654297 -172,12.86334228515625 -173,12.865266799926758 -174,12.814117431640625 -175,12.798364639282227 -176,12.76343059539795 -177,12.763066291809082 -178,12.719914436340332 -179,12.722711563110352 -180,12.69422435760498 -181,12.704399108886719 -182,12.714585304260254 -183,12.643266677856445 -184,12.67757797241211 -185,12.716867446899414 -186,12.634110450744629 -187,12.74094295501709 -188,12.857425689697266 -189,12.836788177490234 -190,12.633393287658691 -191,13.08382511138916 -192,13.45836353302002 -193,14.21115779876709 -194,13.880846977233887 -195,13.392598152160645 -196,13.23855972290039 -197,13.641399383544922 -198,13.158899307250977 -199,13.334186553955078 -200,13.554368019104004 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.050/training_config.txt deleted file mode 100644 index 73e4699..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 
0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.050/training_log.csv deleted file mode 100644 index 8db7d0e..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,370.0522766113281 -2,111.8692398071289 -3,60.797767639160156 -4,51.21772766113281 -5,30.232458114624023 -6,31.17424201965332 -7,28.653284072875977 -8,29.516395568847656 -9,27.535158157348633 -10,23.902883529663086 -11,17.09711265563965 -12,15.620129585266113 -13,16.719482421875 -14,10.121221542358398 -15,8.684925079345703 -16,7.632584571838379 -17,7.089570999145508 -18,7.305871486663818 -19,6.612246513366699 -20,6.602084636688232 -21,6.3671393394470215 -22,6.329634189605713 -23,6.067723751068115 -24,5.690688133239746 -25,5.417752265930176 -26,5.212788105010986 -27,5.076137065887451 -28,4.960348606109619 -29,4.80723762512207 -30,4.693823337554932 -31,4.660793781280518 -32,4.594441890716553 -33,4.606668472290039 -34,4.602094650268555 -35,4.2595696449279785 -36,3.7702674865722656 -37,3.687284469604492 -38,3.633458137512207 -39,3.5677330493927 -40,3.4396212100982666 -41,3.1755290031433105 -42,2.984341859817505 -43,2.895517110824585 -44,2.7489359378814697 -45,2.5556833744049072 -46,2.458944320678711 -47,2.385711193084717 -48,2.327066659927368 -49,2.3114960193634033 -50,2.2860984802246094 -51,2.252836227416992 -52,2.216209888458252 -53,2.17873215675354 -54,2.141385793685913 -55,2.1037375926971436 -56,2.0645031929016113 -57,2.0219411849975586 -58,1.9767848253250122 -59,1.937328815460205 -60,1.9086463451385498 -61,1.8850644826889038 -62,1.8631411790847778 -63,1.841306447982788 -64,1.818933367729187 -65,1.7958297729492188 -66,1.7720681428909302 -67,1.7476648092269897 -68,1.7226775884628296 -69,1.6972249746322632 -70,1.6714588403701782 -71,1.6454819440841675 -72,1.6194641590118408 -73,1.5937281847000122 -74,1.5688430070877075 -75,1.545021891593933 -76,1.5224664211273193 -77,1.5014101266860962 -78,1.481918215751648 -79,1.4638487100601196 -80,1.4468252658843994 -81,1.4306814670562744 -82,1.4147937297821045 -83,1.3990387916564941 -84,1.3834933042526245 -85,1.3683315515518188 -86,1.353670597076416 -87,1.3394429683685303 -88,1.3254815340042114 -89,1.3116724491119385 -90,1.29789137840271 -91,1.2837737798690796 -92,1.2689968347549438 -93,1.2533072233200073 -94,1.2364124059677124 -95,1.2180190086364746 -96,1.1980830430984497 -97,1.177155613899231 -98,1.157707691192627 -99,1.1424654722213745 -100,1.1292250156402588 -101,1.116491436958313 -102,1.103968858718872 -103,1.0916649103164673 -104,1.0796297788619995 -105,1.0679770708084106 -106,1.0567885637283325 -107,1.046143651008606 -108,1.0360825061798096 -109,1.0265926122665405 -110,1.0176066160202026 
-111,1.0089861154556274 -112,1.0006167888641357 -113,0.9923815727233887 -114,0.9842138886451721 -115,0.976050078868866 -116,0.9678827524185181 -117,0.9597356915473938 -118,0.9516530632972717 -119,0.9436708092689514 -120,0.9358029961585999 -121,0.9280312657356262 -122,0.9203299880027771 -123,0.9126593470573425 -124,0.9049883484840393 -125,0.8973098397254944 -126,0.8896274566650391 -127,0.8819509148597717 -128,0.8743067979812622 -129,0.8667264580726624 -130,0.8592394590377808 -131,0.851852536201477 -132,0.8445624113082886 -133,0.8373501300811768 -134,0.8301924467086792 -135,0.8230836391448975 -136,0.8160255551338196 -137,0.8090165853500366 -138,0.8020529747009277 -139,0.7951239943504333 -140,0.7882083654403687 -141,0.7812821269035339 -142,0.7743194103240967 -143,0.7672808766365051 -144,0.7601207494735718 -145,0.7527958750724792 -146,0.7452720403671265 -147,0.7375025153160095 -148,0.729436457157135 -149,0.7210358381271362 -150,0.7122822403907776 -151,0.7031646370887756 -152,0.693690836429596 -153,0.6838189959526062 -154,0.6736124157905579 -155,0.6631390452384949 -156,0.6525911688804626 -157,0.6421676278114319 -158,0.6319819092750549 -159,0.6221649646759033 -160,0.6128794550895691 -161,0.6043565273284912 -162,0.5964052677154541 -163,0.5888178944587708 -164,0.5815057754516602 -165,0.5744302272796631 -166,0.5675936937332153 -167,0.5610436797142029 -168,0.5548085570335388 -169,0.5489048361778259 -170,0.5433390736579895 -171,0.5380873084068298 -172,0.5331280827522278 -173,0.5284225940704346 -174,0.5239037871360779 -175,0.5194939374923706 -176,0.515083372592926 -177,0.5105336904525757 -178,0.5056160688400269 -179,0.4998399019241333 -180,0.49199095368385315 -181,0.4786716103553772 -182,0.45341169834136963 -183,0.4371047914028168 -184,0.43262287974357605 -185,0.428873747587204 -186,0.425496369600296 -187,0.42239758372306824 -188,0.41952380537986755 -189,0.4168068766593933 -190,0.41419506072998047 -191,0.4116256535053253 -192,0.40903279185295105 -193,0.4063630998134613 -194,0.4035852253437042 -195,0.40069785714149475 -196,0.39772582054138184 -197,0.3947076201438904 -198,0.39168447256088257 -199,0.388706773519516 -200,0.38580405712127686 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.080/training_config.txt deleted file mode 100644 index 4167293..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - 
min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.080/training_log.csv deleted file mode 100644 index bd20852..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,253.987548828125 -2,76.23115539550781 -3,116.44664764404297 -4,53.71223449707031 -5,41.28310775756836 -6,130.91725158691406 -7,179.52505493164062 -8,55.14667510986328 -9,32.30284118652344 -10,24.645599365234375 -11,26.848886489868164 -12,25.39212417602539 -13,21.534809112548828 -14,20.628023147583008 -15,20.023698806762695 -16,19.754894256591797 -17,19.73231315612793 -18,19.698720932006836 -19,19.01545524597168 -20,17.946420669555664 -21,17.399089813232422 -22,17.198881149291992 -23,17.016759872436523 -24,16.838945388793945 -25,16.656475067138672 -26,16.45221710205078 -27,16.211776733398438 -28,15.922755241394043 -29,15.568243980407715 -30,15.13542652130127 -31,14.640965461730957 -32,14.3178129196167 -33,14.023221969604492 -34,13.627725601196289 -35,13.22779369354248 -36,12.843360900878906 -37,12.48727798461914 -38,12.152177810668945 -39,11.839917182922363 -40,11.56177043914795 -41,11.326336860656738 -42,11.130499839782715 -43,10.960489273071289 -44,10.804200172424316 -45,10.654690742492676 -46,10.509428977966309 -47,10.367362022399902 -48,10.228575706481934 -49,10.09389877319336 -50,9.965310096740723 -51,9.843452453613281 -52,9.728758811950684 -53,9.621143341064453 -54,9.519872665405273 -55,9.424113273620605 -56,9.332998275756836 -57,9.245694160461426 -58,9.160956382751465 -59,9.078062057495117 -60,8.996512413024902 -61,8.915810585021973 -62,8.834613800048828 -63,8.752059936523438 -64,8.667486190795898 -65,8.58043384552002 -66,8.490471839904785 -67,8.397614479064941 -68,8.302034378051758 -69,8.204492568969727 -70,8.106618881225586 -71,8.009978294372559 -72,7.915015697479248 -73,7.820973873138428 -74,7.7255988121032715 -75,7.627890586853027 -76,7.529741287231445 -77,7.430533409118652 -78,7.327372074127197 -79,7.220229625701904 -80,7.109520435333252 -81,6.995507717132568 -82,6.878508567810059 -83,6.756538391113281 -84,6.629217147827148 -85,6.497682571411133 -86,6.362703323364258 -87,6.226380825042725 -88,6.091292381286621 -89,5.960796356201172 -90,5.834078788757324 -91,5.708850860595703 -92,5.583800315856934 
-93,5.45860481262207 -94,5.33300256729126 -95,5.206200122833252 -96,5.076318740844727 -97,4.94097375869751 -98,4.807156085968018 -99,4.6920928955078125 -100,4.579680919647217 -101,4.466705799102783 -102,4.351119518280029 -103,4.232757091522217 -104,4.118419170379639 -105,4.018263816833496 -106,3.924201250076294 -107,3.8333756923675537 -108,3.7455930709838867 -109,3.6603283882141113 -110,3.577441692352295 -111,3.496868371963501 -112,3.418329954147339 -113,3.342017650604248 -114,3.2674641609191895 -115,3.194373846054077 -116,3.1227829456329346 -117,3.052582025527954 -118,2.9831249713897705 -119,2.913954496383667 -120,2.8450045585632324 -121,2.77643084526062 -122,2.708172559738159 -123,2.639993190765381 -124,2.5718166828155518 -125,2.503614664077759 -126,2.434885025024414 -127,2.364274501800537 -128,2.289760112762451 -129,2.2103121280670166 -130,2.1302995681762695 -131,2.056424856185913 -132,1.9928491115570068 -133,1.9358185529708862 -134,1.8816498517990112 -135,1.8295328617095947 -136,1.7787015438079834 -137,1.728515625 -138,1.6786561012268066 -139,1.6289606094360352 -140,1.5791305303573608 -141,1.528902292251587 -142,1.4781135320663452 -143,1.4269639253616333 -144,1.3756808042526245 -145,1.3239703178405762 -146,1.2719745635986328 -147,1.2199747562408447 -148,1.1692194938659668 -149,1.119960904121399 -150,1.072085976600647 -151,1.0255413055419922 -152,0.9805084466934204 -153,0.937211811542511 -154,0.8956169486045837 -155,0.8554689884185791 -156,0.8170534372329712 -157,0.7804709076881409 -158,0.7456390261650085 -159,0.7124707698822021 -160,0.6809573769569397 -161,0.6510970592498779 -162,0.6226446628570557 -163,0.5953128933906555 -164,0.5691691040992737 -165,0.5440745949745178 -166,0.5199047923088074 -167,0.4965907335281372 -168,0.47410640120506287 -169,0.4524747431278229 -170,0.43165910243988037 -171,0.41154491901397705 -172,0.3920416235923767 -173,0.3732251524925232 -174,0.3550604283809662 -175,0.3376021385192871 -176,0.32086125016212463 -177,0.3048360347747803 -178,0.28957685828208923 -179,0.27533453702926636 -180,0.26190704107284546 -181,0.2490060180425644 -182,0.23662640154361725 -183,0.22499680519104004 -184,0.21421939134597778 -185,0.20416581630706787 -186,0.19473056495189667 -187,0.18596318364143372 -188,0.1780644804239273 -189,0.1711084395647049 -190,0.16488897800445557 -191,0.159454807639122 -192,0.15486742556095123 -193,0.151092067360878 -194,0.14798603951931 -195,0.14547519385814667 -196,0.14363504946231842 -197,0.1423536092042923 -198,0.1414196491241455 -199,0.14078596234321594 -200,0.14040324091911316 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.100/training_config.txt deleted file mode 100644 index a328267..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the 
weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.100/training_log.csv deleted file mode 100644 index 6407eda..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,226.78643798828125 -2,169.30392456054688 -3,31.624174118041992 -4,13.067558288574219 -5,7.644680500030518 -6,6.0411176681518555 -7,5.177702903747559 -8,5.056391716003418 -9,4.9485907554626465 -10,4.828255653381348 -11,4.689085960388184 -12,4.532138347625732 -13,4.468266487121582 -14,4.4474778175354 -15,4.4113969802856445 -16,4.3560004234313965 -17,4.285882949829102 -18,4.2041120529174805 -19,4.112846374511719 -20,4.0139241218566895 -21,3.909283399581909 -22,3.79890775680542 -23,3.6831817626953125 -24,3.5619125366210938 -25,3.4348788261413574 -26,3.3012428283691406 -27,3.159842014312744 -28,3.0072290897369385 -29,2.8389363288879395 -30,2.64157772064209 -31,2.39349365234375 -32,2.1415512561798096 -33,1.9266163110733032 -34,1.7257485389709473 -35,1.5395596027374268 -36,1.3776159286499023 -37,1.240488052368164 -38,1.1225584745407104 -39,1.0186032056808472 -40,0.9259491562843323 -41,0.842423677444458 -42,0.7664889693260193 -43,0.6968558430671692 -44,0.6326596736907959 -45,0.5730408430099487 -46,0.5175361037254333 -47,0.4656223952770233 -48,0.4167647361755371 -49,0.3706209361553192 -50,0.3269239068031311 -51,0.28558558225631714 -52,0.24683280289173126 -53,0.21141231060028076 -54,0.18086257576942444 -55,0.15641579031944275 -56,0.13845279812812805 -57,0.12633197009563446 -58,0.11915785074234009 -59,0.1160615086555481 -60,0.11615831404924393 -61,0.11859457194805145 -62,0.12253432720899582 -63,0.12717926502227783 -64,0.13187499344348907 -65,0.13610957562923431 -66,0.13951142132282257 -67,0.14184407889842987 -68,0.14298473298549652 -69,0.14290527999401093 -70,0.14165101945400238 -71,0.13933278620243073 -72,0.13610875606536865 
-73,0.13215547800064087 -74,0.12767553329467773 -75,0.12288719415664673 -76,0.11800974607467651 -77,0.11327232420444489 -78,0.10888805985450745 -79,0.10506054759025574 -80,0.10196330398321152 -81,0.09969043731689453 -82,0.09823372960090637 -83,0.0974954292178154 -84,0.09731645882129669 -85,0.09750548005104065 -86,0.09787514060735703 -87,0.09826552867889404 -88,0.09855633229017258 -89,0.09867020696401596 -90,0.09856891632080078 -91,0.09824905544519424 -92,0.09773977845907211 -93,0.09708666801452637 -94,0.09634783118963242 -95,0.09558475762605667 -96,0.09485550969839096 -97,0.09419716894626617 -98,0.09362940490245819 -99,0.09315848350524902 -100,0.09277816116809845 -101,0.09247352927923203 -102,0.09222688525915146 -103,0.09202000498771667 -104,0.09183523803949356 -105,0.09165693074464798 -106,0.09147191047668457 -107,0.09127146005630493 -108,0.09104924649000168 -109,0.09080290049314499 -110,0.09053349494934082 -111,0.09024493396282196 -112,0.08994259685277939 -113,0.08963346481323242 -114,0.08932514488697052 -115,0.08902416378259659 -116,0.08873619139194489 -117,0.08846510946750641 -118,0.08821290731430054 -119,0.08797962963581085 -120,0.08776340633630753 -121,0.08756094425916672 -122,0.08736766129732132 -123,0.08717863261699677 -124,0.08698956668376923 -125,0.08679686486721039 -126,0.08659794181585312 -127,0.08639173209667206 -128,0.08617856353521347 -129,0.08595936745405197 -130,0.08573614060878754 -131,0.08551125973463058 -132,0.08528666198253632 -133,0.08506478369235992 -134,0.08484722673892975 -135,0.08463483303785324 -136,0.08442780375480652 -137,0.08422568440437317 -138,0.08402794599533081 -139,0.08383362740278244 -140,0.08364173769950867 -141,0.08345124870538712 -142,0.08326122909784317 -143,0.08307122439146042 -144,0.08288084715604782 -145,0.08269001543521881 -146,0.0824989303946495 -147,0.08230794221162796 -148,0.08211720734834671 -149,0.08192731440067291 -150,0.08173844963312149 -151,0.08155111223459244 -152,0.0813654288649559 -153,0.08118154108524323 -154,0.0809994637966156 -155,0.08081886917352676 -156,0.08063967525959015 -157,0.08046182990074158 -158,0.08028510957956314 -159,0.08010949939489365 -160,0.07993483543395996 -161,0.07976096868515015 -162,0.07958812266588211 -163,0.07941622287034988 -164,0.07924585789442062 -165,0.07907664030790329 -166,0.07890842854976654 -167,0.07874129712581635 -168,0.0785752534866333 -169,0.07841020822525024 -170,0.07824625819921494 -171,0.07808338850736618 -172,0.07792160660028458 -173,0.07776085287332535 -174,0.07760098576545715 -175,0.07744210213422775 -176,0.07728419452905655 -177,0.07712724804878235 -178,0.07697130739688873 -179,0.07681634277105331 -180,0.0766623467206955 -181,0.07650943100452423 -182,0.07635742425918579 -183,0.07620655745267868 -184,0.07605662196874619 -185,0.07590772956609726 -186,0.07575984299182892 -187,0.07561281323432922 -188,0.07546669989824295 -189,0.07532152533531189 -190,0.07517724484205246 -191,0.07503387331962585 -192,0.07489129155874252 -193,0.07474968582391739 -194,0.07460892200469971 -195,0.07446908205747604 -196,0.0743301510810852 -197,0.07419213652610779 -198,0.0740550234913826 -199,0.07391873747110367 -200,0.07378323376178741 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.125/training_config.txt deleted file mode 100644 index c9f8f4d..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.125/training_log.csv deleted file mode 100644 index c79efad..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,207.47950744628906 -2,40.656314849853516 -3,566.3568725585938 -4,13.760509490966797 -5,6.479517936706543 -6,5.067081928253174 -7,4.737139701843262 -8,4.569192886352539 -9,4.250545978546143 -10,3.7933292388916016 -11,3.703293561935425 -12,3.574524402618408 -13,3.346031904220581 -14,3.18806791305542 -15,3.065720796585083 -16,2.9467084407806396 -17,2.8251657485961914 -18,2.701751947402954 -19,2.582113742828369 -20,2.4726386070251465 -21,2.3740110397338867 -22,2.2852344512939453 -23,2.2050464153289795 -24,2.130537748336792 -25,2.0593156814575195 -26,1.9900485277175903 -27,1.9215840101242065 -28,1.8524603843688965 -29,1.7816741466522217 -30,1.708741307258606 -31,1.6332776546478271 -32,1.5554555654525757 -33,1.476488471031189 -34,1.397375226020813 -35,1.3193411827087402 -36,1.2436983585357666 -37,1.1721203327178955 -38,1.1052765846252441 -39,1.043169379234314 -40,0.9855554103851318 -41,0.9320846199989319 -42,0.8822200894355774 -43,0.8350799679756165 -44,0.7901110649108887 -45,0.7470859289169312 -46,0.7058184742927551 
-47,0.6659914255142212 -48,0.6270641088485718 -49,0.5893626809120178 -50,0.5527290105819702 -51,0.5174515843391418 -52,0.4836897552013397 -53,0.451571524143219 -54,0.42100366950035095 -55,0.39163273572921753 -56,0.3639313578605652 -57,0.3379298150539398 -58,0.3136069178581238 -59,0.2910855710506439 -60,0.2703733444213867 -61,0.25187844038009644 -62,0.23555290699005127 -63,0.22165583074092865 -64,0.20999974012374878 -65,0.20087930560112 -66,0.19448888301849365 -67,0.19017179310321808 -68,0.18721309304237366 -69,0.18490351736545563 -70,0.18283040821552277 -71,0.18076865375041962 -72,0.1786055862903595 -73,0.17633308470249176 -74,0.17406953871250153 -75,0.17197705805301666 -76,0.1701914668083191 -77,0.1687961369752884 -78,0.1678069531917572 -79,0.16717906296253204 -80,0.16680990159511566 -81,0.1665501445531845 -82,0.1662413477897644 -83,0.16574299335479736 -84,0.1649569422006607 -85,0.16383472084999084 -86,0.1623801589012146 -87,0.16063980758190155 -88,0.15869441628456116 -89,0.1566416621208191 -90,0.154584601521492 -91,0.1526145488023758 -92,0.1508082151412964 -93,0.14921407401561737 -94,0.1478511393070221 -95,0.1467081904411316 -96,0.14575226604938507 -97,0.1449354737997055 -98,0.14420732855796814 -99,0.14352580904960632 -100,0.14285866916179657 -101,0.1421857476234436 -102,0.14149872958660126 -103,0.14079904556274414 -104,0.1400948166847229 -105,0.13939699530601501 -106,0.13871654868125916 -107,0.13806065917015076 -108,0.13743190467357635 -109,0.13682854175567627 -110,0.13624469935894012 -111,0.13567234575748444 -112,0.13510417938232422 -113,0.13453449308872223 -114,0.13396042585372925 -115,0.13338178396224976 -116,0.13280120491981506 -117,0.13222333788871765 -118,0.13165368139743805 -119,0.13109783828258514 -120,0.13056030869483948 -121,0.13004407286643982 -122,0.12954960763454437 -123,0.12907598912715912 -124,0.12862104177474976 -125,0.12818175554275513 -126,0.12775486707687378 -127,0.12733766436576843 -128,0.1269279271364212 -129,0.12652425467967987 -130,0.1261260211467743 -131,0.12573333084583282 -132,0.12534643709659576 -133,0.12496596574783325 -134,0.12459225952625275 -135,0.12422552704811096 -136,0.12386555969715118 -137,0.12351223081350327 -138,0.12316502630710602 -139,0.12282338738441467 -140,0.12248682975769043 -141,0.12215519696474075 -142,0.1218281239271164 -143,0.12150580435991287 -144,0.1211882010102272 -145,0.12087563425302505 -146,0.12056833505630493 -147,0.12026635557413101 -148,0.11996965855360031 -149,0.11967822164297104 -150,0.11939191073179245 -151,0.11911037564277649 -152,0.11883322894573212 -153,0.11856015771627426 -154,0.11829086393117905 -155,0.11802516877651215 -156,0.11776299774646759 -157,0.11750399321317673 -158,0.11724824458360672 -159,0.116995669901371 -160,0.11674628406763077 -161,0.11649997532367706 -162,0.11625675857067108 -163,0.1160166934132576 -164,0.11577960848808289 -165,0.1155453622341156 -166,0.1153138130903244 -167,0.11508484929800034 -168,0.11485844105482101 -169,0.11463456600904465 -170,0.11441310495138168 -171,0.11419405788183212 -172,0.11397738009691238 -173,0.1137630045413971 -174,0.11355092376470566 -175,0.11334098875522614 -176,0.11313319206237793 -177,0.1129273772239685 -178,0.11272364109754562 -179,0.11252175271511078 -180,0.11232175678014755 -181,0.11212354898452759 -182,0.11192705482244492 -183,0.11173222959041595 -184,0.1115390732884407 -185,0.11134753376245499 -186,0.11115753650665283 -187,0.11096908152103424 -188,0.11078211665153503 -189,0.11059664934873581 -190,0.11041259765625 -191,0.11022993922233582 -192,0.11004854738712311 
-193,0.10986840724945068 -194,0.10968951135873795 -195,0.1095118597149849 -196,0.10933537036180496 -197,0.10916010290384293 -198,0.10898597538471222 -199,0.1088128536939621 -200,0.10864076018333435 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.160/training_config.txt deleted file mode 100644 index 011b2cd..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.160/training_log.csv deleted file mode 100644 index 03dc358..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,228.90438842773438 -2,17.200395584106445 -3,16.940629959106445 -4,16.01431655883789 -5,14.506689071655273 -6,12.765676498413086 -7,11.12114143371582 -8,9.554487228393555 -9,8.388968467712402 -10,7.699433326721191 -11,7.09894323348999 -12,6.535335063934326 -13,5.974241256713867 -14,5.391449928283691 -15,4.828124523162842 -16,4.31964635848999 -17,3.842970609664917 -18,3.4221689701080322 -19,3.0621087551116943 -20,2.7522213459014893 -21,2.482579231262207 
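
Each training-case row above is [theta0, omega0, alpha0, desired_theta]: the commanded angle rides along as a fourth state component next to the pendulum state, which is how loss_fn recovers it from state_traj[:, :, 3]. A minimal sketch of batching a few of these rows, assuming the simulator consumes a [batch_size, 4] tensor of initial conditions (hypothetical interface):

    import torch

    # Three representative rows from the config (subset, for illustration):
    training_cases = [
        [0.5235987901687622, 0.0, 0.0, 0.0],   # pi/6 initial offset, regulate to 0
        [0.0, 6.2831854820251465, 0.0, 0.0],   # 2*pi rad/s initial spin, regulate to 0
        [0.0, 0.0, 0.0, 1.5707963705062866],   # at rest, step command to pi/2
    ]
    x0 = torch.tensor(training_cases)          # [batch_size, 4]
    theta0, desired_theta = x0[:, 0], x0[:, 3]
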
-22,2.2447071075439453 -23,2.0318379402160645 -24,1.8431413173675537 -25,1.6854183673858643 -26,1.558437466621399 -27,1.4574005603790283 -28,1.3735475540161133 -29,1.3011589050292969 -30,1.2300024032592773 -31,1.1614187955856323 -32,1.0900537967681885 -33,1.0182714462280273 -34,0.9479285478591919 -35,0.8770628571510315 -36,0.8056542873382568 -37,0.7342655658721924 -38,0.6638572216033936 -39,0.594894289970398 -40,0.5303130745887756 -41,0.4722886085510254 -42,0.42129164934158325 -43,0.37734454870224 -44,0.3405066132545471 -45,0.31000956892967224 -46,0.28466811776161194 -47,0.26353001594543457 -48,0.24541868269443512 -49,0.229438915848732 -50,0.21520869433879852 -51,0.20243456959724426 -52,0.19071871042251587 -53,0.17945070564746857 -54,0.16853950917720795 -55,0.1582174152135849 -56,0.14855903387069702 -57,0.13965415954589844 -58,0.13139328360557556 -59,0.12376795709133148 -60,0.11694616824388504 -61,0.1109781488776207 -62,0.10590232163667679 -63,0.1017935648560524 -64,0.09857196360826492 -65,0.09614334255456924 -66,0.09442134201526642 -67,0.09329869598150253 -68,0.09267105907201767 -69,0.09243880957365036 -70,0.09249591827392578 -71,0.09274080395698547 -72,0.09308341890573502 -73,0.09344485402107239 -74,0.09377136081457138 -75,0.09402041882276535 -76,0.09416269510984421 -77,0.09418103098869324 -78,0.09407143294811249 -79,0.0938396230340004 -80,0.09349723905324936 -81,0.09306230396032333 -82,0.09255467355251312 -83,0.09199466556310654 -84,0.09140266478061676 -85,0.09079749882221222 -86,0.09019538760185242 -87,0.08960986882448196 -88,0.08905074000358582 -89,0.08852485567331314 -90,0.08803630620241165 -91,0.0875868871808052 -92,0.08717479556798935 -93,0.08679665625095367 -94,0.08644852787256241 -95,0.08612698316574097 -96,0.08582800626754761 -97,0.08554650843143463 -98,0.08527780324220657 -99,0.08501791208982468 -100,0.08476356416940689 -101,0.08451250195503235 -102,0.08426322788000107 -103,0.08401505649089813 -104,0.08376798033714294 -105,0.08352313190698624 -106,0.08328026533126831 -107,0.08304037898778915 -108,0.08280446380376816 -109,0.08257360011339188 -110,0.08234853297472 -111,0.08212979137897491 -112,0.08191744983196259 -113,0.08171164989471436 -114,0.08151215314865112 -115,0.08131831139326096 -116,0.08112943917512894 -117,0.08094492554664612 -118,0.08076371997594833 -119,0.08058513700962067 -120,0.08040846139192581 -121,0.08023299276828766 -122,0.08005841821432114 -123,0.07988433539867401 -124,0.07971060276031494 -125,0.07953721284866333 -126,0.0793641060590744 -127,0.07919149100780487 -128,0.07901956140995026 -129,0.07884877920150757 -130,0.0786791518330574 -131,0.07851091772317886 -132,0.07834435999393463 -133,0.07817952334880829 -134,0.07801641523838043 -135,0.07785505056381226 -136,0.07769538462162018 -137,0.07753734290599823 -138,0.07738101482391357 -139,0.07722630351781845 -140,0.0770731121301651 -141,0.07692138850688934 -142,0.07677110284566879 -143,0.07662226259708405 -144,0.07647474110126495 -145,0.07632837444543839 -146,0.07618318498134613 -147,0.07603957504034042 -148,0.07589714229106903 -149,0.07575570046901703 -150,0.07561518251895905 -151,0.07547545433044434 -152,0.07533644139766693 -153,0.07519841194152832 -154,0.07506129890680313 -155,0.07492505759000778 -156,0.07478982955217361 -157,0.07465555518865585 -158,0.0745222419500351 -159,0.07438972592353821 -160,0.07425817102193832 -161,0.07412760704755783 -162,0.07399783283472061 -163,0.07386884838342667 -164,0.07374073565006256 -165,0.07361356168985367 -166,0.07348722219467163 -167,0.07336162775754929 
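
One subtlety in the loss_fn that every config in this sweep repeats: the inline comments label theta as [batch_size, t_points], but weights.view(-1, 1) yields a [t_points, 1] tensor, and that only broadcasts against theta if the time axis comes first (e.g. the [t_points, batch_size, state] layout returned by torchdiffeq-style odeint). A quick shape check under that assumed layout:

    import torch

    t_points, batch_size = 1000, 22
    state_traj = torch.zeros(t_points, batch_size, 4)  # time-major layout (assumed)
    theta = state_traj[:, :, 0]                        # [t_points, batch_size]
    weights = torch.rand(t_points).view(-1, 1)         # [t_points, 1]
    print((weights * theta ** 2).shape)                # broadcasts to [t_points, batch_size]

With a genuinely batch-major [batch_size, t_points] theta the same product would raise a size-mismatch error (22 vs 1000), so the shape comments in the dumped loss_fn are best read as swapped.
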
-168,0.07323679327964783 -169,0.07311282306909561 -170,0.07298966497182846 -171,0.07286720722913742 -172,0.07274536043405533 -173,0.0726241022348404 -174,0.07250363379716873 -175,0.07238384336233139 -176,0.07226458191871643 -177,0.07214594632387161 -178,0.07202796638011932 -179,0.07191070914268494 -180,0.07179395109415054 -181,0.0716777965426445 -182,0.0715622529387474 -183,0.07144737988710403 -184,0.07133304327726364 -185,0.07121921330690384 -186,0.07110600918531418 -187,0.07099342346191406 -188,0.07088141143321991 -189,0.07076987624168396 -190,0.07065898180007935 -191,0.0705486312508583 -192,0.07043889909982681 -193,0.07032972574234009 -194,0.07022123783826828 -195,0.07011338323354721 -196,0.07000617682933807 -197,0.06989967077970505 -198,0.06979379057884216 -199,0.06968853622674942 -200,0.06958387047052383 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.200/training_config.txt deleted file mode 100644 index d76e35d..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.200/training_log.csv deleted file mode 100644 index 
4804ee6..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,224.55673217773438 -2,13.153162002563477 -3,13.104031562805176 -4,11.560168266296387 -5,9.730314254760742 -6,8.42111587524414 -7,7.57038688659668 -8,6.770708084106445 -9,6.003779888153076 -10,5.194929122924805 -11,4.444084644317627 -12,3.7811050415039062 -13,3.1679019927978516 -14,2.681633949279785 -15,2.3744821548461914 -16,2.215884208679199 -17,2.1268365383148193 -18,2.0676209926605225 -19,2.0086779594421387 -20,1.9337031841278076 -21,1.827643632888794 -22,1.6879804134368896 -23,1.524415135383606 -24,1.3531852960586548 -25,1.1851593255996704 -26,1.02968168258667 -27,0.892062783241272 -28,0.7762606739997864 -29,0.6808127760887146 -30,0.603812038898468 -31,0.5427848100662231 -32,0.4943815767765045 -33,0.4561319351196289 -34,0.42463430762290955 -35,0.39691150188446045 -36,0.37134915590286255 -37,0.3463676869869232 -38,0.3216108977794647 -39,0.296696275472641 -40,0.27173367142677307 -41,0.246416375041008 -42,0.22174791991710663 -43,0.1981961727142334 -44,0.17630578577518463 -45,0.1566089242696762 -46,0.1392812877893448 -47,0.12460023164749146 -48,0.11268636584281921 -49,0.10350097715854645 -50,0.09683690220117569 -51,0.09235353022813797 -52,0.08959180861711502 -53,0.08805523067712784 -54,0.08728795498609543 -55,0.08689761161804199 -56,0.08659154921770096 -57,0.08618286997079849 -58,0.0856107845902443 -59,0.08506535738706589 -60,0.08442306518554688 -61,0.08371669799089432 -62,0.08296168595552444 -63,0.08219827711582184 -64,0.0814686194062233 -65,0.08081727474927902 -66,0.08029533177614212 -67,0.07991372793912888 -68,0.07966149598360062 -69,0.07951178401708603 -70,0.07942167669534683 -71,0.07934588193893433 -72,0.07924030721187592 -73,0.0790681391954422 -74,0.07881830632686615 -75,0.07848235219717026 -76,0.0780583918094635 -77,0.07755843549966812 -78,0.07699999213218689 -79,0.07640350610017776 -80,0.07579118013381958 -81,0.0751822292804718 -82,0.07459273189306259 -83,0.07404784858226776 -84,0.07356065511703491 -85,0.07313574105501175 -86,0.07277199625968933 -87,0.07246492058038712 -88,0.07220391184091568 -89,0.07197733223438263 -90,0.0717744380235672 -91,0.07158559560775757 -92,0.07140333950519562 -93,0.07122287899255753 -94,0.07104175537824631 -95,0.07086017727851868 -96,0.07068004459142685 -97,0.07050373405218124 -98,0.0703340619802475 -99,0.07017326354980469 -100,0.07002273201942444 -101,0.06988275796175003 -102,0.06975270807743073 -103,0.06963095813989639 -104,0.06951530277729034 -105,0.06940348446369171 -106,0.06929310411214828 -107,0.06918247789144516 -108,0.06907013058662415 -109,0.0689554288983345 -110,0.06883851438760757 -111,0.06871986389160156 -112,0.06860017031431198 -113,0.06848043203353882 -114,0.06836166977882385 -115,0.06824450939893723 -116,0.06813005357980728 -117,0.06801875680685043 -118,0.06791047006845474 -119,0.06780487298965454 -120,0.06770197302103043 -121,0.06760140508413315 -122,0.06750304996967316 -123,0.06740647554397583 -124,0.06731124967336655 -125,0.06721709668636322 -126,0.06712391972541809 -127,0.06703183054924011 -128,0.06694105267524719 -129,0.06685182452201843 -130,0.06676395982503891 -131,0.06667739897966385 -132,0.06659220159053802 -133,0.06650832295417786 -134,0.06642560660839081 -135,0.06634417921304703 -136,0.06626373529434204 -137,0.06618382781744003 -138,0.06610433012247086 -139,0.06602539122104645 -140,0.0659470185637474 -141,0.06586920469999313 -142,0.06579206883907318 
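
As printed, loss_fn closes over a weight_fn that the configs dump separately; a self-contained sketch that wires the quadratic weighting into the loss (the factory structure here is an assumption for illustration, not taken from the repo):

    import torch

    def quadratic(t_span, t_max=None, min_val=0.01):
        t_span = torch.as_tensor(t_span, dtype=torch.float32)
        t_max = t_max if t_max is not None else t_span[-1]
        return min_val + ((1 - min_val) / t_max**2) * t_span**2

    def make_loss_fn(weight_fn, min_weight=0.01):
        # Binds the weight function that loss_fn (as printed) uses as a free variable.
        def loss_fn(state_traj, t_span):
            theta = state_traj[:, :, 0]          # time-major layout assumed (see note above)
            desired_theta = state_traj[:, :, 3]
            weights = weight_fn(t_span, min_val=min_weight).view(-1, 1)
            return torch.mean(weights * (theta - desired_theta) ** 2)
        return loss_fn

    loss_fn = make_loss_fn(quadratic)
    traj = torch.randn(1000, 22, 4)              # stand-in for a simulated [t_points, batch, state] trajectory
    print(loss_fn(traj, torch.linspace(0, 10, 1000)).item())
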
-143,0.06571520864963531 -144,0.06563879549503326 -145,0.06556293368339539 -146,0.06548762321472168 -147,0.06541308015584946 -148,0.06533926725387573 -149,0.06526598334312439 -150,0.06519322842359543 -151,0.06512091308832169 -152,0.06504911929368973 -153,0.06497777998447418 -154,0.06490693241357803 -155,0.06483639031648636 -156,0.06476621329784393 -157,0.06469639390707016 -158,0.06462694704532623 -159,0.06455834209918976 -160,0.06449030339717865 -161,0.06442227959632874 -162,0.064354307949543 -163,0.06428659707307816 -164,0.0642191544175148 -165,0.06415224820375443 -166,0.06408525258302689 -167,0.06401851773262024 -168,0.0639519989490509 -169,0.0638856291770935 -170,0.06381939351558685 -171,0.06375373899936676 -172,0.06368845701217651 -173,0.06362339109182358 -174,0.06355852633714676 -175,0.06349357962608337 -176,0.06342872232198715 -177,0.06336456537246704 -178,0.06330082565546036 -179,0.06323707103729248 -180,0.0631733387708664 -181,0.06310977786779404 -182,0.06304582953453064 -183,0.06298226863145828 -184,0.06291908025741577 -185,0.06285646557807922 -186,0.06279407441616058 -187,0.0627310574054718 -188,0.0626678541302681 -189,0.06260501593351364 -190,0.0625423863530159 -191,0.06247970834374428 -192,0.06241707131266594 -193,0.062354471534490585 -194,0.06229199096560478 -195,0.06223011389374733 -196,0.06216863542795181 -197,0.06210730969905853 -198,0.062045808881521225 -199,0.06198437884449959 -200,0.06192325800657272 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.250/training_config.txt deleted file mode 100644 index f14b82a..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] 
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.250/training_log.csv deleted file mode 100644 index 22790c3..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,266.8197326660156 -2,14.54055404663086 -3,19.628145217895508 -4,19.648784637451172 -5,19.78046226501465 -6,19.501161575317383 -7,18.927734375 -8,18.184207916259766 -9,17.299543380737305 -10,16.204269409179688 -11,14.881640434265137 -12,13.393218994140625 -13,9.68553352355957 -14,6.408955097198486 -15,6.483642578125 -16,7.344072341918945 -17,7.254336833953857 -18,6.509434700012207 -19,5.607182502746582 -20,4.851520538330078 -21,4.398014068603516 -22,4.117559432983398 -23,3.947826623916626 -24,3.827497959136963 -25,3.7265517711639404 -26,3.627323865890503 -27,3.5154473781585693 -28,3.3862922191619873 -29,3.238792896270752 -30,3.082149028778076 -31,2.9271163940429688 -32,2.783066987991333 -33,2.653702735900879 -34,2.5408427715301514 -35,2.442188262939453 -36,2.353109836578369 -37,2.2690939903259277 -38,2.1869912147521973 -39,2.105487585067749 -40,2.0219736099243164 -41,1.9367306232452393 -42,1.8504762649536133 -43,1.7633222341537476 -44,1.6792205572128296 -45,1.6015781164169312 -46,1.5309243202209473 -47,1.4660810232162476 -48,1.4062566757202148 -49,1.3491374254226685 -50,1.291503667831421 -51,1.2314213514328003 -52,1.168433427810669 -53,1.1038058996200562 -54,1.040345311164856 -55,0.9801493287086487 -56,0.9241411685943604 -57,0.8718541264533997 -58,0.8219242691993713 -59,0.7728956937789917 -60,0.7240076065063477 -61,0.6756004691123962 -62,0.6286967396736145 -63,0.5844995379447937 -64,0.5442526936531067 -65,0.5078255534172058 -66,0.4740700423717499 -67,0.44183894991874695 -68,0.41078031063079834 -69,0.3810976445674896 -70,0.3535691797733307 -71,0.32888999581336975 -72,0.30715981125831604 -73,0.28786158561706543 -74,0.2703358829021454 -75,0.2541692554950714 -76,0.23920851945877075 -77,0.2257235199213028 -78,0.21398037672042847 -79,0.2040339559316635 -80,0.1956080049276352 -81,0.18826039135456085 -82,0.1816505491733551 -83,0.17570053040981293 -84,0.17052723467350006 -85,0.16625389456748962 -86,0.16284312307834625 -87,0.16008171439170837 -88,0.1576932966709137 -89,0.15548568964004517 -90,0.15341296792030334 -91,0.1515280306339264 -92,0.14987491071224213 -93,0.14840899407863617 -94,0.14700956642627716 -95,0.14554908871650696 -96,0.1439737230539322 -97,0.14230714738368988 -98,0.14060933887958527 -99,0.13891851902008057 -100,0.13722512125968933 -101,0.13548621535301208 -102,0.13367488980293274 -103,0.13180413842201233 -104,0.12991876900196075 -105,0.12806610763072968 -106,0.12625902891159058 -107,0.12450135499238968 -108,0.1227899044752121 -109,0.12112203985452652 -110,0.11949534714221954 -111,0.117926225066185 -112,0.11643551290035248 -113,0.11502894014120102 -114,0.11370160430669785 -115,0.11244426667690277 -116,0.1112479493021965 -117,0.11011119186878204 -118,0.10903720557689667 -119,0.1080271452665329 
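
Read as a set, these logs trace the usual sweep shape: rates up through 0.25 settle smoothly, while the higher rates later in this diff stall at a ~1.4e5 plateau or go nan. One simple way to score such logs and pick the best stable rate (directory layout inferred from the paths above; final-epoch loss is just one possible criterion):

    import csv
    import math
    from pathlib import Path

    def final_loss(log_path):
        # Last epoch's loss, or inf if the run ever went non-finite.
        with open(log_path) as f:
            losses = [float(row["Loss"]) for row in csv.DictReader(f)]
        return losses[-1] if all(math.isfinite(l) for l in losses) else math.inf

    sweep_dir = Path("training/time_weighting_learning_rate_sweep/quadratic")
    scores = {p.parent.name: final_loss(p)
              for p in sorted(sweep_dir.glob("lr_*/training_log.csv"))}
    print(min(scores, key=scores.get))  # lr_0.200 for the runs shown here
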
-120,0.10707558691501617 -121,0.10617462545633316 -122,0.10530919581651688 -123,0.10446837544441223 -124,0.10365134477615356 -125,0.10285895317792892 -126,0.10208963602781296 -127,0.10133980214595795 -128,0.10060536861419678 -129,0.09988337755203247 -130,0.0991716980934143 -131,0.09847038984298706 -132,0.09778042882680893 -133,0.09710198640823364 -134,0.09643430262804031 -135,0.09577642381191254 -136,0.09512664377689362 -137,0.09448500722646713 -138,0.09385254979133606 -139,0.09322910010814667 -140,0.09261397272348404 -141,0.09200504422187805 -142,0.09140077233314514 -143,0.09079987555742264 -144,0.09020697325468063 -145,0.08962076157331467 -146,0.08904140442609787 -147,0.08847124874591827 -148,0.0879080668091774 -149,0.08735155314207077 -150,0.08680097758769989 -151,0.08625853061676025 -152,0.08572272211313248 -153,0.08519385010004044 -154,0.08467023074626923 -155,0.0841514989733696 -156,0.08363886922597885 -157,0.08313266932964325 -158,0.08263261616230011 -159,0.08213621377944946 -160,0.08164466172456741 -161,0.08115898817777634 -162,0.0806792601943016 -163,0.08020559698343277 -164,0.07973680645227432 -165,0.07927007973194122 -166,0.07880828529596329 -167,0.0783517137169838 -168,0.07789964973926544 -169,0.07745208591222763 -170,0.07700996100902557 -171,0.07658087462186813 -172,0.07616067677736282 -173,0.0757448598742485 -174,0.07533230632543564 -175,0.07492944598197937 -176,0.07453332841396332 -177,0.07414089888334274 -178,0.07375247776508331 -179,0.07337088882923126 -180,0.0729973167181015 -181,0.07262882590293884 -182,0.07226476818323135 -183,0.07190502434968948 -184,0.07155371457338333 -185,0.07120825350284576 -186,0.0708661675453186 -187,0.07052826136350632 -188,0.07019826769828796 -189,0.06987353414297104 -190,0.06955292075872421 -191,0.0692364051938057 -192,0.06892520934343338 -193,0.06861955672502518 -194,0.068319171667099 -195,0.06802286952733994 -196,0.06772947311401367 -197,0.06744187325239182 -198,0.06715774536132812 -199,0.0668768584728241 -200,0.0666007250547409 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.300/training_config.txt deleted file mode 100644 index ec7393a..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 
0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.300/training_log.csv deleted file mode 100644 index b10254c..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.300/training_log.csv +++ /dev/null @@ -1,26 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,350.8663330078125 -2,16.62820053100586 -3,19.833187103271484 -4,21.18271255493164 -5,20.330272674560547 -6,18.894311904907227 -7,17.592159271240234 -8,16.558551788330078 -9,15.71105670928955 -10,15.023347854614258 -11,14.398599624633789 -12,13.8158540725708 -13,13.270523071289062 -14,12.748589515686035 -15,10.572425842285156 -16,9.585554122924805 -17,130.23606872558594 -18,135264.203125 -19,144657.609375 -20,144657.609375 -21,144657.609375 -22,144657.609375 -23,144657.609375 -24,144657.609375 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.400/training_config.txt deleted file mode 100644 index 8855a00..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 
6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.400/training_log.csv deleted file mode 100644 index dab50da..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.400/training_log.csv +++ /dev/null @@ -1,59 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,2139.2958984375 -2,15.871360778808594 -3,15.040173530578613 -4,30.151010513305664 -5,13.41573715209961 -6,15.126680374145508 -7,15.488669395446777 -8,15.400300025939941 -9,15.038491249084473 -10,14.288644790649414 -11,13.195968627929688 -12,12.105541229248047 -13,11.149531364440918 -14,10.608494758605957 -15,23.254711151123047 -16,432.97271728515625 -17,4390.83544921875 -18,6827.92529296875 -19,6056.083984375 -20,2305.225341796875 -21,1030.5155029296875 -22,580.4194946289062 -23,417.8133544921875 -24,375.9845886230469 -25,363.511474609375 -26,423.6347351074219 -27,428.93878173828125 -28,457.9874572753906 -29,434.90380859375 -30,388.47601318359375 -31,405.302734375 -32,396.0299987792969 -33,433.2095031738281 -34,415.62322998046875 -35,448.4204406738281 -36,400.7735900878906 -37,463.89422607421875 -38,422.3512268066406 -39,367.9308776855469 -40,411.2049560546875 -41,298.25262451171875 -42,234.5443115234375 -43,126.51380920410156 -44,110.2239990234375 -45,83.65897369384766 -46,74.9664535522461 -47,71.71820831298828 -48,71.84044647216797 -49,70.30377960205078 -50,65.0676040649414 -51,65.10655212402344 -52,58.479270935058594 -53,41.8557014465332 -54,nan -55,nan -56,nan -57,nan diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.500/training_config.txt deleted file mode 100644 index a6370a3..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: 
float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.500/training_log.csv deleted file mode 100644 index 376f529..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.500/training_log.csv +++ /dev/null @@ -1,15 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,28110.3125 -2,141606.625 -3,126754.5625 -4,61593.7578125 -5,3658.652099609375 -6,16.84434700012207 -7,17.455270767211914 -8,18.87449836730957 -9,19.832551956176758 -10,nan -11,nan -12,nan -13,nan diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.600/training_config.txt deleted file mode 100644 index ecc72be..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 
1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.600/training_log.csv deleted file mode 100644 index 4e8618e..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.600/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,74336.546875 -2,144468.65625 -3,144657.609375 -4,144657.609375 -5,144657.609375 -6,144657.609375 -7,144657.609375 -8,144657.609375 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.700/training_config.txt deleted file mode 100644 index fa7dd16..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 
1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.700/training_log.csv deleted file mode 100644 index 4f11c92..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,109735.6328125 -2,144648.0625 -3,144657.609375 -4,144657.609375 -5,144657.609375 -6,144657.609375 -7,144657.609375 -8,144657.609375 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.800/training_config.txt deleted file mode 100644 index fb9d7ee..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.800/training_log.csv 
b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.800/training_log.csv deleted file mode 100644 index fbf68ef..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,125504.2109375 -2,144657.609375 -3,144657.609375 -4,144657.609375 -5,144657.609375 -6,144657.609375 -7,144657.609375 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.900/training_config.txt deleted file mode 100644 index 5413c52..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_0.900/training_log.csv deleted file mode 100644 index ef3d9e8..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,135553.328125 -2,144657.609375 -3,144657.609375 -4,144657.609375 -5,144657.609375 -6,144657.609375 -7,144657.609375 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_1.000/training_config.txt 
b/training/time_weighting_learning_rate_sweep/quadratic/lr_1.000/training_config.txt deleted file mode 100644 index b717d45..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_1.000/training_log.csv deleted file mode 100644 index 18c906b..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,139899.875 -2,144657.609375 -3,144657.609375 -4,144657.609375 -5,144657.609375 -6,144657.609375 -7,144657.609375 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_16.000/training_config.txt deleted file mode 100644 index 63bbdfd..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - 
desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_16.000/training_log.csv deleted file mode 100644 index d268139..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,145091.796875 -2,145091.796875 -3,145091.796875 -4,145091.796875 -5,145091.796875 -6,145091.796875 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_2.000/training_config.txt deleted file mode 100644 index 2e32af1..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> 
torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_2.000/training_log.csv deleted file mode 100644 index 966e11c..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,145084.8125 -2,13.705998420715332 -3,nan -4,nan -5,nan -6,nan diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_4.000/training_config.txt deleted file mode 100644 index bb94331..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 
6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_4.000/training_log.csv deleted file mode 100644 index d268139..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,145091.796875 -2,145091.796875 -3,145091.796875 -4,145091.796875 -5,145091.796875 -6,145091.796875 diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic/lr_8.000/training_config.txt deleted file mode 100644 index d5d9d78..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * t_span**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 
6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic/lr_8.000/training_log.csv deleted file mode 100644 index d268139..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,698.05126953125 -1,145091.796875 -2,145091.796875 -3,145091.796875 -4,145091.796875 -5,145091.796875 -6,145091.796875 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.010/training_config.txt deleted file mode 100644 index 63345da..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.010/training_log.csv deleted file mode 100644 index 2b41d02..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 
+0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,91.53144836425781 -2,82.58479309082031 -3,76.03575134277344 -4,68.66426849365234 -5,62.031700134277344 -6,53.00569152832031 -7,46.31782913208008 -8,40.532958984375 -9,35.48923873901367 -10,30.542381286621094 -11,27.855972290039062 -12,24.65521812438965 -13,22.904905319213867 -14,20.53262710571289 -15,18.20256233215332 -16,16.06061363220215 -17,14.739778518676758 -18,13.736855506896973 -19,12.426250457763672 -20,11.832944869995117 -21,10.97511100769043 -22,9.394410133361816 -23,8.881789207458496 -24,8.637206077575684 -25,8.442631721496582 -26,7.942625045776367 -27,7.5290985107421875 -28,7.080869197845459 -29,6.7781524658203125 -30,6.559311866760254 -31,6.357673168182373 -32,6.190032482147217 -33,6.041404724121094 -34,5.89907169342041 -35,5.775726318359375 -36,5.666321277618408 -37,5.562668800354004 -38,5.464737892150879 -39,5.369648456573486 -40,5.271976470947266 -41,5.20006799697876 -42,5.12676477432251 -43,5.066548824310303 -44,5.005457401275635 -45,4.942073345184326 -46,4.873614311218262 -47,4.790684700012207 -48,4.730871677398682 -49,4.670947551727295 -50,4.611446380615234 -51,4.539156436920166 -52,4.476180076599121 -53,4.41343355178833 -54,4.343780994415283 -55,4.298095226287842 -56,4.253505229949951 -57,4.2179341316223145 -58,4.175781726837158 -59,4.131801128387451 -60,4.105465888977051 -61,4.052435874938965 -62,4.031918048858643 -63,4.0098652839660645 -64,3.9827728271484375 -65,3.973628282546997 -66,3.956632137298584 -67,3.936418056488037 -68,3.913097620010376 -69,3.882769823074341 -70,3.871727228164673 -71,3.8531548976898193 -72,3.832172155380249 -73,3.808624267578125 -74,3.7921080589294434 -75,3.7756214141845703 -76,3.75784969329834 -77,3.7395176887512207 -78,3.7244317531585693 -79,3.708547592163086 -80,3.6922190189361572 -81,3.6755623817443848 -82,3.658613920211792 -83,3.6413209438323975 -84,3.623535394668579 -85,3.605010747909546 -86,3.5863523483276367 -87,3.572054386138916 -88,3.5567922592163086 -89,3.5413784980773926 -90,3.526238203048706 -91,3.511404037475586 -92,3.4963293075561523 -93,3.479644775390625 -94,3.462015151977539 -95,3.4562528133392334 -96,3.4534666538238525 -97,3.4340341091156006 -98,3.4224605560302734 -99,3.4111008644104004 -100,3.391735076904297 -101,3.373854398727417 -102,3.3596439361572266 -103,3.3504648208618164 -104,3.349353790283203 -105,3.3353688716888428 -106,3.3293404579162598 -107,3.321643352508545 -108,3.3143553733825684 -109,3.3045127391815186 -110,3.2925209999084473 -111,3.2787561416625977 -112,3.265429973602295 -113,3.2451846599578857 -114,3.2367801666259766 -115,3.2360007762908936 -116,3.2270455360412598 -117,3.22371244430542 -118,3.2238216400146484 -119,3.215585231781006 -120,3.2009029388427734 -121,3.1976547241210938 -122,3.196756362915039 -123,3.195823907852173 -124,3.1928391456604004 -125,3.1877055168151855 -126,3.182095766067505 -127,3.1799724102020264 -128,3.1749324798583984 -129,3.1702351570129395 -130,3.164048910140991 -131,3.155409097671509 -132,3.1496810913085938 -133,3.1511576175689697 -134,3.151649236679077 -135,3.147905111312866 -136,3.141369342803955 -137,3.134916305541992 -138,3.1332848072052 -139,3.1303107738494873 -140,3.126478672027588 -141,3.1292450428009033 -142,3.124624252319336 -143,3.1203694343566895 -144,3.1246938705444336 -145,3.1237905025482178 -146,3.119959592819214 -147,3.1130337715148926 -148,3.111943483352661 -149,3.1083433628082275 -150,3.1075599193573 -151,3.103024959564209 -152,3.1012961864471436 -153,3.0987489223480225 -154,3.0945932865142822 -155,3.0950372219085693 
-156,3.0948166847229004 -157,3.0911190509796143 -158,3.087763786315918 -159,3.0837936401367188 -160,3.080430507659912 -161,3.0756893157958984 -162,3.079774856567383 -163,3.080228090286255 -164,3.078073501586914 -165,3.074108362197876 -166,3.0685603618621826 -167,3.062000036239624 -168,3.065420150756836 -169,3.0663576126098633 -170,3.065516948699951 -171,3.063508987426758 -172,3.0604846477508545 -173,3.0564310550689697 -174,3.0521609783172607 -175,3.049478054046631 -176,3.0468757152557373 -177,3.0440099239349365 -178,3.0407304763793945 -179,3.0371034145355225 -180,3.033146381378174 -181,3.028855085372925 -182,3.024165391921997 -183,3.018611431121826 -184,3.00980544090271 -185,3.011136293411255 -186,3.0057098865509033 -187,3.001070261001587 -188,2.998333215713501 -189,2.9944698810577393 -190,2.989882469177246 -191,2.9846200942993164 -192,2.9777326583862305 -193,2.9790475368499756 -194,2.9771084785461426 -195,2.9733028411865234 -196,2.9682083129882812 -197,2.9618966579437256 -198,2.953056812286377 -199,2.9496960639953613 -200,2.9466991424560547 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.020/training_config.txt deleted file mode 100644 index 251cd13..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] 
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.020/training_log.csv deleted file mode 100644 index 382ed7e..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,87.18049621582031 -2,70.69342041015625 -3,56.24991989135742 -4,42.017333984375 -5,30.254533767700195 -6,23.668228149414062 -7,18.12079429626465 -8,14.943513870239258 -9,12.053718566894531 -10,10.075502395629883 -11,9.14274787902832 -12,7.600576877593994 -13,6.552332401275635 -14,6.081232070922852 -15,5.6820759773254395 -16,5.311466217041016 -17,5.004429817199707 -18,4.805738925933838 -19,4.630770206451416 -20,4.470080375671387 -21,4.288049221038818 -22,4.10749626159668 -23,3.957082986831665 -24,3.8017055988311768 -25,3.6811463832855225 -26,3.564906358718872 -27,3.455653429031372 -28,3.3678629398345947 -29,3.298696756362915 -30,3.2432894706726074 -31,3.1970109939575195 -32,3.149562120437622 -33,3.102245807647705 -34,3.0549027919769287 -35,3.0071630477905273 -36,2.9577207565307617 -37,2.9002180099487305 -38,2.8664450645446777 -39,2.8375132083892822 -40,2.8072996139526367 -41,2.7738282680511475 -42,2.7423362731933594 -43,2.7147772312164307 -44,2.690305709838867 -45,2.668841600418091 -46,2.6452250480651855 -47,2.619001626968384 -48,2.5907983779907227 -49,2.561455488204956 -50,2.5362823009490967 -51,2.524104595184326 -52,2.50991153717041 -53,2.5012009143829346 -54,2.497453451156616 -55,2.4922549724578857 -56,2.4865760803222656 -57,2.4807751178741455 -58,2.474717378616333 -59,2.4682066440582275 -60,2.461688280105591 -61,2.455411672592163 -62,2.4494080543518066 -63,2.443203926086426 -64,2.4361746311187744 -65,2.428189277648926 -66,2.419321298599243 -67,2.4098198413848877 -68,2.4075496196746826 -69,2.4045324325561523 -70,2.4008355140686035 -71,2.3968441486358643 -72,2.392669200897217 -73,2.3882381916046143 -74,2.3831961154937744 -75,2.3776426315307617 -76,2.375981092453003 -77,2.3715171813964844 -78,2.3698205947875977 -79,2.367795467376709 -80,2.364823579788208 -81,2.360962152481079 -82,2.357996702194214 -83,2.3562676906585693 -84,2.3528997898101807 -85,2.350182056427002 -86,2.3483619689941406 -87,2.3457040786743164 -88,2.343029499053955 -89,2.3413960933685303 -90,2.339385747909546 -91,2.3370914459228516 -92,2.335320234298706 -93,2.3337655067443848 -94,2.331986427307129 -95,2.330171823501587 -96,2.3286538124084473 -97,2.3272361755371094 -98,2.325716018676758 -99,2.324145555496216 -100,2.3226749897003174 -101,2.321342945098877 -102,2.320052146911621 -103,2.318761110305786 -104,2.317507028579712 -105,2.316340208053589 -106,2.315253257751465 -107,2.3141982555389404 -108,2.313148260116577 -109,2.312126874923706 -110,2.311159372329712 -111,2.3102517127990723 -112,2.309380054473877 -113,2.3085219860076904 -114,2.3076791763305664 -115,2.3068671226501465 -116,2.3060927391052246 -117,2.305342197418213 -118,2.3046011924743652 -119,2.303865432739258 -120,2.3031466007232666 -121,2.3024511337280273 -122,2.3017733097076416 -123,2.30110502243042 -124,2.300445795059204 -125,2.299800157546997 -126,2.2991721630096436 -127,2.298560619354248 -128,2.2979624271392822 -129,2.2973721027374268 -130,2.2967910766601562 -131,2.296220302581787 -132,2.2956602573394775 -133,2.2951133251190186 -134,2.2945749759674072 -135,2.2940430641174316 -136,2.293518543243408 
-137,2.293002128601074 -138,2.2924933433532715 -139,2.2919914722442627 -140,2.2914962768554688 -141,2.2910070419311523 -142,2.2905240058898926 -143,2.290048360824585 -144,2.289578914642334 -145,2.2891149520874023 -146,2.2886550426483154 -147,2.2881999015808105 -148,2.2877495288848877 -149,2.2873032093048096 -150,2.2868614196777344 -151,2.286423921585083 -152,2.2859904766082764 -153,2.2855608463287354 -154,2.2851357460021973 -155,2.2847158908843994 -156,2.2843010425567627 -157,2.283891201019287 -158,2.2834858894348145 -159,2.283085823059082 -160,2.2826900482177734 -161,2.2822978496551514 -162,2.2819104194641113 -163,2.281526565551758 -164,2.281146764755249 -165,2.280771255493164 -166,2.2803993225097656 -167,2.2800300121307373 -168,2.2796640396118164 -169,2.2793009281158447 -170,2.2789409160614014 -171,2.2785847187042236 -172,2.278231382369995 -173,2.2778818607330322 -174,2.2775354385375977 -175,2.2771921157836914 -176,2.2768516540527344 -177,2.2765138149261475 -178,2.276179552078247 -179,2.275847911834717 -180,2.275519609451294 -181,2.2751941680908203 -182,2.274871349334717 -183,2.2745511531829834 -184,2.274233341217041 -185,2.2739193439483643 -186,2.2736082077026367 -187,2.273300886154175 -188,2.2729954719543457 -189,2.272693634033203 -190,2.272394895553589 -191,2.272099494934082 -192,2.271808385848999 -193,2.271519899368286 -194,2.2712340354919434 -195,2.2709503173828125 -196,2.2706687450408936 -197,2.2703893184661865 -198,2.270112991333008 -199,2.269838571548462 -200,2.2695655822753906 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.040/training_config.txt deleted file mode 100644 index 74261b6..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, 
-1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.040/training_log.csv deleted file mode 100644 index 84767bc..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,78.62189483642578 -2,45.30236053466797 -3,27.399402618408203 -4,15.863306999206543 -5,10.673423767089844 -6,8.00208568572998 -7,5.734444618225098 -8,4.903866291046143 -9,4.352260589599609 -10,4.007482051849365 -11,3.689675807952881 -12,3.4341366291046143 -13,3.2442619800567627 -14,3.1182918548583984 -15,3.0281503200531006 -16,2.9449241161346436 -17,2.868976593017578 -18,2.802791118621826 -19,2.7474122047424316 -20,2.7017464637756348 -21,2.66341233253479 -22,2.629762649536133 -23,2.599118709564209 -24,2.5703516006469727 -25,2.542860507965088 -26,2.516279458999634 -27,2.4901952743530273 -28,2.4645910263061523 -29,2.439405918121338 -30,2.4148473739624023 -31,2.391270160675049 -32,2.368879556655884 -33,2.347930908203125 -34,2.3296427726745605 -35,2.3146631717681885 -36,2.3023738861083984 -37,2.292567014694214 -38,2.285196304321289 -39,2.2807424068450928 -40,2.279047727584839 -41,2.279095411300659 -42,2.2795426845550537 -43,2.2802839279174805 -44,2.2820591926574707 -45,2.2847461700439453 -46,2.286020278930664 -47,2.2837560176849365 -48,2.279163360595703 -49,2.274705648422241 -50,2.2715439796447754 -51,2.2694742679595947 -52,2.2681097984313965 -53,2.2673118114471436 -54,2.2668137550354004 -55,2.266299247741699 -56,2.2655529975891113 -57,2.2644941806793213 -58,2.263167381286621 -59,2.261718988418579 -60,2.260340929031372 -61,2.2591984272003174 -62,2.258387565612793 -63,2.257906198501587 -64,2.2576680183410645 -65,2.2575602531433105 -66,2.257460117340088 -67,2.257266044616699 -68,2.2569198608398438 -69,2.256409168243408 -70,2.255768060684204 -71,2.2550487518310547 -72,2.2543206214904785 -73,2.253648519515991 -74,2.253063678741455 -75,2.2525763511657715 -76,2.252173900604248 -77,2.2518208026885986 -78,2.251479387283325 -79,2.251127243041992 -80,2.2507569789886475 -81,2.250375270843506 -82,2.2499935626983643 -83,2.249631643295288 -84,2.249300956726074 -85,2.249000072479248 -86,2.2487244606018066 -87,2.2484631538391113 -88,2.2482080459594727 -89,2.247947931289673 -90,2.2476747035980225 -91,2.2473857402801514 -92,2.247089385986328 -93,2.2467925548553467 -94,2.2465038299560547 -95,2.2462263107299805 -96,2.245967149734497 -97,2.245725154876709 -98,2.2454986572265625 -99,2.2452802658081055 -100,2.2450673580169678 -101,2.244853973388672 -102,2.2446389198303223 -103,2.2444229125976562 -104,2.2442073822021484 -105,2.2439937591552734 -106,2.243783473968506 -107,2.243577003479004 -108,2.2433762550354004 -109,2.243182420730591 -110,2.242994546890259 -111,2.2428088188171387 -112,2.2426247596740723 -113,2.242440700531006 -114,2.2422573566436768 -115,2.2420759201049805 
-116,2.241896867752075 -117,2.2417216300964355 -118,2.2415506839752197 -119,2.2413830757141113 -120,2.2412185668945312 -121,2.241055488586426 -122,2.240893840789795 -123,2.2407326698303223 -124,2.240572214126587 -125,2.240413188934326 -126,2.24025559425354 -127,2.240100383758545 -128,2.239947557449341 -129,2.2397966384887695 -130,2.2396469116210938 -131,2.2394981384277344 -132,2.239348888397217 -133,2.239199638366699 -134,2.2390503883361816 -135,2.2389004230499268 -136,2.2387502193450928 -137,2.238600254058838 -138,2.238449811935425 -139,2.238300323486328 -140,2.2381515502929688 -141,2.2380027770996094 -142,2.2378697395324707 -143,2.237750768661499 -144,2.2376370429992676 -145,2.2375237941741943 -146,2.2374112606048584 -147,2.2372987270355225 -148,2.2371859550476074 -149,2.237072706222534 -150,2.236959457397461 -151,2.2368454933166504 -152,2.236732006072998 -153,2.2366182804107666 -154,2.2365050315856934 -155,2.2363924980163574 -156,2.236281633377075 -157,2.2361748218536377 -158,2.236072301864624 -159,2.2359745502471924 -160,2.2358779907226562 -161,2.235778331756592 -162,2.2356765270233154 -163,2.235572099685669 -164,2.2354702949523926 -165,2.2353713512420654 -166,2.2352747917175293 -167,2.2351796627044678 -168,2.2350847721099854 -169,2.234989643096924 -170,2.234894275665283 -171,2.2347989082336426 -172,2.234703540802002 -173,2.2346086502075195 -174,2.2345144748687744 -175,2.2344207763671875 -176,2.234328269958496 -177,2.234236478805542 -178,2.234145164489746 -179,2.234053373336792 -180,2.233961343765259 -181,2.2338688373565674 -182,2.2337758541107178 -183,2.2336833477020264 -184,2.2335896492004395 -185,2.2334957122802734 -186,2.2333998680114746 -187,2.2333035469055176 -188,2.233206272125244 -189,2.233109474182129 -190,2.2330119609832764 -191,2.2329139709472656 -192,2.232815980911255 -193,2.232717752456665 -194,2.2326197624206543 -195,2.2325212955474854 -196,2.2324235439300537 -197,2.232325553894043 -198,2.2322275638580322 -199,2.2321298122406006 -200,2.232032060623169 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.050/training_config.txt deleted file mode 100644 index 98a2f5f..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 
0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.050/training_log.csv deleted file mode 100644 index 0e4b894..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,74.54898834228516 -2,37.18050765991211 -3,22.578065872192383 -4,12.1703519821167 -5,8.575193405151367 -6,6.3546624183654785 -7,5.174666881561279 -8,4.606987953186035 -9,4.190138816833496 -10,3.7707369327545166 -11,3.4754652976989746 -12,3.2338778972625732 -13,3.0270330905914307 -14,2.8683810234069824 -15,2.770925998687744 -16,2.704265594482422 -17,2.6493020057678223 -18,2.610383987426758 -19,2.5867154598236084 -20,2.5645229816436768 -21,2.540761947631836 -22,2.514983654022217 -23,2.4873337745666504 -24,2.458324670791626 -25,2.4300642013549805 -26,2.4028921127319336 -27,2.3768234252929688 -28,2.3526601791381836 -29,2.331944704055786 -30,2.3162174224853516 -31,2.3042712211608887 -32,2.2954163551330566 -33,2.2893056869506836 -34,2.2853784561157227 -35,2.2832019329071045 -36,2.2821483612060547 -37,2.281724691390991 -38,2.281895160675049 -39,2.2826035022735596 -40,2.2836711406707764 -41,2.2847280502319336 -42,2.285304307937622 -43,2.2849631309509277 -44,2.283430337905884 -45,2.280658006668091 -46,2.276873826980591 -47,2.272501230239868 -48,2.2680392265319824 -49,2.26401686668396 -50,2.260777235031128 -51,2.2584726810455322 -52,2.257035255432129 -53,2.2562978267669678 -54,2.2559499740600586 -55,2.255697011947632 -56,2.2553181648254395 -57,2.2547106742858887 -58,2.2539052963256836 -59,2.2530064582824707 -60,2.252171277999878 -61,2.251508951187134 -62,2.2510695457458496 -63,2.250798225402832 -64,2.2506048679351807 -65,2.250380754470825 -66,2.2500407695770264 -67,2.249544143676758 -68,2.2488927841186523 -69,2.2481353282928467 -70,2.2473561763763428 -71,2.246635675430298 -72,2.2460315227508545 -73,2.2455761432647705 -74,2.245250940322876 -75,2.245016098022461 -76,2.24481463432312 -77,2.244615316390991 -78,2.244392156600952 -79,2.244166612625122 -80,2.2439160346984863 -81,2.2436492443084717 -82,2.2433807849884033 -83,2.243128538131714 -84,2.2429001331329346 -85,2.242689371109009 -86,2.242485761642456 -87,2.2422852516174316 -88,2.242075204849243 -89,2.2418506145477295 -90,2.2416157722473145 -91,2.2413766384124756 -92,2.241147756576538 -93,2.2409400939941406 -94,2.240755796432495 -95,2.2405879497528076 
-96,2.2404308319091797 -97,2.2402806282043457 -98,2.240124225616455 -99,2.2399587631225586 -100,2.2397899627685547 -101,2.2396183013916016 -102,2.2394464015960693 -103,2.2392797470092773 -104,2.2391197681427 -105,2.2389638423919678 -106,2.2388105392456055 -107,2.2386581897735596 -108,2.2385056018829346 -109,2.238351583480835 -110,2.238196611404419 -111,2.238044023513794 -112,2.2378947734832764 -113,2.237748861312866 -114,2.237607717514038 -115,2.2374720573425293 -116,2.237337112426758 -117,2.237201690673828 -118,2.237065076828003 -119,2.2369275093078613 -120,2.236790895462036 -121,2.2366549968719482 -122,2.236520528793335 -123,2.2363877296447754 -124,2.2362563610076904 -125,2.236126661300659 -126,2.235997438430786 -127,2.235867977142334 -128,2.23573899269104 -129,2.2356112003326416 -130,2.235482692718506 -131,2.235354423522949 -132,2.2352256774902344 -133,2.2350974082946777 -134,2.2349700927734375 -135,2.2348427772521973 -136,2.234715700149536 -137,2.2345900535583496 -138,2.2344651222229004 -139,2.234342575073242 -140,2.2342209815979004 -141,2.234100818634033 -142,2.2339823246002197 -143,2.2338647842407227 -144,2.2337486743927 -145,2.2336337566375732 -146,2.233520746231079 -147,2.2334084510803223 -148,2.23329758644104 -149,2.2331881523132324 -150,2.233079433441162 -151,2.2329723834991455 -152,2.232865810394287 -153,2.232760190963745 -154,2.2326557636260986 -155,2.2325518131256104 -156,2.2324488162994385 -157,2.232346534729004 -158,2.2322449684143066 -159,2.2321431636810303 -160,2.2320423126220703 -161,2.2319414615631104 -162,2.231841564178467 -163,2.2317421436309814 -164,2.2316434383392334 -165,2.2315452098846436 -166,2.2314469814300537 -167,2.2313497066497803 -168,2.231252908706665 -169,2.231156826019287 -170,2.231060266494751 -171,2.2309651374816895 -172,2.230870008468628 -173,2.2307751178741455 -174,2.230680465698242 -175,2.2305855751037598 -176,2.2304906845092773 -177,2.2303965091705322 -178,2.230304718017578 -179,2.2302138805389404 -180,2.2301230430603027 -181,2.2300333976745605 -182,2.2299439907073975 -183,2.229856491088867 -184,2.2297701835632324 -185,2.2296841144561768 -186,2.2295985221862793 -187,2.22951340675354 -188,2.229429006576538 -189,2.2293457984924316 -190,2.2292635440826416 -191,2.229182243347168 -192,2.2291018962860107 -193,2.229022741317749 -194,2.2289440631866455 -195,2.2288665771484375 -196,2.2287890911102295 -197,2.2287118434906006 -198,2.228635787963867 -199,2.228559970855713 -200,2.2284843921661377 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.080/training_config.txt deleted file mode 100644 index 89992e4..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - 
desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.080/training_log.csv deleted file mode 100644 index c463551..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,62.61245346069336 -2,23.316547393798828 -3,9.417710304260254 -4,5.951552391052246 -5,4.640810012817383 -6,3.948585271835327 -7,3.4141242504119873 -8,3.0919461250305176 -9,2.898284912109375 -10,2.7531776428222656 -11,2.6678943634033203 -12,2.6257898807525635 -13,2.591322898864746 -14,2.5584402084350586 -15,2.5255978107452393 -16,2.492774486541748 -17,2.4607512950897217 -18,2.4306490421295166 -19,2.403808355331421 -20,2.380112409591675 -21,2.359530448913574 -22,2.341832399368286 -23,2.3271501064300537 -24,2.316387891769409 -25,2.3085954189300537 -26,2.3022589683532715 -27,2.296363592147827 -28,2.2914297580718994 -29,2.287623643875122 -30,2.284700632095337 -31,2.2817602157592773 -32,2.278292417526245 -33,2.274036169052124 -34,2.269587993621826 -35,2.2659754753112793 -36,2.2637546062469482 -37,2.2625696659088135 -38,2.2618820667266846 -39,2.2613329887390137 -40,2.260798215866089 -41,2.260157346725464 -42,2.2593483924865723 -43,2.258287191390991 -44,2.256951093673706 -45,2.2553927898406982 -46,2.253720760345459 -47,2.252058506011963 -48,2.2505667209625244 -49,2.2493181228637695 -50,2.248356580734253 -51,2.2476747035980225 -52,2.247241735458374 -53,2.2469584941864014 -54,2.246791124343872 -55,2.2467284202575684 -56,2.2467033863067627 -57,2.246654748916626 -58,2.2465620040893555 -59,2.2464280128479004 -60,2.2462143898010254 -61,2.245950937271118 -62,2.245635747909546 -63,2.2452940940856934 -64,2.244931697845459 -65,2.244562864303589 -66,2.2442233562469482 -67,2.2439024448394775 -68,2.2436025142669678 -69,2.2432985305786133 -70,2.2429919242858887 -71,2.2426836490631104 -72,2.242363452911377 -73,2.2420334815979004 -74,2.2417070865631104 
-75,2.2414112091064453 -76,2.2411677837371826 -77,2.240953207015991 -78,2.240764856338501 -79,2.2405929565429688 -80,2.2404258251190186 -81,2.2402548789978027 -82,2.2400758266448975 -83,2.2398884296417236 -84,2.239694595336914 -85,2.2394957542419434 -86,2.2392969131469727 -87,2.239100933074951 -88,2.2389075756073 -89,2.238715648651123 -90,2.2385237216949463 -91,2.2383339405059814 -92,2.2381436824798584 -93,2.2379531860351562 -94,2.2377665042877197 -95,2.237591505050659 -96,2.2374186515808105 -97,2.2372453212738037 -98,2.2370734214782715 -99,2.236900806427002 -100,2.236727237701416 -101,2.236560344696045 -102,2.2363977432250977 -103,2.2362353801727295 -104,2.2360754013061523 -105,2.23591685295105 -106,2.23576021194458 -107,2.2356038093566895 -108,2.2354483604431152 -109,2.235294818878174 -110,2.2351438999176025 -111,2.2349965572357178 -112,2.234851837158203 -113,2.2347114086151123 -114,2.2345709800720215 -115,2.234429121017456 -116,2.2342896461486816 -117,2.23415207862854 -118,2.234015464782715 -119,2.233879327774048 -120,2.233743190765381 -121,2.2336080074310303 -122,2.2334723472595215 -123,2.2333357334136963 -124,2.2331974506378174 -125,2.233058452606201 -126,2.2329201698303223 -127,2.2327818870544434 -128,2.232642650604248 -129,2.2325029373168945 -130,2.2323639392852783 -131,2.2322254180908203 -132,2.2320876121520996 -133,2.231950044631958 -134,2.231811046600342 -135,2.231672763824463 -136,2.231534481048584 -137,2.2313969135284424 -138,2.231260061264038 -139,2.2311244010925293 -140,2.2309906482696533 -141,2.23085880279541 -142,2.2307286262512207 -143,2.2305989265441895 -144,2.230471134185791 -145,2.230344295501709 -146,2.2302193641662598 -147,2.230095386505127 -148,2.2299723625183105 -149,2.2298502922058105 -150,2.229729413986206 -151,2.2296087741851807 -152,2.2294883728027344 -153,2.229367971420288 -154,2.229247808456421 -155,2.2291266918182373 -156,2.2290048599243164 -157,2.228883743286133 -158,2.2287659645080566 -159,2.2286558151245117 -160,2.228548049926758 -161,2.228442907333374 -162,2.2283380031585693 -163,2.2282357215881348 -164,2.2281391620635986 -165,2.2280449867248535 -166,2.227952480316162 -167,2.227860689163208 -168,2.227769374847412 -169,2.227677822113037 -170,2.227586507797241 -171,2.227494955062866 -172,2.2274038791656494 -173,2.2273128032684326 -174,2.227221727371216 -175,2.2271313667297363 -176,2.2270405292510986 -177,2.2269492149353027 -178,2.2268571853637695 -179,2.2267651557922363 -180,2.2266738414764404 -181,2.2265822887420654 -182,2.226491689682007 -183,2.226400852203369 -184,2.2263102531433105 -185,2.22622013092041 -186,2.2261295318603516 -187,2.226039409637451 -188,2.2259485721588135 -189,2.225858211517334 -190,2.2257680892944336 -191,2.2256782054901123 -192,2.2255890369415283 -193,2.2254996299743652 -194,2.2254109382629395 -195,2.2253217697143555 -196,2.2252323627471924 -197,2.2251439094543457 -198,2.22505521774292 -199,2.224966526031494 -200,2.224877119064331 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.100/training_config.txt deleted file mode 100644 index 7a89642..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta 
= state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.100/training_log.csv deleted file mode 100644 index 8aece33..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,55.91799545288086 -2,20.66969108581543 -3,10.040596008300781 -4,5.699201583862305 -5,4.510552406311035 -6,4.046518325805664 -7,3.5958149433135986 -8,3.2787818908691406 -9,3.0371484756469727 -10,2.870401382446289 -11,2.76939058303833 -12,2.703540802001953 -13,2.658630609512329 -14,2.621814727783203 -15,2.5879228115081787 -16,2.555415153503418 -17,2.524195909500122 -18,2.4939191341400146 -19,2.4644711017608643 -20,2.4363291263580322 -21,2.4095611572265625 -22,2.3842215538024902 -23,2.3611414432525635 -24,2.340850591659546 -25,2.323481798171997 -26,2.310725212097168 -27,2.302870273590088 -28,2.296640157699585 -29,2.2919461727142334 -30,2.2905070781707764 -31,2.2891697883605957 -32,2.284263849258423 -33,2.2747581005096436 -34,2.265350580215454 -35,2.260692596435547 -36,2.258758783340454 -37,2.258692741394043 -38,2.259711265563965 -39,2.2607202529907227 -40,2.2610580921173096 -41,2.2603275775909424 -42,2.2584972381591797 -43,2.255857467651367 -44,2.252854585647583 -45,2.249962091445923 -46,2.2475171089172363 -47,2.245609760284424 -48,2.244229555130005 -49,2.2432971000671387 -50,2.242703676223755 -51,2.2423644065856934 -52,2.242190361022949 -53,2.2420952320098877 -54,2.2419791221618652 
-55,2.2417871952056885 -56,2.2414650917053223 -57,2.240973711013794 -58,2.24035906791687 -59,2.2396860122680664 -60,2.239039421081543 -61,2.2385451793670654 -62,2.238236427307129 -63,2.2380590438842773 -64,2.237943410873413 -65,2.237797260284424 -66,2.2376041412353516 -67,2.237318992614746 -68,2.2369470596313477 -69,2.2365291118621826 -70,2.2361226081848145 -71,2.2357521057128906 -72,2.235447406768799 -73,2.2352194786071777 -74,2.2350354194641113 -75,2.234860420227051 -76,2.234689474105835 -77,2.234522581100464 -78,2.2343368530273438 -79,2.234119415283203 -80,2.233886957168579 -81,2.233654260635376 -82,2.2334229946136475 -83,2.2331931591033936 -84,2.2329742908477783 -85,2.232766628265381 -86,2.2325515747070312 -87,2.2323341369628906 -88,2.2321152687072754 -89,2.231898546218872 -90,2.231689214706421 -91,2.2314889430999756 -92,2.231307029724121 -93,2.231144666671753 -94,2.2309882640838623 -95,2.2308335304260254 -96,2.230675458908081 -97,2.230513572692871 -98,2.2303473949432373 -99,2.2301812171936035 -100,2.230012893676758 -101,2.2298550605773926 -102,2.229705333709717 -103,2.2295587062835693 -104,2.2294135093688965 -105,2.2292697429656982 -106,2.229126453399658 -107,2.228982925415039 -108,2.228842258453369 -109,2.228707790374756 -110,2.228576421737671 -111,2.228445053100586 -112,2.2283153533935547 -113,2.2281875610351562 -114,2.228062152862549 -115,2.227938413619995 -116,2.227814197540283 -117,2.2276902198791504 -118,2.227567195892334 -119,2.2274458408355713 -120,2.2273266315460205 -121,2.2272098064422607 -122,2.2270965576171875 -123,2.2269856929779053 -124,2.226877450942993 -125,2.2267708778381348 -126,2.2266652584075928 -127,2.2265610694885254 -128,2.2264578342437744 -129,2.226356267929077 -130,2.2262561321258545 -131,2.2261569499969482 -132,2.2260584831237793 -133,2.225961208343506 -134,2.2258646488189697 -135,2.225769281387329 -136,2.225674629211426 -137,2.2255799770355225 -138,2.2254865169525146 -139,2.225393295288086 -140,2.2253010272979736 -141,2.2252092361450195 -142,2.225119113922119 -143,2.225029945373535 -144,2.224940061569214 -145,2.2248504161834717 -146,2.224759340286255 -147,2.22466778755188 -148,2.2245750427246094 -149,2.2244796752929688 -150,2.2243826389312744 -151,2.2242848873138428 -152,2.2241859436035156 -153,2.224085807800293 -154,2.2239842414855957 -155,2.223881483078003 -156,2.223778009414673 -157,2.2236733436584473 -158,2.2235677242279053 -159,2.2234604358673096 -160,2.223353147506714 -161,2.2232449054718018 -162,2.223132371902466 -163,2.2230191230773926 -164,2.2229104042053223 -165,2.2228026390075684 -166,2.222693920135498 -167,2.222583532333374 -168,2.2224714756011963 -169,2.2223598957061768 -170,2.222249984741211 -171,2.222141742706299 -172,2.2220375537872314 -173,2.2219371795654297 -174,2.2218387126922607 -175,2.221745491027832 -176,2.22165584564209 -177,2.2215683460235596 -178,2.2214813232421875 -179,2.2213947772979736 -180,2.2213082313537598 -181,2.2212226390838623 -182,2.221137762069702 -183,2.2210538387298584 -184,2.220970630645752 -185,2.2208874225616455 -186,2.2208046913146973 -187,2.220722198486328 -188,2.220639228820801 -189,2.2205567359924316 -190,2.2204749584198 -191,2.2203938961029053 -192,2.220313549041748 -193,2.2202341556549072 -194,2.2201554775238037 -195,2.2200772762298584 -196,2.2199995517730713 -197,2.2199225425720215 -198,2.2198476791381836 -199,2.2197728157043457 -200,2.219698190689087 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.125/training_config.txt 
b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.125/training_config.txt deleted file mode 100644 index fa0e0d1..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.125/training_log.csv deleted file mode 100644 index aa4ecec..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,52.888431549072266 -2,12.388626098632812 -3,5.8187408447265625 -4,4.292400360107422 -5,3.6190121173858643 -6,3.2360010147094727 -7,2.943612813949585 -8,2.784775495529175 -9,2.6937713623046875 -10,2.6456379890441895 -11,2.6154422760009766 -12,2.5860393047332764 -13,2.552218198776245 -14,2.5133299827575684 -15,2.472337245941162 -16,2.43209171295166 -17,2.394174575805664 -18,2.3583836555480957 -19,2.326793670654297 -20,2.301739454269409 -21,2.28279972076416 -22,2.2726540565490723 -23,2.2728946208953857 -24,2.284569263458252 -25,2.295586585998535 -26,2.287957191467285 -27,2.2751734256744385 -28,2.2665510177612305 -29,2.2631382942199707 -30,2.2628257274627686 -31,2.2635762691497803 
-32,2.2642486095428467 -33,2.2636444568634033 -34,2.2614943981170654 -35,2.258277177810669 -36,2.254934549331665 -37,2.2523269653320312 -38,2.250983953475952 -39,2.25065279006958 -40,2.250823736190796 -41,2.251011610031128 -42,2.2510554790496826 -43,2.250943183898926 -44,2.2507362365722656 -45,2.2503466606140137 -46,2.2496721744537354 -47,2.2485885620117188 -48,2.2473182678222656 -49,2.246185779571533 -50,2.2453930377960205 -51,2.2449235916137695 -52,2.2445878982543945 -53,2.244227886199951 -54,2.2437751293182373 -55,2.243237257003784 -56,2.2426586151123047 -57,2.242159605026245 -58,2.2417683601379395 -59,2.2415034770965576 -60,2.241267681121826 -61,2.241051435470581 -62,2.240771532058716 -63,2.240407943725586 -64,2.2399637699127197 -65,2.239452362060547 -66,2.2389490604400635 -67,2.238480567932129 -68,2.2380778789520264 -69,2.237745761871338 -70,2.237481117248535 -71,2.2372515201568604 -72,2.237006425857544 -73,2.2367441654205322 -74,2.2364447116851807 -75,2.236107349395752 -76,2.235751152038574 -77,2.2353923320770264 -78,2.235069513320923 -79,2.2348105907440186 -80,2.234593152999878 -81,2.2343814373016357 -82,2.2341361045837402 -83,2.2338640689849854 -84,2.2336010932922363 -85,2.2333502769470215 -86,2.233131170272827 -87,2.23293399810791 -88,2.2327449321746826 -89,2.232556104660034 -90,2.2323672771453857 -91,2.232170343399048 -92,2.231963634490967 -93,2.231757164001465 -94,2.231550931930542 -95,2.23134446144104 -96,2.2311501502990723 -97,2.230961561203003 -98,2.230778694152832 -99,2.230595350265503 -100,2.230407953262329 -101,2.230224370956421 -102,2.230043888092041 -103,2.229868173599243 -104,2.2297000885009766 -105,2.229529619216919 -106,2.229355573654175 -107,2.2291860580444336 -108,2.2290244102478027 -109,2.2288622856140137 -110,2.228701591491699 -111,2.228543281555176 -112,2.228384494781494 -113,2.2282259464263916 -114,2.2280707359313965 -115,2.227921962738037 -116,2.2277724742889404 -117,2.2276206016540527 -118,2.227478265762329 -119,2.227337598800659 -120,2.2271950244903564 -121,2.2270519733428955 -122,2.2269084453582764 -123,2.2267651557922363 -124,2.226620674133301 -125,2.226478099822998 -126,2.226337432861328 -127,2.226198673248291 -128,2.2260613441467285 -129,2.225926637649536 -130,2.2257940769195557 -131,2.2256641387939453 -132,2.225538969039917 -133,2.2254185676574707 -134,2.225292682647705 -135,2.2251675128936768 -136,2.225045919418335 -137,2.2249248027801514 -138,2.2248034477233887 -139,2.224682331085205 -140,2.224562406539917 -141,2.2244436740875244 -142,2.224327325820923 -143,2.2242133617401123 -144,2.22410249710083 -145,2.223989486694336 -146,2.223876714706421 -147,2.2237656116485596 -148,2.2236552238464355 -149,2.2235448360443115 -150,2.2234346866607666 -151,2.2233235836029053 -152,2.2232143878936768 -153,2.223109483718872 -154,2.223001003265381 -155,2.2228918075561523 -156,2.2227859497070312 -157,2.222679853439331 -158,2.2225725650787354 -159,2.2224647998809814 -160,2.2223563194274902 -161,2.2222483158111572 -162,2.2221426963806152 -163,2.2220370769500732 -164,2.2219290733337402 -165,2.2218265533447266 -166,2.221723794937134 -167,2.221619129180908 -168,2.2215194702148438 -169,2.221418619155884 -170,2.2213141918182373 -171,2.2212064266204834 -172,2.2210962772369385 -173,2.2209882736206055 -174,2.2208850383758545 -175,2.2207741737365723 -176,2.220659017562866 -177,2.2205536365509033 -178,2.220447540283203 -179,2.220339059829712 -180,2.2202277183532715 -181,2.220118522644043 -182,2.220017671585083 -183,2.2199230194091797 -184,2.2198243141174316 -185,2.219728946685791 
-186,2.219634532928467 -187,2.2195370197296143 -188,2.219435453414917 -189,2.219331979751587 -190,2.2192282676696777 -191,2.219125986099243 -192,2.219020128250122 -193,2.218916177749634 -194,2.2188143730163574 -195,2.218712329864502 -196,2.2186098098754883 -197,2.218506336212158 -198,2.2184040546417236 -199,2.2183032035827637 -200,2.218207836151123 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.160/training_config.txt deleted file mode 100644 index b6e50e8..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.160/training_log.csv deleted file mode 100644 index 44eae3d..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,47.65028762817383 -2,12.13169002532959 -3,7.664369106292725 -4,6.214345932006836 -5,4.616584300994873 -6,4.025419235229492 -7,3.566601514816284 -8,3.2047765254974365 -9,3.0077195167541504 -10,2.8817996978759766 -11,2.7869150638580322 
-12,2.708423376083374 -13,2.638946294784546 -14,2.5769031047821045 -15,2.5222253799438477 -16,2.4759716987609863 -17,2.4400815963745117 -18,2.4123692512512207 -19,2.3904807567596436 -20,2.373703718185425 -21,2.3624489307403564 -22,2.3538196086883545 -23,2.3444409370422363 -24,2.3341758251190186 -25,2.323355197906494 -26,2.3131988048553467 -27,2.3044581413269043 -28,2.296921968460083 -29,2.2912096977233887 -30,2.287173271179199 -31,2.283398151397705 -32,2.2796266078948975 -33,2.2752487659454346 -34,2.2701210975646973 -35,2.2644784450531006 -36,2.2594926357269287 -37,2.255873203277588 -38,2.2546677589416504 -39,2.2558958530426025 -40,2.2577390670776367 -41,2.2587554454803467 -42,2.2581896781921387 -43,2.2561728954315186 -44,2.2536160945892334 -45,2.251157760620117 -46,2.249267101287842 -47,2.248121738433838 -48,2.2474119663238525 -49,2.246877908706665 -50,2.2463791370391846 -51,2.2458138465881348 -52,2.2451353073120117 -53,2.2443771362304688 -54,2.2435953617095947 -55,2.2428500652313232 -56,2.2421910762786865 -57,2.241664409637451 -58,2.2412126064300537 -59,2.2408785820007324 -60,2.2405734062194824 -61,2.2403180599212646 -62,2.2400949001312256 -63,2.2398054599761963 -64,2.239495038986206 -65,2.239208698272705 -66,2.2389042377471924 -67,2.2386345863342285 -68,2.238413095474243 -69,2.238165855407715 -70,2.2379519939422607 -71,2.237797498703003 -72,2.2376461029052734 -73,2.2374863624572754 -74,2.237311363220215 -75,2.2371177673339844 -76,2.236905097961426 -77,2.2367050647735596 -78,2.236588954925537 -79,2.236490249633789 -80,2.236370801925659 -81,2.2362265586853027 -82,2.2360620498657227 -83,2.2358920574188232 -84,2.2357423305511475 -85,2.2355940341949463 -86,2.2354629039764404 -87,2.2353601455688477 -88,2.2352397441864014 -89,2.2350869178771973 -90,2.234912157058716 -91,2.234753131866455 -92,2.234605073928833 -93,2.234462261199951 -94,2.2343227863311768 -95,2.23419451713562 -96,2.234025478363037 -97,2.2338383197784424 -98,2.2336843013763428 -99,2.2335712909698486 -100,2.2334585189819336 -101,2.233344554901123 -102,2.233225107192993 -103,2.233102321624756 -104,2.2329773902893066 -105,2.2328507900238037 -106,2.232726573944092 -107,2.232605218887329 -108,2.2324841022491455 -109,2.232365608215332 -110,2.232250213623047 -111,2.2321360111236572 -112,2.232023239135742 -113,2.2319138050079346 -114,2.231806516647339 -115,2.2316994667053223 -116,2.2315924167633057 -117,2.2314867973327637 -118,2.2313802242279053 -119,2.231271743774414 -120,2.231161594390869 -121,2.2310736179351807 -122,2.230977773666382 -123,2.2308766841888428 -124,2.230771780014038 -125,2.2306652069091797 -126,2.2305657863616943 -127,2.230487108230591 -128,2.230396032333374 -129,2.2303121089935303 -130,2.23022723197937 -131,2.230140209197998 -132,2.2300524711608887 -133,2.2299633026123047 -134,2.2298710346221924 -135,2.229783296585083 -136,2.2297019958496094 -137,2.2296149730682373 -138,2.229531764984131 -139,2.2294530868530273 -140,2.2293715476989746 -141,2.2292873859405518 -142,2.229201316833496 -143,2.2291183471679688 -144,2.229036569595337 -145,2.2289490699768066 -146,2.2288665771484375 -147,2.2287838459014893 -148,2.228698968887329 -149,2.2286133766174316 -150,2.228527069091797 -151,2.228447198867798 -152,2.2283644676208496 -153,2.228278398513794 -154,2.2281951904296875 -155,2.2281110286712646 -156,2.2280256748199463 -157,2.22794246673584 -158,2.2278599739074707 -159,2.2277767658233643 -160,2.2276952266693115 -161,2.2276132106781006 -162,2.2275309562683105 -163,2.227449655532837 -164,2.2273666858673096 -165,2.227276563644409 
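Every config in the sweep also repeats the same weighted MSE loss. Its size comments label theta as [batch_size, t_points], but weights.view(-1, 1) can only yield [t_points, 1], and that broadcast only lines up if the trajectory is time-major. A sketch under the assumption that state_traj is laid out [t_points, batch, state], as torchdiffeq's odeint returns it:

# Sketch of the sweep's weighted loss, assuming a time-major trajectory
# tensor ([t_points, batch, state]); under that layout weights.view(-1, 1)
# is [t_points, 1] and broadcasts across the batch dimension.
import torch

def loss_fn(state_traj, t_span, weight_fn):
    theta = state_traj[:, :, 0]          # [t_points, batch]
    desired_theta = state_traj[:, :, 3]  # [t_points, batch]
    weights = weight_fn(t_span, min_val=0.01)  # [t_points]
    weights = weights.view(-1, 1)              # [t_points, 1]
    return torch.mean(weights * (theta - desired_theta) ** 2)

if __name__ == "__main__":
    t = torch.linspace(0.0, 10.0, 1000)
    traj = torch.zeros(1000, 22, 4)  # 1000 time points, 22 cases, 4 state entries
    traj[:, :, 0] = 0.1              # constant 0.1 rad angle error
    w = lambda ts, min_val: min_val + (1 - min_val) / ts[-1] ** 2 * (ts[-1] - ts) ** 2
    print(loss_fn(traj, t, w).item())  # mean weight times 0.01, about 0.0034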
-166,2.227186679840088 -167,2.227099895477295 -168,2.227017402648926 -169,2.2269370555877686 -170,2.2268593311309814 -171,2.2267699241638184 -172,2.2266852855682373 -173,2.2266032695770264 -174,2.22652268409729 -175,2.2264420986175537 -176,2.226360321044922 -177,2.2262773513793945 -178,2.2261946201324463 -179,2.2261109352111816 -180,2.2260286808013916 -181,2.2259464263916016 -182,2.225865125656128 -183,2.2257838249206543 -184,2.2257015705108643 -185,2.2256202697753906 -186,2.225538730621338 -187,2.2254574298858643 -188,2.225376605987549 -189,2.2252962589263916 -190,2.225216865539551 -191,2.225137710571289 -192,2.225057601928711 -193,2.224977970123291 -194,2.224897623062134 -195,2.224818706512451 -196,2.224740505218506 -197,2.2246620655059814 -198,2.2245841026306152 -199,2.224505662918091 -200,2.224428176879883 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.200/training_config.txt deleted file mode 100644 index 425c186..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.200/training_log.csv 
b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.200/training_log.csv deleted file mode 100644 index 2d621eb..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,44.84763717651367 -2,17.578901290893555 -3,7.506513595581055 -4,5.068342685699463 -5,4.171604633331299 -6,3.5817322731018066 -7,3.165100574493408 -8,2.940910577774048 -9,2.7918388843536377 -10,2.685284376144409 -11,2.608059883117676 -12,2.554755210876465 -13,2.5181050300598145 -14,2.4898955821990967 -15,2.467374563217163 -16,2.447208881378174 -17,2.4281115531921387 -18,2.4114904403686523 -19,2.400163173675537 -20,2.392855405807495 -21,2.39040207862854 -22,2.3861947059631348 -23,2.3746395111083984 -24,2.356921911239624 -25,2.3380613327026367 -26,2.3232266902923584 -27,2.3144540786743164 -28,2.3082728385925293 -29,2.302807331085205 -30,2.2971181869506836 -31,2.2909977436065674 -32,2.2847845554351807 -33,2.279242515563965 -34,2.2750303745269775 -35,2.2724788188934326 -36,2.2710742950439453 -37,2.2702014446258545 -38,2.2690510749816895 -39,2.267385482788086 -40,2.265078067779541 -41,2.262272596359253 -42,2.259248733520508 -43,2.256653308868408 -44,2.2544825077056885 -45,2.2527880668640137 -46,2.251295566558838 -47,2.249763250350952 -48,2.2480945587158203 -49,2.2464280128479004 -50,2.2449564933776855 -51,2.243804454803467 -52,2.243009090423584 -53,2.2426061630249023 -54,2.242281198501587 -55,2.241986036300659 -56,2.241651773452759 -57,2.241220474243164 -58,2.2407710552215576 -59,2.24051833152771 -60,2.2403976917266846 -61,2.2403085231781006 -62,2.2402496337890625 -63,2.2401444911956787 -64,2.2399544715881348 -65,2.239694356918335 -66,2.2394089698791504 -67,2.2390925884246826 -68,2.2387614250183105 -69,2.2384438514709473 -70,2.2381114959716797 -71,2.237738847732544 -72,2.2373881340026855 -73,2.2370216846466064 -74,2.2366416454315186 -75,2.236367702484131 -76,2.236157178878784 -77,2.235985040664673 -78,2.235839605331421 -79,2.2356975078582764 -80,2.2355458736419678 -81,2.2353744506835938 -82,2.2351927757263184 -83,2.2350199222564697 -84,2.234858274459839 -85,2.2347052097320557 -86,2.234558582305908 -87,2.23439621925354 -88,2.2342209815979004 -89,2.234072685241699 -90,2.2339131832122803 -91,2.2337491512298584 -92,2.2335832118988037 -93,2.233426332473755 -94,2.2332701683044434 -95,2.2331297397613525 -96,2.232994794845581 -97,2.2328579425811768 -98,2.232722759246826 -99,2.2325849533081055 -100,2.232452154159546 -101,2.2323081493377686 -102,2.232158660888672 -103,2.232004404067993 -104,2.2318482398986816 -105,2.231696844100952 -106,2.2315471172332764 -107,2.231398820877075 -108,2.231250286102295 -109,2.231106758117676 -110,2.230973243713379 -111,2.2308526039123535 -112,2.230741024017334 -113,2.2306275367736816 -114,2.230513334274292 -115,2.2303922176361084 -116,2.230268716812134 -117,2.2301409244537354 -118,2.2300162315368652 -119,2.22989559173584 -120,2.229769229888916 -121,2.229644775390625 -122,2.229520082473755 -123,2.229400634765625 -124,2.2292892932891846 -125,2.229180335998535 -126,2.2290658950805664 -127,2.228954315185547 -128,2.2288429737091064 -129,2.228728771209717 -130,2.228611469268799 -131,2.2284951210021973 -132,2.2283923625946045 -133,2.2282767295837402 -134,2.2281641960144043 -135,2.2280611991882324 -136,2.2279560565948486 -137,2.2278454303741455 -138,2.227734088897705 -139,2.2276229858398438 -140,2.22751522064209 -141,2.227414608001709 -142,2.2273035049438477 -143,2.2271921634674072 
-144,2.2270901203155518 -145,2.2269861698150635 -146,2.226879358291626 -147,2.2267680168151855 -148,2.2266600131988525 -149,2.2265546321868896 -150,2.226445436477661 -151,2.226343870162964 -152,2.226236343383789 -153,2.226135015487671 -154,2.226029396057129 -155,2.225926399230957 -156,2.2258238792419434 -157,2.2257165908813477 -158,2.2256107330322266 -159,2.22550630569458 -160,2.225407361984253 -161,2.2253026962280273 -162,2.2251996994018555 -163,2.225095510482788 -164,2.224996566772461 -165,2.224893569946289 -166,2.224788188934326 -167,2.22468900680542 -168,2.2245876789093018 -169,2.224489450454712 -170,2.224393367767334 -171,2.2242939472198486 -172,2.2241978645324707 -173,2.224102020263672 -174,2.22400164604187 -175,2.2239010334014893 -176,2.2238030433654785 -177,2.223705530166626 -178,2.2236080169677734 -179,2.2235107421875 -180,2.2234139442443848 -181,2.223318576812744 -182,2.2232236862182617 -183,2.2231247425079346 -184,2.2230279445648193 -185,2.222930908203125 -186,2.222834825515747 -187,2.22273850440979 -188,2.222641706466675 -189,2.2225444316864014 -190,2.2224483489990234 -191,2.222348690032959 -192,2.2222518920898438 -193,2.222154378890991 -194,2.2220547199249268 -195,2.221954822540283 -196,2.221863031387329 -197,2.2217607498168945 -198,2.22166109085083 -199,2.2215652465820312 -200,2.2214651107788086 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.250/training_config.txt deleted file mode 100644 index 20552bd..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] 
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.250/training_log.csv deleted file mode 100644 index 76358a8..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,47.35834503173828 -2,23.12317657470703 -3,12.356819152832031 -4,9.598474502563477 -5,8.064263343811035 -6,7.018087863922119 -7,6.11281681060791 -8,5.34954309463501 -9,4.7572340965271 -10,4.299698829650879 -11,3.953247547149658 -12,3.6948459148406982 -13,3.5078437328338623 -14,3.375770330429077 -15,3.2818074226379395 -16,3.20894455909729 -17,3.144158363342285 -18,3.0790159702301025 -19,3.0142202377319336 -20,2.9499001502990723 -21,2.8870251178741455 -22,2.8326714038848877 -23,2.779034376144409 -24,2.726001739501953 -25,2.675177812576294 -26,2.6282317638397217 -27,2.5870351791381836 -28,2.5522067546844482 -29,2.5238118171691895 -30,2.5012614727020264 -31,2.4838249683380127 -32,2.4700942039489746 -33,2.458772897720337 -34,2.4487011432647705 -35,2.439683437347412 -36,2.43121600151062 -37,2.4230763912200928 -38,2.4152097702026367 -39,2.4076290130615234 -40,2.4003536701202393 -41,2.3936009407043457 -42,2.3873116970062256 -43,2.381629705429077 -44,2.3768887519836426 -45,2.372692108154297 -46,2.3694164752960205 -47,2.366563320159912 -48,2.3638908863067627 -49,2.361703395843506 -50,2.359363555908203 -51,2.3568613529205322 -52,2.353870153427124 -53,2.350755214691162 -54,2.3479971885681152 -55,2.3452563285827637 -56,2.3425214290618896 -57,2.3398313522338867 -58,2.3375422954559326 -59,2.3352808952331543 -60,2.3329920768737793 -61,2.330690860748291 -62,2.32832407951355 -63,2.3259828090667725 -64,2.323579788208008 -65,2.3210906982421875 -66,2.318527936935425 -67,2.3159399032592773 -68,2.3133461475372314 -69,2.3107728958129883 -70,2.3082220554351807 -71,2.3056328296661377 -72,2.302945375442505 -73,2.3002636432647705 -74,2.29768967628479 -75,2.295095443725586 -76,2.292496919631958 -77,2.289896011352539 -78,2.287307024002075 -79,2.284741163253784 -80,2.282099723815918 -81,2.2793946266174316 -82,2.2765848636627197 -83,2.27382755279541 -84,2.271089553833008 -85,2.268387794494629 -86,2.2655234336853027 -87,2.262864589691162 -88,2.260310411453247 -89,2.2579123973846436 -90,2.255596876144409 -91,2.253446340560913 -92,2.2513957023620605 -93,2.2494518756866455 -94,2.247720241546631 -95,2.2462103366851807 -96,2.2449076175689697 -97,2.2438273429870605 -98,2.2429304122924805 -99,2.242302656173706 -100,2.24190092086792 -101,2.2415642738342285 -102,2.2412872314453125 -103,2.2411909103393555 -104,2.2410130500793457 -105,2.2407748699188232 -106,2.240489959716797 -107,2.240189552307129 -108,2.239828586578369 -109,2.2394280433654785 -110,2.2389845848083496 -111,2.2384979724884033 -112,2.2380001544952393 -113,2.237523317337036 -114,2.2371339797973633 -115,2.236875295639038 -116,2.2366907596588135 -117,2.2365012168884277 -118,2.2363290786743164 -119,2.2361483573913574 -120,2.2359743118286133 -121,2.2358126640319824 -122,2.235685110092163 -123,2.235562801361084 -124,2.2354233264923096 
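The 22 training cases repeated in every config are float32 renderings of simple multiples of pi: 0.5235987901687622 is pi/6, 2.094395160675049 is 2pi/3, 6.2831854820251465 is 2pi, and so on. A sketch (helper name and subset chosen here purely for illustration) that packs such rows into the batched initial-state tensor the loss above consumes:

# Sketch (helper name assumed) packing training cases like those repeated in
# each config into a batched initial-condition tensor; only a representative
# subset of the 22 rows is spelled out.
import math

import torch

# [theta0, omega0, alpha0, desired_theta]
TRAINING_CASES = [
    [math.pi / 6, 0.0, 0.0, 0.0],
    [-math.pi / 6, 0.0, 0.0, 0.0],
    [2 * math.pi / 3, 0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0, 2 * math.pi],
    [math.pi / 4, math.pi, 0.0, 2 * math.pi],
]

def initial_states(cases=TRAINING_CASES):
    # One row per case; columns follow the state layout used by the loss.
    return torch.tensor(cases, dtype=torch.float32)

if __name__ == "__main__":
    print(initial_states().shape)  # torch.Size([5, 4])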
-125,2.2352635860443115 -126,2.2350873947143555 -127,2.2348949909210205 -128,2.234703540802002 -129,2.2345173358917236 -130,2.2343552112579346 -131,2.2342216968536377 -132,2.2340896129608154 -133,2.2339532375335693 -134,2.233798027038574 -135,2.233642101287842 -136,2.2334847450256348 -137,2.23333477973938 -138,2.2331809997558594 -139,2.233023166656494 -140,2.232846736907959 -141,2.2326765060424805 -142,2.2325425148010254 -143,2.2324368953704834 -144,2.2323098182678223 -145,2.2321696281433105 -146,2.232069730758667 -147,2.231959342956543 -148,2.2318201065063477 -149,2.2316641807556152 -150,2.2315423488616943 -151,2.2314395904541016 -152,2.2313334941864014 -153,2.231235980987549 -154,2.23112416267395 -155,2.231001138687134 -156,2.230881929397583 -157,2.2307698726654053 -158,2.2306790351867676 -159,2.2305750846862793 -160,2.2304506301879883 -161,2.230318069458008 -162,2.2302165031433105 -163,2.230116128921509 -164,2.230005979537964 -165,2.229886531829834 -166,2.229769468307495 -167,2.22965931892395 -168,2.2295703887939453 -169,2.229499340057373 -170,2.2294089794158936 -171,2.2292959690093994 -172,2.2292048931121826 -173,2.2291176319122314 -174,2.229027509689331 -175,2.2289366722106934 -176,2.228839874267578 -177,2.22873854637146 -178,2.2286376953125 -179,2.228543281555176 -180,2.228456735610962 -181,2.228365659713745 -182,2.2282631397247314 -183,2.228175640106201 -184,2.2280924320220947 -185,2.2280049324035645 -186,2.227918863296509 -187,2.227832794189453 -188,2.2277610301971436 -189,2.2276737689971924 -190,2.2275795936584473 -191,2.22749662399292 -192,2.227412700653076 -193,2.2273271083831787 -194,2.2272403240203857 -195,2.2271666526794434 -196,2.227083921432495 -197,2.2270021438598633 -198,2.2269210815429688 -199,2.226837396621704 -200,2.2267563343048096 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.300/training_config.txt deleted file mode 100644 index 5176be0..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 
6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.300/training_log.csv deleted file mode 100644 index d62f355..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.300/training_log.csv +++ /dev/null @@ -1,82 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,50.01425552368164 -2,13.452088356018066 -3,9.531667709350586 -4,7.447359561920166 -5,6.714984893798828 -6,6.171786308288574 -7,5.655827522277832 -8,5.322393417358398 -9,5.0658769607543945 -10,4.848609447479248 -11,4.656316757202148 -12,4.481632232666016 -13,4.320370197296143 -14,4.1711201667785645 -15,4.0360846519470215 -16,3.913970470428467 -17,3.8025503158569336 -18,3.6990716457366943 -19,3.6017258167266846 -20,3.5112552642822266 -21,3.425931930541992 -22,3.3375234603881836 -23,3.254878282546997 -24,3.2338883876800537 -25,6.564783573150635 -26,35.67753601074219 -27,103.41808319091797 -28,136.14718627929688 -29,78.20431518554688 -30,44.13471984863281 -31,27.04926109313965 -32,17.97943115234375 -33,13.336041450500488 -34,9.408655166625977 -35,9.953041076660156 -36,7.357443332672119 -37,7.604946136474609 -38,8.371832847595215 -39,8.445878982543945 -40,8.310261726379395 -41,7.419074058532715 -42,6.678732872009277 -43,6.019708156585693 -44,5.9008097648620605 -45,5.9898552894592285 -46,6.179836750030518 -47,6.584438323974609 -48,7.111766815185547 -49,7.113443374633789 -50,7.251547813415527 -51,7.202012062072754 -52,7.162543296813965 -53,7.090674877166748 -54,6.913305282592773 -55,6.66990852355957 -56,6.5683159828186035 -57,6.332614898681641 -58,6.270957946777344 -59,6.186808109283447 -60,6.0718607902526855 -61,6.072871685028076 -62,6.015291213989258 -63,5.879711151123047 -64,5.348692417144775 -65,5.002478122711182 -66,4.781198501586914 -67,4.607629299163818 -68,4.503844261169434 -69,4.563987731933594 -70,4.437986850738525 -71,4.3682942390441895 -72,4.147932052612305 -73,3.8174450397491455 -74,3.511507749557495 -75,3.3864598274230957 -76,3.3238866329193115 -77,nan -78,nan -79,nan -80,nan diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.400/training_config.txt deleted file mode 100644 index 2ab9bbf..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, 
:, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.400/training_log.csv deleted file mode 100644 index 4fd54bb..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.400/training_log.csv +++ /dev/null @@ -1,16 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,80.77153778076172 -2,11.989009857177734 -3,10.258030891418457 -4,8.513456344604492 -5,10.644143104553223 -6,7.340682506561279 -7,7.257936954498291 -8,7.161683082580566 -9,7.016507625579834 -10,6.830937385559082 -11,nan -12,nan -13,nan -14,nan diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.500/training_config.txt deleted file mode 100644 index 68a5561..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now 
Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.500/training_log.csv deleted file mode 100644 index a010455..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.500/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,735.147216796875 -2,8984.443359375 -3,13.156900405883789 -4,11.05036449432373 -5,12.757122039794922 -6,13.071992874145508 -7,10.597452163696289 -8,10.671792030334473 -9,11.983652114868164 -10,132.00418090820312 -11,1666.8143310546875 -12,363.83917236328125 -13,30.79215431213379 -14,30.148286819458008 -15,29.998884201049805 -16,30.110918045043945 -17,30.314420700073242 -18,30.847604751586914 -19,31.811769485473633 -20,33.49662399291992 -21,36.88572311401367 -22,44.21027374267578 -23,60.90687942504883 -24,81.00617218017578 -25,55.51518630981445 -26,41.82770919799805 -27,36.7371711730957 -28,34.9713020324707 -29,33.976966857910156 -30,33.14352035522461 -31,32.409061431884766 -32,31.787574768066406 -33,31.274511337280273 -34,30.908613204956055 -35,30.55628776550293 -36,30.168827056884766 -37,29.80228042602539 -38,29.797937393188477 -39,29.78870391845703 -40,29.775415420532227 -41,29.7568416595459 -42,29.7340145111084 -43,29.70790672302246 -44,29.67440414428711 -45,29.6373291015625 -46,29.597576141357422 -47,29.554460525512695 -48,29.508363723754883 -49,29.461078643798828 -50,29.407148361206055 -51,29.349258422851562 -52,29.28774642944336 -53,29.22288703918457 -54,29.15525245666504 -55,29.084897994995117 -56,29.011003494262695 -57,28.935678482055664 -58,28.857181549072266 -59,28.775671005249023 -60,28.691516876220703 -61,28.604562759399414 -62,28.515792846679688 -63,28.425926208496094 -64,28.33485984802246 -65,28.24390983581543 -66,28.14826202392578 -67,28.040210723876953 -68,27.933135986328125 -69,27.836267471313477 -70,27.762252807617188 
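The logs make the sweep's failure modes legible straight from the CSVs: lr_0.300 is stable until it blows up around epoch 25 and reads nan from epoch 77 on, lr_0.400 hits nan by epoch 11, and lr_0.500 spikes to roughly 8984 at epoch 2 before stalling near a loss of 27. A post-processing sketch that flags such runs (the lr_*/training_log.csv layout matches the paths in this diff; the plateau heuristic is an assumption, not part of the original tooling):

# Sketch of a sweep post-processor over the training_log.csv files deleted
# in this diff; flags nan divergence and saturated plateaus such as the
# 9846.0 and 9910.626953125 values that appear in the higher-rate logs below.
import csv
import glob
import math
import os

def summarize_run(path):
    with open(path) as f:
        losses = [float(row["Loss"]) for row in csv.DictReader(f)]
    if any(math.isnan(x) for x in losses):
        return "diverged (nan)"
    if len(losses) >= 5 and len(set(losses[-5:])) == 1 and losses[-1] > 1000:
        return f"saturated at {losses[-1]}"
    return f"final loss {losses[-1]:.4f}"

if __name__ == "__main__":
    base = "training/time_weighting_learning_rate_sweep/quadratic_mirrored"
    for log in sorted(glob.glob(os.path.join(base, "lr_*", "training_log.csv"))):
        print(log, "->", summarize_run(log))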
-71,27.72992706298828 -72,27.739646911621094 -73,27.776071548461914 -74,27.820451736450195 -75,27.843881607055664 -76,27.82962989807129 -77,27.783645629882812 -78,27.726179122924805 -79,27.676164627075195 -80,27.63591957092285 -81,27.608842849731445 -82,27.59466552734375 -83,27.590309143066406 -84,27.59136962890625 -85,27.59335708618164 -86,27.593488693237305 -87,27.590272903442383 -88,27.58330726623535 -89,27.572914123535156 -90,27.5598087310791 -91,27.54501724243164 -92,27.529722213745117 -93,27.515182495117188 -94,27.502443313598633 -95,27.492189407348633 -96,27.484539031982422 -97,27.479097366333008 -98,27.47515869140625 -99,27.47186279296875 -100,27.46845245361328 -101,27.464384078979492 -102,27.459434509277344 -103,27.453628540039062 -104,27.447193145751953 -105,27.440454483032227 -106,27.433738708496094 -107,27.42736053466797 -108,27.4215145111084 -109,27.41626739501953 -110,27.411590576171875 -111,27.40738296508789 -112,27.40350914001465 -113,27.39981460571289 -114,27.396167755126953 -115,27.392465591430664 -116,27.388681411743164 -117,27.384801864624023 -118,27.380863189697266 -119,27.376909255981445 -120,27.373014450073242 -121,27.36921501159668 -122,27.365562438964844 -123,27.362056732177734 -124,27.358720779418945 -125,27.35551643371582 -126,27.35244369506836 -127,27.34946632385254 -128,27.3465518951416 -129,27.34366798400879 -130,27.340818405151367 -131,27.337993621826172 -132,27.33517074584961 -133,27.332386016845703 -134,27.329635620117188 -135,27.326923370361328 -136,27.32426643371582 -137,27.321666717529297 -138,27.319141387939453 -139,27.316659927368164 -140,27.314254760742188 -141,27.31189727783203 -142,27.30959701538086 -143,27.307329177856445 -144,27.305095672607422 -145,27.302898406982422 -146,27.300739288330078 -147,27.298603057861328 -148,27.296497344970703 -149,27.294424057006836 -150,27.292383193969727 -151,27.290363311767578 -152,27.28837776184082 -153,27.286434173583984 -154,27.284513473510742 -155,27.28262710571289 -156,27.280773162841797 -157,27.278955459594727 -158,27.277151107788086 -159,27.275381088256836 -160,27.27363395690918 -161,27.27190589904785 -162,27.27020263671875 -163,27.268516540527344 -164,27.266857147216797 -165,27.265235900878906 -166,27.26361656188965 -167,27.262027740478516 -168,27.260448455810547 -169,27.25889015197754 -170,27.257354736328125 -171,27.25584602355957 -172,27.254343032836914 -173,27.25286293029785 -174,27.25141143798828 -175,27.249969482421875 -176,27.24852752685547 -177,27.24709701538086 -178,27.245676040649414 -179,27.244260787963867 -180,27.24285316467285 -181,27.241466522216797 -182,27.240081787109375 -183,27.238698959350586 -184,27.237346649169922 -185,27.23598861694336 -186,27.234630584716797 -187,27.233295440673828 -188,27.231966018676758 -189,27.230648040771484 -190,27.229341506958008 -191,27.228042602539062 -192,27.226749420166016 -193,27.22547721862793 -194,27.224206924438477 -195,27.22294807434082 -196,27.22169303894043 -197,27.220455169677734 -198,27.219221115112305 -199,27.218000411987305 -200,27.216787338256836 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.600/training_config.txt deleted file mode 100644 index 70911bf..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 
1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.600/training_log.csv deleted file mode 100644 index 1e03f78..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.600/training_log.csv +++ /dev/null @@ -1,20 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,3105.369384765625 -2,9819.306640625 -3,9834.8291015625 -4,9831.8701171875 -5,9818.0693359375 -6,9791.4228515625 -7,9757.396484375 -8,9671.19140625 -9,8336.791015625 -10,9592.1640625 -11,9822.146484375 -12,9841.8095703125 -13,9846.0 -14,9846.0 -15,9846.0 -16,9846.0 -17,9846.0 -18,9846.0 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.700/training_config.txt deleted file mode 100644 index 609f28d..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights 
= weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.700/training_log.csv deleted file mode 100644 index 4fa0b39..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,5868.9296875 -2,9843.640625 -3,9846.0 -4,9846.0 -5,9846.0 -6,9846.0 -7,9846.0 -8,9846.0 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.800/training_config.txt deleted file mode 100644 index 0b06643..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, 
torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.800/training_log.csv deleted file mode 100644 index 330388a..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,7763.013671875 -2,9846.0 -3,9846.0 -4,9846.0 -5,9846.0 -6,9846.0 -7,9846.0 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.900/training_config.txt deleted file mode 100644 index a31f572..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 
0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.900/training_log.csv deleted file mode 100644 index 9eb25a2..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,8530.0576171875 -2,9846.0 -3,9846.0 -4,9846.0 -5,9846.0 -6,9846.0 -7,9846.0 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_1.000/training_config.txt deleted file mode 100644 index e19aba8..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 
3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_1.000/training_log.csv deleted file mode 100644 index c19c0eb..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_1.000/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,8978.115234375 -2,9846.0 -3,9846.0 -4,9846.0 -5,9846.0 -6,9846.0 -7,9846.0 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_16.000/training_config.txt deleted file mode 100644 index fd9b128..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_16.000/training_log.csv deleted file mode 100644 index be8dce7..0000000 --- 
a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,9910.626953125 -2,9910.626953125 -3,9910.626953125 -4,9910.626953125 -5,9910.626953125 -6,9910.626953125 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_2.000/training_config.txt deleted file mode 100644 index 201c15a..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_2.000/training_log.csv deleted file mode 100644 index 391c399..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,9845.5283203125 -2,6.564489364624023 -3,nan -4,nan -5,nan -6,nan diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_4.000/training_config.txt deleted file mode 100644 index 62dd88a..0000000 --- 
a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_4.000/training_log.csv deleted file mode 100644 index be8dce7..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,9910.626953125 -2,9910.626953125 -3,9910.626953125 -4,9910.626953125 -5,9910.626953125 -6,9910.626953125 diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_8.000/training_config.txt deleted file mode 100644 index a385afd..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - 
- min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def quadratic_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**2) * (-t_span + t_max)**2 - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_8.000/training_log.csv deleted file mode 100644 index be8dce7..0000000 --- a/training/time_weighting_learning_rate_sweep/quadratic_mirrored/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,99.56229400634766 -1,9910.626953125 -2,9910.626953125 -3,9910.626953125 -4,9910.626953125 -5,9910.626953125 -6,9910.626953125 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.010/training_config.txt deleted file mode 100644 index e913a80..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - 
t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.010/training_log.csv deleted file mode 100644 index 0480ded..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,807.437255859375 -2,657.265869140625 -3,626.4368896484375 -4,519.5245971679688 -5,452.6858215332031 -6,362.4673767089844 -7,274.154052734375 -8,219.41561889648438 -9,190.49769592285156 -10,182.4573516845703 -11,135.7289276123047 -12,136.76283264160156 -13,145.03369140625 -14,143.1085968017578 -15,140.9138641357422 -16,138.25674438476562 -17,141.89781188964844 -18,141.08798217773438 -19,143.6170196533203 -20,142.32177734375 -21,141.77774047851562 -22,143.39181518554688 -23,136.8861541748047 -24,134.2937469482422 -25,133.67567443847656 -26,131.65716552734375 -27,129.58372497558594 -28,128.65872192382812 -29,127.57747650146484 -30,126.27096557617188 -31,124.64187622070312 -32,122.84879302978516 -33,121.88170623779297 -34,120.82856750488281 -35,119.17062377929688 -36,118.3114013671875 -37,117.33548736572266 -38,116.04303741455078 -39,114.65664672851562 -40,113.4565200805664 -41,112.45121765136719 -42,111.34632873535156 -43,109.8935317993164 -44,108.22626495361328 -45,106.30899810791016 -46,103.54313659667969 -47,104.60770416259766 -48,103.6973876953125 -49,104.85350799560547 -50,104.76750946044922 -51,104.1424560546875 -52,104.14584350585938 -53,103.48111724853516 -54,103.72035217285156 -55,103.7022705078125 -56,102.79895782470703 -57,102.1320571899414 -58,101.77114868164062 -59,102.61688995361328 -60,107.21199798583984 -61,111.84992980957031 -62,97.1383056640625 -63,90.57194519042969 -64,85.68081665039062 -65,79.0811996459961 -66,64.64263153076172 -67,57.390342712402344 -68,49.49656295776367 -69,41.821632385253906 -70,40.55818176269531 -71,41.21539306640625 -72,40.73186111450195 -73,37.45806121826172 -74,34.42682647705078 -75,33.2751579284668 -76,32.291404724121094 -77,30.465290069580078 -78,25.874267578125 -79,24.937419891357422 -80,22.977582931518555 -81,20.755706787109375 -82,20.116744995117188 -83,19.54094123840332 
-84,19.033601760864258 -85,18.587369918823242 -86,18.192903518676758 -87,17.839828491210938 -88,17.517074584960938 -89,17.21331024169922 -90,17.013324737548828 -91,16.664295196533203 -92,16.794862747192383 -93,16.672061920166016 -94,16.5231990814209 -95,16.399044036865234 -96,16.29707145690918 -97,16.213092803955078 -98,16.14314842224121 -99,16.084444046020508 -100,16.035932540893555 -101,15.997516632080078 -102,15.970136642456055 -103,15.955357551574707 -104,15.953569412231445 -105,15.959564208984375 -106,15.958365440368652 -107,15.934432029724121 -108,15.890220642089844 -109,15.840970039367676 -110,15.797087669372559 -111,15.761082649230957 -112,15.731797218322754 -113,15.707502365112305 -114,15.68675422668457 -115,15.668526649475098 -116,15.652140617370605 -117,15.637121200561523 -118,15.623154640197754 -119,15.610005378723145 -120,15.597522735595703 -121,15.585588455200195 -122,15.574125289916992 -123,15.563065528869629 -124,15.552379608154297 -125,15.54202651977539 -126,15.531974792480469 -127,15.522198677062988 -128,15.512687683105469 -129,15.503429412841797 -130,15.494406700134277 -131,15.485611915588379 -132,15.477031707763672 -133,15.468663215637207 -134,15.46049976348877 -135,15.452529907226562 -136,15.444746971130371 -137,15.437137603759766 -138,15.429697036743164 -139,15.422415733337402 -140,15.415281295776367 -141,15.40829086303711 -142,15.401432991027832 -143,15.394697189331055 -144,15.38808822631836 -145,15.381585121154785 -146,15.375191688537598 -147,15.368891716003418 -148,15.362692832946777 -149,15.356585502624512 -150,15.350556373596191 -151,15.344612121582031 -152,15.338732719421387 -153,15.332925796508789 -154,15.327178955078125 -155,15.321488380432129 -156,15.315849304199219 -157,15.310259819030762 -158,15.30471420288086 -159,15.299217224121094 -160,15.293760299682617 -161,15.28834056854248 -162,15.282951354980469 -163,15.277599334716797 -164,15.272284507751465 -165,15.266992568969727 -166,15.261736869812012 -167,15.256503105163574 -168,15.251302719116211 -169,15.246126174926758 -170,15.240973472595215 -171,15.235854148864746 -172,15.230753898620605 -173,15.225679397583008 -174,15.220627784729004 -175,15.215593338012695 -176,15.210585594177246 -177,15.205599784851074 -178,15.200626373291016 -179,15.195672035217285 -180,15.190739631652832 -181,15.185822486877441 -182,15.18092155456543 -183,15.176036834716797 -184,15.171173095703125 -185,15.1663236618042 -186,15.161490440368652 -187,15.1566743850708 -188,15.151877403259277 -189,15.147099494934082 -190,15.142332077026367 -191,15.13758659362793 -192,15.132855415344238 -193,15.128137588500977 -194,15.123438835144043 -195,15.118754386901855 -196,15.11408519744873 -197,15.109427452087402 -198,15.104791641235352 -199,15.100163459777832 -200,15.095549583435059 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.020/training_config.txt deleted file mode 100644 index 02b6c9a..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] 
- weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.020/training_log.csv deleted file mode 100644 index 9d512e7..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,731.7787475585938 -2,496.84454345703125 -3,306.94866943359375 -4,180.92323303222656 -5,136.40126037597656 -6,115.62543487548828 -7,106.73797607421875 -8,100.86160278320312 -9,89.05351257324219 -10,72.87061309814453 -11,55.16028594970703 -12,48.40664291381836 -13,41.62840270996094 -14,37.9119758605957 -15,31.78240966796875 -16,30.443073272705078 -17,26.75241470336914 -18,25.112323760986328 -19,19.741058349609375 -20,18.748689651489258 -21,18.156600952148438 -22,19.095684051513672 -23,19.042125701904297 -24,18.854440689086914 -25,18.73776626586914 -26,18.72085189819336 -27,17.471939086914062 -28,17.172468185424805 -29,16.949443817138672 -30,16.83091926574707 -31,16.516645431518555 -32,15.915945053100586 -33,15.693516731262207 -34,14.65993595123291 -35,14.249207496643066 -36,14.894733428955078 -37,14.848982810974121 -38,14.81354808807373 -39,14.758028030395508 -40,14.68623161315918 -41,14.600802421569824 -42,14.500652313232422 -43,14.387354850769043 -44,14.026008605957031 -45,13.65851879119873 -46,13.754552841186523 -47,13.571788787841797 -48,12.95299243927002 -49,9.650556564331055 -50,9.176824569702148 -51,8.945859909057617 -52,8.764569282531738 -53,8.579936027526855 -54,8.379670143127441 -55,8.20438289642334 -56,8.064048767089844 -57,7.928339004516602 -58,7.726701736450195 -59,7.5092926025390625 -60,6.910931587219238 -61,6.174147605895996 -62,6.0296525955200195 -63,5.961310863494873 
-64,5.875718593597412 -65,5.752846717834473 -66,5.667123794555664 -67,5.566382884979248 -68,5.450902462005615 -69,5.488308429718018 -70,5.579103469848633 -71,5.6609649658203125 -72,5.595219612121582 -73,5.58275842666626 -74,5.510672092437744 -75,5.393791198730469 -76,5.185136318206787 -77,5.154207706451416 -78,5.037581443786621 -79,4.996517181396484 -80,5.061375141143799 -81,5.06773567199707 -82,5.09311056137085 -83,5.201298236846924 -84,5.202146053314209 -85,5.167726039886475 -86,5.129815101623535 -87,5.12885856628418 -88,5.119959354400635 -89,5.0771942138671875 -90,4.914180755615234 -91,4.8762359619140625 -92,4.833711624145508 -93,4.817823886871338 -94,4.8343706130981445 -95,4.829770088195801 -96,4.816397666931152 -97,4.808926105499268 -98,4.79533576965332 -99,4.781705379486084 -100,4.779007911682129 -101,4.774917125701904 -102,4.764187812805176 -103,4.756594181060791 -104,4.7484331130981445 -105,4.731522560119629 -106,4.697985649108887 -107,4.809704780578613 -108,4.656932830810547 -109,4.732325077056885 -110,4.70961856842041 -111,4.67930269241333 -112,4.724748611450195 -113,4.7578959465026855 -114,4.760120868682861 -115,4.771402835845947 -116,4.783640384674072 -117,4.787008285522461 -118,4.777921676635742 -119,4.762223243713379 -120,4.74929666519165 -121,4.741617679595947 -122,4.729865074157715 -123,4.710449695587158 -124,4.690518856048584 -125,4.674312114715576 -126,4.656671524047852 -127,4.634141445159912 -128,4.607158184051514 -129,4.577099323272705 -130,4.546130180358887 -131,4.563267230987549 -132,4.556015491485596 -133,4.538921356201172 -134,4.52142333984375 -135,4.506866455078125 -136,4.492595672607422 -137,4.423681259155273 -138,4.471949577331543 -139,4.564488410949707 -140,4.621625900268555 -141,4.646807670593262 -142,4.652163505554199 -143,4.634771823883057 -144,4.559269428253174 -145,4.524204730987549 -146,4.526828765869141 -147,4.445384979248047 -148,4.34138298034668 -149,4.4172563552856445 -150,4.390066623687744 -151,4.425764560699463 -152,4.448962688446045 -153,4.454751968383789 -154,4.4572954177856445 -155,4.460448741912842 -156,4.460853099822998 -157,4.455862998962402 -158,4.446897029876709 -159,4.436208248138428 -160,4.424838542938232 -161,4.412018775939941 -162,4.396277904510498 -163,4.37742280960083 -164,4.356561183929443 -165,4.334677696228027 -166,4.311337471008301 -167,4.285549163818359 -168,4.257322311401367 -169,4.227550983428955 -170,4.197096824645996 -171,4.145688533782959 -172,4.177540302276611 -173,4.124789237976074 -174,4.177228927612305 -175,4.251550197601318 -176,4.323065280914307 -177,4.370996952056885 -178,4.383293151855469 -179,4.3851318359375 -180,4.378629207611084 -181,4.36619234085083 -182,4.352514743804932 -183,4.340595245361328 -184,4.299007415771484 -185,4.2211833000183105 -186,4.249873638153076 -187,4.234008312225342 -188,4.245405673980713 -189,4.271615028381348 -190,4.2722578048706055 -191,4.256445407867432 -192,4.225311756134033 -193,4.180753707885742 -194,4.179703235626221 -195,4.137465000152588 -196,4.118870735168457 -197,4.11580753326416 -198,4.061666965484619 -199,4.062780380249023 -200,3.984475612640381 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.040/training_config.txt deleted file mode 100644 index 865747b..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: 
/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.040/training_log.csv deleted file mode 100644 index dfcfe57..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,591.5038452148438 -2,196.16604614257812 -3,125.8370361328125 -4,108.30062866210938 -5,75.8844223022461 -6,53.050018310546875 -7,44.288631439208984 -8,37.50896453857422 -9,33.266258239746094 -10,33.86820983886719 -11,32.27333450317383 -12,33.11680221557617 -13,34.19162368774414 -14,34.1772575378418 -15,32.260467529296875 -16,31.462488174438477 -17,29.211597442626953 -18,27.752552032470703 -19,27.10386085510254 -20,26.2418212890625 -21,26.36096954345703 -22,27.178312301635742 -23,29.392169952392578 -24,29.525684356689453 -25,29.686050415039062 -26,29.751243591308594 -27,29.57217025756836 -28,29.306671142578125 -29,29.10737419128418 -30,28.96560287475586 -31,28.80039405822754 -32,28.466840744018555 -33,27.881357192993164 -34,26.798349380493164 -35,25.64693832397461 -36,25.28209686279297 -37,24.940988540649414 -38,24.647260665893555 -39,24.30303955078125 -40,24.005659103393555 -41,24.039104461669922 -42,23.493877410888672 -43,24.094161987304688 -44,23.988567352294922 -45,22.985708236694336 
-46,22.87265968322754 -47,22.517385482788086 -48,23.018888473510742 -49,21.801591873168945 -50,21.87999725341797 -51,21.457813262939453 -52,20.959638595581055 -53,20.721580505371094 -54,19.674911499023438 -55,17.26120948791504 -56,16.394804000854492 -57,12.508511543273926 -58,13.020453453063965 -59,13.237897872924805 -60,12.662548065185547 -61,11.19826602935791 -62,10.043692588806152 -63,9.96965217590332 -64,9.956052780151367 -65,9.770018577575684 -66,9.610156059265137 -67,9.304797172546387 -68,8.869871139526367 -69,8.501099586486816 -70,8.389547348022461 -71,8.105551719665527 -72,7.895552635192871 -73,7.729948997497559 -74,8.290863990783691 -75,7.526892185211182 -76,7.474808216094971 -77,7.319241523742676 -78,7.118215084075928 -79,7.079657554626465 -80,6.962856769561768 -81,6.781680583953857 -82,6.710170269012451 -83,6.684508323669434 -84,6.629391193389893 -85,6.500277519226074 -86,6.332382678985596 -87,6.225831508636475 -88,6.136198043823242 -89,6.04653263092041 -90,5.952428817749023 -91,5.8519287109375 -92,5.736664772033691 -93,5.915164947509766 -94,5.2595133781433105 -95,5.493518352508545 -96,5.738862991333008 -97,5.936253547668457 -98,5.931519508361816 -99,5.739525318145752 -100,5.564004421234131 -101,5.302282810211182 -102,5.178161144256592 -103,5.171682357788086 -104,5.19559907913208 -105,5.130191326141357 -106,5.050915718078613 -107,5.116981506347656 -108,4.94829797744751 -109,4.9179792404174805 -110,4.79808235168457 -111,4.83597993850708 -112,4.965025424957275 -113,4.9669671058654785 -114,4.900548934936523 -115,4.758577346801758 -116,4.654937267303467 -117,4.632597923278809 -118,4.603660583496094 -119,4.55535888671875 -120,4.526725769042969 -121,4.491343975067139 -122,4.443153381347656 -123,4.389068126678467 -124,4.3298115730285645 -125,4.702991485595703 -126,4.3837785720825195 -127,4.439591407775879 -128,4.4082932472229 -129,4.681153297424316 -130,4.8128204345703125 -131,4.683135986328125 -132,4.557356357574463 -133,4.373788833618164 -134,4.379117012023926 -135,4.404671669006348 -136,4.417639255523682 -137,4.433757781982422 -138,4.43303918838501 -139,4.403845310211182 -140,4.350422382354736 -141,4.286284446716309 -142,4.226678371429443 -143,4.181774139404297 -144,4.148751735687256 -145,4.117979526519775 -146,4.075359344482422 -147,4.037959098815918 -148,4.027650356292725 -149,4.010444641113281 -150,3.980997085571289 -151,3.944582462310791 -152,3.9130852222442627 -153,3.8949804306030273 -154,3.866150140762329 -155,3.84700608253479 -156,3.8121771812438965 -157,3.7918477058410645 -158,3.769448757171631 -159,3.753225564956665 -160,3.735077142715454 -161,3.7102458477020264 -162,3.6796228885650635 -163,3.6478431224823 -164,3.6495723724365234 -165,3.6326146125793457 -166,3.6269943714141846 -167,3.6146857738494873 -168,3.59983229637146 -169,3.5786640644073486 -170,3.5482237339019775 -171,3.526332139968872 -172,3.507338523864746 -173,3.478987216949463 -174,3.442192554473877 -175,3.4111227989196777 -176,3.416905164718628 -177,3.386936902999878 -178,3.37105393409729 -179,3.385917901992798 -180,3.3435306549072266 -181,3.6310336589813232 -182,3.442049980163574 -183,3.6571364402770996 -184,3.8395493030548096 -185,3.96769380569458 -186,4.109410285949707 -187,3.606619358062744 -188,3.3850231170654297 -189,5.5692973136901855 -190,6.344990253448486 -191,5.36829948425293 -192,4.832769870758057 -193,5.08350133895874 -194,5.14008903503418 -195,5.006896495819092 -196,5.143954753875732 -197,5.30037784576416 -198,5.266613483428955 -199,5.173099517822266 -200,4.983301639556885 diff --git 
a/training/time_weighting_learning_rate_sweep/square_root/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.050/training_config.txt deleted file mode 100644 index 1007fa3..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.050/training_log.csv deleted file mode 100644 index ab711c5..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,538.5786743164062 -2,169.76889038085938 -3,97.7621841430664 -4,62.57284164428711 -5,33.908538818359375 -6,25.959423065185547 -7,18.05864906311035 -8,13.821708679199219 -9,11.930595397949219 -10,10.870644569396973 -11,12.448434829711914 -12,12.620357513427734 -13,13.293448448181152 -14,13.824161529541016 -15,14.019102096557617 -16,14.058930397033691 -17,14.126790046691895 -18,14.048988342285156 -19,13.976696014404297 -20,13.849432945251465 -21,13.675448417663574 -22,13.467289924621582 -23,13.23876953125 -24,12.999126434326172 -25,12.755122184753418 -26,12.51266860961914 -27,11.886329650878906 -28,11.601188659667969 -29,11.141949653625488 
-30,9.836174964904785 -31,9.685133934020996 -32,9.571866035461426 -33,9.396143913269043 -34,8.741096496582031 -35,5.6631340980529785 -36,5.524991512298584 -37,5.263748645782471 -38,4.78883695602417 -39,3.625065803527832 -40,3.530364751815796 -41,3.416199207305908 -42,3.2942190170288086 -43,3.021721124649048 -44,2.910646915435791 -45,2.7995171546936035 -46,2.68731951713562 -47,2.5749635696411133 -48,2.4604594707489014 -49,2.340319871902466 -50,2.208909749984741 -51,2.0644211769104004 -52,1.934653401374817 -53,1.8779147863388062 -54,1.868827223777771 -55,1.8513436317443848 -56,1.8135682344436646 -57,1.7540305852890015 -58,1.674786925315857 -59,1.5891894102096558 -60,1.464918851852417 -61,1.4432106018066406 -62,1.418391466140747 -63,1.3954248428344727 -64,1.3830039501190186 -65,1.3738436698913574 -66,1.3583807945251465 -67,1.3347288370132446 -68,1.3049110174179077 -69,1.270324945449829 -70,1.2353490591049194 -71,1.2042980194091797 -72,1.1774436235427856 -73,1.148813009262085 -74,1.1006267070770264 -75,1.0962785482406616 -76,1.1124999523162842 -77,1.0943900346755981 -78,1.0585037469863892 -79,1.007815957069397 -80,1.0301753282546997 -81,1.052006483078003 -82,1.045793890953064 -83,1.0142805576324463 -84,0.985542893409729 -85,0.9973412752151489 -86,0.9975042343139648 -87,0.9851521849632263 -88,0.9594026803970337 -89,0.9524090886116028 -90,0.962571382522583 -91,0.9606706500053406 -92,0.947117030620575 -93,0.9313680529594421 -94,0.9370116591453552 -95,0.9350219964981079 -96,0.9173501133918762 -97,0.9132592082023621 -98,0.9163624048233032 -99,0.9084111452102661 -100,0.8981450796127319 -101,0.902094304561615 -102,0.8976831436157227 -103,0.8871347904205322 -104,0.8879586458206177 -105,0.8846208453178406 -106,0.8754429221153259 -107,0.8755844235420227 -108,0.8728501200675964 -109,0.8657689690589905 -110,0.866111159324646 -111,0.8626345992088318 -112,0.8568440079689026 -113,0.8564632534980774 -114,0.8520569801330566 -115,0.8484014272689819 -116,0.8474389314651489 -117,0.8432751297950745 -118,0.8408186435699463 -119,0.8393150568008423 -120,0.8354101777076721 -121,0.8339513540267944 -122,0.8315384387969971 -123,0.8283336162567139 -124,0.8269376158714294 -125,0.8243138194084167 -126,0.8219829797744751 -127,0.8204445242881775 -128,0.8179077506065369 -129,0.815794825553894 -130,0.8140674829483032 -131,0.8115335702896118 -132,0.8096601366996765 -133,0.8077496290206909 -134,0.8054529428482056 -135,0.8037051558494568 -136,0.8017276525497437 -137,0.7996294498443604 -138,0.7979190945625305 -139,0.7958881258964539 -140,0.7940102815628052 -141,0.7922508716583252 -142,0.7902394533157349 -143,0.7885405421257019 -144,0.7865750789642334 -145,0.7849009037017822 -146,0.7829669713973999 -147,0.7813249230384827 -148,0.7795589566230774 -149,0.778006374835968 -150,0.7763960361480713 -151,0.7749246954917908 -152,0.7734242081642151 -153,0.7720468044281006 -154,0.7706437110900879 -155,0.7693923115730286 -156,0.7681095600128174 -157,0.7669503092765808 -158,0.7658187747001648 -159,0.7647286653518677 -160,0.7637260556221008 -161,0.7627288699150085 -162,0.7617952823638916 -163,0.7609010934829712 -164,0.7600170373916626 -165,0.7591870427131653 -166,0.7583716511726379 -167,0.7575750350952148 -168,0.7568135857582092 -169,0.7560535073280334 -170,0.7553141117095947 -171,0.7545934915542603 -172,0.7538726925849915 -173,0.7531684041023254 -174,0.7524725794792175 -175,0.7517779469490051 -176,0.7510964274406433 -177,0.7504202723503113 -178,0.7497467994689941 -179,0.7490843534469604 -180,0.7484259605407715 -181,0.7477710247039795 
-182,0.7471244931221008 -183,0.7464814186096191 -184,0.745843768119812 -185,0.7452144026756287 -186,0.7445897459983826 -187,0.7439699172973633 -188,0.7433578372001648 -189,0.7427506446838379 -190,0.7421488761901855 -191,0.7415544986724854 -192,0.7409653067588806 -193,0.7403817176818848 -194,0.7398062348365784 -195,0.7392361164093018 -196,0.7386708855628967 -197,0.7381131649017334 -198,0.7375606298446655 -199,0.7370139956474304 -200,0.7364771962165833 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.080/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.080/training_config.txt deleted file mode 100644 index 18062c2..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.080/training_log.csv deleted file mode 100644 index 1869cac..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,386.1799011230469 -2,153.2283477783203 -3,88.06382751464844 -4,47.77291488647461 -5,35.112918853759766 -6,29.09575843811035 -7,42.03937911987305 -8,37.31454086303711 
-9,31.53988265991211 -10,22.312070846557617 -11,14.146759986877441 -12,15.075289726257324 -13,13.751482963562012 -14,10.044265747070312 -15,9.446763038635254 -16,8.895675659179688 -17,8.517751693725586 -18,8.22031021118164 -19,7.672484397888184 -20,6.058712482452393 -21,5.798981189727783 -22,5.560037136077881 -23,5.312303066253662 -24,5.052734851837158 -25,4.783409118652344 -26,4.496369361877441 -27,4.187773704528809 -28,4.018595218658447 -29,3.8162965774536133 -30,3.694807529449463 -31,3.565812587738037 -32,3.4402904510498047 -33,3.3199524879455566 -34,3.2030138969421387 -35,3.090270519256592 -36,2.982895612716675 -37,2.8820087909698486 -38,2.7878060340881348 -39,2.699531316757202 -40,2.616874933242798 -41,2.5391385555267334 -42,2.4653501510620117 -43,2.3949947357177734 -44,2.3276207447052 -45,2.262878656387329 -46,2.2002451419830322 -47,2.1392011642456055 -48,2.0794215202331543 -49,2.0204174518585205 -50,1.9616354703903198 -51,1.9026938676834106 -52,1.8421677350997925 -53,1.7744535207748413 -54,1.7111523151397705 -55,1.6869769096374512 -56,1.6416257619857788 -57,1.5500051975250244 -58,1.5778775215148926 -59,1.559714436531067 -60,1.5328540802001953 -61,1.4991129636764526 -62,1.4587138891220093 -63,1.4171885251998901 -64,1.3985882997512817 -65,1.3877580165863037 -66,1.366112470626831 -67,1.3329496383666992 -68,1.2925961017608643 -69,1.2602089643478394 -70,1.2408136129379272 -71,1.2185050249099731 -72,1.191644310951233 -73,1.162745475769043 -74,1.1359833478927612 -75,1.1101924180984497 -76,1.085028052330017 -77,1.0946629047393799 -78,1.0434467792510986 -79,1.043411135673523 -80,1.0356172323226929 -81,1.0166758298873901 -82,0.9770859479904175 -83,1.017087459564209 -84,1.007568597793579 -85,0.9416054487228394 -86,0.970000684261322 -87,0.9728701114654541 -88,0.9515090584754944 -89,0.9239073991775513 -90,0.915893018245697 -91,0.9285019636154175 -92,0.9269923567771912 -93,0.8973869681358337 -94,0.9429271817207336 -95,0.9175421595573425 -96,0.9230359792709351 -97,0.9467599391937256 -98,0.9509148001670837 -99,0.9428841471672058 -100,0.921870768070221 -101,0.8881616592407227 -102,0.9561041593551636 -103,0.9151821732521057 -104,0.9133405685424805 -105,0.9466347694396973 -106,0.9619060754776001 -107,0.9659518003463745 -108,0.9607089757919312 -109,0.946609616279602 -110,0.9230477809906006 -111,0.8908901810646057 -112,0.8922721147537231 -113,0.9285843372344971 -114,0.8742300868034363 -115,0.8851419687271118 -116,0.8961707949638367 -117,0.8933148980140686 -118,0.8758135437965393 -119,0.8509334325790405 -120,0.8974928259849548 -121,0.8442395925521851 -122,0.859916090965271 -123,0.8668872714042664 -124,0.8514831066131592 -125,0.8385570645332336 -126,0.8420816659927368 -127,0.8407065868377686 -128,0.8469036817550659 -129,0.8293426036834717 -130,0.8480912446975708 -131,0.8243510127067566 -132,0.8366276621818542 -133,0.8375303745269775 -134,0.8191685080528259 -135,0.8445504307746887 -136,0.8163480758666992 -137,0.8275133967399597 -138,0.8219565749168396 -139,0.8169018030166626 -140,0.815963089466095 -141,0.8167125582695007 -142,0.8171017169952393 -143,0.8090347051620483 -144,0.8162769675254822 -145,0.8107043504714966 -146,0.8137211203575134 -147,0.8036954998970032 -148,0.8148396611213684 -149,0.8045507073402405 -150,0.808903694152832 -151,0.7978589534759521 -152,0.8104634284973145 -153,0.7981459498405457 -154,0.8027852177619934 -155,0.790733814239502 -156,0.8048927783966064 -157,0.7922109365463257 -158,0.797244131565094 -159,0.7829206585884094 -160,0.8019495010375977 -161,0.7872624397277832 
-162,0.7949832081794739 -163,0.7772564888000488 -164,0.8040165305137634 -165,0.7807421684265137 -166,0.7920946478843689 -167,0.7747650146484375 -168,0.7955770492553711 -169,0.7713938355445862 -170,0.7804615497589111 -171,0.7653874158859253 -172,0.7887543439865112 -173,0.7707036733627319 -174,0.7806915044784546 -175,0.7630696892738342 -176,0.7889072895050049 -177,0.7639232873916626 -178,0.7749977707862854 -179,0.7596156597137451 -180,0.7799634337425232 -181,0.7587205171585083 -182,0.7679346799850464 -183,0.7550475597381592 -184,0.7727446556091309 -185,0.7566660046577454 -186,0.764670729637146 -187,0.7524145841598511 -188,0.7690910696983337 -189,0.7543812394142151 -190,0.7621438503265381 -191,0.7507357001304626 -192,0.7647091746330261 -193,0.7506689429283142 -194,0.7576397657394409 -195,0.7488349676132202 -196,0.7574329972267151 -197,0.7469272017478943 -198,0.7517327666282654 -199,0.7464093565940857 -200,0.7504549026489258 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.100/training_config.txt deleted file mode 100644 index b528847..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git 
a/training/time_weighting_learning_rate_sweep/square_root/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.100/training_log.csv deleted file mode 100644 index 7f433f1..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,332.523193359375 -2,277.6114196777344 -3,171.2450408935547 -4,56.09416961669922 -5,22.086387634277344 -6,11.885981559753418 -7,8.755386352539062 -8,7.167656898498535 -9,5.9845147132873535 -10,6.31439208984375 -11,5.945330619812012 -12,5.316413402557373 -13,4.610931873321533 -14,4.330792427062988 -15,4.105447292327881 -16,4.038171768188477 -17,3.8754055500030518 -18,3.653285264968872 -19,3.4961371421813965 -20,3.352522611618042 -21,3.171970844268799 -22,3.0388362407684326 -23,2.84688138961792 -24,2.8127620220184326 -25,2.734623670578003 -26,2.6522114276885986 -27,2.5695831775665283 -28,2.48711895942688 -29,2.404973268508911 -30,2.323153495788574 -31,2.241389274597168 -32,2.159329414367676 -33,2.07623291015625 -34,1.9903806447982788 -35,1.89859139919281 -36,1.7988215684890747 -37,1.7011055946350098 -38,1.6385103464126587 -39,1.6294294595718384 -40,1.6285864114761353 -41,1.616818904876709 -42,1.5940148830413818 -43,1.5631160736083984 -44,1.5274370908737183 -45,1.4935526847839355 -46,1.4698699712753296 -47,1.4572726488113403 -48,1.4510260820388794 -49,1.4466404914855957 -50,1.4413011074066162 -51,1.4337972402572632 -52,1.4239580631256104 -53,1.4122374057769775 -54,1.3994430303573608 -55,1.3865077495574951 -56,1.374298334121704 -57,1.36339271068573 -58,1.3539777994155884 -59,1.3458868265151978 -60,1.3386759757995605 -61,1.3317776918411255 -62,1.3247116804122925 -63,1.3171913623809814 -64,1.3091672658920288 -65,1.3007874488830566 -66,1.292306661605835 -67,1.2840352058410645 -68,1.276208758354187 -69,1.2689558267593384 -70,1.262277364730835 -71,1.2560713291168213 -72,1.250186562538147 -73,1.2444485425949097 -74,1.2387280464172363 -75,1.2329515218734741 -76,1.227097988128662 -77,1.2211925983428955 -78,1.2152953147888184 -79,1.209477186203003 -80,1.203800916671753 -81,1.1982933282852173 -82,1.1929422616958618 -83,1.1877108812332153 -84,1.182548999786377 -85,1.177401065826416 -86,1.1722387075424194 -87,1.1670427322387695 -88,1.1618232727050781 -89,1.1566046476364136 -90,1.1514191627502441 -91,1.146304726600647 -92,1.1412733793258667 -93,1.1363317966461182 -94,1.1314698457717896 -95,1.1266757249832153 -96,1.1219276189804077 -97,1.117212176322937 -98,1.112531304359436 -99,1.107883095741272 -100,1.1032682657241821 -101,1.0986952781677246 -102,1.0941699743270874 -103,1.0896974802017212 -104,1.0852795839309692 -105,1.0809122323989868 -106,1.0765846967697144 -107,1.072295904159546 -108,1.0680351257324219 -109,1.0637991428375244 -110,1.0595911741256714 -111,1.055411458015442 -112,1.0512651205062866 -113,1.0471510887145996 -114,1.0430763959884644 -115,1.0390355587005615 -116,1.0350295305252075 -117,1.0310559272766113 -118,1.027115821838379 -119,1.0232073068618774 -120,1.0193248987197876 -121,1.0154755115509033 -122,1.011656641960144 -123,1.0078693628311157 -124,1.0041136741638184 -125,1.0003912448883057 -126,0.9967024326324463 -127,0.9930440187454224 -128,0.9894164800643921 -129,0.9858174920082092 -130,0.98224937915802 -131,0.9787096977233887 -132,0.9752034544944763 -133,0.9717262983322144 -134,0.968281090259552 -135,0.9648640155792236 -136,0.9614758491516113 -137,0.9581148624420166 -138,0.95478355884552 -139,0.9514777064323425 
-140,0.9482007026672363 -141,0.9449508786201477 -142,0.9417269825935364 -143,0.9385295510292053 -144,0.9353615045547485 -145,0.932223379611969 -146,0.9291139245033264 -147,0.9260308742523193 -148,0.9229755997657776 -149,0.9199470281600952 -150,0.9169475436210632 -151,0.9139748811721802 -152,0.9110283851623535 -153,0.9081090092658997 -154,0.9052146673202515 -155,0.9023462533950806 -156,0.8995052576065063 -157,0.8966877460479736 -158,0.893897294998169 -159,0.8911302089691162 -160,0.8883870840072632 -161,0.8856672644615173 -162,0.8829717636108398 -163,0.8802983164787292 -164,0.8776479959487915 -165,0.8750186562538147 -166,0.8724127411842346 -167,0.8698272109031677 -168,0.8672634959220886 -169,0.8647199273109436 -170,0.8621995449066162 -171,0.8596993088722229 -172,0.8572195768356323 -173,0.8547597527503967 -174,0.8523221611976624 -175,0.8499016165733337 -176,0.8474986553192139 -177,0.8451125025749207 -178,0.8427467942237854 -179,0.8403998613357544 -180,0.8380727767944336 -181,0.835764467716217 -182,0.8334769010543823 -183,0.831207811832428 -184,0.8289576768875122 -185,0.8267275094985962 -186,0.824516773223877 -187,0.8223260045051575 -188,0.8201525211334229 -189,0.817997395992279 -190,0.8158582448959351 -191,0.8137357831001282 -192,0.8116315603256226 -193,0.8095468878746033 -194,0.8074775338172913 -195,0.805424690246582 -196,0.8033891916275024 -197,0.801372230052948 -198,0.7993710041046143 -199,0.7973867058753967 -200,0.795417070388794 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.125/training_config.txt deleted file mode 100644 index 1272961..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, 
-3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.125/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.125/training_log.csv deleted file mode 100644 index 1158f69..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,316.4949645996094 -2,71.09961700439453 -3,139.52780151367188 -4,27.530668258666992 -5,16.1748046875 -6,14.732619285583496 -7,14.176219940185547 -8,13.390607833862305 -9,12.387110710144043 -10,11.151179313659668 -11,9.749495506286621 -12,7.293849945068359 -13,5.47412633895874 -14,4.688145637512207 -15,4.183182716369629 -16,3.822493553161621 -17,3.479017496109009 -18,3.113290548324585 -19,2.773768424987793 -20,2.509394884109497 -21,2.3113863468170166 -22,2.1385858058929443 -23,1.9796594381332397 -24,1.8315287828445435 -25,1.6948692798614502 -26,1.5732338428497314 -27,1.4680585861206055 -28,1.3822979927062988 -29,1.315976619720459 -30,1.2655291557312012 -31,1.2284713983535767 -32,1.2008792161941528 -33,1.1789764165878296 -34,1.1601263284683228 -35,1.1441682577133179 -36,1.1303703784942627 -37,1.1185795068740845 -38,1.1084915399551392 -39,1.0999385118484497 -40,1.0928882360458374 -41,1.0872769355773926 -42,1.0830663442611694 -43,1.0801634788513184 -44,1.078397512435913 -45,1.077571988105774 -46,1.077405571937561 -47,1.077551007270813 -48,1.0776643753051758 -49,1.0774527788162231 -50,1.0767027139663696 -51,1.0752966403961182 -52,1.07318913936615 -53,1.0704028606414795 -54,1.0670065879821777 -55,1.0631195306777954 -56,1.0588834285736084 -57,1.0544472932815552 -58,1.0499478578567505 -59,1.0454975366592407 -60,1.041176676750183 -61,1.037029504776001 -62,1.03307044506073 -63,1.0292881727218628 -64,1.0256586074829102 -65,1.022151231765747 -66,1.0187309980392456 -67,1.0153753757476807 -68,1.0120742321014404 -69,1.0088189840316772 -70,1.0056180953979492 -71,1.0024842023849487 -72,0.9994326829910278 -73,0.996487021446228 -74,0.9936482310295105 -75,0.9909240007400513 -76,0.9883119463920593 -77,0.9858056902885437 -78,0.9833899140357971 -79,0.981047511100769 -80,0.9787601232528687 -81,0.9765076637268066 -82,0.9742780327796936 -83,0.9720594882965088 -84,0.9698535203933716 -85,0.967647135257721 -86,0.9654411673545837 -87,0.9632358551025391 -88,0.9610382914543152 -89,0.9588506817817688 -90,0.9566733241081238 -91,0.9545105695724487 -92,0.9523590207099915 -93,0.950218677520752 -94,0.9480896592140198 -95,0.945968747138977 -96,0.9438529014587402 -97,0.9417473077774048 -98,0.9396470785140991 -99,0.9375532865524292 -100,0.9354680180549622 -101,0.9333889484405518 -102,0.9313210844993591 -103,0.9292623996734619 -104,0.9272117614746094 -105,0.9251736402511597 -106,0.923147439956665 -107,0.9211309552192688 -108,0.9191268682479858 -109,0.9171333312988281 -110,0.9151516556739807 -111,0.9131806492805481 -112,0.9112198352813721 -113,0.909271240234375 -114,0.9073319435119629 -115,0.9054029583930969 -116,0.9034823179244995 -117,0.9015708565711975 -118,0.8996699452400208 -119,0.897779643535614 -120,0.8958981037139893 
-121,0.8940293192863464 -122,0.892167329788208 -123,0.8903158903121948 -124,0.888471782207489 -125,0.8866375088691711 -126,0.884811282157898 -127,0.8829955458641052 -128,0.8811860680580139 -129,0.8793872594833374 -130,0.8775938153266907 -131,0.8758102059364319 -132,0.8740330934524536 -133,0.8722652792930603 -134,0.8705060482025146 -135,0.8687548637390137 -136,0.867011547088623 -137,0.8652780652046204 -138,0.8635518550872803 -139,0.8618339896202087 -140,0.8601266145706177 -141,0.8584274053573608 -142,0.8567379117012024 -143,0.855055570602417 -144,0.8533837199211121 -145,0.8517215251922607 -146,0.8500674962997437 -147,0.8484227657318115 -148,0.846786379814148 -149,0.8451572060585022 -150,0.8435385227203369 -151,0.8419287800788879 -152,0.8403293490409851 -153,0.8387393355369568 -154,0.8371562361717224 -155,0.8355827331542969 -156,0.8340173363685608 -157,0.8324611783027649 -158,0.8309144973754883 -159,0.8293768763542175 -160,0.8278489708900452 -161,0.8263286352157593 -162,0.8248172998428345 -163,0.8233155012130737 -164,0.8218214511871338 -165,0.8203375339508057 -166,0.8188597559928894 -167,0.817391574382782 -168,0.8159299492835999 -169,0.8144770860671997 -170,0.8130321502685547 -171,0.8115954399108887 -172,0.8101672530174255 -173,0.8087461590766907 -174,0.8073340654373169 -175,0.8059252500534058 -176,0.8045139312744141 -177,0.8031025528907776 -178,0.8016928434371948 -179,0.8002842664718628 -180,0.7988758087158203 -181,0.797468900680542 -182,0.7960611581802368 -183,0.7946512699127197 -184,0.7932417392730713 -185,0.7918312549591064 -186,0.7904215455055237 -187,0.789009153842926 -188,0.7875980734825134 -189,0.7861852645874023 -190,0.7847726345062256 -191,0.7833595275878906 -192,0.7819501757621765 -193,0.7805399298667908 -194,0.7791334986686707 -195,0.7777276039123535 -196,0.7763260006904602 -197,0.7749238014221191 -198,0.7735285758972168 -199,0.7721335887908936 -200,0.7707443237304688 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.160/training_config.txt deleted file mode 100644 index 1edc319..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] 
-[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.160/training_log.csv deleted file mode 100644 index f0147a9..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,335.9512634277344 -2,30.527502059936523 -3,28.199451446533203 -4,22.66502571105957 -5,18.059263229370117 -6,12.430293083190918 -7,9.797639846801758 -8,7.247085094451904 -9,5.682718276977539 -10,4.780050277709961 -11,4.411700248718262 -12,4.178091049194336 -13,3.969977617263794 -14,3.76639986038208 -15,3.558215618133545 -16,3.3386242389678955 -17,3.112138509750366 -18,2.8879356384277344 -19,2.673577070236206 -20,2.4699573516845703 -21,2.2858312129974365 -22,2.1220502853393555 -23,1.9775887727737427 -24,1.8523228168487549 -25,1.7427936792373657 -26,1.6483111381530762 -27,1.5679832696914673 -28,1.5004011392593384 -29,1.4426121711730957 -30,1.39385986328125 -31,1.3530991077423096 -32,1.3172560930252075 -33,1.2843351364135742 -34,1.2536004781723022 -35,1.2254343032836914 -36,1.198063611984253 -37,1.171186923980713 -38,1.1451904773712158 -39,1.1202597618103027 -40,1.096702218055725 -41,1.0748497247695923 -42,1.0549263954162598 -43,1.037066102027893 -44,1.0213910341262817 -45,1.0080403089523315 -46,0.9969237446784973 -47,0.9878203868865967 -48,0.9805881381034851 -49,0.9749135971069336 -50,0.9704961180686951 -51,0.9670442938804626 -52,0.964260995388031 -53,0.961887776851654 -54,0.9596949815750122 -55,0.9575026631355286 -56,0.9551874995231628 -57,0.9526726007461548 -58,0.9499375820159912 -59,0.9469833970069885 -60,0.9438387751579285 -61,0.9405566453933716 -62,0.9371857047080994 -63,0.9337819814682007 -64,0.930378794670105 -65,0.9270028471946716 -66,0.9236634373664856 -67,0.9203620553016663 -68,0.9170932173728943 -69,0.913848876953125 -70,0.9106185436248779 -71,0.907393753528595 -72,0.9041759371757507 -73,0.9009683728218079 -74,0.8977904915809631 -75,0.8946377635002136 -76,0.8915164470672607 -77,0.8884382247924805 -78,0.885416567325592 -79,0.8824590444564819 -80,0.8795759081840515 -81,0.8767724633216858 -82,0.8740473389625549 -83,0.8713968992233276 -84,0.8688094615936279 -85,0.8662724494934082 -86,0.8637820482254028 -87,0.8613348007202148 -88,0.8589197397232056 -89,0.8565291166305542 -90,0.8541582226753235 -91,0.8518038988113403 -92,0.8494635820388794 -93,0.847136914730072 -94,0.8448190093040466 -95,0.8424903154373169 -96,0.8401471972465515 -97,0.8377807140350342 -98,0.8354025483131409 -99,0.8330234289169312 -100,0.8306498527526855 
-101,0.8282814025878906 -102,0.8259180784225464 -103,0.8235639333724976 -104,0.8212176561355591 -105,0.8188785314559937 -106,0.816551685333252 -107,0.8142343759536743 -108,0.8119224905967712 -109,0.8096253275871277 -110,0.8073437213897705 -111,0.8050737977027893 -112,0.8028199672698975 -113,0.8005803227424622 -114,0.7983545660972595 -115,0.7961527705192566 -116,0.793969988822937 -117,0.791800320148468 -118,0.7896536588668823 -119,0.787530779838562 -120,0.7854268550872803 -121,0.7833431363105774 -122,0.7812824249267578 -123,0.7792487740516663 -124,0.7772401571273804 -125,0.775252103805542 -126,0.7733014225959778 -127,0.7713822722434998 -128,0.7694870233535767 -129,0.7676323056221008 -130,0.7658222913742065 -131,0.7640605568885803 -132,0.7623742818832397 -133,0.7607773542404175 -134,0.7592635750770569 -135,0.757834255695343 -136,0.7564853429794312 -137,0.7551953196525574 -138,0.7539564371109009 -139,0.7527691721916199 -140,0.7516316771507263 -141,0.7505384683609009 -142,0.7494897246360779 -143,0.7484858632087708 -144,0.7475172877311707 -145,0.7465866208076477 -146,0.7456892728805542 -147,0.7448240518569946 -148,0.7440011501312256 -149,0.7432065010070801 -150,0.7424320578575134 -151,0.7416829466819763 -152,0.7409521341323853 -153,0.7402488589286804 -154,0.739563524723053 -155,0.7389074563980103 -156,0.7382684946060181 -157,0.7376424074172974 -158,0.7370426654815674 -159,0.7364451885223389 -160,0.735863983631134 -161,0.7353056073188782 -162,0.7347503304481506 -163,0.734201967716217 -164,0.7336734533309937 -165,0.7331516146659851 -166,0.7326316833496094 -167,0.7321118712425232 -168,0.7315967679023743 -169,0.7310863137245178 -170,0.7305806279182434 -171,0.7300773859024048 -172,0.7295765280723572 -173,0.7290768623352051 -174,0.7285778522491455 -175,0.7280799746513367 -176,0.7275828123092651 -177,0.72708660364151 -178,0.7265886664390564 -179,0.7260916233062744 -180,0.7255949378013611 -181,0.7250980138778687 -182,0.724600613117218 -183,0.7241045236587524 -184,0.723609447479248 -185,0.7231157422065735 -186,0.7226250171661377 -187,0.7221357226371765 -188,0.7216479778289795 -189,0.7211629152297974 -190,0.7206809520721436 -191,0.7202003002166748 -192,0.7197218537330627 -193,0.7192459106445312 -194,0.7187716960906982 -195,0.7182992100715637 -196,0.7178301215171814 -197,0.7173619270324707 -198,0.7168949246406555 -199,0.7164314389228821 -200,0.7159696817398071 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.200/training_config.txt deleted file mode 100644 index 94cbf23..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float 
= None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.200/training_log.csv deleted file mode 100644 index e0a84db..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,335.0092468261719 -2,151.54241943359375 -3,10.700976371765137 -4,13.085397720336914 -5,12.77818775177002 -6,12.069238662719727 -7,11.140776634216309 -8,10.275884628295898 -9,9.311700820922852 -10,8.059287071228027 -11,6.898678302764893 -12,6.126998424530029 -13,5.534663200378418 -14,5.0058794021606445 -15,4.391221046447754 -16,3.525318145751953 -17,3.0218451023101807 -18,2.6719038486480713 -19,2.3984129428863525 -20,2.170614719390869 -21,1.9770559072494507 -22,1.8102823495864868 -23,1.6675609350204468 -24,1.5492409467697144 -25,1.4513252973556519 -26,1.370811939239502 -27,1.3057539463043213 -28,1.2547813653945923 -29,1.215281367301941 -30,1.1854201555252075 -31,1.1637417078018188 -32,1.1489492654800415 -33,1.140024185180664 -34,1.135791540145874 -35,1.1353050470352173 -36,1.1375946998596191 -37,1.1417315006256104 -38,1.1467982530593872 -39,1.1519782543182373 -40,1.156601905822754 -41,1.1601803302764893 -42,1.1623095273971558 -43,1.1627386808395386 -44,1.1613500118255615 -45,1.1581553220748901 -46,1.1532702445983887 -47,1.1468998193740845 -48,1.1393095254898071 -49,1.1307975053787231 -50,1.1216751337051392 -51,1.1122454404830933 -52,1.1027865409851074 -53,1.093546986579895 -54,1.0847270488739014 -55,1.07648503780365 -56,1.0689150094985962 -57,1.0620709657669067 -58,1.0559678077697754 -59,1.0505872964859009 -60,1.0458799600601196 -61,1.0417786836624146 -62,1.0382053852081299 -63,1.0350712537765503 -64,1.032289743423462 -65,1.0297765731811523 -66,1.0274534225463867 -67,1.0252552032470703 -68,1.0231257677078247 -69,1.0210171937942505 -70,1.0188950300216675 -71,1.0167362689971924 -72,1.0145294666290283 -73,1.0122673511505127 -74,1.0099518299102783 -75,1.007593035697937 -76,1.0052012205123901 -77,1.0027856826782227 -78,1.0003639459609985 -79,0.9979457855224609 
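
One detail worth flagging in the loss_fn recorded by all of these configs: weights.view(-1, 1) produces shape [t_points, 1], which broadcasts against theta only if state_traj is time-major, i.e. [t_points, batch, state] as returned by solvers such as torchdiffeq.odeint. Under that reading the inline [batch_size, t_points] size comments are transposed. The following is a self-contained sketch under the time-major assumption; the zero placeholder trajectory is illustrative, and the batch size of 22 matches the 22 training cases listed in each config.

import torch

t_points, batch = 1000, 22
t_span = torch.linspace(0.0, 10.0, t_points)
state_traj = torch.zeros(t_points, batch, 4)  # placeholder trajectory, time-major

def square_root(t_span, t_max=None, min_val=0.01):
    t_max = t_max if t_max is not None else t_span[-1]
    return min_val + ((1 - min_val) / t_max ** 0.5) * t_span ** 0.5

theta = state_traj[:, :, 0]          # [t_points, batch]
desired_theta = state_traj[:, :, 3]  # [t_points, batch]
weights = square_root(t_span).view(-1, 1)  # [t_points, 1]: broadcasts over batch;
                                           # with batch-major [batch, t_points] data
                                           # this multiply would raise a size mismatch
loss = torch.mean(weights * (theta - desired_theta) ** 2)
print(loss.item())  # 0.0 for the zero placeholder trajectory
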
-80,0.9955450892448425 -81,0.9931874871253967 -82,0.9908692240715027 -83,0.988596498966217 -84,0.9863652586936951 -85,0.9841738343238831 -86,0.9820191860198975 -87,0.9798948168754578 -88,0.9777971506118774 -89,0.9757185578346252 -90,0.9736531376838684 -91,0.9715969562530518 -92,0.9695424437522888 -93,0.9674888849258423 -94,0.9654310345649719 -95,0.9633693099021912 -96,0.9613067507743835 -97,0.9592412114143372 -98,0.9571792483329773 -99,0.9551203846931458 -100,0.9530674815177917 -101,0.951025128364563 -102,0.9489921927452087 -103,0.9469716548919678 -104,0.9449682831764221 -105,0.9429776072502136 -106,0.9410004615783691 -107,0.9390381574630737 -108,0.937089741230011 -109,0.9351522326469421 -110,0.933226466178894 -111,0.9313091039657593 -112,0.9294007420539856 -113,0.9275025725364685 -114,0.9256085157394409 -115,0.9237229228019714 -116,0.9218419790267944 -117,0.9199658036231995 -118,0.9180949330329895 -119,0.9162268042564392 -120,0.9143667221069336 -121,0.9125107526779175 -122,0.9106634259223938 -123,0.9088233113288879 -124,0.9069900512695312 -125,0.9051644206047058 -126,0.9033454656600952 -127,0.9015358686447144 -128,0.8997327089309692 -129,0.8979365229606628 -130,0.8961501121520996 -131,0.8943722248077393 -132,0.892602801322937 -133,0.8908423185348511 -134,0.889090359210968 -135,0.8873482942581177 -136,0.8856139779090881 -137,0.8838886022567749 -138,0.8821694850921631 -139,0.8804587125778198 -140,0.8787531852722168 -141,0.8770570755004883 -142,0.8753687143325806 -143,0.8736873269081116 -144,0.8720151782035828 -145,0.8703493475914001 -146,0.8686900734901428 -147,0.8670382499694824 -148,0.8653935790061951 -149,0.8637568950653076 -150,0.862126886844635 -151,0.8605062365531921 -152,0.8588892221450806 -153,0.8572812676429749 -154,0.8556766510009766 -155,0.8540807962417603 -156,0.8524923920631409 -157,0.850910484790802 -158,0.849337100982666 -159,0.8477707505226135 -160,0.8462105989456177 -161,0.8446584939956665 -162,0.8431127667427063 -163,0.8415748476982117 -164,0.8400441408157349 -165,0.8385209441184998 -166,0.8370005488395691 -167,0.8354788422584534 -168,0.8339579105377197 -169,0.8324344754219055 -170,0.8309117555618286 -171,0.829386293888092 -172,0.8278608918190002 -173,0.8263349533081055 -174,0.8248098492622375 -175,0.8232907652854919 -176,0.82177734375 -177,0.8202691078186035 -178,0.8187651634216309 -179,0.817263126373291 -180,0.8157670497894287 -181,0.8142717480659485 -182,0.8127834796905518 -183,0.8112963438034058 -184,0.8098142147064209 -185,0.8083334565162659 -186,0.8068584203720093 -187,0.8053843379020691 -188,0.8039146065711975 -189,0.8024483323097229 -190,0.8009835481643677 -191,0.7995215058326721 -192,0.7980630397796631 -193,0.7966069579124451 -194,0.7951552867889404 -195,0.7937052845954895 -196,0.7922583222389221 -197,0.7908133268356323 -198,0.7893741726875305 -199,0.7879366278648376 -200,0.7865046262741089 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.250/training_config.txt deleted file mode 100644 index 2bf40f3..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.250/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.25 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # 
Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.250/training_log.csv deleted file mode 100644 index cbec06b..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.250/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,398.80145263671875 -2,28.78846549987793 -3,48.94115447998047 -4,68.27999114990234 -5,36.714202880859375 -6,24.32845687866211 -7,15.057198524475098 -8,12.053460121154785 -9,10.790151596069336 -10,10.224757194519043 -11,9.868245124816895 -12,9.539421081542969 -13,9.114551544189453 -14,8.599190711975098 -15,8.054933547973633 -16,7.533779144287109 -17,7.056591510772705 -18,6.634969234466553 -19,6.269955158233643 -20,5.956130504608154 -21,5.681754112243652 -22,5.44000768661499 -23,5.224750518798828 -24,5.03078031539917 -25,4.854369163513184 -26,4.691991329193115 -27,4.540895938873291 -28,4.3993659019470215 -29,4.265862941741943 -30,4.139726161956787 -31,4.0187668800354 -32,3.9020028114318848 -33,3.788806200027466 -34,3.678642511367798 -35,3.571173667907715 -36,3.466733932495117 -37,3.364626407623291 -38,3.264988899230957 -39,3.1690216064453125 -40,3.075331687927246 -41,2.983642101287842 -42,2.8938839435577393 -43,2.8060832023620605 -44,2.7201087474823 -45,2.6360812187194824 -46,2.5540082454681396 -47,2.4726409912109375 -48,2.3917152881622314 -49,2.3114655017852783 -50,2.2318992614746094 -51,2.15340518951416 -52,2.076636791229248 -53,2.0013225078582764 -54,1.928083062171936 -55,1.8565359115600586 -56,1.7863892316818237 -57,1.7189491987228394 -58,1.6541107892990112 -59,1.5906535387039185 -60,1.530224084854126 
-61,1.4728829860687256 -62,1.4173098802566528 -63,1.3647712469100952 -64,1.3159528970718384 -65,1.2704617977142334 -66,1.2281650304794312 -67,1.1887853145599365 -68,1.1519317626953125 -69,1.1174355745315552 -70,1.0851213932037354 -71,1.0547266006469727 -72,1.0263615846633911 -73,1.0001941919326782 -74,0.97609943151474 -75,0.9542151093482971 -76,0.9345278739929199 -77,0.9168096780776978 -78,0.9005226492881775 -79,0.8862642049789429 -80,0.8743528723716736 -81,0.8643808364868164 -82,0.8560908436775208 -83,0.8490293025970459 -84,0.8430232405662537 -85,0.8377517461776733 -86,0.8329757452011108 -87,0.8284681439399719 -88,0.8240861892700195 -89,0.8197119235992432 -90,0.8152965903282166 -91,0.8107612729072571 -92,0.8059829473495483 -93,0.8009481430053711 -94,0.7956603169441223 -95,0.7901671528816223 -96,0.7845029234886169 -97,0.7786927223205566 -98,0.7728628516197205 -99,0.7670612335205078 -100,0.7613345980644226 -101,0.7557162046432495 -102,0.7503024935722351 -103,0.745132565498352 -104,0.7402545809745789 -105,0.7355826497077942 -106,0.731144368648529 -107,0.7271042466163635 -108,0.7233886122703552 -109,0.7198360562324524 -110,0.7164641618728638 -111,0.7132512331008911 -112,0.7102106213569641 -113,0.7073574066162109 -114,0.7047706246376038 -115,0.7023504376411438 -116,0.6999624371528625 -117,0.6977208852767944 -118,0.6955795288085938 -119,0.6935480237007141 -120,0.6916558146476746 -121,0.689807653427124 -122,0.6880140900611877 -123,0.686299204826355 -124,0.6846776008605957 -125,0.6831387877464294 -126,0.6816695928573608 -127,0.6802664995193481 -128,0.678935170173645 -129,0.67775559425354 -130,0.676680326461792 -131,0.6756781935691833 -132,0.6747416853904724 -133,0.6738641858100891 -134,0.6730533242225647 -135,0.6723119020462036 -136,0.6716258525848389 -137,0.6709651947021484 -138,0.6703293919563293 -139,0.6697344779968262 -140,0.6691959500312805 -141,0.6686386466026306 -142,0.6680845618247986 -143,0.6675760746002197 -144,0.6670697927474976 -145,0.6665632724761963 -146,0.6660711765289307 -147,0.6655929684638977 -148,0.6651238203048706 -149,0.664681613445282 -150,0.6642518639564514 -151,0.6638525128364563 -152,0.6634779572486877 -153,0.663112998008728 -154,0.6627636551856995 -155,0.6624249219894409 -156,0.6620904207229614 -157,0.6617740392684937 -158,0.6614599823951721 -159,0.6611456274986267 -160,0.6608454585075378 -161,0.6605478525161743 -162,0.6602579951286316 -163,0.6599735617637634 -164,0.6596910357475281 -165,0.6594159007072449 -166,0.65914386510849 -167,0.6588843464851379 -168,0.6586417555809021 -169,0.6584055423736572 -170,0.6581792235374451 -171,0.6579527258872986 -172,0.6577364802360535 -173,0.657518744468689 -174,0.6573062539100647 -175,0.6570990085601807 -176,0.6568955183029175 -177,0.6566910743713379 -178,0.6564890742301941 -179,0.6562927961349487 -180,0.6561076045036316 -181,0.6559115052223206 -182,0.6557169556617737 -183,0.6555345058441162 -184,0.6553536057472229 -185,0.6551691293716431 -186,0.6549844741821289 -187,0.6548036336898804 -188,0.6546416878700256 -189,0.6544657945632935 -190,0.6542781591415405 -191,0.6541115045547485 -192,0.6539437770843506 -193,0.6537767648696899 -194,0.6536096334457397 -195,0.6534441709518433 -196,0.6532793045043945 -197,0.6531191468238831 -198,0.6529602408409119 -199,0.6528052091598511 -200,0.6526519656181335 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.300/training_config.txt deleted file mode 100644 index 5f937ca..0000000 --- 
a/training/time_weighting_learning_rate_sweep/square_root/lr_0.300/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.3 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.300/training_log.csv deleted file mode 100644 index 3eff661..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.300/training_log.csv +++ /dev/null @@ -1,35 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,513.085205078125 -2,27.43408203125 -3,30.42536735534668 -4,30.194875717163086 -5,28.787744522094727 -6,26.747900009155273 -7,24.03295135498047 -8,21.200258255004883 -9,19.01063346862793 -10,17.33818817138672 -11,15.873290061950684 -12,14.625022888183594 -13,13.62175464630127 -14,12.832741737365723 -15,12.230611801147461 -16,11.799936294555664 -17,11.5252046585083 -18,11.375308990478516 -19,11.304755210876465 -20,11.251453399658203 -21,11.159953117370605 -22,11.000463485717773 -23,10.769023895263672 -24,10.48403549194336 -25,10.171374320983887 -26,9.856008529663086 -27,9.560256958007812 -28,9.250208854675293 -29,11.785369873046875 -30,nan -31,nan -32,nan -33,nan diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.400/training_config.txt deleted file mode 
100644 index bfb70d0..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.400/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.400/training_log.csv deleted file mode 100644 index 1255217..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.400/training_log.csv +++ /dev/null @@ -1,18 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,4060.378662109375 -2,33.085716247558594 -3,36.91214370727539 -4,33.73778533935547 -5,40.400142669677734 -6,22.30826187133789 -7,22.49983787536621 -8,22.807147979736328 -9,22.685630798339844 -10,22.29779052734375 -11,21.78571319580078 -12,21.20783042907715 -13,nan -14,nan -15,nan -16,nan diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.500/training_config.txt deleted file mode 100644 index 364b685..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.500/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.5 -Weight Decay: 0 - -Loss Function: - 
def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.500/training_log.csv deleted file mode 100644 index d97721e..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.500/training_log.csv +++ /dev/null @@ -1,16 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,45308.96484375 -2,176048.1875 -3,157038.296875 -4,77062.03125 -5,4471.66650390625 -6,42.77729415893555 -7,28.205411911010742 -8,27.80960464477539 -9,27.591005325317383 -10,35.59136199951172 -11,nan -12,nan -13,nan -14,nan diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.600/training_config.txt deleted file mode 100644 index 532a3d8..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.600/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.6 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: 
[batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.600/training_log.csv deleted file mode 100644 index 49b6063..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.600/training_log.csv +++ /dev/null @@ -1,12 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,100543.7890625 -2,179767.90625 -3,180204.53125 -4,180242.65625 -5,180247.296875 -6,180247.296875 -7,180247.296875 -8,180247.296875 -9,180247.296875 -10,180247.296875 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.700/training_config.txt deleted file mode 100644 index c06445a..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.700/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.7 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * 
t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.700/training_log.csv deleted file mode 100644 index 76ea380..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.700/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,138292.75 -2,180206.421875 -3,180247.296875 -4,180247.296875 -5,180247.296875 -6,180247.296875 -7,180247.296875 -8,180247.296875 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.800/training_config.txt deleted file mode 100644 index 9d079ac..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.800/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, 
-1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.800/training_log.csv deleted file mode 100644 index 03fc32f..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.800/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,156804.171875 -2,180247.296875 -3,180247.296875 -4,180247.296875 -5,180247.296875 -6,180247.296875 -7,180247.296875 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_0.900/training_config.txt deleted file mode 100644 index 1bf58e6..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.900/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.9 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 
12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_0.900/training_log.csv deleted file mode 100644 index 812123f..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_0.900/training_log.csv +++ /dev/null @@ -1,9 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,168813.796875 -2,180247.296875 -3,180247.296875 -4,180247.296875 -5,180247.296875 -6,180247.296875 -7,180247.296875 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_1.000/training_config.txt deleted file mode 100644 index b10c186..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_1.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_1.000/training_log.csv deleted file mode 100644 index 80826c3..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_1.000/training_log.csv +++ /dev/null @@ -1,10 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,174278.984375 -2,180131.53125 -3,6776.32275390625 
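
The square_root sweep logs collected in this diff fall into three regimes: smooth convergence at the smaller learning rates, divergence to nan within a few tens of epochs (lr 0.3, 0.4, 0.5, 1.0, 2.0), and a loss pinned to a constant plateau (roughly 180247.3 for lr 0.6 through 0.9, roughly 180848.0 for lr 4 through 16). Here is a sketch that classifies runs this way, assuming the lr_<value>/training_log.csv layout used throughout this diff; scan_sweep is a hypothetical helper, not repo code.

import csv, math
from pathlib import Path

def scan_sweep(sweep_dir):
    # Classify each run as diverged (nan), plateaued (loss frozen), or converging.
    for log in sorted(Path(sweep_dir).glob("lr_*/training_log.csv")):
        with log.open() as f:
            losses = [float(row["Loss"]) for row in csv.DictReader(f)]
        if any(math.isnan(x) for x in losses):
            status = "diverged (nan)"
        elif len(losses) >= 3 and len(set(losses[-3:])) == 1:
            status = f"plateaued at {losses[-1]}"
        else:
            status = f"final loss {losses[-1]:.4f}"
        print(log.parent.name, status)

scan_sweep("training/time_weighting_learning_rate_sweep/square_root")
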
-4,39.34663009643555 -5,nan -6,nan -7,nan -8,nan diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_16.000/training_config.txt deleted file mode 100644 index 5dc9a28..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_16.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_16.000/training_log.csv deleted file mode 100644 index 6e77a6a..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_16.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,180847.984375 -2,180847.984375 -3,180847.984375 -4,180847.984375 -5,180847.984375 -6,180847.984375 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_2.000/training_config.txt deleted file mode 100644 index fd936ab..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_2.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, 
Points: 1000 -Learning Rate: 2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_2.000/training_log.csv deleted file mode 100644 index 0f9bbfa..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_2.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,180823.40625 -2,4.7152228355407715 -3,nan -4,nan -5,nan -6,nan diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_4.000/training_config.txt deleted file mode 100644 index c07bba7..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_4.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 4 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - 
desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_4.000/training_log.csv deleted file mode 100644 index 6e77a6a..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_4.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,180847.984375 -2,180847.984375 -3,180847.984375 -4,180847.984375 -5,180847.984375 -6,180847.984375 diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root/lr_8.000/training_config.txt deleted file mode 100644 index 88aff02..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_8.000/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 8 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * t_span**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 
0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root/lr_8.000/training_log.csv deleted file mode 100644 index 6e77a6a..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root/lr_8.000/training_log.csv +++ /dev/null @@ -1,8 +0,0 @@ -Epoch,Loss -0,952.5866088867188 -1,180847.984375 -2,180847.984375 -3,180847.984375 -4,180847.984375 -5,180847.984375 -6,180847.984375 diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.010/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.010/training_config.txt deleted file mode 100644 index ca97f7d..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.010/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.01 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] 
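
The square_root_mirrored weight function introduced at this point is the time-reversal of square_root: weights start at 1.0 at t = 0 and decay to min_val at t_max, so early tracking error dominates the loss rather than late error. That also explains why these runs start from a different epoch-0 loss (about 472.0 versus 952.6 for square_root) despite the same base controller and training cases. A quick standalone check follows, with signatures simplified from the definitions shown in these configs.

import torch

def square_root(t, t_max, min_val=0.01):
    return min_val + ((1 - min_val) / t_max ** 0.5) * t ** 0.5

def square_root_mirrored(t, t_max, min_val=0.01):
    # (-t_span + t_max) in the config is just (t_max - t)
    return min_val + ((1 - min_val) / t_max ** 0.5) * (t_max - t) ** 0.5

t = torch.linspace(0.0, 10.0, 1000, dtype=torch.float64)
w, wm = square_root(t, 10.0), square_root_mirrored(t, 10.0)
print(w[0].item(), w[-1].item())      # 0.01 -> 1.0: ramps up over the horizon
print(wm[0].item(), wm[-1].item())    # 1.0 -> 0.01: decays over the horizon
print(torch.allclose(w, wm.flip(0)))  # True: each is the other reversed in time
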
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.010/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.010/training_log.csv deleted file mode 100644 index 4746214..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.010/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,472.03131103515625 -1,417.44354248046875 -2,356.2298583984375 -3,320.4499206542969 -4,281.71246337890625 -5,236.9629364013672 -6,195.23777770996094 -7,159.06483459472656 -8,136.82614135742188 -9,109.90361022949219 -10,101.93202209472656 -11,86.46979522705078 -12,83.56134033203125 -13,78.52536010742188 -14,74.6343002319336 -15,71.90534210205078 -16,71.43607330322266 -17,63.664039611816406 -18,62.46768569946289 -19,57.16442108154297 -20,50.82235336303711 -21,46.42164611816406 -22,42.83635330200195 -23,40.55123519897461 -24,37.781272888183594 -25,35.376373291015625 -26,32.693302154541016 -27,30.178386688232422 -28,29.122760772705078 -29,27.328567504882812 -30,26.309858322143555 -31,26.138647079467773 -32,24.709928512573242 -33,24.30590057373047 -34,24.480119705200195 -35,23.20392417907715 -36,23.199039459228516 -37,23.36579132080078 -38,23.468463897705078 -39,23.476970672607422 -40,23.444135665893555 -41,23.394926071166992 -42,23.340238571166992 -43,23.28974723815918 -44,23.303922653198242 -45,23.17449951171875 -46,22.962440490722656 -47,22.86463737487793 -48,22.776363372802734 -49,22.691593170166016 -50,22.608524322509766 -51,22.526968002319336 -52,22.449098587036133 -53,22.377384185791016 -54,22.313495635986328 -55,22.257183074951172 -56,22.207000732421875 -57,22.160186767578125 -58,22.110515594482422 -59,21.93588638305664 -60,21.919065475463867 -61,21.89190101623535 -62,21.855527877807617 -63,21.813495635986328 -64,21.7694149017334 -65,21.72593116760254 -66,21.68473243713379 -67,21.645946502685547 -68,21.71541976928711 -69,21.578689575195312 -70,21.522357940673828 -71,21.546703338623047 -72,21.53227996826172 -73,21.503366470336914 -74,21.468002319335938 -75,21.42753791809082 -76,21.381980895996094 -77,21.330007553100586 -78,21.26821517944336 -79,21.17042350769043 -80,21.053421020507812 -81,20.932003021240234 -82,20.616804122924805 -83,20.616621017456055 -84,20.58661460876465 -85,18.03250503540039 -86,17.414548873901367 -87,17.27162742614746 -88,17.15705680847168 -89,17.046932220458984 -90,16.930131912231445 -91,16.805191040039062 -92,16.680274963378906 -93,16.56300926208496 -94,16.423681259155273 -95,16.357830047607422 -96,16.305524826049805 -97,16.24888801574707 -98,16.190837860107422 -99,16.13146209716797 -100,16.070728302001953 -101,16.00857162475586 -102,15.94521713256836 -103,15.880389213562012 -104,15.814112663269043 -105,15.74853801727295 -106,15.69567584991455 -107,15.662550926208496 -108,15.63148307800293 -109,15.598675727844238 -110,15.564839363098145 -111,15.530150413513184 -112,15.494645118713379 -113,15.458438873291016 -114,15.421796798706055 -115,15.384846687316895 -116,15.347696304321289 -117,15.310457229614258 -118,15.273283958435059 
-119,15.23629379272461 -120,15.199531555175781 -121,15.163043022155762 -122,15.126790046691895 -123,15.09078311920166 -124,15.055047035217285 -125,15.019574165344238 -126,14.98437213897705 -127,14.94943904876709 -128,14.914775848388672 -129,14.880356788635254 -130,14.846121788024902 -131,14.811943054199219 -132,14.777750968933105 -133,14.743474006652832 -134,14.708903312683105 -135,14.673869132995605 -136,14.638175010681152 -137,14.601536750793457 -138,14.563677787780762 -139,14.524191856384277 -140,14.481945037841797 -141,14.435291290283203 -142,14.381257057189941 -143,14.312359809875488 -144,14.20585823059082 -145,13.906972885131836 -146,14.055404663085938 -147,13.993156433105469 -148,13.88037395477295 -149,13.875324249267578 -150,13.864439010620117 -151,13.849298477172852 -152,13.8302583694458 -153,13.807801246643066 -154,13.782422065734863 -155,13.754471778869629 -156,13.724139213562012 -157,13.691170692443848 -158,13.654210090637207 -159,13.606847763061523 -160,13.535569190979004 -161,13.557734489440918 -162,13.541640281677246 -163,13.508573532104492 -164,13.431634902954102 -165,13.426639556884766 -166,13.556713104248047 -167,13.509446144104004 -168,13.407920837402344 -169,13.395540237426758 -170,13.380680084228516 -171,13.365177154541016 -172,13.344866752624512 -173,13.297018051147461 -174,13.29455852508545 -175,13.304505348205566 -176,13.293830871582031 -177,13.271093368530273 -178,13.227715492248535 -179,13.238592147827148 -180,13.205022811889648 -181,13.191763877868652 -182,13.167562484741211 -183,13.155982971191406 -184,13.152639389038086 -185,13.134974479675293 -186,13.108119010925293 -187,13.060593605041504 -188,13.048404693603516 -189,13.028077125549316 -190,13.024598121643066 -191,12.990913391113281 -192,12.94953441619873 -193,12.931900978088379 -194,12.902139663696289 -195,12.843152046203613 -196,12.867379188537598 -197,12.850001335144043 -198,12.822518348693848 -199,12.787980079650879 -200,12.746197700500488 diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.020/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.020/training_config.txt deleted file mode 100644 index 3a79589..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.020/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.02 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] 
-[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.020/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.020/training_log.csv deleted file mode 100644 index 5cf427c..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.020/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,472.03131103515625 -1,388.5149230957031 -2,288.32568359375 -3,197.84759521484375 -4,129.6507568359375 -5,102.1158218383789 -6,87.69896697998047 -7,81.23541259765625 -8,76.59988403320312 -9,69.7276840209961 -10,60.000858306884766 -11,54.44560623168945 -12,44.26963424682617 -13,38.94356155395508 -14,32.970279693603516 -15,28.56719970703125 -16,25.42699432373047 -17,23.342700958251953 -18,19.161298751831055 -19,17.896968841552734 -20,17.437898635864258 -21,16.759380340576172 -22,16.11382293701172 -23,15.5103120803833 -24,14.347790718078613 -25,13.283761024475098 -26,11.96332836151123 -27,11.371164321899414 -28,10.261887550354004 -29,9.662712097167969 -30,9.277570724487305 -31,8.84338092803955 -32,8.434415817260742 -33,8.360620498657227 -34,8.35948371887207 -35,8.267024993896484 -36,8.177800178527832 -37,8.014981269836426 -38,8.017026901245117 -39,8.02104377746582 -40,7.997307300567627 -41,7.959289073944092 -42,7.915879249572754 -43,7.869636535644531 -44,7.821484565734863 -45,7.771714687347412 -46,7.720294952392578 -47,7.666778564453125 -48,7.609862804412842 -49,7.544285297393799 -50,7.450257301330566 -51,7.440820217132568 -52,7.397703170776367 -53,7.349554061889648 -54,7.299560546875 -55,7.248414039611816 -56,7.196322917938232 -57,7.143311023712158 -58,7.089328765869141 -59,7.034249782562256 -60,6.9778056144714355 -61,6.919564723968506 -62,6.858902931213379 -63,6.794840335845947 -64,6.725536823272705 -65,6.645544528961182 -66,6.489086151123047 -67,6.430202484130859 -68,6.367949485778809 -69,6.297735691070557 -70,6.213688373565674 -71,6.0945305824279785 -72,5.883443832397461 -73,5.806734561920166 -74,5.78186559677124 -75,5.745685577392578 -76,5.68419075012207 -77,5.684043884277344 -78,5.651706218719482 -79,5.590080738067627 -80,5.63822078704834 -81,5.628061771392822 -82,5.602982997894287 -83,5.5463056564331055 -84,5.554369926452637 -85,5.486206531524658 -86,5.517299175262451 -87,5.489830493927002 -88,5.441673755645752 -89,5.3487629890441895 -90,5.37045431137085 -91,5.35951042175293 -92,5.338001251220703 -93,5.3086371421813965 -94,5.264793395996094 -95,5.236780166625977 -96,5.213849067687988 -97,5.173133373260498 
-98,5.135712146759033 -99,5.1193695068359375 -100,5.094021797180176 -101,5.056421756744385 -102,5.013903617858887 -103,5.004875183105469 -104,4.964243412017822 -105,4.949340343475342 -106,4.924378871917725 -107,4.878110885620117 -108,4.838876247406006 -109,4.8174638748168945 -110,4.750545978546143 -111,4.801477432250977 -112,4.814953327178955 -113,4.8060808181762695 -114,4.773760795593262 -115,4.769498348236084 -116,4.756224632263184 -117,4.731024265289307 -118,4.696333885192871 -119,4.645833969116211 -120,4.5929388999938965 -121,4.559905529022217 -122,4.492279529571533 -123,4.451961517333984 -124,4.398751735687256 -125,4.348456382751465 -126,4.380577087402344 -127,4.38193941116333 -128,4.358918190002441 -129,4.300955772399902 -130,4.344906806945801 -131,4.376981735229492 -132,4.386204242706299 -133,4.369286060333252 -134,4.347282886505127 -135,4.310729503631592 -136,4.281660556793213 -137,4.239060878753662 -138,4.186554908752441 -139,4.1617913246154785 -140,4.127417087554932 -141,4.11793327331543 -142,4.096151351928711 -143,4.046483516693115 -144,4.072659015655518 -145,4.071051120758057 -146,4.040170192718506 -147,3.9814414978027344 -148,3.9934420585632324 -149,3.9523472785949707 -150,3.9368557929992676 -151,3.989248514175415 -152,3.9979307651519775 -153,3.976290225982666 -154,3.947150230407715 -155,3.905409812927246 -156,3.871814727783203 -157,3.8168649673461914 -158,3.8017258644104004 -159,3.8024301528930664 -160,3.7843899726867676 -161,3.7931711673736572 -162,3.772047519683838 -163,3.7222652435302734 -164,3.7033493518829346 -165,3.712002754211426 -166,3.6984097957611084 -167,3.6747450828552246 -168,3.6393654346466064 -169,3.6387429237365723 -170,3.637225866317749 -171,3.618889808654785 -172,3.6003692150115967 -173,3.5785276889801025 -174,3.5512359142303467 -175,3.53637433052063 -176,3.5033576488494873 -177,3.49082350730896 -178,3.4688010215759277 -179,3.440269947052002 -180,3.429558515548706 -181,3.417003631591797 -182,3.394415855407715 -183,3.362882137298584 -184,3.3216960430145264 -185,3.2982919216156006 -186,3.2707858085632324 -187,3.2625362873077393 -188,3.254371166229248 -189,3.2444279193878174 -190,3.2178070545196533 -191,3.184422492980957 -192,3.1861495971679688 -193,3.1609137058258057 -194,3.144387722015381 -195,3.1368110179901123 -196,3.1151466369628906 -197,3.1002869606018066 -198,3.090139150619507 -199,3.065962791442871 -200,3.058790922164917 diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.040/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.040/training_config.txt deleted file mode 100644 index a2447ea..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.040/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.04 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def 
square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.040/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.040/training_log.csv deleted file mode 100644 index 0251d80..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.040/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,472.03131103515625 -1,333.9476318359375 -2,155.03976440429688 -3,84.9561538696289 -4,64.51033782958984 -5,46.74817657470703 -6,36.76055908203125 -7,29.89360237121582 -8,29.684371948242188 -9,23.683612823486328 -10,21.350736618041992 -11,20.67304229736328 -12,20.525436401367188 -13,20.90983009338379 -14,21.05864143371582 -15,20.901620864868164 -16,20.731857299804688 -17,20.427465438842773 -18,20.065685272216797 -19,19.681549072265625 -20,19.45899772644043 -21,19.73203468322754 -22,19.04068946838379 -23,18.896047592163086 -24,18.698383331298828 -25,18.303815841674805 -26,17.619897842407227 -27,17.409809112548828 -28,17.242284774780273 -29,16.993349075317383 -30,16.604412078857422 -31,16.05521011352539 -32,15.587133407592773 -33,15.201038360595703 -34,15.014988899230957 -35,14.809565544128418 -36,14.538385391235352 -37,14.23918342590332 -38,13.992138862609863 -39,13.82217025756836 -40,13.68884563446045 -41,13.539056777954102 -42,13.338607788085938 -43,13.08395767211914 -44,12.810590744018555 -45,12.589780807495117 -46,12.465526580810547 -47,12.358150482177734 -48,12.215189933776855 -49,12.035802841186523 -50,11.872581481933594 -51,11.760162353515625 -52,11.654192924499512 -53,11.511221885681152 -54,11.258962631225586 -55,11.462969779968262 -56,10.977035522460938 -57,10.848877906799316 -58,10.181991577148438 -59,10.24824333190918 -60,10.09183120727539 -61,9.783814430236816 -62,9.421387672424316 -63,9.005825996398926 -64,8.75805377960205 -65,8.53797721862793 -66,8.249222755432129 -67,7.992242336273193 -68,7.8847856521606445 -69,7.59429407119751 -70,6.002155303955078 -71,5.760382652282715 -72,5.534321308135986 -73,5.303043842315674 -74,5.032135009765625 -75,4.726396083831787 
-76,4.396093368530273 -77,4.118173122406006 -78,3.922938108444214 -79,3.341996431350708 -80,3.5385539531707764 -81,3.636686086654663 -82,3.587907552719116 -83,3.4666974544525146 -84,3.355300188064575 -85,3.2311339378356934 -86,3.0928635597229004 -87,3.0840883255004883 -88,3.2243945598602295 -89,3.060483932495117 -90,3.0560333728790283 -91,3.0396714210510254 -92,3.0171473026275635 -93,2.992083787918091 -94,2.9843170642852783 -95,2.983302116394043 -96,2.9749608039855957 -97,2.962059736251831 -98,2.945017099380493 -99,2.9224483966827393 -100,2.8995041847229004 -101,2.8965611457824707 -102,2.859257221221924 -103,2.8198163509368896 -104,2.86720871925354 -105,2.870509386062622 -106,2.9302568435668945 -107,2.949276924133301 -108,2.9505653381347656 -109,2.9583377838134766 -110,2.9471793174743652 -111,2.933460235595703 -112,2.9207229614257812 -113,2.8905444145202637 -114,2.8596391677856445 -115,2.850029230117798 -116,2.835357666015625 -117,2.815476179122925 -118,2.7794744968414307 -119,2.796846628189087 -120,2.8918421268463135 -121,2.933924913406372 -122,2.940732479095459 -123,2.929478645324707 -124,2.896311044692993 -125,2.834937810897827 -126,2.764465093612671 -127,2.7600884437561035 -128,2.7797091007232666 -129,2.786696195602417 -130,2.787468671798706 -131,2.7827305793762207 -132,2.7672691345214844 -133,2.7978954315185547 -134,2.800612688064575 -135,2.7966551780700684 -136,2.789097309112549 -137,2.777519464492798 -138,2.7606351375579834 -139,2.7281157970428467 -140,2.7540042400360107 -141,2.7703537940979004 -142,2.774820327758789 -143,2.771756410598755 -144,2.7615530490875244 -145,2.739053249359131 -146,2.6988842487335205 -147,2.707169771194458 -148,2.7325994968414307 -149,2.745894193649292 -150,2.7517783641815186 -151,2.7510461807250977 -152,2.7398955821990967 -153,2.7613589763641357 -154,2.763636827468872 -155,2.7583439350128174 -156,2.748481273651123 -157,2.7306923866271973 -158,2.701725482940674 -159,2.6946861743927 -160,2.6781375408172607 -161,2.666314125061035 -162,2.6706643104553223 -163,2.652224540710449 -164,2.661486864089966 -165,2.6524453163146973 -166,2.6549456119537354 -167,2.65739369392395 -168,2.6472840309143066 -169,2.654202938079834 -170,2.6515233516693115 -171,2.6424355506896973 -172,2.641402244567871 -173,2.6424858570098877 -174,2.6363775730133057 -175,2.6356723308563232 -176,2.6308977603912354 -177,2.628366231918335 -178,2.6290671825408936 -179,2.6272761821746826 -180,2.6245133876800537 -181,2.622614622116089 -182,2.6222598552703857 -183,2.6216325759887695 -184,2.6188926696777344 -185,2.6175386905670166 -186,2.6177992820739746 -187,2.616044759750366 -188,2.6132445335388184 -189,2.6120567321777344 -190,2.6119678020477295 -191,2.6109209060668945 -192,2.611003875732422 -193,2.609879493713379 -194,2.607459306716919 -195,2.606472969055176 -196,2.6061112880706787 -197,2.60807204246521 -198,2.6081345081329346 -199,2.6019935607910156 -200,2.6042041778564453 diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.050/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.050/training_config.txt deleted file mode 100644 index d6bf225..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.050/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.05 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, 
:, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.050/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.050/training_log.csv deleted file mode 100644 index 55dfc20..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.050/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,472.03131103515625 -1,311.241943359375 -2,118.7367935180664 -3,76.92609405517578 -4,54.529842376708984 -5,36.472049713134766 -6,26.9465274810791 -7,23.795753479003906 -8,20.734407424926758 -9,16.38165855407715 -10,15.501265525817871 -11,15.794343948364258 -12,15.574952125549316 -13,15.423700332641602 -14,15.251497268676758 -15,14.828925132751465 -16,12.59529972076416 -17,11.304582595825195 -18,10.546975135803223 -19,10.01076602935791 -20,9.468347549438477 -21,8.503687858581543 -22,8.07300090789795 -23,7.470166206359863 -24,6.8030595779418945 -25,6.461020469665527 -26,6.183757781982422 -27,5.960231304168701 -28,5.766604423522949 -29,5.587358474731445 -30,5.412132263183594 -31,5.222555160522461 -32,4.98077392578125 -33,4.851628303527832 -34,4.8059210777282715 -35,4.733010292053223 -36,4.6380534172058105 -37,4.5220417976379395 -38,4.379622936248779 -39,4.210934638977051 -40,4.133485794067383 -41,4.120495796203613 -42,4.111120223999023 -43,4.078516483306885 -44,4.025075912475586 -45,3.95341157913208 -46,3.8692691326141357 -47,3.7952880859375 -48,3.745790719985962 -49,3.7017405033111572 -50,3.651315689086914 -51,3.5926320552825928 -52,3.5265262126922607 -53,3.4559457302093506 -54,3.3904500007629395 
-55,3.3394060134887695 -56,3.3000497817993164 -57,3.271043300628662 -58,3.242811679840088 -59,3.208120107650757 -60,3.165616512298584 -61,3.1174821853637695 -62,3.068352222442627 -63,3.024082660675049 -64,2.989363670349121 -65,2.9640841484069824 -66,2.942019462585449 -67,2.919667959213257 -68,2.8964123725891113 -69,2.8724796772003174 -70,2.848897695541382 -71,2.827073097229004 -72,2.8078556060791016 -73,2.7907934188842773 -74,2.774881362915039 -75,2.7597146034240723 -76,2.745368719100952 -77,2.732088088989258 -78,2.7200610637664795 -79,2.708817720413208 -80,2.6980834007263184 -81,2.687577486038208 -82,2.6773436069488525 -83,2.6679272651672363 -84,2.660104513168335 -85,2.653986930847168 -86,2.6490423679351807 -87,2.644538164138794 -88,2.640085458755493 -89,2.6355395317077637 -90,2.6309518814086914 -91,2.626511812210083 -92,2.6224496364593506 -93,2.618828058242798 -94,2.615692377090454 -95,2.612940788269043 -96,2.610339879989624 -97,2.607728958129883 -98,2.605069398880005 -99,2.6024010181427 -100,2.599790573120117 -101,2.5972650051116943 -102,2.5948283672332764 -103,2.592473268508911 -104,2.5901992321014404 -105,2.5880038738250732 -106,2.585906505584717 -107,2.5839340686798096 -108,2.582101821899414 -109,2.5804059505462646 -110,2.578826427459717 -111,2.577298402786255 -112,2.575778007507324 -113,2.574249029159546 -114,2.572711706161499 -115,2.571190118789673 -116,2.569713830947876 -117,2.568293571472168 -118,2.5669236183166504 -119,2.5655927658081055 -120,2.564274787902832 -121,2.562955379486084 -122,2.561633586883545 -123,2.5603177547454834 -124,2.5590274333953857 -125,2.557774782180786 -126,2.556565523147583 -127,2.555400848388672 -128,2.5542795658111572 -129,2.5531952381134033 -130,2.5521419048309326 -131,2.5511202812194824 -132,2.5501320362091064 -133,2.5491721630096436 -134,2.548232316970825 -135,2.5473101139068604 -136,2.5464043617248535 -137,2.54551100730896 -138,2.5446274280548096 -139,2.5437564849853516 -140,2.542897939682007 -141,2.542051315307617 -142,2.541215658187866 -143,2.5403900146484375 -144,2.539576292037964 -145,2.538775682449341 -146,2.5379936695098877 -147,2.537240505218506 -148,2.536508560180664 -149,2.5357930660247803 -150,2.5350890159606934 -151,2.5343925952911377 -152,2.5337045192718506 -153,2.533024787902832 -154,2.5323524475097656 -155,2.5316901206970215 -156,2.5310356616973877 -157,2.5303878784179688 -158,2.529747247695923 -159,2.5291147232055664 -160,2.5284881591796875 -161,2.5278704166412354 -162,2.5272603034973145 -163,2.526658773422241 -164,2.526064395904541 -165,2.5254764556884766 -166,2.524895668029785 -167,2.524320363998413 -168,2.5237503051757812 -169,2.5231854915618896 -170,2.52262544631958 -171,2.522071123123169 -172,2.52152156829834 -173,2.5209763050079346 -174,2.520435094833374 -175,2.5198988914489746 -176,2.5193674564361572 -177,2.51884126663208 -178,2.5183210372924805 -179,2.517805814743042 -180,2.517296552658081 -181,2.516794204711914 -182,2.51629900932312 -183,2.51580810546875 -184,2.515321969985962 -185,2.5148420333862305 -186,2.5143675804138184 -187,2.513896942138672 -188,2.5134308338165283 -189,2.5129706859588623 -190,2.5125174522399902 -191,2.512070417404175 -192,2.511627435684204 -193,2.51119327545166 -194,2.51076602935791 -195,2.5103461742401123 -196,2.5099337100982666 -197,2.509528160095215 -198,2.5091280937194824 -199,2.5087342262268066 -200,2.5083446502685547 diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.080/training_config.txt 
b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.080/training_config.txt deleted file mode 100644 index 0cfa172..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.080/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.08 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.080/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.080/training_log.csv deleted file mode 100644 index cae5810..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.080/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,472.03131103515625 -1,244.3745880126953 -2,81.97740936279297 -3,77.857666015625 -4,45.17182540893555 -5,27.486318588256836 -6,14.79367733001709 -7,10.827469825744629 -8,7.718698024749756 -9,7.180932521820068 -10,6.68117094039917 -11,6.214954376220703 -12,5.907735347747803 -13,5.5873308181762695 -14,5.2996439933776855 -15,5.00650691986084 -16,4.701308727264404 -17,4.441305160522461 -18,4.2984232902526855 -19,4.1493096351623535 -20,3.99883770942688 -21,3.8936257362365723 -22,3.845712423324585 -23,3.8055574893951416 -24,3.761474132537842 -25,3.7138679027557373 -26,3.663846254348755 -27,3.612367630004883 -28,3.5595128536224365 -29,3.505897283554077 -30,3.4522130489349365 
-31,3.398937225341797 -32,3.34653902053833 -33,3.2955586910247803 -34,3.2462916374206543 -35,3.199104070663452 -36,3.1542861461639404 -37,3.112034797668457 -38,3.072058916091919 -39,3.0345160961151123 -40,2.999133825302124 -41,2.965812921524048 -42,2.9345145225524902 -43,2.9051995277404785 -44,2.878122091293335 -45,2.854544162750244 -46,2.8362486362457275 -47,2.82195782661438 -48,2.8065054416656494 -49,2.7885589599609375 -50,2.7675867080688477 -51,2.7434468269348145 -52,2.716958522796631 -53,2.6908187866210938 -54,2.6693341732025146 -55,2.652442216873169 -56,2.6388328075408936 -57,2.6273975372314453 -58,2.617556571960449 -59,2.6089956760406494 -60,2.601651906967163 -61,2.595339059829712 -62,2.590059757232666 -63,2.5857083797454834 -64,2.5823168754577637 -65,2.57979679107666 -66,2.5780413150787354 -67,2.5769147872924805 -68,2.57629656791687 -69,2.575939655303955 -70,2.575552463531494 -71,2.574817657470703 -72,2.5735745429992676 -73,2.5717499256134033 -74,2.5693626403808594 -75,2.56658673286438 -76,2.5636661052703857 -77,2.5609028339385986 -78,2.5585460662841797 -79,2.556769609451294 -80,2.5555288791656494 -81,2.5546858310699463 -82,2.554033041000366 -83,2.553380250930786 -84,2.5525989532470703 -85,2.5516357421875 -86,2.5505053997039795 -87,2.5492680072784424 -88,2.5480117797851562 -89,2.5468215942382812 -90,2.545741319656372 -91,2.5447866916656494 -92,2.543933153152466 -93,2.5431313514709473 -94,2.542325258255005 -95,2.5414798259735107 -96,2.5405819416046143 -97,2.53964900970459 -98,2.538698196411133 -99,2.537766933441162 -100,2.536885976791382 -101,2.536051034927368 -102,2.5352368354797363 -103,2.534424304962158 -104,2.533595323562622 -105,2.5327229499816895 -106,2.531771421432495 -107,2.5307133197784424 -108,2.529599905014038 -109,2.528508186340332 -110,2.52746844291687 -111,2.5264902114868164 -112,2.5255930423736572 -113,2.524780511856079 -114,2.5240323543548584 -115,2.5233571529388428 -116,2.522704601287842 -117,2.5220866203308105 -118,2.5214967727661133 -119,2.5209226608276367 -120,2.5203659534454346 -121,2.5198209285736084 -122,2.5192770957946777 -123,2.5187289714813232 -124,2.5181691646575928 -125,2.517592668533325 -126,2.517000198364258 -127,2.5164010524749756 -128,2.5157980918884277 -129,2.515197515487671 -130,2.51460862159729 -131,2.514035940170288 -132,2.5134830474853516 -133,2.512948989868164 -134,2.5124361515045166 -135,2.5119409561157227 -136,2.511460304260254 -137,2.5109970569610596 -138,2.5105412006378174 -139,2.510094404220581 -140,2.5096545219421387 -141,2.509220838546753 -142,2.5087904930114746 -143,2.508363723754883 -144,2.507939577102661 -145,2.507519006729126 -146,2.5071027278900146 -147,2.5066909790039062 -148,2.5062832832336426 -149,2.5058834552764893 -150,2.50549054145813 -151,2.50510311126709 -152,2.5047190189361572 -153,2.504340410232544 -154,2.5039663314819336 -155,2.503599166870117 -156,2.50323748588562 -157,2.5028820037841797 -158,2.502530813217163 -159,2.502183198928833 -160,2.5018396377563477 -161,2.5015006065368652 -162,2.5011653900146484 -163,2.500833034515381 -164,2.5005040168762207 -165,2.500178337097168 -166,2.4998562335968018 -167,2.4995369911193848 -168,2.499220848083496 -169,2.4989094734191895 -170,2.498600721359253 -171,2.498295545578003 -172,2.497992515563965 -173,2.4976933002471924 -174,2.4973976612091064 -175,2.4971060752868652 -176,2.496819496154785 -177,2.4965367317199707 -178,2.49625825881958 -179,2.4959847927093506 -180,2.495715856552124 -181,2.495452404022217 -182,2.4951932430267334 -183,2.494938850402832 -184,2.494687557220459 
-185,2.494439125061035 -186,2.4941937923431396 -187,2.4939510822296143 -188,2.4937117099761963 -189,2.4934747219085693 -190,2.49324107170105 -191,2.493013381958008 -192,2.4927899837493896 -193,2.4925708770751953 -194,2.4923558235168457 -195,2.4921436309814453 -196,2.4919347763061523 -197,2.4917290210723877 -198,2.491525173187256 -199,2.4913246631622314 -200,2.4911258220672607 diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.100/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.100/training_config.txt deleted file mode 100644 index 5c21669..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.100/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.1 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.100/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.100/training_log.csv deleted file mode 100644 index 97dbcc6..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.100/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,472.03131103515625 -1,227.91400146484375 -2,144.65541076660156 -3,82.6039810180664 -4,35.368682861328125 -5,14.420293807983398 -6,11.543237686157227 -7,11.409295082092285 -8,11.019989967346191 
-9,10.667518615722656 -10,10.341033935546875 -11,9.955023765563965 -12,9.531441688537598 -13,9.06412410736084 -14,8.642601013183594 -15,8.330805778503418 -16,8.039302825927734 -17,7.735342502593994 -18,7.397258758544922 -19,7.037418842315674 -20,6.718796253204346 -21,6.4561262130737305 -22,6.21888542175293 -23,5.981663227081299 -24,5.739588737487793 -25,5.500685691833496 -26,5.274210453033447 -27,5.065432548522949 -28,4.874978065490723 -29,4.702284812927246 -30,4.546573638916016 -31,4.4083027839660645 -32,4.28896427154541 -33,4.190129280090332 -34,4.1119842529296875 -35,4.053347110748291 -36,4.010560989379883 -37,3.979066848754883 -38,3.9543259143829346 -39,3.9335575103759766 -40,3.914203405380249 -41,3.89290189743042 -42,3.867672920227051 -43,3.8375930786132812 -44,3.802696704864502 -45,3.7637407779693604 -46,3.721846580505371 -47,3.678412914276123 -48,3.6349129676818848 -49,3.592541217803955 -50,3.552243709564209 -51,3.5144894123077393 -52,3.479823589324951 -53,3.448302984237671 -54,3.419459581375122 -55,3.3928370475769043 -56,3.368063449859619 -57,3.3447351455688477 -58,3.3224992752075195 -59,3.3010432720184326 -60,3.280106782913208 -61,3.2594833374023438 -62,3.2390055656433105 -63,3.218590497970581 -64,3.198225259780884 -65,3.177893877029419 -66,3.157632350921631 -67,3.137535572052002 -68,3.117743968963623 -69,3.0982184410095215 -70,3.079014301300049 -71,3.0602095127105713 -72,3.0418574810028076 -73,3.0239346027374268 -74,3.006425142288208 -75,2.989377021789551 -76,2.9728124141693115 -77,2.956688642501831 -78,2.940946340560913 -79,2.9255411624908447 -80,2.9104161262512207 -81,2.895514965057373 -82,2.8807976245880127 -83,2.8662216663360596 -84,2.8517727851867676 -85,2.8374409675598145 -86,2.8232386112213135 -87,2.8091936111450195 -88,2.795335292816162 -89,2.7816953659057617 -90,2.76830792427063 -91,2.7552084922790527 -92,2.742398738861084 -93,2.7298665046691895 -94,2.717543363571167 -95,2.705476999282837 -96,2.693840265274048 -97,2.6827237606048584 -98,2.672166109085083 -99,2.662142753601074 -100,2.6526083946228027 -101,2.6435577869415283 -102,2.6349756717681885 -103,2.626856803894043 -104,2.6191906929016113 -105,2.6119754314422607 -106,2.605206251144409 -107,2.5988657474517822 -108,2.5929007530212402 -109,2.5872559547424316 -110,2.581831216812134 -111,2.576538324356079 -112,2.571302652359009 -113,2.5660064220428467 -114,2.5605311393737793 -115,2.5547733306884766 -116,2.549027681350708 -117,2.54341983795166 -118,2.538033962249756 -119,2.5329387187957764 -120,2.528188705444336 -121,2.5238075256347656 -122,2.519817352294922 -123,2.516207218170166 -124,2.5129716396331787 -125,2.510097026824951 -126,2.50754976272583 -127,2.505370855331421 -128,2.5035483837127686 -129,2.5020644664764404 -130,2.5009076595306396 -131,2.5000228881835938 -132,2.4993715286254883 -133,2.4989049434661865 -134,2.4985668659210205 -135,2.4983267784118652 -136,2.498152256011963 -137,2.498008966445923 -138,2.497885227203369 -139,2.497760534286499 -140,2.497615337371826 -141,2.4974400997161865 -142,2.497230052947998 -143,2.4969804286956787 -144,2.4966890811920166 -145,2.4963555335998535 -146,2.4959757328033447 -147,2.495546817779541 -148,2.495087146759033 -149,2.4946091175079346 -150,2.494128942489624 -151,2.493662118911743 -152,2.4932096004486084 -153,2.4927711486816406 -154,2.4923551082611084 -155,2.4919703006744385 -156,2.4916207790374756 -157,2.4913036823272705 -158,2.4910173416137695 -159,2.490760564804077 -160,2.490522623062134 -161,2.490302562713623 -162,2.4900968074798584 -163,2.489900827407837 
-164,2.489712715148926 -165,2.4895293712615967 -166,2.4893500804901123 -167,2.489170789718628 -168,2.488992929458618 -169,2.4888150691986084 -170,2.488638401031494 -171,2.4884607791900635 -172,2.488281488418579 -173,2.4881012439727783 -174,2.487919807434082 -175,2.487736463546753 -176,2.4875521659851074 -177,2.4873673915863037 -178,2.4871833324432373 -179,2.4870007038116455 -180,2.4868197441101074 -181,2.486640453338623 -182,2.4864633083343506 -183,2.4862897396087646 -184,2.486121416091919 -185,2.485954999923706 -186,2.485790729522705 -187,2.485628843307495 -188,2.485469341278076 -189,2.48531174659729 -190,2.4851558208465576 -191,2.485001802444458 -192,2.484848976135254 -193,2.484698534011841 -194,2.4845492839813232 -195,2.484401226043701 -196,2.4842545986175537 -197,2.484109401702881 -198,2.483964443206787 -199,2.483820915222168 -200,2.4836783409118652 diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.125/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.125/training_config.txt deleted file mode 100644 index 6aad3a9..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.125/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.125 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.125/training_log.csv 
b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.125/training_log.csv deleted file mode 100644 index d45d01b..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.125/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,472.03131103515625 -1,224.97056579589844 -2,57.203556060791016 -3,43.53931427001953 -4,21.88982582092285 -5,20.10477638244629 -6,14.879633903503418 -7,9.915104866027832 -8,8.60065746307373 -9,7.625808238983154 -10,6.7785844802856445 -11,5.8287672996521 -12,5.291732311248779 -13,4.927998065948486 -14,4.6371660232543945 -15,4.394495964050293 -16,4.185304164886475 -17,4.002267360687256 -18,3.839343309402466 -19,3.693824291229248 -20,3.5634844303131104 -21,3.4468955993652344 -22,3.3423869609832764 -23,3.2493696212768555 -24,3.1682965755462646 -25,3.1015632152557373 -26,3.05606746673584 -27,3.036527633666992 -28,3.0277137756347656 -29,3.023024559020996 -30,3.020691394805908 -31,3.0186824798583984 -32,3.0162513256073 -33,3.012892484664917 -34,3.0083727836608887 -35,3.0025064945220947 -36,2.9952521324157715 -37,2.9867451190948486 -38,2.9770498275756836 -39,2.966383934020996 -40,2.9547133445739746 -41,2.942403554916382 -42,2.929781675338745 -43,2.9172475337982178 -44,2.9051458835601807 -45,2.89400577545166 -46,2.883847236633301 -47,2.8747761249542236 -48,2.866760730743408 -49,2.8597190380096436 -50,2.8534927368164062 -51,2.847931385040283 -52,2.8428733348846436 -53,2.8381636142730713 -54,2.833684206008911 -55,2.829343795776367 -56,2.825073480606079 -57,2.820829391479492 -58,2.8165783882141113 -59,2.812304735183716 -60,2.807976484298706 -61,2.8036036491394043 -62,2.7991867065429688 -63,2.794738531112671 -64,2.790259838104248 -65,2.7857513427734375 -66,2.7812607288360596 -67,2.776787519454956 -68,2.7723424434661865 -69,2.7679407596588135 -70,2.763613700866699 -71,2.759336233139038 -72,2.755139112472534 -73,2.751024007797241 -74,2.746995210647583 -75,2.7430429458618164 -76,2.7391374111175537 -77,2.735165596008301 -78,2.7311882972717285 -79,2.7271950244903564 -80,2.723184823989868 -81,2.719143867492676 -82,2.715036630630493 -83,2.710844039916992 -84,2.706573247909546 -85,2.7022318840026855 -86,2.697787046432495 -87,2.693286895751953 -88,2.6887364387512207 -89,2.6841511726379395 -90,2.6795167922973633 -91,2.67482328414917 -92,2.670067548751831 -93,2.6652233600616455 -94,2.6603634357452393 -95,2.6554982662200928 -96,2.6505815982818604 -97,2.6456453800201416 -98,2.640695571899414 -99,2.635680913925171 -100,2.630539655685425 -101,2.6252472400665283 -102,2.619823932647705 -103,2.6142818927764893 -104,2.60868501663208 -105,2.6030309200286865 -106,2.5973289012908936 -107,2.5916059017181396 -108,2.5859177112579346 -109,2.5803475379943848 -110,2.574903726577759 -111,2.5696442127227783 -112,2.5645854473114014 -113,2.5597522258758545 -114,2.5550711154937744 -115,2.5504908561706543 -116,2.5460093021392822 -117,2.5416462421417236 -118,2.5375194549560547 -119,2.53367280960083 -120,2.530214548110962 -121,2.5272445678710938 -122,2.5247910022735596 -123,2.5229480266571045 -124,2.5215835571289062 -125,2.5206475257873535 -126,2.5199904441833496 -127,2.519425392150879 -128,2.5187947750091553 -129,2.5179901123046875 -130,2.516991138458252 -131,2.515836715698242 -132,2.514572858810425 -133,2.5132713317871094 -134,2.512025833129883 -135,2.510871171951294 -136,2.509899139404297 -137,2.5091183185577393 -138,2.508462429046631 -139,2.5079822540283203 -140,2.507624387741089 -141,2.5073235034942627 -142,2.5070714950561523 -143,2.5068440437316895 
-144,2.5066072940826416 -145,2.506334066390991 -146,2.5060195922851562 -147,2.5056724548339844 -148,2.505315065383911 -149,2.504953384399414 -150,2.504599094390869 -151,2.504249095916748 -152,2.503937244415283 -153,2.5036590099334717 -154,2.503404378890991 -155,2.5031487941741943 -156,2.5029056072235107 -157,2.5026967525482178 -158,2.502504348754883 -159,2.5023136138916016 -160,2.502124547958374 -161,2.5019307136535645 -162,2.501737117767334 -163,2.5015456676483154 -164,2.5013577938079834 -165,2.501173734664917 -166,2.5009922981262207 -167,2.50081205368042 -168,2.5006344318389893 -169,2.500458240509033 -170,2.5002832412719727 -171,2.5001094341278076 -172,2.4999377727508545 -173,2.4997639656066895 -174,2.4995901584625244 -175,2.4994170665740967 -176,2.4992477893829346 -177,2.4990811347961426 -178,2.498917818069458 -179,2.4987573623657227 -180,2.4985997676849365 -181,2.498445749282837 -182,2.4982926845550537 -183,2.4981417655944824 -184,2.4979915618896484 -185,2.497840642929077 -186,2.497687339782715 -187,2.4975345134735107 -188,2.497382402420044 -189,2.4972312450408936 -190,2.497082471847534 -191,2.496934413909912 -192,2.496788263320923 -193,2.496642589569092 -194,2.496497631072998 -195,2.4963533878326416 -196,2.4962093830108643 -197,2.4960649013519287 -198,2.495919942855835 -199,2.495774507522583 -200,2.4956281185150146 diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.160/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.160/training_config.txt deleted file mode 100644 index bf31d9e..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.160/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.16 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, 
-3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.160/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.160/training_log.csv deleted file mode 100644 index e34fdec..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.160/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,472.03131103515625 -1,226.8415985107422 -2,28.727375030517578 -3,19.447574615478516 -4,20.62757682800293 -5,19.27591323852539 -6,17.813037872314453 -7,14.796233177185059 -8,12.640740394592285 -9,11.282238960266113 -10,10.141789436340332 -11,8.980539321899414 -12,7.792398929595947 -13,6.99091911315918 -14,6.453000545501709 -15,6.056644916534424 -16,5.718233108520508 -17,5.4030561447143555 -18,5.096240043640137 -19,4.791772365570068 -20,4.509129524230957 -21,4.25894832611084 -22,4.041905403137207 -23,3.8516008853912354 -24,3.685837745666504 -25,3.539287567138672 -26,3.4100751876831055 -27,3.2967536449432373 -28,3.198535203933716 -29,3.1142587661743164 -30,3.0439953804016113 -31,2.9902219772338867 -32,2.9534034729003906 -33,2.9321787357330322 -34,2.9222042560577393 -35,2.919342041015625 -36,2.9198317527770996 -37,2.9201114177703857 -38,2.919468879699707 -39,2.9172797203063965 -40,2.9127862453460693 -41,2.9056873321533203 -42,2.8960680961608887 -43,2.8841495513916016 -44,2.870063543319702 -45,2.854246139526367 -46,2.8370718955993652 -47,2.8190014362335205 -48,2.80047345161438 -49,2.7819173336029053 -50,2.7636878490448 -51,2.746108055114746 -52,2.729541301727295 -53,2.7141165733337402 -54,2.6999945640563965 -55,2.6870064735412598 -56,2.6750032901763916 -57,2.6641299724578857 -58,2.6544251441955566 -59,2.645799398422241 -60,2.6382343769073486 -61,2.6316518783569336 -62,2.625885009765625 -63,2.6207215785980225 -64,2.6159074306488037 -65,2.611363649368286 -66,2.6070027351379395 -67,2.603048324584961 -68,2.599607467651367 -69,2.596324920654297 -70,2.592695713043213 -71,2.588884115219116 -72,2.5848186016082764 -73,2.580507516860962 -74,2.576226234436035 -75,2.572129726409912 -76,2.56838059425354 -77,2.5650134086608887 -78,2.5621464252471924 -79,2.559676170349121 -80,2.5574333667755127 -81,2.5553367137908936 -82,2.5532655715942383 -83,2.551168441772461 -84,2.548957109451294 -85,2.54660964012146 -86,2.5439887046813965 -87,2.5407888889312744 -88,2.5374953746795654 -89,2.5341765880584717 -90,2.5308852195739746 -91,2.52767276763916 -92,2.5245425701141357 -93,2.521531820297241 -94,2.5186712741851807 -95,2.5159404277801514 -96,2.5132925510406494 -97,2.510746479034424 -98,2.508326292037964 -99,2.5060479640960693 -100,2.5039520263671875 -101,2.5020647048950195 -102,2.5004193782806396 -103,2.4990477561950684 -104,2.4979162216186523 -105,2.4969639778137207 -106,2.4961769580841064 -107,2.4955592155456543 -108,2.4951140880584717 -109,2.4948112964630127 -110,2.494574546813965 -111,2.494403123855591 -112,2.494277000427246 -113,2.4941799640655518 -114,2.494096040725708 -115,2.4940054416656494 -116,2.493896007537842 -117,2.493746757507324 -118,2.493551731109619 -119,2.493312120437622 -120,2.493027925491333 -121,2.492711305618286 -122,2.4923856258392334 
-123,2.4920475482940674 -124,2.491718053817749 -125,2.491401433944702 -126,2.491096258163452 -127,2.4908106327056885 -128,2.4905412197113037 -129,2.4902825355529785 -130,2.4900331497192383 -131,2.489793300628662 -132,2.489563226699829 -133,2.4893438816070557 -134,2.4891390800476074 -135,2.4889464378356934 -136,2.488757371902466 -137,2.488572835922241 -138,2.488389492034912 -139,2.4882054328918457 -140,2.4880237579345703 -141,2.4878413677215576 -142,2.4876444339752197 -143,2.487441301345825 -144,2.487239360809326 -145,2.4870445728302 -146,2.486858606338501 -147,2.4866788387298584 -148,2.486514091491699 -149,2.4863476753234863 -150,2.486166477203369 -151,2.4859721660614014 -152,2.485781192779541 -153,2.4856040477752686 -154,2.4854319095611572 -155,2.485264301300049 -156,2.485100746154785 -157,2.4849414825439453 -158,2.484781503677368 -159,2.484621286392212 -160,2.4844632148742676 -161,2.484309196472168 -162,2.484156847000122 -163,2.4840078353881836 -164,2.4838597774505615 -165,2.4837138652801514 -166,2.483569622039795 -167,2.483426094055176 -168,2.483285665512085 -169,2.4831488132476807 -170,2.483013153076172 -171,2.4828789234161377 -172,2.482743501663208 -173,2.4826087951660156 -174,2.4824769496917725 -175,2.482344388961792 -176,2.482212543487549 -177,2.4820806980133057 -178,2.481947898864746 -179,2.4818150997161865 -180,2.4816808700561523 -181,2.481548547744751 -182,2.4814178943634033 -183,2.481287956237793 -184,2.481158971786499 -185,2.481031656265259 -186,2.480905771255493 -187,2.4807820320129395 -188,2.4806602001190186 -189,2.480539321899414 -190,2.4804179668426514 -191,2.4802980422973633 -192,2.4801793098449707 -193,2.480062484741211 -194,2.4799461364746094 -195,2.4798331260681152 -196,2.4797205924987793 -197,2.4796085357666016 -198,2.4794979095458984 -199,2.47938871383667 -200,2.4792795181274414 diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.200/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.200/training_config.txt deleted file mode 100644 index 0be5b11..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.200/training_config.txt +++ /dev/null @@ -1,48 +0,0 @@ -Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth -Time Span: 0 to 10, Points: 1000 -Learning Rate: 0.2 -Weight Decay: 0 - -Loss Function: - def loss_fn(state_traj, t_span): - theta = state_traj[:, :, 0] # Size: [batch_size, t_points] - desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points] - - min_weight = 0.01 # Weights are on the range [min_weight, 1] - weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points] - # Reshape or expand weights to match theta dimensions - weights = weights.view(-1, 1) # Now Size: [batch_size, t_points] - - # Calculate the weighted loss - return torch.mean(weights * (theta - desired_theta) ** 2) - -Weight Function: -def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor: - t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span) - t_max = t_max if t_max is not None else t_span[-1] - return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2) - -Training Cases: -[theta0, omega0, alpha0, desired_theta] -[0.5235987901687622, 0.0, 0.0, 0.0] -[-0.5235987901687622, 0.0, 0.0, 0.0] -[2.094395160675049, 0.0, 0.0, 0.0] -[-2.094395160675049, 0.0, 0.0, 0.0] -[0.0, 1.0471975803375244, 0.0, 
0.0] -[0.0, -1.0471975803375244, 0.0, 0.0] -[0.0, 6.2831854820251465, 0.0, 0.0] -[0.0, -6.2831854820251465, 0.0, 0.0] -[0.0, 0.0, 0.0, 6.2831854820251465] -[0.0, 0.0, 0.0, -6.2831854820251465] -[0.0, 0.0, 0.0, 1.5707963705062866] -[0.0, 0.0, 0.0, -1.5707963705062866] -[0.0, 0.0, 0.0, 1.0471975803375244] -[0.0, 0.0, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 0.0] -[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0] -[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244] -[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244] -[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465] -[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465] -[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293] -[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293] diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.200/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.200/training_log.csv deleted file mode 100644 index 9c9ac15..0000000 --- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.200/training_log.csv +++ /dev/null @@ -1,202 +0,0 @@ -Epoch,Loss -0,472.03131103515625 -1,240.2086639404297 -2,65.05992126464844 -3,16.52082061767578 -4,11.790352821350098 -5,8.460886001586914 -6,6.494465351104736 -7,5.559576511383057 -8,4.893065452575684 -9,4.273072719573975 -10,3.9121768474578857 -11,3.684067487716675 -12,3.520134210586548 -13,3.3920276165008545 -14,3.288567543029785 -15,3.203423023223877 -16,3.1329309940338135 -17,3.0744779109954834 -18,3.0256452560424805 -19,2.985074281692505 -20,2.951965808868408 -21,2.9249467849731445 -22,2.9026365280151367 -23,2.8842742443084717 -24,2.8688783645629883 -25,2.8557066917419434 -26,2.844245433807373 -27,2.834031105041504 -28,2.824702024459839 -29,2.8159971237182617 -30,2.807640552520752 -31,2.799514055252075 -32,2.7914793491363525 -33,2.783773899078369 -34,2.7764456272125244 -35,2.7694382667541504 -36,2.7627251148223877 -37,2.7561943531036377 -38,2.7495615482330322 -39,2.7430667877197266 -40,2.7367944717407227 -41,2.730682611465454 -42,2.724771022796631 -43,2.71905779838562 -44,2.713679790496826 -45,2.7085444927215576 -46,2.7037923336029053 -47,2.699338436126709 -48,2.6953036785125732 -49,2.69173002243042 -50,2.6882834434509277 -51,2.684846878051758 -52,2.681394577026367 -53,2.678068161010742 -54,2.6747329235076904 -55,2.6713931560516357 -56,2.6680378913879395 -57,2.6646952629089355 -58,2.6615171432495117 -59,2.658437967300415 -60,2.6553735733032227 -61,2.6524717807769775 -62,2.649472236633301 -63,2.6463558673858643 -64,2.6431899070739746 -65,2.639993906021118 -66,2.636838912963867 -67,2.633701801300049 -68,2.6304948329925537 -69,2.6272225379943848 -70,2.6239278316497803 -71,2.620607614517212 -72,2.6172516345977783 -73,2.6138932704925537 -74,2.610595464706421 -75,2.6073458194732666 -76,2.6041011810302734 -77,2.600815773010254 -78,2.5974204540252686 -79,2.593928575515747 -80,2.5904242992401123 -81,2.586963653564453 -82,2.5835652351379395 -83,2.5801820755004883 -84,2.5767934322357178 -85,2.5734009742736816 -86,2.5699758529663086 -87,2.566606044769287 -88,2.5631580352783203 -89,2.5596282482147217 -90,2.5559849739074707 -91,2.552252769470215 -92,2.5485870838165283 -93,2.544985771179199 -94,2.541372060775757 -95,2.537822961807251 -96,2.534313201904297 -97,2.530989646911621 -98,2.5277724266052246 -99,2.524771213531494 -100,2.5219478607177734 -101,2.5193119049072266 
-102,2.516885995864868
-103,2.5146641731262207
-104,2.512643814086914
-105,2.510861873626709
-106,2.509324550628662
-107,2.508068084716797
-108,2.50705623626709
-109,2.5061824321746826
-110,2.5055203437805176
-111,2.5050313472747803
-112,2.5046021938323975
-113,2.504234552383423
-114,2.5038814544677734
-115,2.5035691261291504
-116,2.503183364868164
-117,2.502822160720825
-118,2.5024070739746094
-119,2.501934766769409
-120,2.5014355182647705
-121,2.5009214878082275
-122,2.500417470932007
-123,2.49995756149292
-124,2.499469518661499
-125,2.499027967453003
-126,2.4986627101898193
-127,2.498363733291626
-128,2.49811053276062
-129,2.497910976409912
-130,2.4977669715881348
-131,2.4976277351379395
-132,2.4975061416625977
-133,2.4974048137664795
-134,2.4973034858703613
-135,2.4971985816955566
-136,2.497096300125122
-137,2.4969983100891113
-138,2.4968855381011963
-139,2.4967637062072754
-140,2.496628522872925
-141,2.49647855758667
-142,2.4963278770446777
-143,2.4961719512939453
-144,2.4960124492645264
-145,2.4958574771881104
-146,2.4957079887390137
-147,2.4955692291259766
-148,2.4954311847686768
-149,2.4952926635742188
-150,2.4951655864715576
-151,2.495032787322998
-152,2.494904041290283
-153,2.4947803020477295
-154,2.4946632385253906
-155,2.4945507049560547
-156,2.4944400787353516
-157,2.4943296909332275
-158,2.4942219257354736
-159,2.4941108226776123
-160,2.493997812271118
-161,2.493901014328003
-162,2.4938037395477295
-163,2.493708372116089
-164,2.4936137199401855
-165,2.493516206741333
-166,2.493422746658325
-167,2.493335485458374
-168,2.493241310119629
-169,2.4931397438049316
-170,2.4930341243743896
-171,2.4929354190826416
-172,2.4928343296051025
-173,2.4927303791046143
-174,2.492633581161499
-175,2.492537021636963
-176,2.4924397468566895
-177,2.492347002029419
-178,2.4922549724578857
-179,2.492161512374878
-180,2.4920661449432373
-181,2.491975784301758
-182,2.4918811321258545
-183,2.49178147315979
-184,2.4916882514953613
-185,2.491591453552246
-186,2.4914913177490234
-187,2.4913902282714844
-188,2.4912877082824707
-189,2.4911904335021973
-190,2.4910902976989746
-191,2.4909865856170654
-192,2.4908838272094727
-193,2.4907820224761963
-194,2.4906771183013916
-195,2.4905683994293213
-196,2.4904632568359375
-197,2.490354299545288
-198,2.490241527557373
-199,2.4901344776153564
-200,2.4900267124176025
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.250/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.250/training_config.txt
deleted file mode 100644
index bcbd223..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.250/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.25
-Weight Decay: 0
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.250/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.250/training_log.csv
deleted file mode 100644
index 284c228..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.250/training_log.csv
+++ /dev/null
@@ -1,202 +0,0 @@
-Epoch,Loss
-0,472.03131103515625
-1,328.1573486328125
-2,364.39324951171875
-3,31.002161026000977
-4,29.68213653564453
-5,29.239431381225586
-6,28.594139099121094
-7,27.785932540893555
-8,26.90545654296875
-9,25.971424102783203
-10,25.035974502563477
-11,24.058813095092773
-12,23.03606414794922
-13,22.009601593017578
-14,20.98329734802246
-15,19.982166290283203
-16,19.033889770507812
-17,18.139842987060547
-18,17.28249740600586
-19,16.461946487426758
-20,15.666667938232422
-21,14.87453842163086
-22,14.101818084716797
-23,13.346879005432129
-24,12.619579315185547
-25,11.92662239074707
-26,11.268649101257324
-27,10.650503158569336
-28,10.074653625488281
-29,9.53803539276123
-30,9.045944213867188
-31,8.602211952209473
-32,8.20938777923584
-33,7.857864856719971
-34,7.543343544006348
-35,7.263674736022949
-36,7.016350746154785
-37,6.797411918640137
-38,6.603886127471924
-39,6.432085990905762
-40,6.278926849365234
-41,6.141266822814941
-42,6.016331195831299
-43,5.901740074157715
-44,5.795086860656738
-45,5.69323205947876
-46,5.596700191497803
-47,5.526229381561279
-48,5.1297173500061035
-49,5.459120750427246
-50,5.974985122680664
-51,6.539168834686279
-52,7.111213207244873
-53,7.713098526000977
-54,8.391037940979004
-55,9.22575569152832
-56,10.366639137268066
-57,12.205950736999512
-58,16.993988037109375
-59,29.95743179321289
-60,51.49414825439453
-61,50.95806884765625
-62,34.00261306762695
-63,24.039562225341797
-64,17.464197158813477
-65,14.941766738891602
-66,13.291476249694824
-67,12.08646297454834
-68,11.153380393981934
-69,10.419628143310547
-70,9.836477279663086
-71,9.376750946044922
-72,8.992509841918945
-73,8.661561012268066
-74,8.370994567871094
-75,8.112353324890137
-76,7.87932825088501
-77,7.665690898895264
-78,7.466954708099365
-79,7.281339645385742
-80,7.107974529266357
-81,6.946061611175537
-82,6.795234203338623
-83,6.655519008636475
-84,6.52569055557251
-85,6.405058860778809
-86,6.2923431396484375
-87,6.186916351318359
-88,6.087775707244873
-89,5.9942193031311035
-90,5.904959678649902
-91,5.819399833679199
-92,5.737473964691162
-93,5.658637046813965
-94,5.582746982574463
-95,5.510342121124268
-96,5.440402507781982
-97,5.372181415557861
-98,5.305744171142578
-99,5.2421393394470215
-100,5.180514335632324
-101,5.120778560638428
-102,5.062896251678467
-103,5.0073771476745605
-104,4.953482151031494
-105,4.900930881500244
-106,4.849393844604492
-107,4.799614429473877
-108,4.751705646514893
-109,4.706655979156494
-110,4.663846015930176
-111,4.623539924621582
-112,4.585273265838623
-113,4.548714637756348
-114,4.516064167022705
-115,4.487917900085449
-116,4.466826915740967
-117,4.432258129119873
-118,4.3876872062683105
-119,4.352308750152588
-120,4.319817066192627
-121,4.2877678871154785
-122,4.258301258087158
-123,4.229930877685547
-124,4.202466011047363
-125,4.175791263580322
-126,4.1494245529174805
-127,4.123399257659912
-128,4.0978193283081055
-129,4.072751998901367
-130,4.048053741455078
-131,4.0237555503845215
-132,3.999804973602295
-133,3.9762187004089355
-134,3.9532408714294434
-135,3.9307329654693604
-136,3.908733367919922
-137,3.8873207569122314
-138,3.8664605617523193
-139,3.8462016582489014
-140,3.8264575004577637
-141,3.807121515274048
-142,3.7881996631622314
-143,3.7696797847747803
-144,3.751436710357666
-145,3.7333827018737793
-146,3.715515613555908
-147,3.6978919506073
-148,3.6805806159973145
-149,3.6635751724243164
-150,3.6468749046325684
-151,3.630514144897461
-152,3.614542245864868
-153,3.598841667175293
-154,3.5832386016845703
-155,3.56784725189209
-156,3.552675724029541
-157,3.5377538204193115
-158,3.5230603218078613
-159,3.50852632522583
-160,3.4940969944000244
-161,3.4798941612243652
-162,3.4658539295196533
-163,3.4519269466400146
-164,3.4380621910095215
-165,3.4242911338806152
-166,3.410611867904663
-167,3.397022008895874
-168,3.383622407913208
-169,3.3703296184539795
-170,3.357187032699585
-171,3.344119071960449
-172,3.331109046936035
-173,3.3181753158569336
-174,3.3053970336914062
-175,3.2927353382110596
-176,3.2802255153656006
-177,3.267794370651245
-178,3.2554779052734375
-179,3.2433035373687744
-180,3.231255054473877
-181,3.2192819118499756
-182,3.2074012756347656
-183,3.195646286010742
-184,3.1840522289276123
-185,3.1726267337799072
-186,3.1612720489501953
-187,3.150068998336792
-188,3.1390514373779297
-189,3.1284751892089844
-190,3.1181983947753906
-191,3.1081299781799316
-192,3.098372459411621
-193,3.0889759063720703
-194,3.0798256397247314
-195,3.0709214210510254
-196,3.0623159408569336
-197,3.0540049076080322
-198,3.045955181121826
-199,3.0380821228027344
-200,3.0303852558135986
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.300/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.300/training_config.txt
deleted file mode 100644
index a09d2b6..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.300/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.3
-Weight Decay: 0
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.300/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.300/training_log.csv
deleted file mode 100644
index 4e67d9b..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.300/training_log.csv
+++ /dev/null
@@ -1,39 +0,0 @@
-Epoch,Loss
-0,472.03131103515625
-1,504.04986572265625
-2,29.25644302368164
-3,32.18950271606445
-4,26.751977920532227
-5,20.54332733154297
-6,16.877056121826172
-7,15.237258911132812
-8,15.5487060546875
-9,14.833020210266113
-10,13.61668872833252
-11,12.881878852844238
-12,12.53397274017334
-13,12.333929061889648
-14,12.169219017028809
-15,12.000025749206543
-16,11.815340042114258
-17,11.617063522338867
-18,11.408769607543945
-19,11.19556713104248
-20,10.980958938598633
-21,10.768643379211426
-22,10.563733100891113
-23,10.37044906616211
-24,10.19104290008545
-25,10.026124954223633
-26,9.878422737121582
-27,9.744662284851074
-28,9.616373062133789
-29,9.489035606384277
-30,9.357518196105957
-31,9.21961784362793
-32,8.136822700500488
-33,8.063982009887695
-34,nan
-35,nan
-36,nan
-37,nan
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.400/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.400/training_config.txt
deleted file mode 100644
index 0c3bd51..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.400/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.4
-Weight Decay: 0
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.400/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.400/training_log.csv
deleted file mode 100644
index 7d9d1cd..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.400/training_log.csv
+++ /dev/null
@@ -1,18 +0,0 @@
-Epoch,Loss
-0,472.03131103515625
-1,4066.74609375
-2,25.84347152709961
-3,33.18013000488281
-4,24.108125686645508
-5,18.539525985717773
-6,17.58995246887207
-7,16.840782165527344
-8,16.15195655822754
-9,15.525141716003418
-10,15.000173568725586
-11,14.586549758911133
-12,14.278099060058594
-13,nan
-14,nan
-15,nan
-16,nan
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.500/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.500/training_config.txt
deleted file mode 100644
index 5b525b9..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.500/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.5
-Weight Decay: 0
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.500/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.500/training_log.csv
deleted file mode 100644
index 6485c26..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.500/training_log.csv
+++ /dev/null
@@ -1,20 +0,0 @@
-Epoch,Loss
-0,472.03131103515625
-1,20169.580078125
-2,69188.0625
-3,67710.0703125
-4,52076.36328125
-5,27178.53125
-6,1195.4798583984375
-7,21.71302604675293
-8,22.763479232788086
-9,26.007081985473633
-10,29.136579513549805
-11,31.8521785736084
-12,65956.78125
-13,71006.21875
-14,71006.21875
-15,71006.21875
-16,71006.21875
-17,71006.21875
-18,71006.21875
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.600/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.600/training_config.txt
deleted file mode 100644
index 99de6a5..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.600/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.6
-Weight Decay: 0
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.600/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.600/training_log.csv
deleted file mode 100644
index d920233..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.600/training_log.csv
+++ /dev/null
@@ -1,11 +0,0 @@
-Epoch,Loss
-0,472.03131103515625
-1,39824.3359375
-2,70529.09375
-3,70693.3984375
-4,70701.59375
-5,70701.59375
-6,70701.59375
-7,70701.59375
-8,70701.59375
-9,70701.59375
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.700/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.700/training_config.txt
deleted file mode 100644
index 3cffcb2..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.700/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.7
-Weight Decay: 0
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.700/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.700/training_log.csv
deleted file mode 100644
index a8d001b..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.700/training_log.csv
+++ /dev/null
@@ -1,10 +0,0 @@
-Epoch,Loss
-0,472.03131103515625
-1,53664.828125
-2,70690.6328125
-3,70701.59375
-4,70701.59375
-5,70701.59375
-6,70701.59375
-7,70701.59375
-8,70701.59375
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.800/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.800/training_config.txt
deleted file mode 100644
index 8d652fc..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.800/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.8
-Weight Decay: 0
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.800/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.800/training_log.csv
deleted file mode 100644
index 275d2fa..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.800/training_log.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Epoch,Loss
-0,472.03131103515625
-1,60295.6640625
-2,70701.59375
-3,70701.59375
-4,70701.59375
-5,70701.59375
-6,70701.59375
-7,70701.59375
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.900/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.900/training_config.txt
deleted file mode 100644
index 9d092eb..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.900/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 0.9
-Weight Decay: 0
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.900/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.900/training_log.csv
deleted file mode 100644
index 790e47b..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_0.900/training_log.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Epoch,Loss
-0,472.03131103515625
-1,64792.453125
-2,70701.59375
-3,70701.59375
-4,70701.59375
-5,70701.59375
-6,70701.59375
-7,70701.59375
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_1.000/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_1.000/training_config.txt
deleted file mode 100644
index 440c382..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_1.000/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 1
-Weight Decay: 0
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_1.000/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_1.000/training_log.csv
deleted file mode 100644
index 9e5cf3f..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_1.000/training_log.csv
+++ /dev/null
@@ -1,9 +0,0 @@
-Epoch,Loss
-0,472.03131103515625
-1,67292.1171875
-2,70701.59375
-3,70701.59375
-4,70701.59375
-5,70701.59375
-6,70701.59375
-7,70701.59375
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_16.000/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_16.000/training_config.txt
deleted file mode 100644
index 1e8b323..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_16.000/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 16
-Weight Decay: 0
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_16.000/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_16.000/training_log.csv
deleted file mode 100644
index 3362708..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_16.000/training_log.csv
+++ /dev/null
@@ -1,8 +0,0 @@
-Epoch,Loss
-0,472.03131103515625
-1,71006.21875
-2,71006.21875
-3,71006.21875
-4,71006.21875
-5,71006.21875
-6,71006.21875
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_2.000/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_2.000/training_config.txt
deleted file mode 100644
index dc7dcd3..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_2.000/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 2
-Weight Decay: 0
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_2.000/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_2.000/training_log.csv
deleted file mode 100644
index 5f7018f..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_2.000/training_log.csv
+++ /dev/null
@@ -1,8 +0,0 @@
-Epoch,Loss
-0,472.03131103515625
-1,70972.5
-2,22.73408317565918
-3,nan
-4,nan
-5,nan
-6,nan
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_4.000/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_4.000/training_config.txt
deleted file mode 100644
index d5b7d1c..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_4.000/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 4
-Weight Decay: 0
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_4.000/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_4.000/training_log.csv
deleted file mode 100644
index 3362708..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_4.000/training_log.csv
+++ /dev/null
@@ -1,8 +0,0 @@
-Epoch,Loss
-0,472.03131103515625
-1,71006.21875
-2,71006.21875
-3,71006.21875
-4,71006.21875
-5,71006.21875
-6,71006.21875
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_8.000/training_config.txt b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_8.000/training_config.txt
deleted file mode 100644
index 2916654..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_8.000/training_config.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Base controller path: /home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/controller_base.pth
-Time Span: 0 to 10, Points: 1000
-Learning Rate: 8
-Weight Decay: 0
-
-Loss Function:
-    def loss_fn(state_traj, t_span):
-        theta = state_traj[:, :, 0] # Size: [batch_size, t_points]
-        desired_theta = state_traj[:, :, 3] # Size: [batch_size, t_points]
-
-        min_weight = 0.01 # Weights are on the range [min_weight, 1]
-        weights = weight_fn(t_span, min_val=min_weight) # Initially Size: [t_points]
-        # Reshape or expand weights to match theta dimensions
-        weights = weights.view(-1, 1) # Now Size: [batch_size, t_points]
-
-        # Calculate the weighted loss
-        return torch.mean(weights * (theta - desired_theta) ** 2)
-
-Weight Function:
-def square_root_mirrored(t_span: Union[torch.Tensor, List[float]], t_max: float = None, min_val: float = 0.01) -> torch.Tensor:
-    t_span = t_span.clone().detach() if isinstance(t_span, torch.Tensor) else torch.tensor(t_span)
-    t_max = t_max if t_max is not None else t_span[-1]
-    return min_val + ((1 - min_val) / (t_max)**(1/2)) * (-t_span + t_max)**(1/2)
-
-Training Cases:
-[theta0, omega0, alpha0, desired_theta]
-[0.5235987901687622, 0.0, 0.0, 0.0]
-[-0.5235987901687622, 0.0, 0.0, 0.0]
-[2.094395160675049, 0.0, 0.0, 0.0]
-[-2.094395160675049, 0.0, 0.0, 0.0]
-[0.0, 1.0471975803375244, 0.0, 0.0]
-[0.0, -1.0471975803375244, 0.0, 0.0]
-[0.0, 6.2831854820251465, 0.0, 0.0]
-[0.0, -6.2831854820251465, 0.0, 0.0]
-[0.0, 0.0, 0.0, 6.2831854820251465]
-[0.0, 0.0, 0.0, -6.2831854820251465]
-[0.0, 0.0, 0.0, 1.5707963705062866]
-[0.0, 0.0, 0.0, -1.5707963705062866]
-[0.0, 0.0, 0.0, 1.0471975803375244]
-[0.0, 0.0, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 0.0]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 0.0]
-[1.5707963705062866, -3.1415927410125732, 0.0, 1.0471975803375244]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -1.0471975803375244]
-[0.7853981852531433, 3.1415927410125732, 0.0, 6.2831854820251465]
-[-0.7853981852531433, -3.1415927410125732, 0.0, 6.2831854820251465]
-[1.5707963705062866, -3.1415927410125732, 0.0, 12.566370964050293]
-[-1.5707963705062866, 3.1415927410125732, 0.0, -12.566370964050293]
diff --git a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_8.000/training_log.csv b/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_8.000/training_log.csv
deleted file mode 100644
index 3362708..0000000
--- a/training/time_weighting_learning_rate_sweep/square_root_mirrored/lr_8.000/training_log.csv
+++ /dev/null
@@ -1,8 +0,0 @@
-Epoch,Loss
-0,472.03131103515625
-1,71006.21875
-2,71006.21875
-3,71006.21875
-4,71006.21875
-5,71006.21875
-6,71006.21875
diff --git a/training/time_weighting_learning_rate_sweep_training.py b/training/time_weighting_learning_rate_sweep_training.py
index 6394be0..e63220c 100644
--- a/training/time_weighting_learning_rate_sweep_training.py
+++ b/training/time_weighting_learning_rate_sweep_training.py
@@ -30,7 +30,7 @@ t_start, t_end, t_points = 0, 10, 1000
 t_span = torch.linspace(t_start, t_end, t_points, device=device)
 
 # Output directory setup
-base_output_dir = "time_weighting_learning_rate_sweep"
+base_output_dir = "training_files/time_weighting_learning_rate_sweep"
 os.makedirs(base_output_dir, exist_ok=True)
 
 # Weight decay
diff --git a/training/time_weighting_training.py b/training/time_weighting_training.py
index e5ac808..7fddace 100644
--- a/training/time_weighting_training.py
+++ b/training/time_weighting_training.py
@@ -30,7 +30,7 @@ t_start, t_end, t_points = 0, 10, 1000
 t_span = torch.linspace(t_start, t_end, t_points, device=device)
 
 # Specify directory for storing results
-output_dir = "time_weighting"
+output_dir = "training_files/time_weighting"
 os.makedirs(output_dir, exist_ok=True)
 
 # Optimizer values
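
Note on the sweep artifacts deleted above: every run shares the same square_root_mirrored schedule and training cases and differs only in learning rate. The logs show stable convergence up to lr 0.2, a transient blow-up with partial recovery at lr 0.25, and NaNs or a flat plateau near 70701.59/71006.22 from lr 0.3 upward. The sketch below is illustrative only and not part of this patch: it restates the schedule in plain Python (no torch) so it runs standalone, and sweep_diverged is a hypothetical helper name for scanning any of the training_log.csv files above for divergence.

import csv
import math

def square_root_mirrored(t_span, t_max=None, min_val=0.01):
    # Same schedule as the deleted configs: weight 1.0 at t = 0,
    # decaying like sqrt(t_max - t) down to min_val at t = t_max,
    # so early-time tracking error dominates the loss.
    t_max = t_max if t_max is not None else t_span[-1]
    return [min_val + ((1 - min_val) / math.sqrt(t_max)) * math.sqrt(t_max - t)
            for t in t_span]

def sweep_diverged(csv_path, blowup=1.0e4):
    # Hypothetical helper: flag a training_log.csv whose Loss column
    # hit nan or a runaway value (e.g. the ~70701.59 plateau above).
    with open(csv_path) as f:
        losses = [row["Loss"] for row in csv.DictReader(f)]
    return any(v == "nan" or float(v) > blowup for v in losses)

t_span = [10.0 * i / 999 for i in range(1000)]  # "Time Span: 0 to 10, Points: 1000"
w = square_root_mirrored(t_span)
print(round(w[0], 2), round(w[500], 2), round(w[-1], 2))  # 1.0, ~0.71, 0.01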