"""Sweep trained inverted-pendulum controllers across epochs and initial conditions.

For every initial condition and every time-weighting loss function, this script
simulates each saved controller checkpoint (in parallel via multiprocessing),
extracts the pendulum angle trajectory, and writes one JSON file per condition.
"""
from multiprocessing import Pool, cpu_count
import json
import os

import numpy as np

from simulation import run_simulation
from data_processing import get_controller_files

# Initial states, presumably (theta, theta_dot, ..., ...) — the first component
# is treated as the angle below; TODO confirm ordering against run_simulation.
initial_conditions = {
    "small_perturbation": (0.1 * np.pi, 0.0, 0.0, 0.0),
    "large_perturbation": (-np.pi, 0.0, 0.0, 0),
    "overshoot_vertical_test": (-0.1 * np.pi, 2 * np.pi, 0.0, 0.0),
    "overshoot_angle_test": (0.2 * np.pi, 2 * np.pi, 0.0, 0.3 * np.pi),
    "extreme_perturbation": (4 * np.pi, 0.0, 0.0, 0),
}

# Loss function names: "constant" plus each base name and its "_mirrored" variant.
_base_losses = ["linear", "quadratic", "cubic", "inverse", "inverse_squared", "inverse_cubed"]
loss_functions = ["constant"] + _base_losses + [name + "_mirrored" for name in _base_losses]

# Simulation parameters
epoch_range = (0, 1000)  # Start and end epoch (now larger)
epoch_step = 1           # Interval between epochs
dt = 0.02                # Time step for simulation
num_steps = 500          # Number of simulation steps

# Directory to save results
output_dir = "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/analysis/time_weighting2"


def default_converter(o):
    """Convert numpy scalar/array types to native Python for json.dump.

    Hoisted to module level (the original re-defined it on every loop pass).
    Raises TypeError for anything json cannot handle, as json.dump expects.
    """
    if isinstance(o, np.integer):
        return int(o)
    if isinstance(o, np.floating):
        return float(o)
    if isinstance(o, np.ndarray):
        return o.tolist()
    raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable")


def main():
    """Run all simulations and save one JSON result file per initial condition."""
    os.makedirs(output_dir, exist_ok=True)

    for condition_name, initial_condition in initial_conditions.items():
        print(f"Running condition: {condition_name}")
        cond_results = {}  # loss_function -> [epochs, theta_over_epochs]

        for loss_function in loss_functions:
            # Directory holding the saved controller checkpoints for this loss.
            directory = (
                "/home/judson/Neural-Networks-in-GNC/inverted_pendulum/training/"
                f"time_weighting/{loss_function}/controllers"
            )
            controllers = get_controller_files(directory, epoch_range, epoch_step)
            tasks = [(c, initial_condition, directory, dt, num_steps) for c in controllers]

            print(f"  Processing loss function: {loss_function}")
            with Pool(min(cpu_count(), 16)) as pool:
                results = pool.map(run_simulation, tasks)

            results.sort(key=lambda x: x[0])  # Sort by epoch (assumed to be x[0])
            epochs, state_histories, _ = zip(*results)

            # Extract theta from each state history (first state component).
            theta_over_epochs = [
                [float(state[0]) for state in history] for history in state_histories
            ]
            epochs = [float(ep) for ep in epochs]
            cond_results[loss_function] = [epochs, theta_over_epochs]

        # Save this condition's results to its own JSON file.
        condition_output_path = os.path.join(output_dir, f"{condition_name}.json")
        with open(condition_output_path, "w") as f:
            json.dump(cond_results, f, default=default_converter, indent=2)
        print(f"Results for condition {condition_name} saved to {condition_output_path}.")


# Guard is required with multiprocessing: under the spawn start method, worker
# processes re-import this module, and unguarded top-level Pool creation would
# recursively spawn children.
if __name__ == "__main__":
    main()