commit 3c59598d38e81ebc4334578494a4d7ccb72be892
Author: SchrodingerError <131827896+SchrodingerError@users.noreply.github.com>
Date: Wed Aug 14 14:40:12 2024 -0500
Initial Commit
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..84576e7
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+ "cmake.configureOnOpen": false
+}
\ No newline at end of file
diff --git a/Output Files.zip b/Output Files.zip
new file mode 100644
index 0000000..1f4e301
Binary files /dev/null and b/Output Files.zip differ
diff --git a/Output Files/Flow Sensor 1/Isolated/Sensitivity Analysis on Flow Sensor 1 log.png b/Output Files/Flow Sensor 1/Isolated/Sensitivity Analysis on Flow Sensor 1 log.png
new file mode 100644
index 0000000..e956128
Binary files /dev/null and b/Output Files/Flow Sensor 1/Isolated/Sensitivity Analysis on Flow Sensor 1 log.png differ
diff --git a/Output Files/Flow Sensor 1/Isolated/Sensitivity Analysis on Flow Sensor 1.png b/Output Files/Flow Sensor 1/Isolated/Sensitivity Analysis on Flow Sensor 1.png
new file mode 100644
index 0000000..13527af
Binary files /dev/null and b/Output Files/Flow Sensor 1/Isolated/Sensitivity Analysis on Flow Sensor 1.png differ
diff --git a/Output Files/Flow Sensor 1/Isolated/Sensitivity Ratio Analysis on Flow Sensor 1 log.png b/Output Files/Flow Sensor 1/Isolated/Sensitivity Ratio Analysis on Flow Sensor 1 log.png
new file mode 100644
index 0000000..6689927
Binary files /dev/null and b/Output Files/Flow Sensor 1/Isolated/Sensitivity Ratio Analysis on Flow Sensor 1 log.png differ
diff --git a/Output Files/Flow Sensor 1/Isolated/Sensitivity Ratio Analysis on Flow Sensor 1.png b/Output Files/Flow Sensor 1/Isolated/Sensitivity Ratio Analysis on Flow Sensor 1.png
new file mode 100644
index 0000000..27b619b
Binary files /dev/null and b/Output Files/Flow Sensor 1/Isolated/Sensitivity Ratio Analysis on Flow Sensor 1.png differ
diff --git a/Output Files/Flow Sensor 1/Non-Isolated/Sensitivity Analysis on Flow Sensor 1 log.png b/Output Files/Flow Sensor 1/Non-Isolated/Sensitivity Analysis on Flow Sensor 1 log.png
new file mode 100644
index 0000000..c745592
Binary files /dev/null and b/Output Files/Flow Sensor 1/Non-Isolated/Sensitivity Analysis on Flow Sensor 1 log.png differ
diff --git a/Output Files/Flow Sensor 1/Non-Isolated/Sensitivity Analysis on Flow Sensor 1.png b/Output Files/Flow Sensor 1/Non-Isolated/Sensitivity Analysis on Flow Sensor 1.png
new file mode 100644
index 0000000..adc1091
Binary files /dev/null and b/Output Files/Flow Sensor 1/Non-Isolated/Sensitivity Analysis on Flow Sensor 1.png differ
diff --git a/Output Files/Flow Sensor 1/Non-Isolated/Sensitivity Ratio Analysis on Flow Sensor 1 log.png b/Output Files/Flow Sensor 1/Non-Isolated/Sensitivity Ratio Analysis on Flow Sensor 1 log.png
new file mode 100644
index 0000000..9b7e4e1
Binary files /dev/null and b/Output Files/Flow Sensor 1/Non-Isolated/Sensitivity Ratio Analysis on Flow Sensor 1 log.png differ
diff --git a/Output Files/Flow Sensor 1/Non-Isolated/Sensitivity Ratio Analysis on Flow Sensor 1.png b/Output Files/Flow Sensor 1/Non-Isolated/Sensitivity Ratio Analysis on Flow Sensor 1.png
new file mode 100644
index 0000000..f5f83eb
Binary files /dev/null and b/Output Files/Flow Sensor 1/Non-Isolated/Sensitivity Ratio Analysis on Flow Sensor 1.png differ
diff --git a/Output Files/PT 1/Isolated/Sensitivity Analysis on PT 1 log.png b/Output Files/PT 1/Isolated/Sensitivity Analysis on PT 1 log.png
new file mode 100644
index 0000000..ba79b73
Binary files /dev/null and b/Output Files/PT 1/Isolated/Sensitivity Analysis on PT 1 log.png differ
diff --git a/Output Files/PT 1/Isolated/Sensitivity Analysis on PT 1.png b/Output Files/PT 1/Isolated/Sensitivity Analysis on PT 1.png
new file mode 100644
index 0000000..987816e
Binary files /dev/null and b/Output Files/PT 1/Isolated/Sensitivity Analysis on PT 1.png differ
diff --git a/Output Files/PT 1/Isolated/Sensitivity Ratio Analysis on PT 1 log.png b/Output Files/PT 1/Isolated/Sensitivity Ratio Analysis on PT 1 log.png
new file mode 100644
index 0000000..837a136
Binary files /dev/null and b/Output Files/PT 1/Isolated/Sensitivity Ratio Analysis on PT 1 log.png differ
diff --git a/Output Files/PT 1/Isolated/Sensitivity Ratio Analysis on PT 1.png b/Output Files/PT 1/Isolated/Sensitivity Ratio Analysis on PT 1.png
new file mode 100644
index 0000000..32b621f
Binary files /dev/null and b/Output Files/PT 1/Isolated/Sensitivity Ratio Analysis on PT 1.png differ
diff --git a/Output Files/PT 1/Non-Isolated/Sensitivity Analysis on PT 1 log.png b/Output Files/PT 1/Non-Isolated/Sensitivity Analysis on PT 1 log.png
new file mode 100644
index 0000000..5f4617a
Binary files /dev/null and b/Output Files/PT 1/Non-Isolated/Sensitivity Analysis on PT 1 log.png differ
diff --git a/Output Files/PT 1/Non-Isolated/Sensitivity Analysis on PT 1.png b/Output Files/PT 1/Non-Isolated/Sensitivity Analysis on PT 1.png
new file mode 100644
index 0000000..d836221
Binary files /dev/null and b/Output Files/PT 1/Non-Isolated/Sensitivity Analysis on PT 1.png differ
diff --git a/Output Files/PT 1/Non-Isolated/Sensitivity Ratio Analysis on PT 1 log.png b/Output Files/PT 1/Non-Isolated/Sensitivity Ratio Analysis on PT 1 log.png
new file mode 100644
index 0000000..d18b41e
Binary files /dev/null and b/Output Files/PT 1/Non-Isolated/Sensitivity Ratio Analysis on PT 1 log.png differ
diff --git a/Output Files/PT 1/Non-Isolated/Sensitivity Ratio Analysis on PT 1.png b/Output Files/PT 1/Non-Isolated/Sensitivity Ratio Analysis on PT 1.png
new file mode 100644
index 0000000..2d64f49
Binary files /dev/null and b/Output Files/PT 1/Non-Isolated/Sensitivity Ratio Analysis on PT 1.png differ
diff --git a/Output Files/PT 1/Range Analysis/Range Analysis on PT 1.png b/Output Files/PT 1/Range Analysis/Range Analysis on PT 1.png
new file mode 100644
index 0000000..038574d
Binary files /dev/null and b/Output Files/PT 1/Range Analysis/Range Analysis on PT 1.png differ
diff --git a/Output Files/RTD 1/Isolated/Sensitivity Analysis on RTD 1 log.png b/Output Files/RTD 1/Isolated/Sensitivity Analysis on RTD 1 log.png
new file mode 100644
index 0000000..bd89a45
Binary files /dev/null and b/Output Files/RTD 1/Isolated/Sensitivity Analysis on RTD 1 log.png differ
diff --git a/Output Files/RTD 1/Isolated/Sensitivity Analysis on RTD 1.png b/Output Files/RTD 1/Isolated/Sensitivity Analysis on RTD 1.png
new file mode 100644
index 0000000..134fd39
Binary files /dev/null and b/Output Files/RTD 1/Isolated/Sensitivity Analysis on RTD 1.png differ
diff --git a/Output Files/RTD 1/Isolated/Sensitivity Ratio Analysis on RTD 1 log.png b/Output Files/RTD 1/Isolated/Sensitivity Ratio Analysis on RTD 1 log.png
new file mode 100644
index 0000000..5d59352
Binary files /dev/null and b/Output Files/RTD 1/Isolated/Sensitivity Ratio Analysis on RTD 1 log.png differ
diff --git a/Output Files/RTD 1/Isolated/Sensitivity Ratio Analysis on RTD 1.png b/Output Files/RTD 1/Isolated/Sensitivity Ratio Analysis on RTD 1.png
new file mode 100644
index 0000000..6935b21
Binary files /dev/null and b/Output Files/RTD 1/Isolated/Sensitivity Ratio Analysis on RTD 1.png differ
diff --git a/Output Files/RTD 1/Non-Isolated/Sensitivity Analysis on RTD 1 log.png b/Output Files/RTD 1/Non-Isolated/Sensitivity Analysis on RTD 1 log.png
new file mode 100644
index 0000000..b655520
Binary files /dev/null and b/Output Files/RTD 1/Non-Isolated/Sensitivity Analysis on RTD 1 log.png differ
diff --git a/Output Files/RTD 1/Non-Isolated/Sensitivity Analysis on RTD 1.png b/Output Files/RTD 1/Non-Isolated/Sensitivity Analysis on RTD 1.png
new file mode 100644
index 0000000..997577b
Binary files /dev/null and b/Output Files/RTD 1/Non-Isolated/Sensitivity Analysis on RTD 1.png differ
diff --git a/Output Files/RTD 1/Non-Isolated/Sensitivity Ratio Analysis on RTD 1 log.png b/Output Files/RTD 1/Non-Isolated/Sensitivity Ratio Analysis on RTD 1 log.png
new file mode 100644
index 0000000..52791ef
Binary files /dev/null and b/Output Files/RTD 1/Non-Isolated/Sensitivity Ratio Analysis on RTD 1 log.png differ
diff --git a/Output Files/RTD 1/Non-Isolated/Sensitivity Ratio Analysis on RTD 1.png b/Output Files/RTD 1/Non-Isolated/Sensitivity Ratio Analysis on RTD 1.png
new file mode 100644
index 0000000..31ca9e4
Binary files /dev/null and b/Output Files/RTD 1/Non-Isolated/Sensitivity Ratio Analysis on RTD 1.png differ
diff --git a/Output Files/RTD 1/Range Analysis/Range Analysis on RTD 1.png b/Output Files/RTD 1/Range Analysis/Range Analysis on RTD 1.png
new file mode 100644
index 0000000..4d3c610
Binary files /dev/null and b/Output Files/RTD 1/Range Analysis/Range Analysis on RTD 1.png differ
diff --git a/Output Files/System Uncertainty Report.pdf b/Output Files/System Uncertainty Report.pdf
new file mode 100644
index 0000000..3d1d16c
Binary files /dev/null and b/Output Files/System Uncertainty Report.pdf differ
diff --git a/Output Files/System/Histogram with Outliers.png b/Output Files/System/Histogram with Outliers.png
new file mode 100644
index 0000000..bebf5fb
Binary files /dev/null and b/Output Files/System/Histogram with Outliers.png differ
diff --git a/Output Files/System/Histogram without Outliers.png b/Output Files/System/Histogram without Outliers.png
new file mode 100644
index 0000000..be5fc03
Binary files /dev/null and b/Output Files/System/Histogram without Outliers.png differ
diff --git a/Output Files/System/Scatter with Outliers.png b/Output Files/System/Scatter with Outliers.png
new file mode 100644
index 0000000..9a1a25e
Binary files /dev/null and b/Output Files/System/Scatter with Outliers.png differ
diff --git a/Output Files/System/Scatter without Outliers.png b/Output Files/System/Scatter without Outliers.png
new file mode 100644
index 0000000..1d8f7d7
Binary files /dev/null and b/Output Files/System/Scatter without Outliers.png differ
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..e69de29
diff --git a/code/PDF_Generator.py b/code/PDF_Generator.py
new file mode 100644
index 0000000..f5cf34f
--- /dev/null
+++ b/code/PDF_Generator.py
@@ -0,0 +1,247 @@
+import os
+from fpdf import FPDF
+
+from PIL import Image
+
+class PDFGenerator(FPDF):
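+    """Report builder on top of FPDF: content is first queued via the *_memory methods,
+    then rendered in order by make_pdf_from_memory() so a table of contents can be emitted first."""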
+ def __init__(self):
+ super().__init__()
+
+ # Lists for storing items to memory
+ self.stored_memory: list = []
+ self.toc_entries: list = [] # List to store TOC entries
+
+ def add_header(self, text, level: int = 1):
+ """ Adds a header to the PDF. Level determines the size of the header. """
+ size = {1: 18, 2: 16, 3: 14}.get(level, 18)
+ self.set_font('DejaVu', 'B', size)
+ self.multi_cell(0, 5, text)
+ self.ln(2) # Add a small line break after the header
+
+    def add_text(self, text, bold: bool = False, indent: int = 0, newline: bool = True):
+        """ Adds normal text to the PDF, optionally bold, indented, and followed by a line break. """
+        if bold:
+            self.set_font('DejaVu', 'B', 8)
+        else:
+            self.set_font('DejaVu', '', 8)
+        indents = " " * 4 * indent
+
+        self.multi_cell(0, 5, indents + text)
+        if newline:
+            self.ln(1)
+
+ def add_centered_text(self, text, bold: bool = False):
+ """ Adds centered text to the PDF """
+        if bold:
+ self.set_font('DejaVu', 'B', 8)
+ else:
+ self.set_font('DejaVu', '', 8)
+ self.multi_cell(0, 5, text, align='C')
+ self.ln(1)
+
+ def add_centered_image(self, image_path: str, width_ratio: float = 0.5, caption=None):
+ """ Adds an image centered on the page, optionally with a caption below it. """
+ with Image.open(image_path) as img:
+ image_width, image_height = img.size
+
+ pdf_image_width = self.w * width_ratio
+ aspect_ratio = image_height / image_width
+ pdf_image_height = pdf_image_width * aspect_ratio
+ x_position = (self.w - pdf_image_width) / 2
+
+ # Center the image
+ self.ln(1)
+ self.image(image_path, x=x_position, w=pdf_image_width, h=pdf_image_height)
+
+ # If a caption is provided, add it below the image
+ if caption:
+ self.set_font('DejaVu', 'I', 6)
+            self.multi_cell(0, 10, caption, border=0, align='C')
+
+ def format_value(self, value):
+ """Format a number to 5 significant digits."""
+ try:
+ return f"{value:.5g}"
+ except (ValueError, TypeError):
+ return str(value)
+
+ def create_metrics_table(self, title, metrics_list, column_labels):
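+        """Renders a centered table with one row per metric key and one column per dict in
+        metrics_list, using column_labels (e.g. the error gain values) as the column headers."""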
+ # Make title
+ self.add_centered_text(title, bold=True)
+
+ # Define row labels based on the keys of the first dictionary in the metrics list
+ row_labels = list(metrics_list[0].keys())
+
+ # Define column widths (you can adjust these as needed)
+ col_widths = [40] + [13] * len(column_labels)
+ cell_height = 10 # Adjust cell height as needed
+
+ # Calculate the total width of the table
+ table_width = sum(col_widths)
+
+ # Define font settings
+ self.set_font('DejaVu', 'B', 6)
+
+ # Calculate the starting x position to center the table
+ start_x = (210 - table_width) / 2 # Assuming A4 size paper (210mm width)
+
+ # Set the starting x position
+ self.set_x(start_x)
+
+ # Add header row
+ self.cell(col_widths[0], 10, 'Metrics/Sensor Noise Gain', 1, 0, 'C')
+ for col_label in column_labels:
+ self.cell(col_widths[1], cell_height, self.format_value(col_label), 1, 0, 'C')
+ self.ln()
+
+ # Add data rows
+ self.set_font('DejaVu', '', 6)
+ for row_label in row_labels:
+ self.set_x(start_x) # Reset the x position for each row
+ x, y = self.get_x(), self.get_y() # Save the current position
+ self.set_font('DejaVu', 'B', 6)
+ self.multi_cell(col_widths[0], cell_height, row_label, 1, 'C')
+ self.set_font('DejaVu', '', 6)
+ self.set_xy(x + col_widths[0], y) # Adjust position for the next cells
+ for metrics in metrics_list:
+ self.cell(col_widths[1], cell_height, self.format_value(metrics[row_label]), 1, 0, 'C')
+ self.ln()
+
+ def make_pdf_from_memory(self, filepath: str) -> None:
+ '''Makes the pdf from the stored list'''
+ self.set_margins(5, 5, 5) # left, top, and right margins in mm
+
+ # Load a Unicode font
+ current_directory = os.path.dirname(__file__) # Get the directory where the script is located
+ self.add_font('DejaVu', '', f"{current_directory}\\fonts\\dejavu-sans-condensed.ttf", uni=True)
+ self.add_font('DejaVu', 'B', f"{current_directory}\\fonts\\dejavu-sans-condensedbold.ttf", uni=True)
+ self.add_font('DejaVu', 'I', f"{current_directory}\\fonts\\dejavu-sans-condensedoblique.ttf", uni=True)
+ self.set_font('DejaVu', '', 8) # Set DejaVu as the default font
+
+
+ self.add_header("Table of Contents")
+        self.ln(1)
+ self.set_font('DejaVu', '', 8)
+ for entry in self.toc_entries:
+ self.cell(0, 10, f"{entry['text']}", 0, 0, 'L')
+ self.cell(-20, 10, f"{entry['page']}", 0, 1, 'R')
+
+ self.add_page()
+
+ for item in self.stored_memory:
+ match item["type"]:
+ case "header":
+ self.add_header(item["text"], item["level"])
+ case "text":
+ self.add_text(item["text"], item["bold"], item["indent"])
+ case "centered_text":
+ self.add_centered_text(item["text"], item["bold"])
+ case "centered_image":
+ self.add_centered_image(item["image_path"], item["width_ratio"], item["caption"])
+ case "page":
+ self.add_page()
+ case "newline":
+ self.ln(1)
+ case "metrics_table":
+ self.create_metrics_table(item["title"], item["metrics_list"], item["column_labels"])
+
+ self.output(filepath)
+
+ def add_header_memory(self, text, level: int = 1, toc=True):
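+        """Queues a header and, if toc is True, records a TOC entry whose page number is
+        estimated from the number of page breaks queued so far."""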
+ dict_to_add = {
+ "type": "header",
+ "text": text,
+ "level": level
+ }
+ self.stored_memory.append(dict_to_add)
+
+ # Track header for TOC
+ if toc:
+ self.toc_entries.append({
+ "text": text,
+ "level": level,
+ "page": sum(1 for item in self.stored_memory if item.get("type") == "page") + 2
+ })
+
+
+ def add_text_memory(self, text, bold: bool = False, indent: int = 0, newline: bool = True):
+ dict_to_add = {
+ "type": "text",
+ "text": text,
+ "bold": bold,
+ "indent": indent,
+ "newline": newline
+ }
+ self.stored_memory.append(dict_to_add)
+
+
+ def add_centered_text_memory(self, text, bold: bool = False):
+ dict_to_add = {
+ "type": "centered_text",
+ "text": text,
+ "bold": bold
+ }
+ self.stored_memory.append(dict_to_add)
+
+
+ def add_centered_image_memory(self, image_path: str, width_ratio: float = 0.5, caption=None):
+ dict_to_add = {
+ "type": "centered_image",
+ "image_path": image_path,
+ "width_ratio": width_ratio,
+ "caption": caption
+ }
+ self.stored_memory.append(dict_to_add)
+
+ def add_page_memory(self) -> None:
+ dict_to_add = {
+ "type": "page",
+ }
+ self.stored_memory.append(dict_to_add)
+
+ def add_newline_memory(self) -> None:
+ dict_to_add = {
+ "type": "newline",
+ }
+ self.stored_memory.append(dict_to_add)
+
+ def add_metrics_list_memory(self, metrics, indent=0) -> None:
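+        """Queues one line of text for each recognized key present in the metrics dict."""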
+ if "true_value" in metrics:
+ self.add_text_memory(f"True System Output Value: {metrics['true_value']}", indent=indent, bold=True)
+ if "median" in metrics:
+ self.add_text_memory(f"Median: {metrics['median']}", indent=indent, bold=True)
+ if "average" in metrics:
+ self.add_text_memory(f"Average of Monte-Carlo: {metrics['average']}", indent=indent, bold=True)
+ if "average_percent_difference" in metrics:
+ self.add_text_memory(f"Average Percent Difference: {metrics['average_percent_difference']} %", indent=indent, bold=True)
+ if "min_val" in metrics:
+ self.add_text_memory(f"Minimum Value: {metrics['min_val']}", indent=indent)
+ if "max_val" in metrics:
+ self.add_text_memory(f"Maximum Value: {metrics['max_val']}", indent=indent)
+ if "std_dev" in metrics:
+ self.add_text_memory(f"Standard Deviation: ±{metrics['std_dev']}", indent=indent)
+ if "std_dev_percent" in metrics:
+ self.add_text_memory(f"Standard Deviation Percent: {metrics['std_dev_percent']} %", indent=indent)
+ if "mean_absolute_error" in metrics:
+ self.add_text_memory(f"Mean Absolute Error: {metrics['mean_absolute_error']}", indent=indent)
+ if "mean_absolute_percent_error" in metrics:
+ self.add_text_memory(f"Mean Absolute Percent Error: {metrics['mean_absolute_percent_error']} %", indent=indent)
+ if "max_2std_error" in metrics:
+ self.add_text_memory(f"Max Error 95% of the Time: {metrics['max_2std_error']}", indent=indent)
+ if "max_2std_percent_error" in metrics:
+ self.add_text_memory(f"Max Percent Error 95% of the Time: {metrics['max_2std_percent_error']} %", indent=indent, bold=True)
+ if "max_3std_error" in metrics:
+ self.add_text_memory(f"Max Error 99.73% of the Time: {metrics['max_3std_error']}", indent=indent)
+ if "max_3std_percent_error" in metrics:
+ self.add_text_memory(f"Max Percent Error 99.73% of the Time: {metrics['max_3std_percent_error']} %", indent=indent)
+
+
+
+ def add_metrics_table_memory(self, title, metrics_list, column_labels) -> None:
+ dict_to_add = {
+ "type": "metrics_table",
+ "title": title,
+ "metrics_list": metrics_list,
+ "column_labels": column_labels
+ }
+ self.stored_memory.append(dict_to_add)
+
diff --git a/code/System_Uncertainty_Monte_Carlo.py b/code/System_Uncertainty_Monte_Carlo.py
new file mode 100644
index 0000000..6944417
--- /dev/null
+++ b/code/System_Uncertainty_Monte_Carlo.py
@@ -0,0 +1,672 @@
+import multiprocessing
+import os
+import shutil
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.ticker as ticker
+
+from PDF_Generator import PDFGenerator
+
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+ from inputs.Inputs import Input
+
+class SystemUncertaintyMonteCarlo():
+ def __init__(self, system_input: 'Input'):
+ self.system_input = system_input
+ self.true_value = self.system_input.get_true()
+
+ output_dir = "Output Files"
+
+ # Ensure the output directory is fresh each time
+ if os.path.exists(output_dir):
+ shutil.rmtree(output_dir) # Remove the existing directory with all its content
+
+ # Recreate the directory
+ os.makedirs(output_dir)
+
+ # Setup the PDF Generator
+ self.pdf = PDFGenerator()
+ self.pdf.add_page()
+ self.pdf_report_file = 'Output Files\\System Uncertainty Report.pdf'
+
+ self._generate_system_setup()
+
+ def save_report(self) -> None:
+ '''Saves the pdf'''
+ self.pdf.make_pdf_from_memory(self.pdf_report_file)
+ print(f"Saved PDF to {self.pdf_report_file}")
+
+ def _generate_definitions(self) -> None:
+ self.pdf.add_header_memory("Definitions")
+ self.pdf.add_text_memory("True System Output Value", bold=True, newline=False)
+ self.pdf.add_text_memory("True output of the system without any noise.", indent=1)
+
+ self.pdf.add_text_memory("Average of Monte-Carlo", bold=True, newline=False)
+ self.pdf.add_text_memory("Average value of all runs with applied error.", indent=1)
+
+ self.pdf.add_text_memory("Max Error 95% of the Time/max_2std_error", bold=True, newline=False)
+ self.pdf.add_text_memory("95% of all readings are within this error of the true value.", indent=1)
+
+ self.pdf.add_text_memory("Max Error 99.73% of the Time/max_3std_error", bold=True, newline=False)
+ self.pdf.add_text_memory("99.73% of all readings are within this error of the true value.", indent=1)
+
+ self.pdf.add_text_memory("Isolated Sensitivity Analysis", bold=True, newline=False)
+ self.pdf.add_text_memory("Only the specified input has applied error which has an 'error gain' multiplied to only its error. Every other input has zero applied error.", indent=1)
+
+ self.pdf.add_text_memory("Non-Isolated Sensitivity Analysis", bold=True, newline=False)
+ self.pdf.add_text_memory("Every input has applied error. The specified input has an 'error gain' multiplied to only its error.", indent=1)
+
+ self.pdf.add_text_memory("Sensitivity Ratio", bold=True, newline=False)
+ self.pdf.add_text_memory("The ratio of (specified error gain):(error with error gain = 1).", indent=1)
+
+ self.pdf.add_text_memory("Confidence Level of Regression Fiting", bold=True, newline=False)
+ self.pdf.add_text_memory("The regression fits up to a 4th degree polynomial, exponential, or log function. Confidence is 1 / (1st best RMSE):(2nd best RMSE).", indent=1)
+
+ self.pdf.add_text_memory("Range Analysis", bold=True, newline=False)
+ self.pdf.add_text_memory("The specified input has its 'true value' swept over a range while other inputs are held constant. Error is applied throughout the system.", indent=1)
+
+ def _generate_system_setup(self) -> None:
+ self._generate_definitions()
+
+ self.pdf.add_page_memory()
+ print("System Governing Equation:")
+ self.pdf.add_header_memory("System Governing Equation")
+
+ arithmetic_string = self.system_input.get_arithmetic()
+ print(arithmetic_string)
+
+ self.pdf.add_text_memory(arithmetic_string)
+
+ print("\n")
+ print("System Error Settings:")
+ self.pdf.add_newline_memory()
+ self.pdf.add_header_memory("System Error Settings")
+
+ string = "\t"
+ string += self.system_input.get_input_errors()
+ string = string.replace("\n\n\n", "\n\n")
+ string = string.replace("\n", "\n\t")
+ string = string.rstrip() # Remove trailing newlines and other whitespace characters
+ print(string)
+ print("\n"*3)
+
+ self.pdf.add_text_memory(string)
+
+
+ def _print_and_save(self, text: str, indent: int = 0) -> None:
+ '''Prints a string to the terminal and to a file'''
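+        # Note: relies on self.report_file, which is never initialized in this class; this helper is not called within this module.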
+ indents = "\t" * indent
+ print(indents, end="")
+ print(text)
+ with open(self.report_file, 'a', encoding='utf-8') as f: # Open file in append mode with UTF-8 encoding
+ f.write(indents)
+ f.write(text + '\n')
+
+ def _calculate_metrics(self, results: list[float]) -> dict:
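+        """Computes summary statistics of the Monte-Carlo results relative to the noise-free true value.
+        max_2std_error is the larger deviation of (mean ± 2*std_dev) from the true value, i.e. the error
+        bound that roughly 95% of readings fall within for an approximately normal distribution."""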
+ results_array = np.array(results)
+
+ # Calculate key statistics
+ average = np.mean(results_array)
+ average_percent_difference = (average - self.true_value) / self.true_value * 100
+ std_dev = np.std(results_array)
+ median = np.median(results_array)
+ min_val = np.min(results_array)
+ max_val = np.max(results_array)
+ std_dev_percent = (std_dev / self.true_value) * 100
+ mean_absolute_error = np.mean(np.abs(results_array - self.true_value))
+ mean_absolute_percent_error = (mean_absolute_error) / self.true_value * 100
+ max_2std_error = max(abs(average + 2*std_dev - self.true_value), abs(average - 2*std_dev - self.true_value))
+ max_2std_percent_error = max_2std_error / self.true_value * 100
+ max_3std_error = max(abs(average + 3*std_dev - self.true_value), abs(average - 3*std_dev - self.true_value))
+ max_3std_percent_error = max_3std_error / self.true_value * 100
+
+ # Organize them into a dictionary
+ metrics = {
+ "true_value": self.true_value,
+ "median": median,
+ "average": average,
+ "average_percent_difference": average_percent_difference,
+ "min_val": min_val,
+ "max_val": max_val,
+ "std_dev": std_dev,
+ "std_dev_percent": std_dev_percent,
+ "mean_absolute_error": mean_absolute_error,
+ "mean_absolute_percent_error": mean_absolute_percent_error,
+ "max_2std_error": max_2std_error,
+ "max_2std_percent_error": max_2std_percent_error,
+ "max_3std_error": max_3std_error,
+ "max_3std_percent_error": max_3std_percent_error,
+ }
+
+
+ return metrics
+
+
+
+ def _run_system_monte_carlo(self, num_of_tests) -> list[float]:
+ results = []
+ for _ in range(num_of_tests):
+            try:
+                value = self.system_input.get_reading()
+                results.append(value)
+            except Exception:
+                # Skip any run whose sampled inputs raise an error
+                continue
+ return results
+
+ def _run_system_monte_carlo_multiprocessed(self, num_of_tests: int, num_of_processes) -> list[float]:
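+        """Splits num_of_tests as evenly as possible across a multiprocessing.Pool of
+        num_of_processes workers, then flattens and returns the combined results."""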
+ if num_of_processes == 1:
+ return self._run_system_monte_carlo(num_of_tests)
+
+ # Split the number of tests among the processes
+ tests_per_process = num_of_tests // num_of_processes
+ remaining_tests = num_of_tests % num_of_processes
+
+ # Create a list to store the number of tests each process will run
+ tasks = [tests_per_process] * num_of_processes
+ for i in range(remaining_tests):
+ tasks[i] += 1
+
+ # Create a pool of worker processes
+ with multiprocessing.Pool(processes=num_of_processes) as pool:
+ # Map the tasks to the worker processes
+ results = pool.starmap(self._run_system_monte_carlo, [(task,) for task in tasks])
+
+ # Flatten the list of results
+ results = np.array([item for sublist in results for item in sublist])
+ results_without_none = np.array([item for item in results if item is not None])
+
+ return results_without_none
+
+ def _generate_system_histogram(self, results: list[float], with_outliers: bool = True) -> str:
+ results = np.array(results)
+
+ # Calculate IQR and identify outliers
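+        # Tukey's rule: a point is an outlier if it lies more than 1.5*IQR outside the interquartile range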
+ Q1 = np.percentile(results, 25)
+ Q3 = np.percentile(results, 75)
+ IQR = Q3 - Q1
+ outliers = (results < (Q1 - 1.5 * IQR)) | (results > (Q3 + 1.5 * IQR))
+
+ # Filter results if outliers are not included
+ if not with_outliers:
+ results = results[~outliers]
+
+ plt.figure(figsize=(18, 6))
+
+ # Histogram of filtered data
+ plt.hist(results, bins=100, edgecolor='black', alpha=0.7)
+ plt.axvline(x=self.true_value, color='r', linestyle='--', label='True Value')
+ plt.title('Histogram of System Results' + (' with Outliers' if with_outliers else ' without Outliers'))
+ plt.xlabel('System Output')
+ plt.ylabel('Frequency')
+ plt.legend() # Show the legend for the true value line
+
+ # Save the figure to a file
+ if not os.path.exists("Output Files\\System"):
+ os.makedirs("Output Files\\System") # Create the directory if it does not exist
+ filename = "Output Files\\System\\Histogram" + (' with Outliers' if with_outliers else ' without Outliers') + ".png"
+ print(f"\tSaving '{filename}'")
+ plt.savefig(filename, dpi=500)
+ plt.close()
+
+ return filename
+
+ def _generate_system_scatter_plot(self, results: list[float], with_outliers: bool = True) -> str:
+ '''Makes a scatter plot and saves. Returns the file name it saves to'''
+ results = np.array(results)
+
+ # Calculate IQR and identify outliers
+ Q1 = np.percentile(results, 25)
+ Q3 = np.percentile(results, 75)
+ IQR = Q3 - Q1
+ outliers = (results < (Q1 - 1.5 * IQR)) | (results > (Q3 + 1.5 * IQR))
+
+ # Filter results if outliers are not included
+        if not with_outliers:
+            results = results[~outliers]
+
+        # If there are more than 1000 results, randomly sample 1000 of them for plotting
+        if len(results) > 1000:
+            results_filtered = np.random.choice(results, 1000, replace=False)
+        else:
+            results_filtered = results
+
+ # Generate indices for x-axis
+ indices = np.arange(len(results_filtered))
+
+ # Calculate mean and standard deviations
+ mean = np.mean(results)
+ std_dev = np.std(results)
+
+ # Scatter plot
+ plt.figure(figsize=(10, 6))
+ plt.scatter(indices, results_filtered, alpha=0.6, marker='o')
+ plt.axhline(y=self.true_value, color='r', linestyle='--', label='True Value')
+        plt.axhline(y=mean + std_dev, color='black', linestyle='--', label='±1 Std Dev')
+ plt.axhline(y=mean - std_dev, color='black', linestyle='--')
+ plt.title('Scatter Plot of System Results' + (' with Outliers' if with_outliers else ' without Outliers'))
+ plt.xlabel('Run #')
+ plt.ylabel('System Output')
+ plt.legend()
+
+ # Save the figure to a file
+ if not os.path.exists("Output Files\\System"):
+ os.makedirs("Output Files\\System") # Create the directory if it does not exist
+ filename = "Output Files\\System\\Scatter" + (' with Outliers' if with_outliers else ' without Outliers') + ".png"
+ print(f"\tSaving '{filename}'")
+ plt.savefig(filename, dpi=500)
+ plt.close()
+
+ return filename
+
+ def _generate_system_report(self, metrics: dict, indent: int = 0) -> None:
+ self.pdf.add_metrics_list_memory(metrics, indent)
+
+ def perform_system_analysis(self, monte_carlo_settings:dict = dict()) -> None:
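+        """Runs the full-system Monte-Carlo, queues the metric summary, and queues histogram and
+        scatter plots (with and without outliers) for the PDF report."""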
+ num_of_tests: int = monte_carlo_settings.get("num_runs", 1_000)
+ num_of_processes: int = monte_carlo_settings.get("num_processes", 10)
+
+ print(f"System Monte-Carlo Results:")
+
+ self.pdf.add_page_memory()
+ self.pdf.add_header_memory("Entire System Text Results", level=2)
+
+
+ results = self._run_system_monte_carlo_multiprocessed(num_of_tests, num_of_processes)
+
+ metrics = self._calculate_metrics(results)
+ self._generate_system_report(metrics, indent=1)
+
+ print(f"Plotting Entire Monte-Carlo Results:")
+
+ self.pdf.add_page_memory()
+ self.pdf.add_header_memory("Entire System Histograms", level=2)
+ filename = self._generate_system_histogram(results, with_outliers=True)
+ self.pdf.add_centered_image_memory(filename, width_ratio=1.15)
+
+ filename = self._generate_system_histogram(results, with_outliers=False)
+ self.pdf.add_centered_image_memory(filename, width_ratio=1.15)
+
+ self.pdf.add_page_memory()
+ self.pdf.add_header_memory("Entire System Scatter Plots", level=2)
+ filename = self._generate_system_scatter_plot(results, with_outliers=True)
+ self.pdf.add_centered_image_memory(filename, width_ratio=1)
+
+        filename = self._generate_system_scatter_plot(results, with_outliers=False)
+ self.pdf.add_centered_image_memory(filename, width_ratio=1)
+
+ print("\n")
+
+
+
+
+
+
+ def _fit_to_polynomial(self, x_values: np.ndarray, y_values: np.ndarray) -> tuple[str, float, str]:
+ """
+ Fit the given x and y values to various models up to a polynomial degree, determine which fits best,
+ provide a confidence measure based on RMSE comparisons, and return the best fit function as a string up to the degree of the best model.
+
+ Parameters:
+        x_values (np.ndarray): Array of x values; must be positive because the logarithmic model uses ln(x).
+        y_values (np.ndarray): Array of y values; must be positive because the exponential model fits ln(y).
+
+ Returns:
+ tuple: The best fitting model type, a confidence score, and the best fit function as a string.
+ """
+
+ models = {
+ "constant": np.ones_like(x_values),
+ "linear": x_values,
+ "quadratic": x_values**2,
+ "cubic": x_values**3,
+ "quartic": x_values**4,
+ "exponential": x_values, # will use log(y) for fitting
+ "logarithmic": np.log(x_values)
+ }
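+        # Each model is fitted by linear least squares on [1, model_values]; the exponential model
+        # fits ln(y) against x, and the logarithmic model fits y against ln(x).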
+
+ best_model = None
+ min_rmse = np.inf
+ rmse_values = {}
+ coefficients = {}
+
+ # Initial fit to find the best model type
+ for model_name, model_values in models.items():
+ # Prepare the response variable and design matrix
+ if model_name == "exponential":
+ transformed_y = np.log(y_values)
+ else:
+ transformed_y = y_values
+
+ A = np.column_stack((np.ones_like(x_values), model_values))
+
+ try:
+ # Solve the least squares problem
+ coeffs, _, _, _ = np.linalg.lstsq(A, transformed_y, rcond=None)
+ coefficients[model_name] = coeffs # Store coefficients
+
+ # Predict y values using the model
+ if model_name == "exponential":
+ y_pred = np.exp(A @ coeffs)
+ else:
+ y_pred = A @ coeffs
+
+ # Calculate RMSE
+ rmse = np.sqrt(np.mean((y_values - y_pred)**2))
+ rmse_values[model_name] = rmse
+
+ # Update best model if current RMSE is lower
+ if rmse < min_rmse:
+ min_rmse = rmse
+ best_model = model_name
+ except np.linalg.LinAlgError:
+ print(f"SVD did not converge for the {model_name} model.")
+ continue
+
+ # Construct a new polynomial up to the degree of the best model, if it's a polynomial
+ indexes = ["constant", "linear", "quadratic", "cubic", "quartic"]
+ if best_model in ["constant", "linear", "quadratic", "cubic", "quartic"]:
+ degree = indexes.index(best_model)
+ model_terms = np.column_stack([x_values**i for i in range(degree + 1)])
+ coeffs, _, _, _ = np.linalg.lstsq(model_terms, y_values, rcond=None)
+
+ # Generate the function string
+ function_str = " + ".join(f"{coeffs[i]:.4f}*x^{i}" if i > 0 else f"{coeffs[i]:.4f}" for i in range(degree + 1))
+
+ elif best_model == "exponential":
+ function_str = f"{coefficients[best_model][0]:.4f} + {coefficients[best_model][1]:.4f}*e^x"
+ elif best_model == "logarithmic":
+ function_str = f"{coefficients[best_model][0]:.4f} + {coefficients[best_model][1]:.4f}*ln(x)"
+
+ # Calculate confidence measure
+ rmse_values_list = [value for value in rmse_values.values()]
+ rmse_values_list.sort()
+ # average_rmse = (np.sum(list(rmse_values.values())) - min_rmse) / (len(rmse_values) - 1)
+ ratio = rmse_values_list[1] / min_rmse if min_rmse != 0 else 0
+ confidence = ratio * (1 / (ratio + 1))
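+        # With a non-zero best RMSE, ratio = (2nd-best RMSE)/(best RMSE) >= 1, so confidence = ratio/(ratio + 1) lies in [0.5, 1).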
+
+ return best_model, confidence, function_str
+
+ def _generate_plot(self, x, x_label, y, y_label, title, directory: str, log=False) -> str:
+ # Function to create plots
+ if not os.path.exists(directory):
+ os.makedirs(directory) # Create the directory if it does not exist
+
+ plt.figure(figsize=(10, 5))
+ plt.plot(x, y, marker='o')
+ plt.xlabel(x_label)
+ plt.ylabel(y_label)
+ plt.title(title)
+ plt.grid(True)
+
+ if log: # Set y-axis to logarithmic scale
+ plt.yscale('log')
+ plt.gca().yaxis.set_major_formatter(ticker.ScalarFormatter())
+
+ filename = f"{directory}\\{title}{' log' if log else ''}.png"
+ plt.savefig(filename, dpi=500) # Save the figure
+ plt.close() # Close the plot to free up memory
+ print(f"\tSaving '{filename}'",)
+ return filename
+
+ def _generate_sensitivity_plots(self, gain_values: np.ndarray, metrics_list: list[dict], name: str, directory: str) -> list[float]:
+ # Finding the index for gain_value = 1
+ index_gain_one = np.where(gain_values == 1)[0][0]
+
+        # Prepare data for plotting
+        max_2std_error = np.array([m['max_2std_error'] for m in metrics_list])
+        max_2std_percent_error = [m['max_2std_percent_error'] for m in metrics_list]
+
+        # Calculate ratios relative to an error gain of 1
+        max_2std_error_ratio = max_2std_error / max_2std_error[index_gain_one]
+
+ # Function to create plots
+ if not os.path.exists(directory):
+ os.makedirs(directory) # Create the directory if it does not exist
+
+ # Plotting absolute metrics
+ filename_1 = self._generate_plot(gain_values, "Error Gain", max_2std_percent_error, 'Max 2σ Error Percent (%)', f"Sensitivity Analysis on {name}", directory, log=False)
+ filename_2 = self._generate_plot(gain_values, "Error Gain", max_2std_percent_error, 'Max 2σ Error Percent (%)', f"Sensitivity Analysis on {name}", directory, log=True)
+
+ # Plotting relative metrics (ratios)
+ filename_3 = self._generate_plot(gain_values, "Error Gain", max_2std_error_ratio, 'Max 2σ Error Ratio', f"Sensitivity Ratio Analysis on {name}", directory, log=False)
+ filename_4 = self._generate_plot(gain_values, "Error Gain", max_2std_error_ratio, 'Max 2σ Error Ratio', f"Sensitivity Ratio Analysis on {name}", directory, log=True)
+
+ return [filename_1, filename_2, filename_3, filename_4]
+
+ def _generate_gain_values(self, gain_settings) -> np.ndarray:
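+        """Builds the array of error-gain values from [min_gain, max_gain, num_points]. If the range
+        straddles 1, half the points go on each side of 1; a gain of exactly 1 is always included so
+        the sensitivity ratio has a baseline."""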
+ min_gain = gain_settings[0]
+ max_gain = gain_settings[1]
+ num_points = gain_settings[2]
+
+ if min_gain < 1 < max_gain:
+ # Calculate the number of points for each segment
+ num_points_each = num_points // 2
+
+ # Generate uniformly spaced values between min_gain and 1
+ lower_half = np.linspace(min_gain, 1, num_points_each, endpoint=False)
+
+ # Generate uniformly spaced values between 1 and max_gain
+ upper_half = np.linspace(1, max_gain, num_points_each, endpoint=True)
+
+ # Combine both halves
+ gain_values = np.concatenate((lower_half, upper_half))
+ else:
+ # Generate uniformly spaced values as initially intended
+ gain_values = np.linspace(min_gain, max_gain, num_points)
+
+ # Add 1 to the list if not there
+ if 1 not in gain_values:
+ gain_values = np.sort(np.append(gain_values, 1))
+
+ return gain_values
+
+ def _run_isolated_monte_carlo(self, input_to_isolate: 'Input', num_of_tests: int) -> list[float]:
+ results = []
+ for _ in range(num_of_tests):
+            try:
+                value = self.system_input.get_reading_isolating_input(input_to_isolate)
+                results.append(value)
+            except Exception:
+                # Skip any run whose sampled inputs raise an error
+                continue
+ return results
+
+ def _run_isolated_monte_carlo_multiprocessed(self, input_to_isolate: 'Input', num_of_tests: int, num_of_processes) -> list[float]:
+ if num_of_processes == 1:
+ return self._run_isolated_monte_carlo(input_to_isolate, num_of_tests)
+
+ # Split the number of tests among the processes
+ tests_per_process = num_of_tests // num_of_processes
+ remaining_tests = num_of_tests % num_of_processes
+
+ # Create a list to store the number of tests each process will run
+ tasks = [tests_per_process] * num_of_processes
+ for i in range(remaining_tests):
+ tasks[i] += 1
+
+ # Create a pool of worker processes
+ with multiprocessing.Pool(processes=num_of_processes) as pool:
+ # Map the tasks to the worker processes
+ results = pool.starmap(self._run_isolated_monte_carlo, [(input_to_isolate, task) for task in tasks])
+
+ # Flatten the list of results
+ results = np.array([item for sublist in results for item in sublist])
+ results_without_none = np.array([item for item in results if item is not None])
+
+ return results_without_none
+
+ def perform_sensitivity_analysis(self, input_to_analyze: 'Input', gain_settings: list[float], monte_carlo_settings:dict = dict()) -> None:
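+        """For each error-gain value, runs an isolated Monte-Carlo (only input_to_analyze has error)
+        and a non-isolated Monte-Carlo (all inputs have error), queues metric tables and sensitivity
+        plots, and fits a regression to Max 2-sigma Error Percent vs. error gain."""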
+ self.system_input.reset_error_gain()
+
+ num_of_tests: int = monte_carlo_settings.get("num_runs", 1_000)
+ num_of_processes: int = monte_carlo_settings.get("num_processes", 10)
+
+ input_name = input_to_analyze.get_name()
+
+ # Perform isolated analysis where only this input has error
+ print(f"Isolated Sensitivity Analysis on {input_name}:")
+ metrics_list: list = []
+ gain_values = self._generate_gain_values(gain_settings)
+
+ for i in range(len(gain_values)):
+ gain_value = gain_values[i]
+ input_to_analyze.set_error_gain(gain_value)
+ results = self._run_isolated_monte_carlo_multiprocessed(input_to_analyze, num_of_tests, num_of_processes)
+ metrics = self._calculate_metrics(results)
+ metrics_list.append(metrics)
+
+ print(f"\tError Gain of {gain_value}")
+
+ self.pdf.add_page_memory()
+ self.pdf.add_header_memory(f"Isolated Sensitivity Analysis on {input_name}", level=1)
+ self.pdf.add_metrics_table_memory(f"Impact of {input_name} Noise Gain on System without Noise", metrics_list, gain_values)
+
+ directory = f"Output Files\\{input_name}\\Isolated"
+ print(f"\nPlotting Isolated Sensitivity for {input_name}:")
+ filenames: list = self._generate_sensitivity_plots(gain_values, metrics_list, input_name, directory)
+
+ self.pdf.add_page_memory()
+ self.pdf.add_header_memory(f"Isolated Sensitivity Analysis on {input_name} Plots", level=1)
+ for i in range(len(filenames)):
+ if i % 2 == 0:
+ self.pdf.add_centered_image_memory(filenames[i], width_ratio=1)
+ else:
+ self.pdf.add_newline_memory()
+
+ values = [metric["max_2std_percent_error"] for metric in metrics_list]
+ polynomial, confidence, eq_string = self._fit_to_polynomial(gain_values, values)
+
+ regression_string = f"Relationship between sensor Error Gain and Max 2σ Error Percent (%) is: {polynomial} with a confidence level of {confidence*100:.2f} %."
+ regression_string += f"\nThe equation of best fit is: {eq_string}."
+ self.pdf.add_text_memory(regression_string)
+ print()
+
+
+        # Perform non-isolated analysis where every input has error and this input's error is scaled by the gain
+ print(f"Non-Isolated Sensitivity Analysis on {input_name}:")
+ metrics_list: list = []
+ self.system_input.reset_error_gain()
+ for i in range(len(gain_values)):
+ gain_value = gain_values[i]
+ input_to_analyze.set_error_gain(gain_value)
+ results = self._run_system_monte_carlo_multiprocessed(num_of_tests, num_of_processes)
+ metrics = self._calculate_metrics(results)
+ metrics_list.append(metrics)
+ print(f"\tError Gain of {gain_value}")
+
+ self.pdf.add_page_memory()
+ self.pdf.add_header_memory(f"Non-Isolated Sensitivity Analysis on {input_name}", level=1)
+ self.pdf.add_metrics_table_memory(f"Impact of {input_name} Noise Gain on System with Noise", metrics_list, gain_values)
+
+
+ directory = f"Output Files\\{input_name}\\Non-Isolated"
+ print(f"\nPlotting Non-Isolated Sensitivity for {input_name}:")
+ filenames: list = self._generate_sensitivity_plots(gain_values, metrics_list, input_name, directory)
+
+ self.pdf.add_page_memory()
+ self.pdf.add_header_memory(f"Non-Isolated Sensitivity Analysis on {input_name} Plots", level=1)
+ for i in range(len(filenames)):
+ if i % 2 == 0:
+ self.pdf.add_centered_image_memory(filenames[i], width_ratio=1)
+
+
+ values = [metric["max_2std_percent_error"] for metric in metrics_list]
+ polynomial, confidence, eq_string = self._fit_to_polynomial(gain_values, values)
+
+ regression_string = f"Relationship between sensor Error Gain and Max 2σ Error Percent (%) is: {polynomial} with a confidence level of {confidence*100:.2f} %."
+ regression_string += f"\nThe equation of best fit is: {eq_string}."
+ self.pdf.add_text_memory(regression_string)
+
+ # Print the text report of the metrics
+ print("\n"*3)
+
+
+
+
+
+
+
+ def _plot_range_analysis(self, points, metrics_list, true_values, sensor_name, directory: str) -> str:
+ averages = [metrics['average'] for metrics in metrics_list]
+ std_devs = [metrics['std_dev'] for metrics in metrics_list]
+
+ upper_bound_1std = [avg + std for avg, std in zip(averages, std_devs)]
+ lower_bound_1std = [avg - std for avg, std in zip(averages, std_devs)]
+
+
+ plt.figure(figsize=(12, 6))
+ plt.plot(points, true_values, label='True Value', color="red", linestyle="--")
+ plt.plot(points, averages, label='Average', color='blue')
+        plt.plot(points, upper_bound_1std, label='±1 Std Dev', color='black', linestyle='--')
+ plt.plot(points, lower_bound_1std, color='black', linestyle='--')
+
+ plt.xlabel(f'{sensor_name} Input Value')
+ plt.ylabel('System Output')
+ plt.title(f"Range Analysis on {sensor_name}")
+ plt.legend()
+ plt.grid(True)
+
+ # Save the figure
+ if not os.path.exists(directory):
+ os.makedirs(directory) # Create the directory if it does not exist
+ filename = f"{directory}\\Range Analysis on {sensor_name}.png"
+ plt.savefig(filename, dpi=500)
+ plt.close()
+
+ print(f"Range analysis plot saved to '{filename}'")
+
+ return filename
+
+
+ def perform_range_analysis(self, input_to_analyze: 'Input', range_settings: list[float], monte_carlo_settings:dict = dict()) -> None:
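+        """Sweeps input_to_analyze's true value over range_settings = [min, max, num_points], runs the
+        system Monte-Carlo at each point, and queues a plot of the average output and ±1 std dev band
+        against the swept input value."""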
+ self.system_input.reset_error_gain()
+
+ num_of_tests: int = monte_carlo_settings.get("num_runs", 1_000)
+ num_of_processes: int = monte_carlo_settings.get("num_processes", 10)
+
+ input_name = input_to_analyze.get_name()
+ directory = f"Output Files\\{input_name}\\Range Analysis"
+
+        # Sweep this input's true value over the specified range while all inputs keep their normal error
+ print(f"Range Analysis on {input_name}:")
+
+ # get the original true value to return back to
+ original_true_value = input_to_analyze.get_true()
+
+ # Generate a uniform list using range_settings
+ min_val, max_val, num_points = range_settings
+ points = np.linspace(min_val, max_val, num_points)
+
+ metrics_list = []
+ true_values = []
+
+ for point in points:
+ print(f"\tTest Value is {point}")
+ # Set the input to the current point in the range
+ input_to_analyze.set_true(point)
+
+ true_value = self.system_input.get_true()
+ true_values.append(true_value)
+
+ # Run the Monte Carlo simulation for the current point
+ results = self._run_system_monte_carlo_multiprocessed(num_of_tests, num_of_processes)
+
+ # Calculate metrics for the current results
+ metrics = self._calculate_metrics(results)
+ metrics_list.append(metrics)
+
+ # Plot the results
+ filename = self._plot_range_analysis(points, metrics_list, true_values, input_name, directory)
+
+ # Reset the original true value
+ input_to_analyze.set_true(original_true_value)
+
+ self.pdf.add_page_memory()
+ self.pdf.add_header_memory(f"Range Analysis on {input_name}", level=1)
+ self.pdf.add_centered_image_memory(filename, width_ratio=1)
+ self.pdf.add_newline_memory()
+
+ print()
\ No newline at end of file
diff --git a/code/__pycache__/Flow_Speed_Sensors.cpython-311.pyc b/code/__pycache__/Flow_Speed_Sensors.cpython-311.pyc
new file mode 100644
index 0000000..c874d26
Binary files /dev/null and b/code/__pycache__/Flow_Speed_Sensors.cpython-311.pyc differ
diff --git a/code/__pycache__/Fluid_Lookup.cpython-311.pyc b/code/__pycache__/Fluid_Lookup.cpython-311.pyc
new file mode 100644
index 0000000..b5bec62
Binary files /dev/null and b/code/__pycache__/Fluid_Lookup.cpython-311.pyc differ
diff --git a/code/__pycache__/Force_Sensors.cpython-311.pyc b/code/__pycache__/Force_Sensors.cpython-311.pyc
new file mode 100644
index 0000000..b6308ba
Binary files /dev/null and b/code/__pycache__/Force_Sensors.cpython-311.pyc differ
diff --git a/code/__pycache__/Inputs.cpython-311.pyc b/code/__pycache__/Inputs.cpython-311.pyc
new file mode 100644
index 0000000..80d777c
Binary files /dev/null and b/code/__pycache__/Inputs.cpython-311.pyc differ
diff --git a/code/__pycache__/Math_Functions.cpython-311.pyc b/code/__pycache__/Math_Functions.cpython-311.pyc
new file mode 100644
index 0000000..af49d7c
Binary files /dev/null and b/code/__pycache__/Math_Functions.cpython-311.pyc differ
diff --git a/code/__pycache__/PDF_Generator.cpython-311.pyc b/code/__pycache__/PDF_Generator.cpython-311.pyc
new file mode 100644
index 0000000..50eb439
Binary files /dev/null and b/code/__pycache__/PDF_Generator.cpython-311.pyc differ
diff --git a/code/__pycache__/Pressure_Sensors.cpython-311.pyc b/code/__pycache__/Pressure_Sensors.cpython-311.pyc
new file mode 100644
index 0000000..fa5787e
Binary files /dev/null and b/code/__pycache__/Pressure_Sensors.cpython-311.pyc differ
diff --git a/code/__pycache__/Sensors.cpython-311.pyc b/code/__pycache__/Sensors.cpython-311.pyc
new file mode 100644
index 0000000..faa3064
Binary files /dev/null and b/code/__pycache__/Sensors.cpython-311.pyc differ
diff --git a/code/__pycache__/System_Uncertainty_Monte_Carlo.cpython-311.pyc b/code/__pycache__/System_Uncertainty_Monte_Carlo.cpython-311.pyc
new file mode 100644
index 0000000..8d47942
Binary files /dev/null and b/code/__pycache__/System_Uncertainty_Monte_Carlo.cpython-311.pyc differ
diff --git a/code/__pycache__/Temperature_Sensors.cpython-311.pyc b/code/__pycache__/Temperature_Sensors.cpython-311.pyc
new file mode 100644
index 0000000..5e8927a
Binary files /dev/null and b/code/__pycache__/Temperature_Sensors.cpython-311.pyc differ
diff --git a/code/_example_mass_flow_rate.py.py b/code/_example_mass_flow_rate.py.py
new file mode 100644
index 0000000..b8fc36a
--- /dev/null
+++ b/code/_example_mass_flow_rate.py.py
@@ -0,0 +1,129 @@
+import matplotlib.pyplot as plt
+import math
+
+from inputs.Inputs import AveragingInput, PhysicalInput
+from inputs.Pressure_Sensors import PressureTransmitter
+from inputs.Temperature_Sensors import RTD, Thermocouple
+from inputs.Force_Sensors import LoadCell
+from inputs.Flow_Speed_Sensors import TurbineFlowMeter
+from inputs.Fluid_Lookup import DensityLookup
+from inputs.Math_Functions import Integration
+
+from System_Uncertainty_Monte_Carlo import SystemUncertaintyMonteCarlo
+
+from typing import List, Tuple
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+ from inputs.Inputs import Input, CombinedInput
+
+
+def plot_sensitivity_analysis(gain_values: list, stdev_values: list, normal_stdev: float, sensor_name: str):
+ # Plotting the standard deviation values against the gain settings
+ plt.figure(figsize=(10, 6))
+ plt.plot(gain_values, stdev_values, marker='o', linestyle='-', color='b')
+ plt.axhline(y=normal_stdev, color='r', linestyle='--', label='Normal System STD Dev')
+ plt.title(f'Sensitivity Analysis for Error Gain on {sensor_name}')
+ plt.xlabel('Error Gain')
+ plt.ylabel('Standard Deviation of Output')
+    plt.grid(True)
+    plt.legend()
+    plt.show()
+
+
+def main(num_of_tests: int = 1000, num_of_processes: int = 1):
+ # Define true values and sensor error ranges
+ pt_1_errors = {
+ "k": 2, # How many standard deviations each of these errors represents (if applicable)
+ "fso": 100, # full scale output so that stdev can be calculated for errors based on FSO
+ "static_error_band": 0.15, # +-% of FSO
+ "repeatability": 0.02, # +-% of FSO
+ "hysteresis": 0.07, # +-% of FSO
+ "non-linearity": 0.15, # +-% of FSO
+ "temperature_zero_error": 0.005, # +% of FSO per degree F off from calibration temp
+ "temperature_offset": 30, # Degrees F off from calibration temp
+ }
+
+ RTD_1_errors = {
+ "k": 3, # How many standard deviations each of these errors represents (if applicable)
+ "class": "A", # class AA, A, B, C
+ }
+
+ flow_1_errors = {
+ "k": 2, # How many standard deviations each of these errors represents (if applicable)
+ "fso": 20, # full scale output so that stdev can be calculated for errors based on FSO
+ "static_error_band": 0.25, # +-% of FSO
+ }
+
+ pipe_diameter_error = {
+ "tolerance": 0.05,
+ "cte": 8.8E-6, # degrees F^-1
+ "temperature_offset": -100 # Degrees off from when the measurement was taken
+ }
+
+ # Create sensor instances
+ pressure_transmitter_1 = PressureTransmitter(25, pt_1_errors, name="PT 1")
+ pressure_transmitter_2 = PressureTransmitter(20, pt_1_errors, name="PT 2")
+ temperature_sensor_1 = RTD(250, RTD_1_errors, name="RTD 1")
+
+ average_pressure = AveragingInput([pressure_transmitter_1, pressure_transmitter_2])
+
+ # Create DensityLookup instance
+ density_lookup = DensityLookup(pressure_sensor=average_pressure, temperature_sensor=temperature_sensor_1)
+ flow_speed = TurbineFlowMeter(10, flow_1_errors, "Flow Sensor 1")
+ pipe_diameter = PhysicalInput(1, pipe_diameter_error, name="Pipe Diameter")
+
+ # Define the combined sensor
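+    # Mass flow rate = density * velocity * area, with area = pi * (pipe_diameter / 12 / 2)^2
+    # (presumably a diameter in inches, so /12 converts to feet and /2 gives the radius)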
+ combined_input: CombinedInput = math.pi * density_lookup * flow_speed * (pipe_diameter / 12 / 2)**2
+
+ monte_carlo = SystemUncertaintyMonteCarlo(combined_input)
+
+ monte_carlo.perform_system_analysis(monte_carlo_settings={
+ "num_runs": num_of_tests,
+ "num_processes": num_of_processes
+ })
+
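+    # Use far fewer runs per point here, since the sensitivity and range analyses repeat the Monte-Carlo at every gain/range value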
+ sensitivity_analysis_num_runs = num_of_tests // 100
+
+ monte_carlo.perform_sensitivity_analysis(input_to_analyze=flow_speed,
+ gain_settings=[1, 10, 10],
+ monte_carlo_settings={
+ "num_runs": sensitivity_analysis_num_runs,
+ "num_processes": num_of_processes
+ })
+ monte_carlo.perform_sensitivity_analysis(input_to_analyze=pressure_transmitter_1,
+ gain_settings=[0.1, 10, 10],
+ monte_carlo_settings={
+ "num_runs": sensitivity_analysis_num_runs,
+ "num_processes": num_of_processes
+ })
+ monte_carlo.perform_sensitivity_analysis(input_to_analyze=temperature_sensor_1,
+ gain_settings=[0.01, 1, 10],
+ monte_carlo_settings={
+ "num_runs": sensitivity_analysis_num_runs,
+ "num_processes": num_of_processes
+ })
+
+ monte_carlo.perform_range_analysis(input_to_analyze=pressure_transmitter_1,
+ range_settings=[28, 31, 20],
+ monte_carlo_settings={
+ "num_runs": sensitivity_analysis_num_runs,
+ "num_processes": num_of_processes
+ }
+ )
+
+ monte_carlo.perform_range_analysis(input_to_analyze=temperature_sensor_1,
+ range_settings=[237, 244, 20],
+ monte_carlo_settings={
+ "num_runs": sensitivity_analysis_num_runs,
+ "num_processes": num_of_processes
+ }
+ )
+
+ monte_carlo.save_report()
+
+
+
+
+if __name__ == "__main__":
+ num_of_tests = 1_000_000
+ num_of_processes = 10
+ main(num_of_tests, num_of_processes)
\ No newline at end of file
diff --git a/code/fonts/dejavu-sans-bold.ttf b/code/fonts/dejavu-sans-bold.ttf
new file mode 100644
index 0000000..6d65fa7
Binary files /dev/null and b/code/fonts/dejavu-sans-bold.ttf differ
diff --git a/code/fonts/dejavu-sans-boldoblique.ttf b/code/fonts/dejavu-sans-boldoblique.ttf
new file mode 100644
index 0000000..753f2d8
Binary files /dev/null and b/code/fonts/dejavu-sans-boldoblique.ttf differ
diff --git a/code/fonts/dejavu-sans-condensed.ttf b/code/fonts/dejavu-sans-condensed.ttf
new file mode 100644
index 0000000..3259bc2
Binary files /dev/null and b/code/fonts/dejavu-sans-condensed.ttf differ
diff --git a/code/fonts/dejavu-sans-condensedbold.ttf b/code/fonts/dejavu-sans-condensedbold.ttf
new file mode 100644
index 0000000..22987c6
Binary files /dev/null and b/code/fonts/dejavu-sans-condensedbold.ttf differ
diff --git a/code/fonts/dejavu-sans-condensedboldoblique.ttf b/code/fonts/dejavu-sans-condensedboldoblique.ttf
new file mode 100644
index 0000000..f5fa0ca
Binary files /dev/null and b/code/fonts/dejavu-sans-condensedboldoblique.ttf differ
diff --git a/code/fonts/dejavu-sans-condensedoblique.ttf b/code/fonts/dejavu-sans-condensedoblique.ttf
new file mode 100644
index 0000000..7fde907
Binary files /dev/null and b/code/fonts/dejavu-sans-condensedoblique.ttf differ
diff --git a/code/fonts/dejavu-sans-extralight.ttf b/code/fonts/dejavu-sans-extralight.ttf
new file mode 100644
index 0000000..b09f32d
Binary files /dev/null and b/code/fonts/dejavu-sans-extralight.ttf differ
diff --git a/code/fonts/dejavu-sans-mono-bold.ttf b/code/fonts/dejavu-sans-mono-bold.ttf
new file mode 100644
index 0000000..8184ced
Binary files /dev/null and b/code/fonts/dejavu-sans-mono-bold.ttf differ
diff --git a/code/fonts/dejavu-sans-mono.ttf b/code/fonts/dejavu-sans-mono.ttf
new file mode 100644
index 0000000..f578602
Binary files /dev/null and b/code/fonts/dejavu-sans-mono.ttf differ
diff --git a/code/fonts/dejavu-sans-monoboldoblique.ttf b/code/fonts/dejavu-sans-monoboldoblique.ttf
new file mode 100644
index 0000000..754dca7
Binary files /dev/null and b/code/fonts/dejavu-sans-monoboldoblique.ttf differ
diff --git a/code/fonts/dejavu-sans-monooblique.ttf b/code/fonts/dejavu-sans-monooblique.ttf
new file mode 100644
index 0000000..4c858d4
Binary files /dev/null and b/code/fonts/dejavu-sans-monooblique.ttf differ
diff --git a/code/fonts/dejavu-sans-oblique.ttf b/code/fonts/dejavu-sans-oblique.ttf
new file mode 100644
index 0000000..999bac7
Binary files /dev/null and b/code/fonts/dejavu-sans-oblique.ttf differ
diff --git a/code/fonts/dejavu-sans.ttf b/code/fonts/dejavu-sans.ttf
new file mode 100644
index 0000000..e5f7eec
Binary files /dev/null and b/code/fonts/dejavu-sans.ttf differ
diff --git a/code/fonts/dejavuserif-bold.ttf b/code/fonts/dejavuserif-bold.ttf
new file mode 100644
index 0000000..3bb755f
Binary files /dev/null and b/code/fonts/dejavuserif-bold.ttf differ
diff --git a/code/fonts/dejavuserif-bolditalic.ttf b/code/fonts/dejavuserif-bolditalic.ttf
new file mode 100644
index 0000000..a36dd4b
Binary files /dev/null and b/code/fonts/dejavuserif-bolditalic.ttf differ
diff --git a/code/fonts/dejavuserif-italic.ttf b/code/fonts/dejavuserif-italic.ttf
new file mode 100644
index 0000000..805daf2
Binary files /dev/null and b/code/fonts/dejavuserif-italic.ttf differ
diff --git a/code/fonts/dejavuserif.ttf b/code/fonts/dejavuserif.ttf
new file mode 100644
index 0000000..0b803d2
Binary files /dev/null and b/code/fonts/dejavuserif.ttf differ
diff --git a/code/fonts/dejavuserifcondensed-bold.ttf b/code/fonts/dejavuserifcondensed-bold.ttf
new file mode 100644
index 0000000..222bf13
Binary files /dev/null and b/code/fonts/dejavuserifcondensed-bold.ttf differ
diff --git a/code/fonts/dejavuserifcondensed-bolditalic.ttf b/code/fonts/dejavuserifcondensed-bolditalic.ttf
new file mode 100644
index 0000000..e446636
Binary files /dev/null and b/code/fonts/dejavuserifcondensed-bolditalic.ttf differ
diff --git a/code/fonts/dejavuserifcondensed-italic.ttf b/code/fonts/dejavuserifcondensed-italic.ttf
new file mode 100644
index 0000000..c529df3
Binary files /dev/null and b/code/fonts/dejavuserifcondensed-italic.ttf differ
diff --git a/code/fonts/dejavuserifcondensed.ttf b/code/fonts/dejavuserifcondensed.ttf
new file mode 100644
index 0000000..d3959b3
Binary files /dev/null and b/code/fonts/dejavuserifcondensed.ttf differ
diff --git a/code/fonts/mplus-1mn-bold.ttf b/code/fonts/mplus-1mn-bold.ttf
new file mode 100644
index 0000000..5f26678
Binary files /dev/null and b/code/fonts/mplus-1mn-bold.ttf differ
diff --git a/code/fonts/mplus-1mn-medium.ttf b/code/fonts/mplus-1mn-medium.ttf
new file mode 100644
index 0000000..a85c66b
Binary files /dev/null and b/code/fonts/mplus-1mn-medium.ttf differ
diff --git a/code/fonts/mplus-1mn-regular.ttf b/code/fonts/mplus-1mn-regular.ttf
new file mode 100644
index 0000000..394fb2f
Binary files /dev/null and b/code/fonts/mplus-1mn-regular.ttf differ
diff --git a/code/fonts/mplus-1mn-thin.ttf b/code/fonts/mplus-1mn-thin.ttf
new file mode 100644
index 0000000..de41c15
Binary files /dev/null and b/code/fonts/mplus-1mn-thin.ttf differ
diff --git a/code/fonts/mplus-1p-regular.ttf b/code/fonts/mplus-1p-regular.ttf
new file mode 100644
index 0000000..b64dee2
Binary files /dev/null and b/code/fonts/mplus-1p-regular.ttf differ
diff --git a/code/fonts/notoserif-bold.ttf b/code/fonts/notoserif-bold.ttf
new file mode 100644
index 0000000..580ff28
Binary files /dev/null and b/code/fonts/notoserif-bold.ttf differ
diff --git a/code/fonts/notoserif-bold_italic.ttf b/code/fonts/notoserif-bold_italic.ttf
new file mode 100644
index 0000000..32d38af
Binary files /dev/null and b/code/fonts/notoserif-bold_italic.ttf differ
diff --git a/code/fonts/notoserif-italic.ttf b/code/fonts/notoserif-italic.ttf
new file mode 100644
index 0000000..1d3cc3a
Binary files /dev/null and b/code/fonts/notoserif-italic.ttf differ
diff --git a/code/fonts/notoserif-regular.ttf b/code/fonts/notoserif-regular.ttf
new file mode 100644
index 0000000..a1c6f10
Binary files /dev/null and b/code/fonts/notoserif-regular.ttf differ
diff --git a/code/fonts/opensans-bold.ttf b/code/fonts/opensans-bold.ttf
new file mode 100644
index 0000000..7b52945
Binary files /dev/null and b/code/fonts/opensans-bold.ttf differ
diff --git a/code/fonts/opensans-bolditalic.ttf b/code/fonts/opensans-bolditalic.ttf
new file mode 100644
index 0000000..a670e14
Binary files /dev/null and b/code/fonts/opensans-bolditalic.ttf differ
diff --git a/code/fonts/opensans-extrabold.ttf b/code/fonts/opensans-extrabold.ttf
new file mode 100644
index 0000000..3660681
Binary files /dev/null and b/code/fonts/opensans-extrabold.ttf differ
diff --git a/code/fonts/opensans-extrabolditalic.ttf b/code/fonts/opensans-extrabolditalic.ttf
new file mode 100644
index 0000000..8c4c15d
Binary files /dev/null and b/code/fonts/opensans-extrabolditalic.ttf differ
diff --git a/code/fonts/opensans-italic.ttf b/code/fonts/opensans-italic.ttf
new file mode 100644
index 0000000..e6c5414
Binary files /dev/null and b/code/fonts/opensans-italic.ttf differ
diff --git a/code/fonts/opensans-light.ttf b/code/fonts/opensans-light.ttf
new file mode 100644
index 0000000..563872c
Binary files /dev/null and b/code/fonts/opensans-light.ttf differ
diff --git a/code/fonts/opensans-lightitalic.ttf b/code/fonts/opensans-lightitalic.ttf
new file mode 100644
index 0000000..5ebe2a2
Binary files /dev/null and b/code/fonts/opensans-lightitalic.ttf differ
diff --git a/code/fonts/opensans-regular.ttf b/code/fonts/opensans-regular.ttf
new file mode 100644
index 0000000..2e31d02
Binary files /dev/null and b/code/fonts/opensans-regular.ttf differ
diff --git a/code/fonts/opensans-semibold.ttf b/code/fonts/opensans-semibold.ttf
new file mode 100644
index 0000000..99db86a
Binary files /dev/null and b/code/fonts/opensans-semibold.ttf differ
diff --git a/code/fonts/opensans-semibolditalic.ttf b/code/fonts/opensans-semibolditalic.ttf
new file mode 100644
index 0000000..8cad4e3
Binary files /dev/null and b/code/fonts/opensans-semibolditalic.ttf differ
diff --git a/code/inputs/Flow_Speed_Sensors.py b/code/inputs/Flow_Speed_Sensors.py
new file mode 100644
index 0000000..1e23d95
--- /dev/null
+++ b/code/inputs/Flow_Speed_Sensors.py
@@ -0,0 +1,9 @@
+import random
+
+from inputs.Inputs import Input
+
+class FlowSpeedSensor(Input):
+ pass
+
+class TurbineFlowMeter(FlowSpeedSensor):
+ pass
\ No newline at end of file
diff --git a/code/inputs/Fluid_Lookup.py b/code/inputs/Fluid_Lookup.py
new file mode 100644
index 0000000..4c87167
--- /dev/null
+++ b/code/inputs/Fluid_Lookup.py
@@ -0,0 +1,134 @@
+import random
+import CoolProp.CoolProp as CP
+
+from inputs.Inputs import Input, AveragingInput
+
+from inputs.Pressure_Sensors import PressureSensor
+from inputs.Temperature_Sensors import TemperatureSensor
+
+def psi_to_pascals(psi: float) -> float:
+ '''Convert PSIA to Pascals'''
+ return psi*6894.7572932
+
+def F_to_K(f: float) -> float:
+ '''Convert Fahrenheit to Kelvin'''
+    return (f - 32) * 5/9 + 273.15
+
+def kg_m3_to_lbm_ft3(kg_m3: float) -> float:
+    '''Converts kg/m^3 to lbm/ft^3'''
+ return kg_m3 * 0.0624279606
+
+class FluidLookup(Input):
+ def __init__(self, sensor1: 'Input', sensor2: 'Input', fluid: str = 'Water', name: str = None):
+ super().__init__(true_value=0.0, name=name)
+ self.sensor1 = sensor1 # Pressure sensor
+ self.sensor2 = sensor2 # Temperature sensor
+ self.fluid = fluid
+
+ self.uncertainties = { # In percent of table value
+ 0: 0.0001, # Liquid phase
+ 1: 0.05, # Vapor
+ 2: 0.05, # Supercritical
+ 3: 0.05, # Supercritical gas
+ 4: 0.05, # Supercritical liquid
+ 5: 0.05, # Two-phase
+ 6: 0.1, # Near critical point
+ 8: 0.05 # Solid
+ }
+
+ def get_input_errors(self) -> str:
+ string = ""
+ if isinstance(self.sensor1, Input):
+ string += f"{self.sensor1.get_input_errors()}\n"
+ if isinstance(self.sensor2, Input):
+ string += f"{self.sensor2.get_input_errors()}\n"
+ return string
+
+ def get_arithmetic(self) -> str:
+ lookup_type = type(self).__name__
+
+ return f"{lookup_type}({self.sensor1.get_arithmetic()}, {self.sensor2.get_arithmetic()})"
+
+class DensityLookup(FluidLookup):
+    def __init__(self, pressure_sensor: 'PressureSensor', temperature_sensor: 'TemperatureSensor', fluid: str = 'Water', name: str = None):
+        display_name = name if name is not None else type(self).__name__
+        if not isinstance(pressure_sensor, PressureSensor):
+            if not (isinstance(pressure_sensor, AveragingInput) and isinstance(pressure_sensor.get_first(), PressureSensor)):
+                raise ValueError(f"Sensor 1 for {display_name} must be a PressureSensor or an AveragingInput of PressureSensors")
+        if not isinstance(temperature_sensor, TemperatureSensor):
+            if not (isinstance(temperature_sensor, AveragingInput) and isinstance(temperature_sensor.get_first(), TemperatureSensor)):
+                raise ValueError(f"Sensor 2 for {display_name} must be a TemperatureSensor or an AveragingInput of TemperatureSensors")
+        super().__init__(pressure_sensor, temperature_sensor, fluid, name=name)
+
+ def calc_error(self, pressure: float, temperature: float, density: float) -> float:
+ # Check the phase of the fluid to determine the uncertainty according to NIST
+ phase_index = int(CP.PropsSI('Phase', 'T', temperature, 'P', pressure, self.fluid))
+
+ uncertainty_percentage = self.uncertainties[phase_index]
+
+ std_dev = density * (uncertainty_percentage / 100)
+
+ return random.gauss(0, std_dev)
+
+ def get_reading(self) -> float:
+ pressure = psi_to_pascals(self.sensor1.get_reading()) # Convert from psi to Pa
+ if pressure < 0:
+ pressure = 0.01
+ temperature = F_to_K(self.sensor2.get_reading()) # Convert from F to K
+ if temperature < 0:
+ temperature = 0.01
+ density = CP.PropsSI('D', 'P', pressure, 'T', temperature, self.fluid)
+ density = kg_m3_to_lbm_ft3(density)
+
+ error = self.calc_error(pressure, temperature, density)
+
+ density += error
+
+ # Append the final value to self.all_readings for final calculations at the end
+ self.all_readings.append(density)
+
+ return density
+
+ def get_reading_isolating_input(self, input_to_isolate: 'Input'):
+ '''Gets true value from input except from the input to isolate'''
+ pressure = psi_to_pascals(self.sensor1.get_reading_isolating_input(input_to_isolate)) # Convert from psi to Pa
+ if pressure < 0:
+ pressure = 0.01
+ temperature = F_to_K(self.sensor2.get_reading_isolating_input(input_to_isolate)) # Convert from F to K
+ if temperature < 0:
+ temperature = 0.01
+ density = CP.PropsSI('D', 'P', pressure, 'T', temperature, self.fluid)
+ density = kg_m3_to_lbm_ft3(density)
+
+ # Append the final value to self.all_readings for final calculations at the end
+ self.all_readings.append(density)
+
+ return density
+
+ def get_true(self) -> float:
+ pressure = psi_to_pascals(self.sensor1.get_true()) # Convert from psi to Pa
+ if pressure < 0:
+ pressure = 0.01
+ temperature = F_to_K(self.sensor2.get_true()) # Convert from F to K
+ if temperature < 0:
+ temperature = 0.01
+ density = CP.PropsSI('D', 'P', pressure, 'T', temperature, self.fluid)
+ density = kg_m3_to_lbm_ft3(density)
+
+ error = self.calc_error(pressure, temperature, density)
+
+ density += error
+
+ # Append the final value to self.all_readings for final calculations at the end
+ self.all_readings.append(density)
+
+ return density
+
+ def reset_error_gain(self) -> None:
+ self.sensor1.reset_error_gain()
+ self.sensor2.reset_error_gain()
\ No newline at end of file
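For orientation, a minimal usage sketch of DensityLookup follows. It is not part of the commit: the sensor specifications, names, and numbers are illustrative assumptions, and it presumes CoolProp is installed and that the script is run from code/ so the inputs package resolves.

# Illustrative sketch only; specs and values below are assumptions, not taken from this repository.
from inputs.Pressure_Sensors import PressureTransmitter
from inputs.Temperature_Sensors import RTD
from inputs.Fluid_Lookup import DensityLookup

# Hypothetical 0-1000 psi transmitter with a 0.25% FSO static error band (k = 2 by default)
pt = PressureTransmitter(true_value=500.0, input_errors={"fso": 1000.0, "static_error_band": 0.25}, name="PT 1")
# Class A RTD reading 80 F (IEC 60751 tolerance applied in RTD.parse_input_errors)
rtd = RTD(true_value=80.0, input_errors={"class": "A"}, name="RTD 1")

density = DensityLookup(pt, rtd, fluid="Water", name="Density 1")
print(density.get_true())     # CoolProp density at the true P/T, in lbm/ft^3, with the NIST table uncertainty applied
print(density.get_reading())  # density recomputed from noisy sensor readings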
diff --git a/code/inputs/Force_Sensors.py b/code/inputs/Force_Sensors.py
new file mode 100644
index 0000000..2a4f294
--- /dev/null
+++ b/code/inputs/Force_Sensors.py
@@ -0,0 +1,9 @@
+import random
+
+from inputs.Inputs import Input
+
+class ForceSensor(Input):
+ pass
+
+class LoadCell(ForceSensor):
+ pass
\ No newline at end of file
diff --git a/code/inputs/Inputs.py b/code/inputs/Inputs.py
new file mode 100644
index 0000000..607b5ce
--- /dev/null
+++ b/code/inputs/Inputs.py
@@ -0,0 +1,392 @@
+from random import gauss
+
+from typing import TYPE_CHECKING
+if TYPE_CHECKING:
+ from inputs.Pressure_Sensors import PressureSensor
+ from inputs.Temperature_Sensors import TemperatureSensor
+ from inputs.Force_Sensors import ForceSensor
+
+
+
+class Input():
+    def __init__(self, true_value: float, input_errors: dict = None, name: str = None):
+        self.true_value = true_value
+        self.input_errors = input_errors if input_errors is not None else {}  # avoid a shared mutable default; parse_input_errors() writes into this dict
+ self.name = name if name is not None else type(self).__name__
+
+ self.parse_input_errors()
+ self.error_gain: float = 1.0
+
+ # Values to be used for data analysis at the end of the Monte Carlo
+ self.all_readings: list[float] = []
+ self.overall_average: float = 0.0
+ self.overall_uncertainty: float = 0.0
+
+
+ def __add__(self, other) -> 'CombinedInput':
+ return CombinedInput(self, other, 'add')
+
+ def __sub__(self, other) -> 'CombinedInput':
+ return CombinedInput(self, other, 'sub')
+
+ def __mul__(self, other) -> 'CombinedInput':
+ return CombinedInput(self, other, 'mul')
+
+ def __truediv__(self, other) -> 'CombinedInput':
+ return CombinedInput(self, other, 'div')
+
+    def __radd__(self, other) -> 'CombinedInput':
+        return CombinedInput(other, self, 'add')
+
+    def __rsub__(self, other) -> 'CombinedInput':
+        return CombinedInput(other, self, 'sub')
+
+    def __rmul__(self, other) -> 'CombinedInput':
+        return CombinedInput(other, self, 'mul')
+
+    def __rtruediv__(self, other) -> 'CombinedInput':
+        return CombinedInput(other, self, 'div')
+
+ def __pow__(self, exponent) -> 'CombinedInput':
+ return CombinedInput(self, exponent, 'pow')
+
+ def __rpow__(self, other)-> 'CombinedInput':
+ return CombinedInput(other, self, 'pow')
+
+ def get_name(self) -> str:
+ return self.name
+
+ def get_input_errors(self) -> str:
+ return self.name + "\n" + str(self.input_errors)
+
+ def get_arithmetic(self) -> str:
+ return "{" + self.get_name() + "}"
+
+ def set_error_gain(self, new_gain: float) -> None:
+ self.error_gain = new_gain
+
+ def parse_input_errors(self) -> None:
+ self.k = self.input_errors.get("k", 2) # By default, assume each uncertainty is given as 2 stdev
+ self.input_errors["k"] = self.k
+
+ # Static error band
+ self.fso = self.input_errors.get("fso", 0.0)
+ self.input_errors["fso"] = self.fso
+ self.static_error_stdev = self.fso * self.input_errors.get("static_error_band", 0.0) / 100 / self.k
+ self.input_errors["static_error_stdev"] = self.static_error_stdev
+
+ # Repeatability error
+ self.repeatability_error_stdev = self.fso * self.input_errors.get("repeatability", 0.0) / 100 / self.k
+ self.input_errors["repeatability_error_stdev"] = self.repeatability_error_stdev
+
+ # Hysteresis error
+ self.hysteresis_error_stdev = self.fso * self.input_errors.get("hysteresis", 0.0) / 100 / self.k
+ self.input_errors["hysteresis_error_stdev"] = self.hysteresis_error_stdev
+
+ # Non-linearity
+ self.nonlinearity_error_stdev = self.fso * self.input_errors.get("non-linearity", 0.0) / 100 / self.k
+ self.input_errors["non-linearity_error_stdev"] = self.nonlinearity_error_stdev
+
+ # Thermal error
+ self.temperature_offset = self.input_errors.get("temperature_offset", 0.0)
+ self.input_errors["temperature_offset"] = self.temperature_offset
+ self.thermal_error_offset = self.fso * self.temperature_offset * self.input_errors.get("temperature_zero_error", 0.0) / 100
+ self.input_errors["thermal_error_offset"] = self.thermal_error_offset
+
+
+
+ def calc_static_error(self) -> float:
+ return gauss(0, self.static_error_stdev)
+ def calc_repeatability_error(self) -> float:
+ return gauss(0, self.repeatability_error_stdev)
+ def calc_hysteresis_error(self) -> float:
+ return gauss(0, self.hysteresis_error_stdev)
+ def calc_nonlinearity_error(self) -> float:
+ return gauss(0, self.nonlinearity_error_stdev)
+ def calc_thermal_zero_error(self) -> float:
+ return self.thermal_error_offset
+
+ def calc_error(self) -> float:
+ static_error = self.calc_static_error()
+ repeatability_error = self.calc_repeatability_error()
+ hysteresis_error = self.calc_hysteresis_error()
+ nonlinearity_error = self.calc_nonlinearity_error()
+ thermal_zero_error = self.calc_thermal_zero_error()
+ return (static_error + repeatability_error + hysteresis_error + nonlinearity_error + thermal_zero_error) * self.error_gain
+
+ def get_reading(self) -> float:
+ '''Apply the pre-specified error and return a realistic value to mimic the reading of a sensor with error.'''
+ error: float = self.calc_error()
+ noisy_value: float = self.true_value + error
+
+ # Append the final value to self.all_readings for final calculations at the end
+ self.all_readings.append(noisy_value)
+
+ return noisy_value
+
+ def get_reading_isolating_input(self, input_to_isolate: 'Input'):
+ '''Gets true value from input except from the input to isolate'''
+ if self == input_to_isolate:
+ return self.get_reading()
+ else:
+ return self.get_true()
+
+ def get_true(self) -> float:
+ return self.true_value
+
+ def set_true(self, new_true: float) -> None:
+ self.true_value = new_true
+ self.parse_input_errors()
+
+ def reset_error_gain(self) -> None:
+ self.set_error_gain(1.0)
+
+class CombinedInput(Input):
+ def __init__(self, sensor1, sensor2, operation):
+ self.sensor1: 'Input' = sensor1
+ self.sensor2: 'Input' = sensor2
+ self.operation = operation
+
+ def get_input_errors(self) -> str:
+ string = ""
+ if isinstance(self.sensor1, Input):
+ string += f"{self.sensor1.get_input_errors()}\n"
+ if isinstance(self.sensor2, Input):
+ string += f"{self.sensor2.get_input_errors()}\n"
+ return string
+
+ def get_arithmetic(self) -> str:
+ operation_char = ""
+ match self.operation:
+ case "add":
+ operation_char = "+"
+ case "sub":
+ operation_char = "-"
+ case "mul":
+ operation_char = "*"
+ case "div":
+ operation_char = "/"
+ case "pow":
+ operation_char = "**"
+
+ string = f"({self.sensor1.get_arithmetic() if isinstance(self.sensor1, Input) else str(self.sensor1)} "
+ string += operation_char
+ string += f" {self.sensor2.get_arithmetic() if isinstance(self.sensor2, Input) else str(self.sensor2)})"
+ return string
+
+ def get_reading(self) -> float:
+ reading1 = self.sensor1.get_reading() if isinstance(self.sensor1, Input) or isinstance(self.sensor1, CombinedInput) else self.sensor1
+ reading2 = self.sensor2.get_reading() if isinstance(self.sensor2, Input) or isinstance(self.sensor2, CombinedInput) else self.sensor2
+
+ # No need to add error as the error already comes from the sensor.get_reading()
+
+ if self.operation == 'add':
+ return reading1 + reading2
+ elif self.operation == 'sub':
+ return reading1 - reading2
+ elif self.operation == 'mul':
+ return reading1 * reading2
+ elif self.operation == 'div':
+ if reading2 == 0:
+ raise ZeroDivisionError("Division by zero is undefined.")
+ return reading1 / reading2
+ elif self.operation == 'pow':
+ return reading1 ** reading2
+ else:
+ raise ValueError(f"Unsupported operation: {self.operation}")
+
+ def get_reading_isolating_input(self, input_to_isolate: 'Input'):
+ '''Gets true value from every input except the input to isolate'''
+ reading1 = self.sensor1.get_reading_isolating_input(input_to_isolate) if isinstance(self.sensor1, Input) or isinstance(self.sensor1, CombinedInput) else self.sensor1
+ reading2 = self.sensor2.get_reading_isolating_input(input_to_isolate) if isinstance(self.sensor2, Input) or isinstance(self.sensor2, CombinedInput) else self.sensor2
+
+ if self.operation == 'add':
+ return reading1 + reading2
+ elif self.operation == 'sub':
+ return reading1 - reading2
+ elif self.operation == 'mul':
+ return reading1 * reading2
+ elif self.operation == 'div':
+ if reading2 == 0:
+ raise ZeroDivisionError("Division by zero is undefined.")
+ return reading1 / reading2
+ elif self.operation == 'pow':
+ return reading1 ** reading2
+ else:
+ raise ValueError(f"Unsupported operation: {self.operation}")
+
+ def get_true(self) -> float:
+ reading1 = self.sensor1.get_true() if isinstance(self.sensor1, Input) or isinstance(self.sensor1, CombinedInput) else self.sensor1
+ reading2 = self.sensor2.get_true() if isinstance(self.sensor2, Input) or isinstance(self.sensor2, CombinedInput) else self.sensor2
+
+ # No need to add error as the error already comes from the sensor.get_reading()
+
+ if self.operation == 'add':
+ return reading1 + reading2
+ elif self.operation == 'sub':
+ return reading1 - reading2
+ elif self.operation == 'mul':
+ return reading1 * reading2
+ elif self.operation == 'div':
+ if reading2 == 0:
+ raise ZeroDivisionError("Division by zero is undefined.")
+ return reading1 / reading2
+ elif self.operation == 'pow':
+ return reading1 ** reading2
+ else:
+ raise ValueError(f"Unsupported operation: {self.operation}")
+
+ def reset_error_gain(self) -> None:
+ '''Resets the error gain of all connected inputs to 1.0'''
+ if isinstance(self.sensor1, Input):
+ self.sensor1.reset_error_gain()
+ if isinstance(self.sensor2, Input):
+ self.sensor2.reset_error_gain()
+
+ def __add__(self, other) -> 'CombinedInput':
+ return CombinedInput(self, other, 'add')
+
+ def __sub__(self, other) -> 'CombinedInput':
+ return CombinedInput(self, other, 'sub')
+
+ def __mul__(self, other) -> 'CombinedInput':
+ return CombinedInput(self, other, 'mul')
+
+ def __truediv__(self, other) -> 'CombinedInput':
+ return CombinedInput(self, other, 'div')
+
+ def __pow__(self, other)-> 'CombinedInput':
+ return CombinedInput(self, other, 'pow')
+
+ def __radd__(self, other)-> 'CombinedInput':
+ return CombinedInput(other, self, 'add')
+
+ def __rsub__(self, other)-> 'CombinedInput':
+ return CombinedInput(other, self, 'sub')
+
+ def __rmul__(self, other)-> 'CombinedInput':
+ return CombinedInput(other, self, 'mul')
+
+ def __rtruediv__(self, other)-> 'CombinedInput':
+ return CombinedInput(other, self, 'div')
+
+ def __rpow__(self, other)-> 'CombinedInput':
+ return CombinedInput(other, self, 'pow')
+
+class AveragingInput(Input):
+ def __init__(self, sensors: list['Input']):
+ if not sensors:
+ raise ValueError("The list of sensors cannot be empty")
+
+        # Ensure all sensors share the same parent sensor type (walk up to the class directly below Input)
+        parent_class = type(sensors[0])
+        while parent_class is not Input and Input not in parent_class.__bases__:
+            parent_class = parent_class.__bases__[0]
+        if not all(isinstance(sensor, parent_class) for sensor in sensors):
+            raise ValueError("All sensors in an AveragingInput must share the same parent sensor type")
+
+ super().__init__(true_value=0.0, input_errors={})
+ self.sensors = sensors
+
+ def get_first(self) -> 'Input':
+ return self.sensors[0]
+
+ def get_input_errors(self) -> str:
+ string = ""
+ for sensor in self.sensors:
+ if isinstance(sensor, Input):
+ string += f"{sensor.get_input_errors()}\n\n"
+ return string
+
+ def get_arithmetic(self) -> str:
+ string = f"Average({self.sensors[0].get_arithmetic()}"
+ if len(self.sensors) != 1:
+ for sensor in self.sensors[1:]:
+ string += ", "
+ string += sensor.get_arithmetic()
+ return string + ")"
+
+ def get_reading(self) -> float:
+ total = sum(sensor.get_reading() for sensor in self.sensors)
+ average = total / len(self.sensors)
+
+ # No need to add error as the error already comes from the sensor.get_reading()
+
+ # Append the final value to self.all_readings for final calculations at the end
+ self.all_readings.append(average)
+
+ return average
+
+ def get_reading_isolating_input(self, input_to_isolate: 'Input'):
+ '''Gets true value from input except from the input to isolate'''
+ total = sum(sensor.get_reading_isolating_input(input_to_isolate) for sensor in self.sensors)
+ average = total / len(self.sensors)
+
+ # No need to add error as the error already comes from the sensor.get_reading()
+
+ # Append the final value to self.all_readings for final calculations at the end
+ self.all_readings.append(average)
+
+ return average
+
+ def get_true(self) -> float:
+ total = sum(sensor.get_true() for sensor in self.sensors)
+ average = total / len(self.sensors)
+
+ # Append the final value to self.all_readings for final calculations at the end
+ self.all_readings.append(average)
+
+ return average
+
+ def reset_error_gain(self) -> None:
+ sensor: Input = None
+ for sensor in self.sensors:
+ sensor.reset_error_gain()
+
+class PhysicalInput(Input):
+ '''A numerical input with an associated tolerance (or uncertainty in %). An example is the diameter of an orifice.
+ Typically, tolerance represents 3 standard deviations. So, an orifice of 5mm with tolerance of 1mm
+ would fall within [4, 6] 99.73% of the time.'''
+    def __init__(self, true_value: float, input_errors: dict = None, name: str = None):
+ super().__init__(true_value, input_errors=input_errors, name=name)
+
+
+ def parse_input_errors(self) -> None:
+ self.k = self.input_errors.get("k", 3) # By default, assume each uncertainty is given as 3 stdev
+ self.input_errors["k"] = self.k
+ self.tolerance:float = self.input_errors.get("tolerance", None)
+ self.input_errors["tolerance"] = self.tolerance
+ self.uncertainty:float = self.input_errors.get("uncertainty", None)
+ self.input_errors["uncertainty"] = self.uncertainty
+
+        if self.tolerance is not None:
+            self.std_dev: float = self.tolerance / self.k # Tolerance is assumed to span k standard deviations (3 by default)
+        else:
+            self.std_dev = self.true_value * self.uncertainty / 100 # Otherwise an "uncertainty" entry (percent of true value) must be provided
+        self.input_errors["std_dev"] = self.std_dev
+
+        # Handle any thermal contraction or expansion:
+        # self.true_value becomes the dimension after expansion,
+        # while self.measured_value stays the nominal value that the error is applied to
+ self.measured_value = self.true_value
+
+ if "temperature_offset" in self.input_errors:
+ self.cte = self.input_errors.get("cte", 8.8E-6)
+ self.temperature_offset = self.input_errors.get("temperature_offset", 0)
+ expansion_amount = self.measured_value * self.cte * self.temperature_offset
+ self.true_value = self.measured_value + expansion_amount
+
+ def get_arithmetic(self) -> str:
+ return "{" + str(self.name) + "}"
+
+ def calc_error(self) -> float:
+ return gauss(0, self.std_dev)
+
+ def get_reading(self) -> float:
+ '''Apply the pre-specified error and return a realistic value to mimic the reading of a sensor with error.'''
+ error: float = self.calc_error()
+ noisy_value: float = self.measured_value + error
+
+ # Append the final value to self.all_readings for final calculations at the end
+ self.all_readings.append(noisy_value)
+
+ return noisy_value
+
\ No newline at end of file
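Inputs.py defines the building blocks of the simulation: arithmetic on Input objects builds a CombinedInput expression tree, AveragingInput averages redundant sensors, and PhysicalInput models toleranced dimensions. A minimal composition sketch, not part of the commit (all names and numbers are assumed, and it presumes the inputs package is importable, e.g. run from code/):

# Illustrative sketch only; values and names are assumptions, not from this repository.
from inputs.Inputs import Input, AveragingInput, PhysicalInput

# Two redundant sensors with a 1% FSO static error band (k = 2 by default)
s1 = Input(true_value=100.0, input_errors={"fso": 100.0, "static_error_band": 1.0}, name="S1")
s2 = Input(true_value=100.0, input_errors={"fso": 100.0, "static_error_band": 1.0}, name="S2")
avg = AveragingInput([s1, s2])

# A toleranced dimension (tolerance spans k = 3 standard deviations by default)
diameter = PhysicalInput(true_value=5.0, input_errors={"tolerance": 0.1}, name="D")

# Operator overloading builds a CombinedInput tree
quantity = avg * diameter ** 2
print(quantity.get_arithmetic())  # (Average({S1}, {S2}) * ({D} ** 2))
print(quantity.get_true())        # exact arithmetic on the true values
print(quantity.get_reading())     # the same arithmetic on noisy readings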
diff --git a/code/inputs/Math_Functions.py b/code/inputs/Math_Functions.py
new file mode 100644
index 0000000..a181b21
--- /dev/null
+++ b/code/inputs/Math_Functions.py
@@ -0,0 +1,27 @@
+from inputs.Inputs import Input
+
+class MathFunction(Input):
+ pass
+
+class Integration(MathFunction):
+ def __init__(self, input: 'Input', time_delta: float, frequency: float, name: str = None):
+ super().__init__(true_value=0.0, input_errors={}, name=name)
+ self.input: 'Input' = input
+ self.time_delta = time_delta
+ self.time_step = 1 / frequency
+
+    def get_reading(self) -> float:
+        # Left Riemann sum of noisy readings sampled at the given frequency over time_delta
+        t = 0.0
+        total = 0.0
+        while t < self.time_delta:
+            total += self.input.get_reading() * self.time_step
+            t += self.time_step
+        return total
+
+    def get_true(self) -> float:
+        # Same integration using the true value of the input
+        t = 0.0
+        total = 0.0
+        while t < self.time_delta:
+            total += self.input.get_true() * self.time_step
+            t += self.time_step
+        return total
\ No newline at end of file
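Integration totalizes an input over a time window as a left Riemann sum of individual readings. A small usage sketch, not part of the commit (the rate, window, and frequency are assumed):

# Illustrative sketch only; values are assumptions, not from this repository.
from inputs.Inputs import Input
from inputs.Math_Functions import Integration

# A 2.0 unit/s rate signal with a small FSO error spec (k = 2 by default)
rate = Input(true_value=2.0, input_errors={"fso": 10.0, "static_error_band": 0.5}, name="Rate")

# Integrate 10 s of samples taken at 100 Hz
totalized = Integration(rate, time_delta=10.0, frequency=100.0, name="Totalized")
print(totalized.get_true())     # ~20.0 (2.0 units/s * 10 s)
print(totalized.get_reading())  # ~20.0 plus the accumulated per-sample noise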
diff --git a/code/inputs/Pressure_Sensors.py b/code/inputs/Pressure_Sensors.py
new file mode 100644
index 0000000..65e0046
--- /dev/null
+++ b/code/inputs/Pressure_Sensors.py
@@ -0,0 +1,9 @@
+import random
+
+from inputs.Inputs import Input
+
+class PressureSensor(Input):
+ pass
+
+class PressureTransmitter(PressureSensor):
+ pass
\ No newline at end of file
diff --git a/code/inputs/Temperature_Sensors.py b/code/inputs/Temperature_Sensors.py
new file mode 100644
index 0000000..e2fabee
--- /dev/null
+++ b/code/inputs/Temperature_Sensors.py
@@ -0,0 +1,47 @@
+from random import gauss
+
+from inputs.Inputs import Input
+
+def F_to_C(f: float) -> float:
+ return (f - 32) / 1.8
+
+class TemperatureSensor(Input):
+ pass
+
+class RTD(TemperatureSensor):
+ def parse_input_errors(self) -> None:
+ self.k = self.input_errors.get("k", 2) # By default, assume each uncertainty is given as 2 stdev
+ self.class_code = self.input_errors.get("class", "A")
+
+ self.max_allowable_class_error = 0
+
+ true_value_celsius = F_to_C(self.true_value)
+
+ # First find the allowable error in Celsius as defined by IEC 60751:2022
+        match self.class_code:
+            case "AA":
+                self.max_allowable_class_error = 0.1 + 0.0017 * abs(true_value_celsius)
+            case "A":
+                self.max_allowable_class_error = 0.15 + 0.002 * abs(true_value_celsius)
+            case "B":
+                self.max_allowable_class_error = 0.3 + 0.005 * abs(true_value_celsius)
+            case "C":
+                self.max_allowable_class_error = 0.6 + 0.01 * abs(true_value_celsius)
+            case _:
+                raise ValueError(f"Unknown RTD class code: {self.class_code}")
+
+
+ # Convert the error from Celsius to Fahrenheit
+ self.max_allowable_class_error *= 1.8
+
+ self.error_stdev = self.max_allowable_class_error / self.k
+
+ self.input_errors["error_stdev"] = self.error_stdev
+
+ def calc_class_error(self) -> float:
+ return gauss(0, self.error_stdev)
+
+ def calc_error(self) -> float:
+ class_error = self.calc_class_error()
+ return (class_error) * self.error_gain
+
+class Thermocouple(TemperatureSensor):
+ pass
\ No newline at end of file
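As a worked check of the RTD class tolerance above (a hand calculation, not from the repository): IEC 60751 Class A at 100 C allows 0.15 + 0.002 * |100| = 0.35 C, i.e. 0.35 * 1.8 = 0.63 F, and with the default k = 2 the simulated standard deviation is 0.63 / 2 = 0.315 F.

# Illustrative sketch only; assumes the inputs package is importable (e.g. run from code/).
from inputs.Temperature_Sensors import RTD

rtd = RTD(true_value=212.0, input_errors={"class": "A"})  # 212 F = 100 C
print(rtd.input_errors["error_stdev"])  # ~0.315 F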
diff --git a/code/inputs/__pycache__/Flow_Speed_Sensors.cpython-311.pyc b/code/inputs/__pycache__/Flow_Speed_Sensors.cpython-311.pyc
new file mode 100644
index 0000000..6ec39af
Binary files /dev/null and b/code/inputs/__pycache__/Flow_Speed_Sensors.cpython-311.pyc differ
diff --git a/code/inputs/__pycache__/Fluid_Lookup.cpython-311.pyc b/code/inputs/__pycache__/Fluid_Lookup.cpython-311.pyc
new file mode 100644
index 0000000..47caf3c
Binary files /dev/null and b/code/inputs/__pycache__/Fluid_Lookup.cpython-311.pyc differ
diff --git a/code/inputs/__pycache__/Force_Sensors.cpython-311.pyc b/code/inputs/__pycache__/Force_Sensors.cpython-311.pyc
new file mode 100644
index 0000000..b2db5f4
Binary files /dev/null and b/code/inputs/__pycache__/Force_Sensors.cpython-311.pyc differ
diff --git a/code/inputs/__pycache__/Inputs.cpython-311.pyc b/code/inputs/__pycache__/Inputs.cpython-311.pyc
new file mode 100644
index 0000000..f1531aa
Binary files /dev/null and b/code/inputs/__pycache__/Inputs.cpython-311.pyc differ
diff --git a/code/inputs/__pycache__/Math_Functions.cpython-311.pyc b/code/inputs/__pycache__/Math_Functions.cpython-311.pyc
new file mode 100644
index 0000000..3f222ca
Binary files /dev/null and b/code/inputs/__pycache__/Math_Functions.cpython-311.pyc differ
diff --git a/code/inputs/__pycache__/Pressure_Sensors.cpython-311.pyc b/code/inputs/__pycache__/Pressure_Sensors.cpython-311.pyc
new file mode 100644
index 0000000..dda856b
Binary files /dev/null and b/code/inputs/__pycache__/Pressure_Sensors.cpython-311.pyc differ
diff --git a/code/inputs/__pycache__/Temperature_Sensors.cpython-311.pyc b/code/inputs/__pycache__/Temperature_Sensors.cpython-311.pyc
new file mode 100644
index 0000000..baf26b1
Binary files /dev/null and b/code/inputs/__pycache__/Temperature_Sensors.cpython-311.pyc differ
diff --git a/code/test.py b/code/test.py
new file mode 100644
index 0000000..db9c564
--- /dev/null
+++ b/code/test.py
@@ -0,0 +1,42 @@
+from fpdf import FPDF
+
+class PDFWithTable(FPDF):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def header(self):
+ self.set_font('Arial', 'B', 12)
+ self.cell(0, 10, 'Table Example', 0, 1, 'C')
+
+ def add_table(self, data, col_widths, col_names):
+ # Add a header row
+ self.set_font('Arial', 'B', 12)
+ for i, col_name in enumerate(col_names):
+ self.cell(col_widths[i], 10, col_name, 1, 0, 'C')
+ self.ln()
+
+ # Add data rows
+ self.set_font('Arial', '', 12)
+ for row in data:
+ for i, item in enumerate(row):
+ self.cell(col_widths[i], 10, str(item), 1, 0, 'C')
+ self.ln()
+
+# Sample data
+column_names = ['Name', 'Age', 'City']
+data = [
+ ['Alice', 30, 'New York'],
+ ['Bob', 25, 'San Francisco'],
+ ['Charlie', 35, 'Los Angeles']
+]
+column_widths = [40, 30, 50] # Adjust column widths as needed
+
+# Initialize PDF
+pdf = PDFWithTable()
+pdf.add_page()
+
+# Add table to PDF
+pdf.add_table(data, column_widths, column_names)
+
+# Save PDF
+pdf.output('table_example.pdf')
diff --git a/documentation/.$UML.drawio.bkp b/documentation/.$UML.drawio.bkp
new file mode 100644
index 0000000..3f6ff8f
--- /dev/null
+++ b/documentation/.$UML.drawio.bkp
@@ -0,0 +1,124 @@
diff --git a/documentation/UML.drawio b/documentation/UML.drawio
new file mode 100644
index 0000000..8203c8b
--- /dev/null
+++ b/documentation/UML.drawio
@@ -0,0 +1,505 @@