diff --git a/docs/master/build/build.svg b/docs/master/build/build.svg new file mode 100644 index 00000000..9cd2a24e --- /dev/null +++ b/docs/master/build/build.svg @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + build + build + + + passing + passing + + diff --git a/docs/master/coverage/.gitignore b/docs/master/coverage/.gitignore new file mode 100644 index 00000000..ccccf142 --- /dev/null +++ b/docs/master/coverage/.gitignore @@ -0,0 +1,2 @@ +# Created by coverage.py +* diff --git a/docs/master/coverage/badge.svg b/docs/master/coverage/badge.svg new file mode 100644 index 00000000..0e475438 --- /dev/null +++ b/docs/master/coverage/badge.svg @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + coverage + coverage + 86% + 86% + + diff --git a/docs/master/coverage/coverage_html.js b/docs/master/coverage/coverage_html.js new file mode 100644 index 00000000..59348828 --- /dev/null +++ b/docs/master/coverage/coverage_html.js @@ -0,0 +1,624 @@ +// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +// For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +// Coverage.py HTML report browser code. +/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */ +/*global coverage: true, document, window, $ */ + +coverage = {}; + +// General helpers +function debounce(callback, wait) { + let timeoutId = null; + return function(...args) { + clearTimeout(timeoutId); + timeoutId = setTimeout(() => { + callback.apply(this, args); + }, wait); + }; +}; + +function checkVisible(element) { + const rect = element.getBoundingClientRect(); + const viewBottom = Math.max(document.documentElement.clientHeight, window.innerHeight); + const viewTop = 30; + return !(rect.bottom < viewTop || rect.top >= viewBottom); +} + +function on_click(sel, fn) { + const elt = document.querySelector(sel); + if (elt) { + elt.addEventListener("click", fn); + } +} + +// Helpers for table sorting +function getCellValue(row, column = 0) { + const cell = row.cells[column] // nosemgrep: eslint.detect-object-injection + if (cell.childElementCount == 1) { + const child = cell.firstElementChild + if (child instanceof HTMLTimeElement && child.dateTime) { + return child.dateTime + } else if (child instanceof HTMLDataElement && child.value) { + return child.value + } + } + return cell.innerText || cell.textContent; +} + +function rowComparator(rowA, rowB, column = 0) { + let valueA = getCellValue(rowA, column); + let valueB = getCellValue(rowB, column); + if (!isNaN(valueA) && !isNaN(valueB)) { + return valueA - valueB + } + return valueA.localeCompare(valueB, undefined, {numeric: true}); +} + +function sortColumn(th) { + // Get the current sorting direction of the selected header, + // clear state on other headers and then set the new sorting direction + const currentSortOrder = th.getAttribute("aria-sort"); + [...th.parentElement.cells].forEach(header => header.setAttribute("aria-sort", "none")); + if (currentSortOrder === "none") { + th.setAttribute("aria-sort", th.dataset.defaultSortOrder || "ascending"); + } else { + th.setAttribute("aria-sort", currentSortOrder === "ascending" ? "descending" : "ascending"); + } + + const column = [...th.parentElement.cells].indexOf(th) + + // Sort all rows and afterwards append them in order to move them in the DOM + Array.from(th.closest("table").querySelectorAll("tbody tr")) + .sort((rowA, rowB) => rowComparator(rowA, rowB, column) * (th.getAttribute("aria-sort") === "ascending" ? 
1 : -1)) + .forEach(tr => tr.parentElement.appendChild(tr) ); +} + +// Find all the elements with data-shortcut attribute, and use them to assign a shortcut key. +coverage.assign_shortkeys = function () { + document.querySelectorAll("[data-shortcut]").forEach(element => { + document.addEventListener("keypress", event => { + if (event.target.tagName.toLowerCase() === "input") { + return; // ignore keypress from search filter + } + if (event.key === element.dataset.shortcut) { + element.click(); + } + }); + }); +}; + +// Create the events for the filter box. +coverage.wire_up_filter = function () { + // Cache elements. + const table = document.querySelector("table.index"); + const table_body_rows = table.querySelectorAll("tbody tr"); + const no_rows = document.getElementById("no_rows"); + + // Observe filter keyevents. + document.getElementById("filter").addEventListener("input", debounce(event => { + // Keep running total of each metric, first index contains number of shown rows + const totals = new Array(table.rows[0].cells.length).fill(0); + // Accumulate the percentage as fraction + totals[totals.length - 1] = { "numer": 0, "denom": 0 }; // nosemgrep: eslint.detect-object-injection + + // Hide / show elements. + table_body_rows.forEach(row => { + if (!row.cells[0].textContent.includes(event.target.value)) { + // hide + row.classList.add("hidden"); + return; + } + + // show + row.classList.remove("hidden"); + totals[0]++; + + for (let column = 1; column < totals.length; column++) { + // Accumulate dynamic totals + cell = row.cells[column] // nosemgrep: eslint.detect-object-injection + if (column === totals.length - 1) { + // Last column contains percentage + const [numer, denom] = cell.dataset.ratio.split(" "); + totals[column]["numer"] += parseInt(numer, 10); // nosemgrep: eslint.detect-object-injection + totals[column]["denom"] += parseInt(denom, 10); // nosemgrep: eslint.detect-object-injection + } else { + totals[column] += parseInt(cell.textContent, 10); // nosemgrep: eslint.detect-object-injection + } + } + }); + + // Show placeholder if no rows will be displayed. + if (!totals[0]) { + // Show placeholder, hide table. + no_rows.style.display = "block"; + table.style.display = "none"; + return; + } + + // Hide placeholder, show table. + no_rows.style.display = null; + table.style.display = null; + + const footer = table.tFoot.rows[0]; + // Calculate new dynamic sum values based on visible rows. + for (let column = 1; column < totals.length; column++) { + // Get footer cell element. + const cell = footer.cells[column]; // nosemgrep: eslint.detect-object-injection + + // Set value into dynamic footer cell element. + if (column === totals.length - 1) { + // Percentage column uses the numerator and denominator, + // and adapts to the number of decimal places. + const match = /\.([0-9]+)/.exec(cell.textContent); + const places = match ? match[1].length : 0; + const { numer, denom } = totals[column]; // nosemgrep: eslint.detect-object-injection + cell.dataset.ratio = `${numer} ${denom}`; + // Check denom to prevent NaN if filtered files contain no statements + cell.textContent = denom + ? `${(numer * 100 / denom).toFixed(places)}%` + : `${(100).toFixed(places)}%`; + } else { + cell.textContent = totals[column]; // nosemgrep: eslint.detect-object-injection + } + } + })); + + // Trigger change event on setup, to force filter on page refresh + // (filter value may still be present). 
+ document.getElementById("filter").dispatchEvent(new Event("input")); +}; + +coverage.INDEX_SORT_STORAGE = "COVERAGE_INDEX_SORT_2"; + +// Loaded on index.html +coverage.index_ready = function () { + coverage.assign_shortkeys(); + coverage.wire_up_filter(); + document.querySelectorAll("[data-sortable] th[aria-sort]").forEach( + th => th.addEventListener("click", e => sortColumn(e.target)) + ); + + // Look for a localStorage item containing previous sort settings: + const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE); + + if (stored_list) { + const {column, direction} = JSON.parse(stored_list); + const th = document.querySelector("[data-sortable]").tHead.rows[0].cells[column]; // nosemgrep: eslint.detect-object-injection + th.setAttribute("aria-sort", direction === "ascending" ? "descending" : "ascending"); + th.click() + } + + // Watch for page unload events so we can save the final sort settings: + window.addEventListener("unload", function () { + const th = document.querySelector('[data-sortable] th[aria-sort="ascending"], [data-sortable] [aria-sort="descending"]'); + if (!th) { + return; + } + localStorage.setItem(coverage.INDEX_SORT_STORAGE, JSON.stringify({ + column: [...th.parentElement.cells].indexOf(th), + direction: th.getAttribute("aria-sort"), + })); + }); + + on_click(".button_prev_file", coverage.to_prev_file); + on_click(".button_next_file", coverage.to_next_file); + + on_click(".button_show_hide_help", coverage.show_hide_help); +}; + +// -- pyfile stuff -- + +coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS"; + +coverage.pyfile_ready = function () { + // If we're directed to a particular line number, highlight the line. + var frag = location.hash; + if (frag.length > 2 && frag[1] === "t") { + document.querySelector(frag).closest(".n").classList.add("highlight"); + coverage.set_sel(parseInt(frag.substr(2), 10)); + } else { + coverage.set_sel(0); + } + + on_click(".button_toggle_run", coverage.toggle_lines); + on_click(".button_toggle_mis", coverage.toggle_lines); + on_click(".button_toggle_exc", coverage.toggle_lines); + on_click(".button_toggle_par", coverage.toggle_lines); + + on_click(".button_next_chunk", coverage.to_next_chunk_nicely); + on_click(".button_prev_chunk", coverage.to_prev_chunk_nicely); + on_click(".button_top_of_page", coverage.to_top); + on_click(".button_first_chunk", coverage.to_first_chunk); + + on_click(".button_prev_file", coverage.to_prev_file); + on_click(".button_next_file", coverage.to_next_file); + on_click(".button_to_index", coverage.to_index); + + on_click(".button_show_hide_help", coverage.show_hide_help); + + coverage.filters = undefined; + try { + coverage.filters = localStorage.getItem(coverage.LINE_FILTERS_STORAGE); + } catch(err) {} + + if (coverage.filters) { + coverage.filters = JSON.parse(coverage.filters); + } + else { + coverage.filters = {run: false, exc: true, mis: true, par: true}; + } + + for (cls in coverage.filters) { + coverage.set_line_visibilty(cls, coverage.filters[cls]); // nosemgrep: eslint.detect-object-injection + } + + coverage.assign_shortkeys(); + coverage.init_scroll_markers(); + coverage.wire_up_sticky_header(); + + document.querySelectorAll("[id^=ctxs]").forEach( + cbox => cbox.addEventListener("click", coverage.expand_contexts) + ); + + // Rebuild scroll markers when the window height changes. 
+ window.addEventListener("resize", coverage.build_scroll_markers); +}; + +coverage.toggle_lines = function (event) { + const btn = event.target.closest("button"); + const category = btn.value + const show = !btn.classList.contains("show_" + category); + coverage.set_line_visibilty(category, show); + coverage.build_scroll_markers(); + coverage.filters[category] = show; + try { + localStorage.setItem(coverage.LINE_FILTERS_STORAGE, JSON.stringify(coverage.filters)); + } catch(err) {} +}; + +coverage.set_line_visibilty = function (category, should_show) { + const cls = "show_" + category; + const btn = document.querySelector(".button_toggle_" + category); + if (btn) { + if (should_show) { + document.querySelectorAll("#source ." + category).forEach(e => e.classList.add(cls)); + btn.classList.add(cls); + } + else { + document.querySelectorAll("#source ." + category).forEach(e => e.classList.remove(cls)); + btn.classList.remove(cls); + } + } +}; + +// Return the nth line div. +coverage.line_elt = function (n) { + return document.getElementById("t" + n)?.closest("p"); +}; + +// Set the selection. b and e are line numbers. +coverage.set_sel = function (b, e) { + // The first line selected. + coverage.sel_begin = b; + // The next line not selected. + coverage.sel_end = (e === undefined) ? b+1 : e; +}; + +coverage.to_top = function () { + coverage.set_sel(0, 1); + coverage.scroll_window(0); +}; + +coverage.to_first_chunk = function () { + coverage.set_sel(0, 1); + coverage.to_next_chunk(); +}; + +coverage.to_prev_file = function () { + window.location = document.getElementById("prevFileLink").href; +} + +coverage.to_next_file = function () { + window.location = document.getElementById("nextFileLink").href; +} + +coverage.to_index = function () { + location.href = document.getElementById("indexLink").href; +} + +coverage.show_hide_help = function () { + const helpCheck = document.getElementById("help_panel_state") + helpCheck.checked = !helpCheck.checked; +} + +// Return a string indicating what kind of chunk this line belongs to, +// or null if not a chunk. +coverage.chunk_indicator = function (line_elt) { + const classes = line_elt?.className; + if (!classes) { + return null; + } + const match = classes.match(/\bshow_\w+\b/); + if (!match) { + return null; + } + return match[0]; +}; + +coverage.to_next_chunk = function () { + const c = coverage; + + // Find the start of the next colored chunk. + var probe = c.sel_end; + var chunk_indicator, probe_line; + while (true) { + probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + chunk_indicator = c.chunk_indicator(probe_line); + if (chunk_indicator) { + break; + } + probe++; + } + + // There's a next chunk, `probe` points to it. + var begin = probe; + + // Find the end of this chunk. + var next_indicator = chunk_indicator; + while (next_indicator === chunk_indicator) { + probe++; + probe_line = c.line_elt(probe); + next_indicator = c.chunk_indicator(probe_line); + } + c.set_sel(begin, probe); + c.show_selection(); +}; + +coverage.to_prev_chunk = function () { + const c = coverage; + + // Find the end of the prev colored chunk. + var probe = c.sel_begin-1; + var probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + var chunk_indicator = c.chunk_indicator(probe_line); + while (probe > 1 && !chunk_indicator) { + probe--; + probe_line = c.line_elt(probe); + if (!probe_line) { + return; + } + chunk_indicator = c.chunk_indicator(probe_line); + } + + // There's a prev chunk, `probe` points to its last line. 
+ var end = probe+1; + + // Find the beginning of this chunk. + var prev_indicator = chunk_indicator; + while (prev_indicator === chunk_indicator) { + probe--; + if (probe <= 0) { + return; + } + probe_line = c.line_elt(probe); + prev_indicator = c.chunk_indicator(probe_line); + } + c.set_sel(probe+1, end); + c.show_selection(); +}; + +// Returns 0, 1, or 2: how many of the two ends of the selection are on +// the screen right now? +coverage.selection_ends_on_screen = function () { + if (coverage.sel_begin === 0) { + return 0; + } + + const begin = coverage.line_elt(coverage.sel_begin); + const end = coverage.line_elt(coverage.sel_end-1); + + return ( + (checkVisible(begin) ? 1 : 0) + + (checkVisible(end) ? 1 : 0) + ); +}; + +coverage.to_next_chunk_nicely = function () { + if (coverage.selection_ends_on_screen() === 0) { + // The selection is entirely off the screen: + // Set the top line on the screen as selection. + + // This will select the top-left of the viewport + // As this is most likely the span with the line number we take the parent + const line = document.elementFromPoint(0, 0).parentElement; + if (line.parentElement !== document.getElementById("source")) { + // The element is not a source line but the header or similar + coverage.select_line_or_chunk(1); + } else { + // We extract the line number from the id + coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10)); + } + } + coverage.to_next_chunk(); +}; + +coverage.to_prev_chunk_nicely = function () { + if (coverage.selection_ends_on_screen() === 0) { + // The selection is entirely off the screen: + // Set the lowest line on the screen as selection. + + // This will select the bottom-left of the viewport + // As this is most likely the span with the line number we take the parent + const line = document.elementFromPoint(document.documentElement.clientHeight-1, 0).parentElement; + if (line.parentElement !== document.getElementById("source")) { + // The element is not a source line but the header or similar + coverage.select_line_or_chunk(coverage.lines_len); + } else { + // We extract the line number from the id + coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10)); + } + } + coverage.to_prev_chunk(); +}; + +// Select line number lineno, or if it is in a colored chunk, select the +// entire chunk +coverage.select_line_or_chunk = function (lineno) { + var c = coverage; + var probe_line = c.line_elt(lineno); + if (!probe_line) { + return; + } + var the_indicator = c.chunk_indicator(probe_line); + if (the_indicator) { + // The line is in a highlighted chunk. + // Search backward for the first line. + var probe = lineno; + var indicator = the_indicator; + while (probe > 0 && indicator === the_indicator) { + probe--; + probe_line = c.line_elt(probe); + if (!probe_line) { + break; + } + indicator = c.chunk_indicator(probe_line); + } + var begin = probe + 1; + + // Search forward for the last line. 
+ probe = lineno; + indicator = the_indicator; + while (indicator === the_indicator) { + probe++; + probe_line = c.line_elt(probe); + indicator = c.chunk_indicator(probe_line); + } + + coverage.set_sel(begin, probe); + } + else { + coverage.set_sel(lineno); + } +}; + +coverage.show_selection = function () { + // Highlight the lines in the chunk + document.querySelectorAll("#source .highlight").forEach(e => e.classList.remove("highlight")); + for (let probe = coverage.sel_begin; probe < coverage.sel_end; probe++) { + coverage.line_elt(probe).querySelector(".n").classList.add("highlight"); + } + + coverage.scroll_to_selection(); +}; + +coverage.scroll_to_selection = function () { + // Scroll the page if the chunk isn't fully visible. + if (coverage.selection_ends_on_screen() < 2) { + const element = coverage.line_elt(coverage.sel_begin); + coverage.scroll_window(element.offsetTop - 60); + } +}; + +coverage.scroll_window = function (to_pos) { + window.scroll({top: to_pos, behavior: "smooth"}); +}; + +coverage.init_scroll_markers = function () { + // Init some variables + coverage.lines_len = document.querySelectorAll("#source > p").length; + + // Build html + coverage.build_scroll_markers(); +}; + +coverage.build_scroll_markers = function () { + const temp_scroll_marker = document.getElementById("scroll_marker") + if (temp_scroll_marker) temp_scroll_marker.remove(); + // Don't build markers if the window has no scroll bar. + if (document.body.scrollHeight <= window.innerHeight) { + return; + } + + const marker_scale = window.innerHeight / document.body.scrollHeight; + const line_height = Math.min(Math.max(3, window.innerHeight / coverage.lines_len), 10); + + let previous_line = -99, last_mark, last_top; + + const scroll_marker = document.createElement("div"); + scroll_marker.id = "scroll_marker"; + document.getElementById("source").querySelectorAll( + "p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par" + ).forEach(element => { + const line_top = Math.floor(element.offsetTop * marker_scale); + const line_number = parseInt(element.querySelector(".n a").id.substr(1)); + + if (line_number === previous_line + 1) { + // If this solid missed block just make previous mark higher. + last_mark.style.height = `${line_top + line_height - last_top}px`; + } else { + // Add colored line in scroll_marker block. 
+ last_mark = document.createElement("div"); + last_mark.id = `m${line_number}`; + last_mark.classList.add("marker"); + last_mark.style.height = `${line_height}px`; + last_mark.style.top = `${line_top}px`; + scroll_marker.append(last_mark); + last_top = line_top; + } + + previous_line = line_number; + }); + + // Append last to prevent layout calculation + document.body.append(scroll_marker); +}; + +coverage.wire_up_sticky_header = function () { + const header = document.querySelector("header"); + const header_bottom = ( + header.querySelector(".content h2").getBoundingClientRect().top - + header.getBoundingClientRect().top + ); + + function updateHeader() { + if (window.scrollY > header_bottom) { + header.classList.add("sticky"); + } else { + header.classList.remove("sticky"); + } + } + + window.addEventListener("scroll", updateHeader); + updateHeader(); +}; + +coverage.expand_contexts = function (e) { + var ctxs = e.target.parentNode.querySelector(".ctxs"); + + if (!ctxs.classList.contains("expanded")) { + var ctxs_text = ctxs.textContent; + var width = Number(ctxs_text[0]); + ctxs.textContent = ""; + for (var i = 1; i < ctxs_text.length; i += width) { + key = ctxs_text.substring(i, i + width).trim(); + ctxs.appendChild(document.createTextNode(contexts[key])); + ctxs.appendChild(document.createElement("br")); + } + ctxs.classList.add("expanded"); + } +}; + +document.addEventListener("DOMContentLoaded", () => { + if (document.body.classList.contains("indexfile")) { + coverage.index_ready(); + } else { + coverage.pyfile_ready(); + } +}); diff --git a/docs/master/coverage/d_88c635a12c52f913___init___py.html b/docs/master/coverage/d_88c635a12c52f913___init___py.html new file mode 100644 index 00000000..1ceaa130 --- /dev/null +++ b/docs/master/coverage/d_88c635a12c52f913___init___py.html @@ -0,0 +1,104 @@ + + + + + Coverage for aixcalibuha/calibration/__init__.py: 100% + + + + + +
Coverage for aixcalibuha/calibration/__init__.py: 100% (2 statements)
coverage.py v7.4.1, created at 2024-01-27 10:48 +0000

1""" 

+

2Package containing modules and classes to perform 

+

3calibration tasks. 

+

4""" 

+

5 

+

6from .calibrator import Calibrator 

+

7from .multi_class_calibrator import MultipleClassCalibrator 

+
diff --git a/docs/master/coverage/d_88c635a12c52f913_calibrator_py.html b/docs/master/coverage/d_88c635a12c52f913_calibrator_py.html
new file mode 100644
index 00000000..1a11e956
--- /dev/null
+++ b/docs/master/coverage/d_88c635a12c52f913_calibrator_py.html
@@ -0,0 +1,723 @@
Coverage for aixcalibuha/calibration/calibrator.py: 57% (226 statements)
coverage.py v7.4.1, created at 2024-01-27 10:48 +0000

1""" 

+

2Module containing the basic class to calibrate 

+

3a dynamic model, e.g. a modelica model. 

+

4""" 

+

5 

+

6import os 

+

7import json 

+

8import time 

+

9import logging 

+

10from typing import Dict 

+

11from copy import copy 

+

12import numpy as np 

+

13import pandas as pd 

+

14from ebcpy import data_types, Optimizer 

+

15from ebcpy.simulationapi import SimulationAPI 

+

16from aixcalibuha.utils import visualizer, MaxIterationsReached 

+

17from aixcalibuha import CalibrationClass, Goals, TunerParas 

+

18 

+

19 

+

20class Calibrator(Optimizer): 

+

21 """ 

+

22 This Calibrator class can be used for single

+

23 time-intervals of calibration. 

+

24 

+

25 :param str,os.path.normpath cd: 

+

26 Working directory 

+

27 :param ebcpy.simulationapi.SimulationAPI sim_api: 

+

28 Simulation-API for running the models 

+

29 :param CalibrationClass calibration_class: 

+

30 Class with information on Goals and tuner-parameters for calibration 

+

31 :keyword str result_path: 

+

32 If given, then the resulting parameter values will be stored in a JSON file 

+

33 at the given path. 

+

34 :keyword float timedelta: 

+

35 If you use this class for calibrating a single time-interval, 

+

36 you can set the timedelta to initialize the simulation before

+

37 actually evaluating it for the objective. 

+

38 The given float (default is 0) is subtracted from the start_time 

+

39 of your calibration_class. You can find a visualisation of said timedelta 

+

40 in the img folder of the project. 

+

41 :keyword boolean save_files: 

+

42 If true, all simulation files for each iteration will be saved! 

+

43 :keyword boolean verbose_logging: 

+

44 Default is True. If False, the standard Logger without 

+

45 Visualization in Form of plots is used. 

+

46 If you use this, the following keyword arguments below will help 

+

47 to further adjust the logging. 

+

48 :keyword boolean show_plot: 

+

49 Default is True. If False, all created plots are not shown during 

+

50 calibration but only stored at the end of the process. 

+

51 :keyword boolean create_tsd_plot: 

+

52 Default is True. If False, the plot of the time series data (goals) 

+

53 is not created and thus not shown during calibration. It is therefore

+

54 also not stored, even if you set the save_tsd_plot keyword-argument to true. 

+

55 :keyword boolean save_tsd_plot: 

+

56 Default is False. If True, at each iteration the created plot of the 

+

57 time-series is saved. This may make the process much slower.

+

58 :keyword boolean fail_on_error: 

+

59 Default is False. If True, the calibration will stop with an error if 

+

60 the simulation fails. See also: ``ret_val_on_error`` 

+

61 :keyword float,np.NAN ret_val_on_error: 

+

62 Default is np.NAN. If ``fail_on_error`` is false, you can specify here 

+

63 which value to return in the case of a failed simulation. Possible 

+

64 options are np.NaN, np.inf or some other high number. Be aware that this

+

65 may influence the solver.

+

66 :keyword dict fixed_parameters: 

+

67 Default is an empty dict. This dict may be used to add certain parameters 

+

68 to the simulation which are not tuned / variable during calibration. 

+

69 Such parameters may be used if the default values in the model don't 

+

70 represent the parameter values you want to use. 

+

71 :keyword boolean apply_penalty: 

+

72 Default is true. Specifies if a penalty function should be applied or not. 

+

73 :keyword float penalty_factor:

+

74 Default is 0. Quantifies the impact of the penalty term on the objective function. 

+

75 The penalty factor is added to the objective function. 

+

76 :keyword int recalibration_count:

+

77 Default is 0. Works as a counter and specifies the current cycle of recalibration. 

+

78 :keyword boolean perform_square_deviation: 

+

79 Default is false. 

+

80 If true the penalty function will evaluate the penalty factor with a quadratic approach. 

+

81 :keyword int max_itercount: 

+

82 Default is Infinity. 

+

83 Maximum number of iterations of calibration. 

+

84 This may be useful to explicitly limit the calibration 

+

85 time. 

+

86 :keyword str plot_file_type: 

+

87 File ending of created plots. 

+

88 Any supported option in matplotlib, e.g. svg, png, pdf ... 

+

89 Default is png 

+

90 

+

91 """ 

+

92 

+

93 def __init__(self, 

+

94 cd: str, 

+

95 sim_api: SimulationAPI, 

+

96 calibration_class: CalibrationClass, 

+

97 **kwargs): 

+

98 """Instantiate instance attributes""" 

+

99 # %% Kwargs 

+

100 # Initialize supported keywords with default value 

+

101 # Pop the items so they won't be added when calling the

+

102 # __init__ of the parent class. Always pop with a default value in case 

+

103 # the keyword is not passed. 

+

104 self.verbose_logging = kwargs.pop("verbose_logging", True) 

+

105 self.save_files = kwargs.pop("save_files", False) 

+

106 self.timedelta = kwargs.pop("timedelta", 0) 

+

107 self.fail_on_error = kwargs.pop("fail_on_error", False) 

+

108 self.ret_val_on_error = kwargs.pop("ret_val_on_error", np.NAN) 

+

109 self.fixed_parameters = kwargs.pop("fixed_parameters", {}) 

+

110 self.apply_penalty = kwargs.pop("apply_penalty", True) 

+

111 self.penalty_factor = kwargs.pop("penalty_factor", 0) 

+

112 self.recalibration_count = kwargs.pop("recalibration_count", 0) 

+

113 self.perform_square_deviation = kwargs.pop("square_deviation", False) 

+

114 self.result_path = kwargs.pop('result_path', None) 

+

115 self.max_itercount = kwargs.pop('max_itercount', np.inf) 

+

116 self.at_calibration = True # Boolean to indicate if validating or calibrating 

+

117 # Extract kwargs for the visualizer 

+

118 visualizer_kwargs = { 

+

119 "save_tsd_plot": kwargs.pop("save_tsd_plot", False), 

+

120 "create_tsd_plot": kwargs.pop("create_tsd_plot", True), 

+

121 "show_plot": kwargs.pop("show_plot", True), 

+

122 "show_plot_pause_time": kwargs.pop("show_plot_pause_time", 1e-3), 

+

123 "file_type": kwargs.pop("plot_file_type", "png"), 

+

124 } 

+

125 

+

126 # Check if types are correct: 

+

127 # Booleans: 

+

128 _bool_kwargs = ["save_files"] 

+

129 for bool_keyword in _bool_kwargs: 

+

130 keyword_value = self.__getattribute__(bool_keyword) 

+

131 if not isinstance(keyword_value, bool): 

+

132 raise TypeError(f"Given {bool_keyword} is of type " 

+

133 f"{type(keyword_value).__name__} but should be type bool") 

+

134 

+

135 # %% Initialize all public parameters 

+

136 super().__init__(cd, **kwargs) 

+

137 # Set sim_api 

+

138 self.sim_api = sim_api 

+

139 

+

140 if not isinstance(calibration_class, CalibrationClass): 

+

141 raise TypeError(f"calibration_class is of type {type(calibration_class).__name__} "

+

142 f"but should be CalibrationClass") 

+

143 self.calibration_class = calibration_class 

+

144 # Scale tuner on boundaries 

+

145 self.x0 = self.tuner_paras.scale(self.tuner_paras.get_initial_values()) 

+

146 if self.tuner_paras.bounds is None: 

+

147 self.bounds = None 

+

148 else: 

+

149 # As tuner-parameters are scaled between 0 and 1, the scaled bounds are always 0 and 1 

+

150 self.bounds = [(0, 1) for i in range(len(self.x0))] 
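As an aside for readers of this listing: the scaling referenced in the comment above is a plain min-max transform, which is why the scaled bounds are always (0, 1). A small self-contained sketch of that transform (the actual TunerParas.scale/descale internals are not part of this report):

```python
import math

# Min-max scaling: a value within (low, high) maps into [0, 1] and back.
def scale(value: float, low: float, high: float) -> float:
    return (value - low) / (high - low)

def descale(scaled: float, low: float, high: float) -> float:
    return low + scaled * (high - low)

# Round trip recovers the original value (up to floating-point tolerance).
assert math.isclose(descale(scale(300.0, 273.15, 373.15), 273.15, 373.15), 300.0)
```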

+

151 # Add the values to the simulation setup. 

+

152 self.sim_api.set_sim_setup( 

+

153 {"start_time": self.calibration_class.start_time - self.timedelta, 

+

154 "stop_time": self.calibration_class.stop_time} 

+

155 ) 

+

156 

+

157 # %% Setup the logger 

+

158 # De-register the logger setup in the optimization class: 

+

159 if self.verbose_logging: 

+

160 self.logger = visualizer.CalibrationVisualizer( 

+

161 cd=cd, 

+

162 name=self.__class__.__name__, 

+

163 calibration_class=self.calibration_class, 

+

164 logger=self.logger, 

+

165 **visualizer_kwargs 

+

166 ) 

+

167 else: 

+

168 self.logger = visualizer.CalibrationLogger( 

+

169 cd=cd, 

+

170 name=self.__class__.__name__, 

+

171 calibration_class=self.calibration_class, 

+

172 logger=self.logger 

+

173 ) 

+

174 

+

175 self.cd_of_class = cd # Single class does not need an extra folder 

+

176 

+

177 # Set the output interval according to the given Goals

+

178 mean_freq = self.goals.get_meas_frequency() 

+

179 self.logger.log("Setting output_interval of simulation according " 

+

180 f"to measurement target data frequency: {mean_freq}") 

+

181 self.sim_api.sim_setup.output_interval = mean_freq 

+

182 

+

183 def obj(self, xk, *args): 

+

184 """ 

+

185 Default objective function. 

+

186 The usual function will be implemented here: 

+

187 

+

188 1. Convert the set to modelica-units 

+

189 2. Simulate the converted-set 

+

190 3. Get data as a dataFrame 

+

191 4. Get penalty factor for the penalty function 

+

192 5. Calculate the objective based on statistical values 

+

193 

+

194 :param np.array xk: 

+

195 Array with normalized values for the minimizer 

+

196 :param int work_id: 

+

197 id for worker in Multiprocessing 

+

198 :return: 

+

199 Objective value based on the used quality measurement 

+

200 :rtype: float 

+

201 """ 

+

202 # Info: This function is called by the optimization framework (scipy, dlib, etc.) 

+

203 # Initialize class objects 

+

204 self._current_iterate = xk 

+

205 self._counter += 1 

+

206 

+

207 # Convert set if multiple goals of different scales are used 

+

208 xk_descaled = self.tuner_paras.descale(xk) 

+

209 

+

210 # Set initial values of variable and fixed parameters 

+

211 self.sim_api.result_names = self.goals.get_sim_var_names() 

+

212 initial_names = self.tuner_paras.get_names() 

+

213 parameters = self.fixed_parameters.copy() 

+

214 parameters.update({name: value for name, value in zip(initial_names, xk_descaled.values)}) 

+

215 # Simulate 

+

216 # pylint: disable=broad-except 

+

217 try: 

+

218 # Generate the folder name for the calibration 

+

219 if self.save_files: 

+

220 savepath_files = os.path.join(self.sim_api.cd, 

+

221 f"simulation_{self._counter}") 

+

222 _filepath = self.sim_api.simulate( 

+

223 parameters=parameters, 

+

224 return_option="savepath", 

+

225 savepath=savepath_files, 

+

226 inputs=self.calibration_class.inputs, 

+

227 **self.calibration_class.input_kwargs 

+

228 ) 

+

229 # %% Load results and write to goals object 

+

230 sim_target_data = data_types.TimeSeriesData(_filepath) 

+

231 else: 

+

232 sim_target_data = self.sim_api.simulate( 

+

233 parameters=parameters, 

+

234 inputs=self.calibration_class.inputs, 

+

235 **self.calibration_class.input_kwargs 

+

236 ) 

+

237 except Exception as err: 

+

238 if self.fail_on_error: 

+

239 self.logger.error("Simulation failed. Raising the error.") 

+

240 raise err 

+

241 self.logger.error( 

+

242 f"Simulation failed. Returning '{self.ret_val_on_error}' " 

+

243 f"for the optimization. Error message: {err}" 

+

244 ) 

+

245 return self.ret_val_on_error 

+

246 

+

247 total_res, unweighted_objective = self._kpi_and_logging_calculation( 

+

248 xk_descaled=xk_descaled, 

+

249 counter=self._counter, 

+

250 results=sim_target_data 

+

251 ) 

+

252 

+

253 return total_res, unweighted_objective 

+

254 

+

255 def mp_obj(self, x, *args): 

+

256 # Initialize list for results 

+

257 num_evals = len(x) 

+

258 total_res_list = np.empty([num_evals, 1]) 

+

259 # Set initial values of variable and fixed parameters 

+

260 self.sim_api.result_names = self.goals.get_sim_var_names() 

+

261 initial_names = self.tuner_paras.get_names() 

+

262 parameters = self.fixed_parameters.copy() 

+

263 

+

264 parameter_list = [] 

+

265 xk_descaled_list = [] 

+

266 for _xk_single in x: 

+

267 # Convert set if multiple goals of different scales are used 

+

268 xk_descaled = self.tuner_paras.descale(_xk_single) 

+

269 xk_descaled_list.append(xk_descaled) 

+

270 # Update Parameters 

+

271 parameter_copy = parameters.copy() 

+

272 parameter_copy.update( 

+

273 {name: value for name, value in zip(initial_names, xk_descaled.values)}) 

+

274 parameter_list.append(parameter_copy) 

+

275 

+

276 # Simulate 

+

277 if self.save_files: 

+

278 result_file_names = [f"simulation_{self._counter + idx}" for idx in 

+

279 range(len(parameter_list))] 

+

280 _filepaths = self.sim_api.simulate( 

+

281 parameters=parameter_list, 

+

282 return_option="savepath", 

+

283 savepath=self.sim_api.cd, 

+

284 result_file_name=result_file_names, 

+

285 fail_on_error=self.fail_on_error, 

+

286 inputs=self.calibration_class.inputs, 

+

287 **self.calibration_class.input_kwargs 

+

288 ) 

+

289 # Load results 

+

290 results = [] 

+

291 for _filepath in _filepaths: 

+

292 if _filepath is None: 

+

293 results.append(None) 

+

294 else: 

+

295 results.append(data_types.TimeSeriesData(_filepath)) 

+

296 else: 

+

297 results = self.sim_api.simulate( 

+

298 parameters=parameter_list, 

+

299 inputs=self.calibration_class.inputs, 

+

300 fail_on_error=self.fail_on_error, 

+

301 **self.calibration_class.input_kwargs 

+

302 ) 

+

303 

+

304 for idx, result in enumerate(results): 

+

305 self._counter += 1 

+

306 self._current_iterate = result 

+

307 if result is None: 

+

308 total_res_list[idx] = self.ret_val_on_error 

+

309 continue 

+

310 total_res, unweighted_objective = self._kpi_and_logging_calculation( 

+

311 xk_descaled=xk_descaled_list[idx], 

+

312 counter=self._counter, 

+

313 results=result 

+

314 ) 

+

315 # Add single objective to objective list of total Population 

+

316 total_res_list[idx] = total_res 

+

317 

+

318 return total_res_list 

+

319 

+

320 def _kpi_and_logging_calculation(self, *, xk_descaled, counter, results): 

+

321 """ 

+

322 Function to calculate everything needed in the obj or mp_obj 

+

323 function after the simulation finished. 

+

324 

+

325 """ 

+

326 xk = self.tuner_paras.scale(xk_descaled) 

+

327 

+

328 self.goals.set_sim_target_data(results) 

+

329 # Trim results based on start and end-time of cal class 

+

330 self.goals.set_relevant_time_intervals(self.calibration_class.relevant_intervals) 

+

331 

+

332 # %% Evaluate the current objective 

+

333 # Penalty function (get penalty factor) 

+

334 if self.recalibration_count > 1 and self.apply_penalty: 

+

335 # There is no benchmark in the first iteration or 

+

336 # first iterations were skipped, so no penalty is applied 

+

337 penaltyfactor = self.get_penalty(xk_descaled, xk) 

+

338 # Evaluate with penalty 

+

339 penalty = penaltyfactor 

+

340 else: 

+

341 # Evaluate without penalty 

+

342 penaltyfactor = 1 

+

343 penalty = None 

+

344 total_res, unweighted_objective = self.goals.eval_difference( 

+

345 verbose=True, 

+

346 penaltyfactor=penaltyfactor 

+

347 ) 

+

348 if self.at_calibration: # Only plot if at_calibration 

+

349 self.logger.calibration_callback_func( 

+

350 xk=xk, 

+

351 obj=total_res, 

+

352 verbose_information=unweighted_objective, 

+

353 penalty=penalty 

+

354 ) 

+

355 # current best iteration step of current calibration class 

+

356 if total_res < self._current_best_iterate["Objective"]: 

+

357 # self.best_goals = self.goals 

+

358 self._current_best_iterate = { 

+

359 "Iterate": counter, 

+

360 "Objective": total_res, 

+

361 "Unweighted Objective": unweighted_objective, 

+

362 "Parameters": xk_descaled, 

+

363 "Goals": self.goals, 

+

364 # For penalty function and for saving goals as csv 

+

365 "better_current_result": True, 

+

366 # Changed to false in this script after calling function "save_calibration_results" 

+

367 "Penaltyfactor": penalty 

+

368 } 

+

369 

+

370 if counter >= self.max_itercount: 

+

371 raise MaxIterationsReached( 

+

372 "Terminating calibration as the maximum number " 

+

373 f"of iterations {self.max_itercount} has been reached." 

+

374 ) 

+

375 

+

376 return total_res, unweighted_objective 

+

377 

+

378 def calibrate(self, framework, method=None, **kwargs) -> dict: 

+

379 """ 

+

380 Start the calibration process of the calibration classes, visualize and save the results. 

+

381 

+

382 The arguments of this function are equal to the 

+

383 arguments in Optimizer.optimize(). Look at the docstring 

+

384 in ebcpy to know which options are available. 

+

385 """ 

+

386 # %% Start Calibration: 

+

387 self.at_calibration = True 

+

388 self.logger.log(f"Start calibration of model: {self.sim_api.model_name}" 

+

389 f" with framework-class {self.__class__.__name__}") 

+

390 self.logger.log(f"Class: {self.calibration_class.name}, Start and Stop-Time " 

+

391 f"of simulation: {self.calibration_class.start_time}" 

+

392 f"-{self.calibration_class.stop_time} s\n Time-Intervals used" 

+

393 f" for objective: {self.calibration_class.relevant_intervals}") 

+

394 

+

395 # Setup the visualizer for plotting and logging: 

+

396 self.logger.calibrate_new_class(self.calibration_class, 

+

397 cd=self.cd_of_class, 

+

398 for_validation=False) 

+

399 self.logger.log_initial_names() 

+

400 

+

401 # Duration of Calibration 

+

402 t_cal_start = time.time() 

+

403 

+

404 # Run optimization 

+

405 try: 

+

406 _res = self.optimize( 

+

407 framework=framework, 

+

408 method=method, 

+

409 n_cpu=self.sim_api.n_cpu, 

+

410 **kwargs) 

+

411 except MaxIterationsReached as err: 

+

412 self.logger.log(msg=str(err), level=logging.WARNING) 

+

413 t_cal_stop = time.time() 

+

414 t_cal = t_cal_stop - t_cal_start 

+

415 

+

416 # Check if optimization worked correctly 

+

417 if "Iterate" not in self._current_best_iterate: 

+

418 raise Exception( 

+

419 "Some error during calibration yielded no successful iteration. " 

+

420 "Can't save or return any results." 

+

421 ) 

+

422 

+

423 # %% Save the relevant results. 

+

424 self.logger.save_calibration_result(self._current_best_iterate, 

+

425 self.sim_api.model_name, 

+

426 duration=t_cal, 

+

427 itercount=self.recalibration_count) 

+

428 # Reset 

+

429 self._current_best_iterate['better_current_result'] = False 

+

430 

+

431 # Save calibrated parameter values in JSON 

+

432 parameter_values = {} 

+

433 for p_name in self._current_best_iterate['Parameters'].index: 

+

434 parameter_values[p_name] = self._current_best_iterate['Parameters'][p_name] 

+

435 self.save_results(parameter_values=parameter_values, 

+

436 filename=self.calibration_class.name) 

+

437 return parameter_values 

+

438 

+

439 @property 

+

440 def calibration_class(self) -> CalibrationClass: 

+

441 """Get the current calibration class""" 

+

442 return self._cal_class 

+

443 

+

444 @calibration_class.setter 

+

445 def calibration_class(self, calibration_class: CalibrationClass): 

+

446 """Set the current calibration class""" 

+

447 self.sim_api.set_sim_setup( 

+

448 {"start_time": self._apply_start_time_method(start_time=calibration_class.start_time), 

+

449 "stop_time": calibration_class.stop_time} 

+

450 ) 

+

451 self._cal_class = calibration_class 

+

452 

+

453 @property 

+

454 def tuner_paras(self) -> TunerParas: 

+

455 """Get the current tuner parameters of the calibration class""" 

+

456 return self.calibration_class.tuner_paras 

+

457 

+

458 @tuner_paras.setter 

+

459 def tuner_paras(self, tuner_paras: TunerParas): 

+

460 """Set the current tuner parameters of the calibration class""" 

+

461 self.calibration_class.tuner_paras = tuner_paras 

+

462 

+

463 @property 

+

464 def goals(self) -> Goals: 

+

465 """Get the current goals of the calibration class""" 

+

466 return self.calibration_class.goals 

+

467 

+

468 @goals.setter 

+

469 def goals(self, goals: Goals): 

+

470 """Set the current goals of the calibration class""" 

+

471 self.calibration_class.goals = goals 

+

472 

+

473 @property 

+

474 def fixed_parameters(self) -> dict: 

+

475 """Get the currently fixed parameters during calibration""" 

+

476 return self._fixed_pars 

+

477 

+

478 @fixed_parameters.setter 

+

479 def fixed_parameters(self, fixed_parameters: dict): 

+

480 """Set the currently fixed parameters during calibration""" 

+

481 self._fixed_pars = fixed_parameters 

+

482 

+

483 def save_results(self, parameter_values: dict, filename: str): 

+

484 """Saves the given dict into a file with path 

+

485 self.result_path and name filename.""" 

+

486 if self.result_path is not None: 

+

487 os.makedirs(self.result_path, exist_ok=True) 

+

488 s_path = os.path.join(self.result_path, f'{filename}.json') 

+

489 with open(s_path, 'w') as json_file: 

+

490 json.dump(parameter_values, json_file, indent=4) 
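For illustration, the file written above has a flat name-to-value layout, so reading it back is symmetric (path and parameter names below are invented):

```python
import json
import os

# Hypothetical read-back of a stored result; layout matches json.dump above.
with open(os.path.join("results/json", "device on.json")) as json_file:
    parameter_values = json.load(json_file)  # e.g. {"heatConductor.G": 1.23}
```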

+

491 

+

492 def validate(self, validation_class: CalibrationClass, calibration_result: Dict, verbose=False): 

+

493 """ 

+

494 Validate the given calibration class based on the given 

+

495 values for tuner_parameters. 

+

496 

+

497 :param CalibrationClass validation_class: 

+

498 The class to validate on 

+

499 :param dict calibration_result: 

+

500 The calibration result to apply to the validation class.

+

501 """ 

+

502 # Start Validation: 

+

503 self.at_calibration = False 

+

504 self.logger.log(f"Start validation of model: {self.sim_api.model_name} with " 

+

505 f"framework-class {self.__class__.__name__}") 

+

506 # Use start-time of calibration class 

+

507 self.calibration_class = validation_class 

+

508 start_time = self._apply_start_time_method( 

+

509 start_time=self.calibration_class.start_time 

+

510 ) 

+

511 old_tuner_paras = copy(self.calibration_class.tuner_paras) 

+

512 tuner_values = list(calibration_result.values()) 

+

513 self.calibration_class.tuner_paras = TunerParas( 

+

514 names=list(calibration_result.keys()), 

+

515 initial_values=tuner_values, 

+

516 # Dummy bounds as they are scaled anyway 

+

517 bounds=[(val - 1, val + 1) for val in tuner_values] 

+

518 ) 

+

519 

+

520 # Set the start-time for the simulation 

+

521 self.sim_api.sim_setup.start_time = start_time 

+

522 

+

523 self.logger.calibrate_new_class(self.calibration_class, 

+

524 cd=self.cd_of_class, 

+

525 for_validation=True) 

+

526 

+

527 # Use the results parameter vector to simulate again. 

+

528 self._counter = 0  # Reset the counter

+

529 # Scale the tuner parameters 

+

530 xk = self.tuner_paras.scale(tuner_values) 

+

531 # Evaluate objective 

+

532 obj, unweighted_objective = self.obj(xk=xk) 

+

533 self.logger.validation_callback_func( 

+

534 obj=obj 

+

535 ) 

+

536 # Reset tuner_parameters to avoid unwanted behaviour 

+

537 self.calibration_class.tuner_paras = old_tuner_paras 

+

538 if verbose: 

+

539 weights = [1] 

+

540 objectives = [obj] 

+

541 goals = ['all'] 

+

542 for goal, val in unweighted_objective.items(): 

+

543 weights.append(val[0]) 

+

544 objectives.append(val[1]) 

+

545 goals.append(goal) 

+

546 index = pd.MultiIndex.from_product( 

+

547 [[validation_class.name], goals], 

+

548 names=['Class', 'Goal'] 

+

549 ) 

+

550 obj_verbos = pd.DataFrame( 

+

551 {'weight': weights, validation_class.goals.statistical_measure: objectives}, 

+

552 index=index 

+

553 ) 

+

554 return obj_verbos 

+

555 return obj 

+

556 

+

557 def _handle_error(self, error): 

+

558 """ 

+

559 Also save the plots if an error occurs. 

+

560 See ebcpy.optimization.Optimizer._handle_error for more info. 

+

561 """ 

+

562 # This error is our own, we handle it in the calibrate() function 

+

563 if isinstance(error, MaxIterationsReached): 

+

564 raise error 

+

565 self.logger.save_calibration_result(best_iterate=self._current_best_iterate, 

+

566 model_name=self.sim_api.model_name, 

+

567 duration=0, 

+

568 itercount=0) 

+

569 super()._handle_error(error) 

+

570 

+

571 def get_penalty(self, current_tuners, current_tuners_scaled): 

+

572 """ 

+

573 Get the penalty factor for evaluating the current objective. The penalty factor

+

574 considers deviations of the tuner parameters in the objective function. 

+

575 First the relative deviation between the current best values 

+

576 of the tuner parameters from the recalibration steps and 

+

577 the tuner parameters obtained in the current iteration step is determined. 

+

578 Then the penalty factor is increased according to the relative deviation.

+

579 

+

580 :param pd.Series current_tuners:

+

581 Descaled tuner parameter values of the current iteration step.

+

582 :return: float penalty 

+

583 Penaltyfactor for evaluation. 

+

584 """ 

+

585 # TO-DO: Add possibility to consider the sensitivity of tuner parameters 

+

586 

+

587 # Get lists of tuner values (latest best (with all other tuners) & current values) 

+

588 previous = self.sim_api.all_tuners_dict 

+

589 previous_scaled = self.sim_api.all_tuners_dict_scaled 

+

590 # previous_scaled = list(self.sim_api.all_tuners_dict.keys()) 

+

591 current = current_tuners 

+

592 current_scaled = dict(current_tuners_scaled) 

+

593 

+

594 # Apply penalty function 

+

595 penalty = 1 

+

596 for key, value in current_scaled.items(): 

+

597 # Add corresponding function for penaltyfactor here 

+

598 if self.perform_square_deviation: 

+

599 # Apply quadratic deviation 

+

600 dev_square = (value - previous_scaled[key]) ** 2 

+

601 penalty += self.penalty_factor * dev_square 

+

602 else: 

+

603 # Apply relative deviation 

+

604 # Ignore tuner parameters whose current best value is 0

+

605 if previous[key] == 0: 

+

606 continue 

+

607 # Get relative deviation of tuner values (reference: previous) 

+

608 try: 

+

609 dev = abs(current[key] - previous[key]) / abs(previous[key]) 

+

610 penalty += self.penalty_factor * dev 

+

611 except ZeroDivisionError: 

+

612 pass 

+

613 

+

614 return penalty 

+

615 

+

616 def _apply_start_time_method(self, start_time): 

+

617 """ 

+

618 Method to calculate the start_time based on the used

+

619 timedelta method. 

+

620 

+

621 :param float start_time: 

+

622 Start time which was specified by the user in the TOML file. 

+

623 :return float start_time - self.timedelta: 

+

624 The start time reduced by the timedelta, if specified in the TOML file.

+

625 """ 

+

626 return start_time - self.timedelta 

+
diff --git a/docs/master/coverage/d_88c635a12c52f913_multi_class_calibrator_py.html b/docs/master/coverage/d_88c635a12c52f913_multi_class_calibrator_py.html
new file mode 100644
index 00000000..e5613363
--- /dev/null
+++ b/docs/master/coverage/d_88c635a12c52f913_multi_class_calibrator_py.html
@@ -0,0 +1,366 @@
Coverage for aixcalibuha/calibration/multi_class_calibrator.py: 89% (98 statements)
coverage.py v7.4.1, created at 2024-01-27 10:48 +0000

1""" 

+

2Module containing a class for 

+

3calibrating multiple calibration classes at once. 

+

4""" 

+

5 

+

6import os 

+

7from typing import List 

+

8import numpy as np 

+

9from aixcalibuha import CalibrationClass, data_types 

+

10from aixcalibuha.calibration import Calibrator 

+

11 

+

12 

+

13class MultipleClassCalibrator(Calibrator): 

+

14 r""" 

+

15 Class for calibration of multiple calibration classes. 

+

16 When passing multiple classes of the same name, these classes

+

17 are merged into one class with so-called relevant time intervals.

+

18 These time intervals are used for the evaluation of the objective 

+

19 function. Please have a look at the file in docs\img\typeOfContinouusCalibration.pdf 

+

20 for a better understanding on how this class works. 

+

21 

+

22 :param str start_time_method: 

+

23 Default is 'fixstart'. Method you want to use to 

+

24 specify the start time of your simulation. If 'fixstart' is selected, 

+

25 the keyword argument fixstart is used for all classes (Default is 0). 

+

26 If 'timedelta' is used, the keyword argument timedelta specifies the 

+

27 time being subtracted from each start time of each calibration class. 

+

28 Please have a look at the file in docs\img\typeOfContinouusCalibration.pdf 

+

29 for a better visualization. 

+

30 :param str calibration_strategy: 

+

31 Default is 'parallel'. Strategy you want to use for multi-class calibration. 

+

32 If 'parallel' is used, parameters will be calibrated on the respective time intervals 

+

33 independently. If 'sequential' is used, the order of the calibration classes matters: 

+

34 The resulting parameter values of one class will be used as starting values for calibration 

+

35 on the next class. 

+

36 :keyword float fix_start_time: 

+

37 Value for the fix start time if start_time_method="fixstart". Default is zero. 

+

38 :keyword float timedelta: 

+

39 Value for timedelta if start_time_method="timedelta". Default is zero. 

+

40 :keyword str merge_multiple_classes: 

+

41 Default True. If False, the given list of calibration-classes 

+

42 is handled as-is. This means if you pass two CalibrationClass objects

+

43 with the same name (e.g. "device on"), the calibration process will run 

+

44 for both these classes stand-alone. 

+

45 This will automatically yield an intersection of tuner-parameters, but may

+

46 have advantages in some cases. 

+

47 """ 

+

48 

+

49 # Default value for the reference time is zero 

+

50 fix_start_time = 0 

+

51 merge_multiple_classes = True 

+

52 

+

53 def __init__(self, 

+

54 cd: str, 

+

55 sim_api, 

+

56 calibration_classes: List[CalibrationClass], 

+

57 start_time_method: str = 'fixstart', 

+

58 calibration_strategy: str = 'parallel', 

+

59 **kwargs): 

+

60 # Check if input is correct 

+

61 if not isinstance(calibration_classes, list): 

+

62 raise TypeError("calibration_classes is of type " 

+

63 "%s but should be list" % type(calibration_classes).__name__) 

+

64 

+

65 for cal_class in calibration_classes: 

+

66 if not isinstance(cal_class, CalibrationClass): 

+

67 raise TypeError(f"calibration_classes is of type {type(cal_class).__name__} " 

+

68 f"but should be CalibrationClass") 

+

69 # Pop kwargs of this class (pass parameters and remove from kwarg dict): 

+

70 self.merge_multiple_classes = kwargs.pop("merge_multiple_classes", True) 

+

71 # Apply (if given) the fix_start_time. Check for correct input as-well. 

+

72 self.fix_start_time = kwargs.pop("fix_start_time", 0) 

+

73 self.timedelta = kwargs.pop("timedelta", 0) 

+

74 

+

75 # Choose the time-method 

+

76 if start_time_method.lower() not in ["fixstart", "timedelta"]: 

+

77 raise ValueError(f"Given start_time_method {start_time_method} is not supported. " 

+

78 "Please choose between 'fixstart' or 'timedelta'") 

+

79 self.start_time_method = start_time_method 

+

80 

+

81 # Choose the calibration method 

+

82 if calibration_strategy.lower() not in ['parallel', 'sequential']: 

+

83 raise ValueError(f"Given calibration_strategy {calibration_strategy} is not supported. " 

+

84 f"Please choose between 'parallel' or 'sequential'") 

+

85 self.calibration_strategy = calibration_strategy.lower() 

+

86 

+

87 # Instantiate parent-class 

+

88 super().__init__(cd, sim_api, calibration_classes[0], **kwargs) 

+

89 # Merge the multiple calibration_classes 

+

90 if self.merge_multiple_classes: 

+

91 self.calibration_classes = data_types.merge_calibration_classes(calibration_classes) 

+

92 self._cal_history = [] 

+

93 

+

94 def calibrate(self, framework, method=None, **kwargs) -> dict: 

+

95 """ 

+

96 Start the calibration process. 

+

97 

+

98 :return dict self.res_tuner: 

+

99 Dictionary of the optimized tuner parameter names and values. 

+

100 :return dict self._current_best_iterate: 

+

101 Dictionary of the current best results: tuner parameters,

+

102 iteration step, objective value, information 

+

103 about the goals object and the penaltyfactor. 

+

104 """ 

+

105 # First check possible intersections of tuner-parameters

+

106 # and warn the user about it 

+

107 all_tuners = [] 

+

108 for cal_class in self.calibration_classes: 

+

109 all_tuners.append(cal_class.tuner_paras.get_names()) 

+

110 intersection = set(all_tuners[0]).intersection(*all_tuners) 

+

111 if intersection and len(self.calibration_classes) > 1: 

+

112 self.logger.log("The following tuner-parameters intersect over multiple" 

+

113 f" classes:\n{', '.join(list(intersection))}") 

+

114 

+

115 # Iterate over the different existing classes 

+

116 for cal_class in self.calibration_classes: 

+

117 #%% Working-Directory: 

+

118 # Alter the working directory for saving the simulation results

+

119 self.cd_of_class = os.path.join(self.cd, 

+

120 f"{cal_class.name}_" 

+

121 f"{cal_class.start_time}_" 

+

122 f"{cal_class.stop_time}") 

+

123 self.sim_api.set_cd(self.cd_of_class) 

+

124 

+

125 #%% Calibration-Setup 

+

126 # Reset counter for new calibration 

+

127 self._counter = 0 

+

128 # Retrieve already calibrated parameters (i.e. calibrated in the previous classes) 

+

129 already_calibrated_parameters = {} 

+

130 for cal_run in self._cal_history: 

+

131 for par_name in cal_run['res']['Parameters'].index: 

+

132 already_calibrated_parameters[par_name] = cal_run['res']['Parameters'][par_name] 

+

133 # Set fixed names: 

+

134 self.fixed_parameters.update(already_calibrated_parameters) 

+

135 

+

136 # Reset best iterate for new class 

+

137 self._current_best_iterate = {"Objective": np.inf} 

+

138 self.calibration_class = cal_class 

+

139 

+

140 # Set initial values 

+

141 initial_values = self.tuner_paras.get_initial_values() 

+

142 for idx, par_name in enumerate(self.tuner_paras.get_names()): 

+

143 if self.calibration_strategy == "sequential": 

+

144 # Use already calibrated values as initial value for new calibration 

+

145 # Delete it from fixed values and retrieve the value

+

146 initial_values[idx] = self.fixed_parameters.pop(par_name, 

+

147 initial_values[idx]) 

+

148 else: 

+

149 try: 

+

150 self.fixed_parameters.pop(par_name) # Just delete, don't use the value 

+

151 except KeyError: 

+

152 pass  # Faster than checking if it is in the dict.

+

153 

+

154 self.x0 = self.tuner_paras.scale(initial_values) 

+

155 # Either bounds are present or not. 

+

156 # If present, the obj will scale the values to 0 and 1. If not 

+

157 # we have an unconstrained optimization. 

+

158 if self.tuner_paras.bounds is None: 

+

159 self.bounds = None 

+

160 else: 

+

161 self.bounds = [(0, 1) for i in range(len(self.x0))] 

+

162 

+

163 #%% Execution 

+

164 # Run the single ModelicaCalibration 

+

165 super().calibrate(framework=framework, method=method, **kwargs) 

+

166 

+

167 #%% Post-processing 

+

168 # Append result to list for future perturbation based on older results. 

+

169 self._cal_history.append({"res": self._current_best_iterate, 

+

170 "cal_class": cal_class}) 

+

171 

+

172 res_tuner = self.check_intersection_of_tuner_parameters() 

+

173 

+

174 # Save calibrated parameter values in JSON 

+

175 parameter_values = {} 

+

176 for cal_run in self._cal_history: 

+

177 for p_name in cal_run['res']['Parameters'].index: 

+

178 parameter_values[p_name] = cal_run['res']['Parameters'][p_name] 

+

179 for p_name, res_intersection in res_tuner.items(): 

+

180 parameter_values[p_name] = res_intersection 

+

181 self.save_results(parameter_values=parameter_values, 

+

182 filename='MultiClassCalibrationResult') 

+

183 

+

184 return parameter_values 

+

185 
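A hedged usage sketch of the method above; the framework key follows the optimization settings later in this document, while the calibrator construction (cd, sim_api, calibration_classes as in the constructor) and the method name are assumptions:

calibrator = MultipleClassCalibrator(cd, sim_api, calibration_classes,
                                     calibration_strategy="sequential")
parameter_values = calibrator.calibrate(framework="scipy_differential_evolution",
                                        method="differential_evolution")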

+

186 def _apply_start_time_method(self, start_time): 

+

187 """ 

+

188 Method to be calculate the start_time based on the used 

+

189 start-time-method (timedelta or fix-start). 

+

190 

+

191 :param float start_time: 

+

192 Start time which was specified by the user in the TOML file. 

+

193 :return float start_time - self.timedelta: 

+

194 Calculated "timedelta", if specified in the TOML file. 

+

195 :return float self.fix_start_time: 

+

196 Fix start time which was specified by the user in the TOML file. 

+

197 """ 

+

198 if self.start_time_method == "timedelta": 

+

199 # Check that start_time minus the timedelta does not

+

200 # fall below zero (start_time should not be lower than zero)

+

201 if start_time - self.timedelta < 0: 

+

202 # pylint: disable=import-outside-toplevel 

+

203 import warnings 

+

204 warnings.warn( 

+

205 'Simulation start time of the current calibration class \n'

+

206 ' falls below 0, because of the chosen timedelta. ' 

+

207 'The start time will be set to 0 seconds.' 

+

208 ) 

+

209 return 0 

+

210 # Using timedelta, the timedelta is subtracted from the given start time

+

211 return start_time - self.timedelta 

+

212 else: 

+

213 # With fixed start, the fix_start_time parameter is always returned

+

214 return self.fix_start_time 

+

215 
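A minimal numeric sketch (invented values) of the two start-time methods above:

timedelta, fix_start_time = 3600, 0.0
start_time = 7200
print(start_time - timedelta)  # "timedelta": 3600; clipped to 0 if negative
print(fix_start_time)          # "fixstart": always the fixed TOML value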

+

216 def check_intersection_of_tuner_parameters(self): 

+

217 """ 

+

218 Checks intersections between tuner parameters. 

+

219 

+

220 :return dict res_tuner: 

+

221 Dictionary of the optimized tuner parameter names and values. 

+

222 """ 

+

223 

+

224 # merge all tuners (writes all values from all classes in one dictionary) 

+

225 merged_tuner_parameters = {} 

+

226 for cal_class in self._cal_history: 

+

227 for tuner_name, best_value in cal_class["res"]["Parameters"].items(): 

+

228 if tuner_name not in merged_tuner_parameters:

+

229 merged_tuner_parameters[tuner_name] = [best_value]

+

230 elif best_value not in merged_tuner_parameters[tuner_name]:

+

231 # Append only new values; never overwrite the collected list

+

232 merged_tuner_parameters[tuner_name].append(best_value)

+

233 

+

234 # Get tuner parameter 

+

235 res_tuner = {} 

+

236 for tuner_para, values in merged_tuner_parameters.items(): 

+

237 res_tuner[tuner_para] = values[0] 

+

238 

+

239 # Keep only parameters with multiple values; single values are of no interest

+

240 intersected_tuners = {} 

+

241 for tuner_para, values in merged_tuner_parameters.items(): 

+

242 if len(values) >= 2: 

+

243 intersected_tuners[tuner_para] = values 

+

244 

+

245 # Handle tuner intersections 

+

246 if intersected_tuners.keys(): 

+

247 # Plot or log the information, depending on which logger you are using: 

+

248 self.logger.log_intersection_of_tuners(intersected_tuners, 

+

249 itercount=self.recalibration_count) 

+

250 

+

251 # Return the average value of ALL tuner parameters (not only the intersected ones).

+

252 # Reason: if a tuner parameter occurs in multiple classes but the

+

253 # calibration results happen to be exactly equal, it does not show up in

+

254 # "intersected_tuners". Averaging only the intersected parameters would

+

255 # then drop this parameter from "res_tuner" whenever any other

+

256 # tuner parameter does intersect.

+

257 average_tuner_parameter = {} 

+

258 for tuner_para, values in merged_tuner_parameters.items(): 

+

259 average_tuner_parameter[tuner_para] = sum(values) / len(values) 

+

260 

+

261 self.logger.log("The tuner parameters used for evaluation " 

+

262 "are averaged as follows:\n " 

+

263 "{}".format(' ,'.join([f"{tuner}={value}" 

+

264 for tuner, value in average_tuner_parameter.items()]))) 

+

265 

+

266 # Create result-dictionary 

+

267 res_tuner = average_tuner_parameter 

+

268 

+

269 return res_tuner 
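An illustrative sketch (invented values) of the merge-and-average logic above:

merged_tuner_parameters = {"heatPump.k": [0.8, 0.6], "valve.m_flow": [0.2]}
res_tuner = {name: sum(values) / len(values)
             for name, values in merged_tuner_parameters.items()}
print(res_tuner)  # {'heatPump.k': 0.7, 'valve.m_flow': 0.2}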

+
diff --git a/docs/master/coverage/d_c1d4b9f643948c74___init___py.html b/docs/master/coverage/d_c1d4b9f643948c74___init___py.html new file mode 100644 index 00000000..d743fd17 --- /dev/null +++ b/docs/master/coverage/d_c1d4b9f643948c74___init___py.html @@ -0,0 +1,129 @@
+ Coverage for aixcalibuha/utils/__init__.py: 83% (12 statements)
+ coverage.py v7.4.1, created at 2024-01-27 10:48 +0000

1""" 

+

2Package containing utility functions used in different packages. 

+

3Contains a statistics analyzer and a visualizer. 

+

4""" 

+

5from typing import Union, List 

+

6from aixcalibuha import CalibrationClass 

+

7 

+

8 

+

9def validate_cal_class_input( 

+

10 calibration_classes: Union[CalibrationClass, List[CalibrationClass]] 

+

11) -> List[CalibrationClass]: 

+

12 """Check if given list contains only CalibrationClass objects or is one 

+

13 and return a list in both cases. Else raise an error""" 

+

14 if isinstance(calibration_classes, list): 

+

15 for cal_class in calibration_classes: 

+

16 if not isinstance(cal_class, CalibrationClass): 

+

17 raise TypeError(f"calibration_classes is of type {type(cal_class).__name__} " 

+

18 f"but should be CalibrationClass") 

+

19 elif isinstance(calibration_classes, CalibrationClass): 

+

20 calibration_classes = [calibration_classes] 

+

21 else: 

+

22 raise TypeError(f"calibration_classes is of type {type(calibration_classes).__name__} " 

+

23 f"but should be CalibrationClass or list") 

+

24 return calibration_classes 

+

25 
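A usage sketch of validate_cal_class_input above, assuming a CalibrationClass constructed with the keywords used elsewhere in this module:

single = CalibrationClass(name="heating", start_time=0, stop_time=3600)
assert validate_cal_class_input(single) == [single]    # single object is wrapped
assert validate_cal_class_input([single]) == [single]  # lists pass through
# validate_cal_class_input("heating") would raise a TypeError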

+

26 

+

27class MaxIterationsReached(Exception): 

+

28 """ 

+

29 Exception raised when the calibration

+

30 ends because the maximum number of 

+

31 allowed iterations is reached. 

+

32 """ 

+
diff --git a/docs/master/coverage/d_c1d4b9f643948c74_configuration_py.html b/docs/master/coverage/d_c1d4b9f643948c74_configuration_py.html new file mode 100644 index 00000000..9fbb056f --- /dev/null +++ b/docs/master/coverage/d_c1d4b9f643948c74_configuration_py.html @@ -0,0 +1,312 @@
+ Coverage for aixcalibuha/utils/configuration.py: 70% (53 statements)
+ coverage.py v7.4.1, created at 2024-01-27 10:48 +0000

1""" 

+

2 Module with configs and functions to read configs for objects in this repository.

+

3""" 

+

4import os 

+

5import collections 

+

6import toml 

+

7import numpy as np 

+

8from ebcpy import data_types 

+

9from aixcalibuha import Goals, CalibrationClass, TunerParas 

+

10 

+

11tsd_config = {"data": "TODO: Specify the path to the target values measured", 

+

12 "key": None, 

+

13 "sheet_name": None, 

+

14 "sep": ","} 

+

15 

+

16kwargs_calibrator = {"timedelta": 0, 

+

17 "save_files": False, 

+

18 "verbose_logging": True, 

+

19 "show_plot": True, 

+

20 "create_tsd_plot": True, 

+

21 "save_tsd_plot": True, 

+

22 "fail_on_error": False, 

+

23 "ret_val_on_error": np.NAN} 

+

24 

+

25# Specify kwargs for multiple-class-calibration 

+

26kwargs_multiple_classes = {"merge_multiple_classes": True, 

+

27 "fix_start_time": 0, 

+

28 "timedelta": 0} 

+

29 

+

30default_input_config = {"sim_input_names": None, 

+

31 "sim_data_path": None, 

+

32 "meas_input_names": None, 

+

33 "meas_input_data": tsd_config, 

+

34 } 

+

35 

+

36default_cal_class_config = {"name": "TODO: Specify the name of the calibration class", 

+

37 "start_time": "TODO: Specify the start time of the class - e.g 0", 

+

38 "stop_time": "TODO: Specify the end time of the class", 

+

39 "goals": {"meas_target_data": tsd_config, 

+

40 "variable_names": "TODO: Specify variable names", 

+

41 "weightings": "TODO: Insert null if you don´t need special weightings. " 

+

42 "Else specify which goal get´s which weighting through a list"}, 

+

43 "tuner_paras": {"names": 

+

44 "TODO: Specify the names of the tuner parameters list", 

+

45 "initial_values": 

+

46 "TODO: Specify the inital values of the tuner parameters list", 

+

47 "bounds": 

+

48 "TODO: Specify the boundaries of the tuner parameters as a list of tuples"}} 

+

49 

+

50default_calibration_config = { 

+

51 "statistical_measure": "TODO: Specify the statistical " 

+

52 "measure for calibration (RMSE, MAE, etc.)", 

+

53 "calibration_classes": [default_cal_class_config], 

+

54 "start_time_method": 'fixstart', 

+

55 "settings": kwargs_calibrator, 

+

56 "settings multiple classes": kwargs_multiple_classes 

+

57} 

+

58 

+

59kwargs_scipy_dif_evo = {"maxiter": 30, 

+

60 "popsize": 5, 

+

61 "mutation": (0.5, 1), 

+

62 "recombination": 0.7, 

+

63 "seed": None, 

+

64 "polish": True, 

+

65 "init": 'latinhypercube', 

+

66 "atol": 0} 

+

67 

+

68kwargs_dlib_min = {"num_function_calls": int(1e9), 

+

69 "solver_epsilon": 0} 

+

70 

+

71kwargs_scipy_min = {"tol": None, 

+

72 "options": {"maxfun": 1}, 

+

73 "constraints": None, 

+

74 "jac": None, 

+

75 "hess": None, 

+

76 "hessp": None} 

+

77 

+

78default_optimization_config = {"framework": "TODO: Choose the framework for calibration", 

+

79 "method": "TODO: Choose the method of the framework", 

+

80 "settings": { 

+

81 "scipy_differential_evolution": kwargs_scipy_dif_evo, 

+

82 "dlib_minimize": kwargs_dlib_min, 

+

83 "scipy_minimize": kwargs_scipy_min} 

+

84 } 

+

85 

+

86default_sim_config = {"packages": None, 

+

87 "model_name": None, 

+

88 "type": "DymolaAPI", 

+

89 "dymola_path": None, 

+

90 "dymola_interface_path": None, 

+

91 "equidistant_output": True, 

+

92 "show_window": False, 

+

93 "get_structural_parameters": True 

+

94 } 

+

95 

+

96default_config = { 

+

97 "Working Directory": "TODO: Add the path where you want to work here", 

+

98 "SimulationAPI": default_sim_config, 

+

99 "Optimization": default_optimization_config, 

+

100 "Input Data": default_input_config, 

+

101 "Calibration": default_calibration_config 

+

102} 

+

103 

+

104 

+

105def get_goals_from_config(config): 

+

106 """ 

+

107 Read the data for a Goals object. 

+

108 

+

109 :param dict config: 

+

110 Config holding the following keys:

+

111 - meas_target_data 

+

112 - variable_names 

+

113 - Optional: weightings 

+

114 :return: Goals goals 

+

115 Loaded Goals object 

+

116 """ 

+

117 config_mtd = config["meas_target_data"] 

+

118 mtd = data_types.TimeSeriesData(**config_mtd) 

+

119 return Goals(meas_target_data=mtd, 

+

120 variable_names=config["variable_names"], 

+

121 statistical_measure=config["statistical_measure"], 

+

122 weightings=config.get("weightings", None)) 

+

123 
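A minimal sketch (hypothetical file name and variable names) of a config dict that get_goals_from_config above expects; note that it must also carry the statistical_measure key:

goals_config = {
    "meas_target_data": {"data": "measured_data.csv", "key": None,
                         "sheet_name": None, "sep": ","},
    "variable_names": {"T_out": ["measured.T_out", "simulated.T_out"]},
    "statistical_measure": "RMSE",
    "weightings": None,
}
goals = get_goals_from_config(goals_config)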

+

124 

+

125def get_tuner_paras_from_config(config): 

+

126 """ 

+

127 Read the data for a TunerParas object. 

+

128 

+

129 :param dict config: 

+

130 Config holding the following keys:

+

131 - names 

+

132 - initial_values 

+

133 - bounds 

+

134 :return: TunerParas tuner_paras 

+

135 Loaded TunerParas object

+

136 """ 

+

137 return TunerParas(names=config["names"], 

+

138 initial_values=config["initial_values"], 

+

139 bounds=config["bounds"]) 

+

140 

+

141 

+

142def get_calibration_classes_from_config(config): 

+

143 """ 

+

144 Read the data for a CalibrationClass object. 

+

145 

+

146 :param list config: 

+

147 List of dicts with configs holding the following keys:

+

148 - names 

+

149 - start_time 

+

150 - stop_time 

+

151 - Optional: goals, tuner_paras, relevant_intervals 

+

152 :return: list cal_classes

+

153 Loaded list of CalibrationClass objects

+

154 """ 

+

155 cal_classes = [] 

+

156 for cal_class_config in config: 

+

157 goals, tuner_paras = None, None 

+

158 if "goals" in cal_class_config: 

+

159 goals = get_goals_from_config(cal_class_config["goals"]) 

+

160 if "tuner_paras" in cal_class_config: 

+

161 tuner_paras = get_tuner_paras_from_config(cal_class_config["tuner_paras"]) 

+

162 cal_classes.append( 

+

163 CalibrationClass(name=cal_class_config["name"], 

+

164 start_time=cal_class_config["start_time"], 

+

165 stop_time=cal_class_config["stop_time"], 

+

166 goals=goals, 

+

167 tuner_paras=tuner_paras, 

+

168 relevant_intervals=cal_class_config.get("relevant_intervals", None))) 

+

169 return cal_classes 

+

170 

+

171 

+

172def write_config(filepath, config): 

+

173 """ 

+

174 Write the given config to the filepath. 

+

175 If the file already exists, the data is recursively 

+

176 updated. 

+

177 

+

178 :param str,os.path.normpath filepath: 

+

179 Filepath with the config. 

+

180 :param: dict config: 

+

181 Config to be saved 

+

182 """ 

+

183 if os.path.exists(filepath): 

+

184 existing_config = read_config(filepath) 

+

185 if existing_config: 

+

186 config = _update(existing_config, config) 

+

187 

+

188 with open(filepath, "a+") as file: 

+

189 file.seek(0) 

+

190 file.truncate() 

+

191 toml.dump(config, file) 

+

192 

+

193 

+

194def read_config(filepath): 

+

195 """ 

+

196 Read the given file and return the toml-config 

+

197 

+

198 :param str,os.path.normpath filepath: 

+

199 Filepath with the config. 

+

200 :return: dict config: 

+

201 Loaded config 

+

202 """ 

+

203 with open(filepath, "r") as file: 

+

204 config = toml.load(file) 

+

205 return config 

+

206 
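A hedged round-trip sketch of write_config and read_config above (hypothetical path):

write_config("config.toml", {"Calibration": {"start_time_method": "fixstart"}})
config = read_config("config.toml")
print(config["Calibration"]["start_time_method"])  # fixstart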

+

207 

+

208def _update(dic, new_dic): 

+

209 """Recursively update a given dictionary with a new one""" 

+

210 for key, val in new_dic.items(): 

+

211 if isinstance(val, collections.abc.Mapping): 

+

212 dic[key] = _update(dic.get(key, {}), val) 

+

213 else: 

+

214 dic[key] = val 

+

215 return dic 
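A minimal sketch (invented keys) of the recursive merge performed by _update above:

base = {"settings": {"timedelta": 0, "show_plot": True}}
new = {"settings": {"timedelta": 100}}
print(_update(base, new))
# {'settings': {'timedelta': 100, 'show_plot': True}} -- nested keys survive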

+
diff --git a/docs/master/coverage/d_c1d4b9f643948c74_visualizer_py.html b/docs/master/coverage/d_c1d4b9f643948c74_visualizer_py.html new file mode 100644 index 00000000..4ba40b7e --- /dev/null +++ b/docs/master/coverage/d_c1d4b9f643948c74_visualizer_py.html @@ -0,0 +1,797 @@
+ Coverage for aixcalibuha/utils/visualizer.py: 87% (305 statements)
+ coverage.py v7.4.1, created at 2024-01-27 10:48 +0000

1""" 

+

2Module with classes and function to help visualize 

+

3different processes inside the framework. Both plots 

+

4and print-function/log-function will be implemented here. 

+

5The Visualizer Class inherits the Logger class, as logging 

+

6will always be used as a default. 

+

7""" 

+

8import os 

+

9import logging 

+

10import csv 

+

11from shutil import copyfile 

+

12import matplotlib.pyplot as plt 

+

13import numpy as np 

+

14from ebcpy.utils import setup_logger 

+

15import aixcalibuha 

+

16 

+

17 

+

18def short_name(ini_name: str, max_len: int): 

+

19 """ 

+

20 Shortens long strings to a max length from the front. 

+

21 long_string_name => ...ing_name with len(new_name) = max_len 

+

22 

+

23 :param str ini_name: 

+

24 Long string to shorten. 

+

25 :param int max_len: 

+

26 Max len of the new string. 

+

27 :return: str 

+

28 The shortened string.

+

29 """ 

+

30 if len(ini_name) > max_len: 

+

31 num_dots = len(ini_name) - max_len 

+

32 if num_dots > 3: 

+

33 num_dots = 3 

+

34 formatted_name = "." * num_dots + ini_name[-(max_len - num_dots):] 

+

35 else: 

+

36 formatted_name = ini_name 

+

37 return formatted_name 

+

38 
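Usage sketch of short_name above:

print(short_name("long_string_name", 9))  # '...g_name' (exactly 9 characters)
print(short_name("short", 9))             # 'short' (returned unchanged)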

+

39 

+

40class CalibrationLogger: 

+

41 """Base class for showing the process of functions in 

+

42 this Framework with print-statements and saving everything 

+

43 relevant as a log-file. 

+

44 

+

45 :param str,os.path.normpath cd: 

+

46 Directory where to store the output of the Logger and possible 

+

47 child-classes. If the given directory can not be created, an error 

+

48 will be raised. 

+

49 :param str name: 

+

50 Name of the reason of logging, e.g. classification, processing etc. 

+

51 :param aixcalibuha.CalibrationClass calibration_class: 

+

52 Calibration class used in the calibration-process. 

+

53 :param logging.Logger logger: 

+

54 If given, this logger is used to print and/or save the messages.

+

55 Else, a new one is set up. 

+

56 """ 

+

57 

+

58 # Instantiate class parameters 

+

59 integer_prec = 4 # Number of integer parts 

+

60 decimal_prec = 6 

+

61 _counter_calibration = 0 # Number of function calls of calibration 

+

62 _prec = decimal_prec 

+

63 _width = integer_prec + decimal_prec + 1 # Calculate the actual width 

+

64 

+

65 def __init__(self, cd, name, calibration_class, logger=None): 

+

66 """Instantiate class parameters""" 

+

67 self._tuner_paras = None 

+

68 self._goals = None 

+

69 if logger is None: 

+

70 self.logger = setup_logger(cd=cd, name=name) 

+

71 else: 

+

72 if not isinstance(logger, logging.Logger): 

+

73 raise TypeError(f"Given logger is of type {type(logger)} " 

+

74 f"but should be type logging.Logger") 

+

75 self.logger = logger 

+

76 self.cd = cd 

+

77 self.calibration_class = calibration_class 

+

78 

+

79 def log(self, msg, level=logging.INFO): 

+

80 """Wrapper function to directly log in the internal logger""" 

+

81 self.logger.log(msg=msg, level=level) 

+

82 

+

83 def error(self, msg): 

+

84 """Wrapper function to directly log an error""" 

+

85 self.logger.error(msg=msg) 

+

86 

+

87 def _set_prec_and_with_for_tuner_paras(self): 

+

88 if self.tuner_paras.bounds is None: 

+

89 self.integer_prec = 4 # Number of integer parts 

+

90 else: 

+

91 bounds_min, bounds_max = self.tuner_paras.get_bounds() 

+

92 maximal_value = max(max(bounds_max), max(abs(bounds_min))) 

+

93 self.integer_prec = len(str(int(maximal_value))) 

+

94 self._counter_calibration = 0 # Number of function calls of calibration 

+

95 self._width = self.integer_prec + self.decimal_prec + 1 # Calculate the actual width 

+

96 

+

97 def calibration_callback_func(self, xk, obj, verbose_information, penalty=None): 

+

98 """ 

+

99 Logs the current values of the objective function. 

+

100 

+

101 :param np.array xk: 

+

102 Array with the current values of the calibration 

+

103 :param float obj: 

+

104 Current objective value. 

+

105 :param dict verbose_information: 

+

106 A dict with the difference values for all goals and the

+

107 corresponding weightings 

+

108 :param float penalty: 

+

109 Penaltyfactor from current evaluation 

+

110 """ 

+

111 xk_descaled = self.tuner_paras.descale(xk) 

+

112 self._counter_calibration += 1 

+

113 if penalty is None: 

+

114 info_string = self._get_tuner_para_values_as_string( 

+

115 xk_descaled, obj, verbose_information 

+

116 ) 

+

117 else: 

+

118 info_string = self._get_tuner_para_values_as_string( 

+

119 xk_descaled, obj, verbose_information, penalty 

+

120 ) 

+

121 self.logger.info(info_string) 

+

122 

+

123 def validation_callback_func(self, obj): 

+

124 """ 

+

125 Log the validation result information 

+

126 

+

127 :param float obj: 

+

128 Objective value of validation. 

+

129 """ 

+

130 self.log(f"{self.goals.statistical_measure} of validation: {obj}") 

+

131 

+

132 def save_calibration_result(self, best_iterate, model_name, **kwargs): 

+

133 """ 

+

134 Process the result, re-run the simulation and generate 

+

135 a logFile for the minimal quality measurement 

+

136 

+

137 :param dict best_iterate: 

+

138 Result object of the minimization 

+

139 :param str model_name: 

+

140 Name of the model being calibrated 

+

141 """ 

+

142 if "Iterate" not in best_iterate: 

+

143 self.logger.error("No best iterate. Can't save result") 

+

144 return 

+

145 result_log = f"\nResults for calibration of model: {model_name}\n" 

+

146 result_log += f"Number of iterations: {self._counter_calibration}\n" 

+

147 result_log += "Final parameter values:\n" 

+

148 # Set the iteration counter so that the number of the best iteration is printed

+

149 self._counter_calibration = best_iterate["Iterate"] 

+

150 result_log += f"{self._get_tuner_para_names_as_string()}\n" 

+

151 final_values = self._get_tuner_para_values_as_string( 

+

152 xk_descaled=best_iterate["Parameters"], 

+

153 obj=best_iterate["Objective"], 

+

154 unweighted_objective=best_iterate["Unweighted Objective"], 

+

155 penalty=best_iterate["Penaltyfactor"]) 

+

156 result_log += f"{final_values}\n" 

+

157 self.logger.info(result_log) 

+

158 self._counter_calibration = 0 

+

159 

+

160 def calibrate_new_class(self, calibration_class, cd=None, for_validation=False): 

+

161 """Function to setup the figures for a new class of calibration. 

+

162 This function is called when instantiating this class. If you

+

163 use continuous calibration classes, call this function before

+

164 starting the next calibration-class. 

+

165 

+

166 :param aixcalibuha.CalibrationClass calibration_class: 

+

167 Class holding information on names, tuner_paras, goals 

+

168 and time-intervals of calibration. 

+

169 :param str,os.path.normpath cd: 

+

170 Optional change in working directory to store files 

+

171 :param bool for_validation: 

+

172 If it's only for validation, only plot the goals 

+

173 """ 

+

174 if cd is not None: 

+

175 self.cd = cd 

+

176 self.calibration_class = calibration_class 

+

177 

+

178 @property 

+

179 def cd(self) -> str: 

+

180 """Get the current working directory for storing plots.""" 

+

181 return self._cd 

+

182 

+

183 @cd.setter 

+

184 def cd(self, cd: str): 

+

185 """Set the current working directory for storing plots.""" 

+

186 if not os.path.exists(cd): 

+

187 os.makedirs(cd) 

+

188 self._cd = cd 

+

189 

+

190 @property 

+

191 def tuner_paras(self) -> aixcalibuha.TunerParas: 

+

192 return self._tuner_paras 

+

193 

+

194 @tuner_paras.setter 

+

195 def tuner_paras(self, tuner_paras): 

+

196 """ 

+

197 Set the currently used TunerParas object to use the information for logging. 

+

198 

+

199 :param aixcalibuha.TunerParas tuner_paras: The TunerParas object used for logging.

+

200 """ 

+

201 if not isinstance(tuner_paras, aixcalibuha.TunerParas): 

+

202 raise TypeError(f"Given tuner_paras is of " 

+

203 f"type {type(tuner_paras).__name__} " 

+

204 "but type TunerParas is needed.") 

+

205 self._tuner_paras = tuner_paras 

+

206 self._set_prec_and_with_for_tuner_paras() 

+

207 

+

208 @property 

+

209 def goals(self) -> aixcalibuha.Goals: 

+

210 """Get current goals instance""" 

+

211 return self._goals 

+

212 

+

213 @goals.setter 

+

214 def goals(self, goals: aixcalibuha.Goals): 

+

215 """ 

+

216 Set the currently used Goals object to use the information for logging. 

+

217 

+

218 :param aixcalibuha.Goals goals:

+

219 Goals to be set to the object 

+

220 """ 

+

221 if not isinstance(goals, aixcalibuha.Goals): 

+

222 raise TypeError(f"Given goals is of type {type(goals).__name__} " 

+

223 "but type Goals is needed.") 

+

224 self._goals = goals 

+

225 

+

226 @property 

+

227 def calibration_class(self) -> aixcalibuha.CalibrationClass: 

+

228 """Get current calibration class object""" 

+

229 return self._calibration_class 

+

230 

+

231 @calibration_class.setter 

+

232 def calibration_class(self, calibration_class: aixcalibuha.CalibrationClass): 

+

233 """ 

+

234 Set the current calibration class. 

+

235 

+

236 :param aixcalibuha.CalibrationClass calibration_class: 

+

237 Class holding information on names, tuner_paras, goals 

+

238 and time-intervals of calibration. 

+

239 """ 

+

240 if not isinstance(calibration_class, aixcalibuha.CalibrationClass): 

+

241 raise TypeError(f"Given calibration_class " 

+

242 f"is of type {type(calibration_class).__name__} " 

+

243 "but type CalibrationClass is needed.") 

+

244 self._calibration_class = calibration_class 

+

245 self.tuner_paras = calibration_class.tuner_paras 

+

246 if calibration_class.goals is not None: 

+

247 self.goals = calibration_class.goals 

+

248 

+

249 def log_initial_names(self): 

+

250 """Function to log the initial names and the statistical measure 

+

251 before calibration.""" 

+

252 self.logger.info(self._get_tuner_para_names_as_string()) 

+

253 

+

254 def log_intersection_of_tuners(self, intersected_tuner_parameters, **kwargs): 

+

255 """ 

+

256 If an intersection for multiple classes occurs, information about

+

257 the statistics of the dataset has to be provided. 

+

258 

+

259 :param dict intersected_tuner_parameters: 

+

260 Dict with keys being the name of the tuner parameter and the

+

261 value being the list with all the different "best" values for 

+

262 the tuner parameter. 

+

263 """ 

+

264 _formatted_str = "\n".join([f"{tuner}: {values}" 

+

265 for tuner, values in intersected_tuner_parameters.items()]) 

+

266 self.logger.info("Multiple 'best' values for the following tuner parameters " 

+

267 "were identified in different classes:\n%s", _formatted_str) 

+

268 

+

269 def _get_tuner_para_names_as_string(self): 

+

270 """ 

+

271 Returns a string with the names of current tunerParameters 

+

272 

+

273 :return: str info_string 

+

274 The desired string 

+

275 """ 

+

276 initial_names = list(self.tuner_paras.get_names()) 

+

277 

+

278 info_string = "{0:9s}".format("Iteration") 

+

279 

+

280 # Names of tuner parameter 

+

281 for ini_name in initial_names: 

+

282 # Limit string length to a certain amount. 

+

283 # The full name has to be displayed somewhere else 

+

284 formatted_name = short_name(ini_name=ini_name, max_len=self._width) 

+

285 info_string += " {0:{width}s}".format(formatted_name, width=self._width) 

+

286 # Add string for qualitative measurement used (e.g. NRMSE, MEA etc.) 

+

287 info_string += " {0:{width}s}".format(self.goals.statistical_measure, 

+

288 width=self._width) 

+

289 info_string += "penaltyfactor" 

+

290 info_string += f" Unweighted {self.goals.statistical_measure}" 

+

291 return info_string 

+

292 

+

293 def _get_tuner_para_values_as_string(self, 

+

294 xk_descaled, 

+

295 obj, 

+

296 unweighted_objective, 

+

297 penalty=None): 

+

298 """ 

+

299 Returns a string with the values of current tuner parameters 

+

300 as well as the objective value. 

+

301 

+

302 :param np.array xk_descaled: 

+

303 Array with the current values of the calibration, descaled to bounds 

+

304 :param float obj: 

+

305 Current objective value. 

+

306 :param dict unweighted_objective: 

+

307 Further information about the objective value of each individual goal 

+

308 :param None/float penalty: 

+

309 Penaltyfactor. 

+

310 :return: str 

+

311 The desired string. 

+

312 """ 

+

313 # This will limit the number of iterations to 999999999 (for correct format). 

+

314 # More iterations will most likely never be used. 

+

315 info_string = '{0:9d}'.format(self._counter_calibration) 

+

316 

+

317 for x_value in xk_descaled: 

+

318 info_string += " {0:{width}.{prec}f}".format(x_value, 

+

319 width=self._width, 

+

320 prec=self._prec) 

+

321 # Add the last return value of the objective function. 

+

322 info_string += " {0:{width}.{prec}f}".format(obj, width=self._width, 

+

323 prec=self._prec) 

+

324 if penalty: 

+

325 info_string += " {0:{width}.{prec}f}".format(penalty, width=self._width, 

+

326 prec=self._prec - 3) 

+

327 else: 

+

328 info_string += " {}".format("-") 

+

329 _verbose_info = "= " + " + ".join(["{0:.{prec}}*{1:.{prec}}".format(val[0], 

+

330 val[1], prec=4) 

+

331 for goal, val in unweighted_objective.items()]) 

+

332 info_string += f" {_verbose_info}" 

+

333 

+

334 return info_string 

+

335 
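A small sketch of the fixed-width formatting used above, with invented numbers:

width, prec = 11, 6
print("{0:{width}.{prec}f}".format(3.14159, width=width, prec=prec))
# '   3.141590' -> columns line up across iterations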

+

336 

+

337class CalibrationVisualizer(CalibrationLogger): 

+

338 """More advanced class to not only log ongoing function 

+

339 evaluations but also show the process of the functions 

+

340 by plotting interesting causalities and saving these plots. 

+

341 

+

342 :keyword boolean show_plot: 

+

343 If False, all created plots are not shown during calibration but only 

+

344 stored at the end of the process. 

+

345 :keyword boolean create_tsd_plot: 

+

346 If False, the plot of the time series data (goals) is not created and 

+

347 thus not shown during calibration. It is therefore also not stored, even if

+

348 you set the save_tsd_plot keyword-argument to true. 

+

349 :keyword boolean save_tsd_plot: 

+

350 If True, at each iteration the created plot of the 

+

351 time-series is saved. This may make the process much slower.

+

352 :keyword float show_plot_pause_time: 

+

353 Set the time (in seconds) the plt.draw() pauses. May be altered if show_plot 

+

354 yields plots which disappear too fast. Default is 1e-3 s.

+

355 """ 

+

356 

+

357 def __init__(self, cd, 

+

358 name, 

+

359 calibration_class, 

+

360 logger=None, 

+

361 **kwargs): 

+

362 """Instantiate class parameters""" 

+

363 

+

364 # Instantiate the logger: 

+

365 super().__init__(cd=cd, 

+

366 name=name, 

+

367 calibration_class=calibration_class, 

+

368 logger=logger) 

+

369 

+

370 # Setup dummy parameters so class-functions 

+

371 # know the type of those later created objects:

+

372 self._n_cols_goals, self._n_rows_goals, self._n_cols_tuner, self._n_rows_tuner = 1, 1, 1, 1 

+

373 self.fig_tuner, self.ax_tuner = None, None 

+

374 self.fig_goal, self.ax_goal = None, None 

+

375 self.fig_obj, self.ax_obj = None, None 

+

376 self._num_goals = 0 

+

377 self.goals_dir = "TimeSeriesPlot" 

+

378 # Set supported kwargs: 

+

379 plt.ioff() # Turn off interactive mode.

+

380 self.save_tsd_plot = kwargs.get("save_tsd_plot", False) 

+

381 self.create_tsd_plot = kwargs.get("create_tsd_plot", True) 

+

382 self.show_plot = kwargs.get("show_plot", True) 

+

383 self.file_type = kwargs.get("file_type", "svg") 

+

384 self.show_plot_pause_time = kwargs.get("show_plot_pause_time", 1e-3) 

+

385 if not isinstance(self.show_plot_pause_time, (float, int)): 

+

386 raise TypeError( 

+

387 f"Given 'show_plot_pause_time' needs to " 

+

388 f"be float or int but is {type(self.show_plot_pause_time)}." 

+

389 ) 

+

390 

+

391 def calibrate_new_class(self, calibration_class, cd=None, for_validation=False): 

+

392 """Function to setup the figures for a new class of calibration. 

+

393 This function is called when instantiating this class. If you

+

394 use continuous calibration classes, call this function before

+

395 starting the next calibration-class. 

+

396 

+

397 :param aixcalibuha.CalibrationClass calibration_class: 

+

398 Class holding information on names, tuner_paras, goals 

+

399 and time-intervals of calibration. 

+

400 :param str,os.path.normpath cd: 

+

401 Optional change in working directory to store files 

+

402 :param bool for_validation: 

+

403 If it's only for validation, only plot the goals 

+

404 """ 

+

405 super().calibrate_new_class(calibration_class, cd) 

+

406 

+

407 name = calibration_class.name 

+

408 

+

409 # Close all old figures to create new ones. 

+

410 plt.close("all") 

+

411 

+

412 if not for_validation: 

+

413 # %% Set-up figure for objective-plotting 

+

414 self.fig_obj, self.ax_obj = plt.subplots(1, 1) 

+

415 self.fig_obj.suptitle(name + ": Objective") 

+

416 self.ax_obj.set_ylabel(self.goals.statistical_measure) 

+

417 self.ax_obj.set_xlabel("Number of iterations")

+

418 # If the changes are small, it seems like the plot does 

+

419 # not fit the printed values. This boolean assures that no offset is used. 

+

420 self.ax_obj.ticklabel_format(useOffset=False) 

+

421 

+

422 # %% Setup Tuner-Paras figure 

+

423 # Make an almost quadratic layout based on the number of tuner parameters involved.

+

424 num_tuners = len(self.tuner_paras.get_names()) 

+

425 self._n_cols_tuner = int(np.floor(np.sqrt(num_tuners))) 

+

426 self._n_rows_tuner = int(np.ceil(num_tuners / self._n_cols_tuner)) 

+

427 self.fig_tuner, self.ax_tuner = plt.subplots(self._n_rows_tuner, self._n_cols_tuner, 

+

428 squeeze=False, sharex=True) 

+

429 self.fig_tuner.suptitle(name + ": Tuner Parameters") 

+

430 self._plot_tuner_parameters(for_setup=True) 

+

431 

+

432 # %% Setup Goals figure 

+

433 # Only a dummy, as the figure is recreated at every iteration 

+

434 if self.goals is not None: 

+

435 self._num_goals = len(self.goals.get_goals_list()) 

+

436 self._n_cols_goals = int(np.floor(np.sqrt(self._num_goals))) 

+

437 self._n_rows_goals = int(np.ceil(self._num_goals / self._n_cols_goals)) 

+

438 self.fig_goal, self.ax_goal = plt.subplots(self._n_rows_goals, 

+

439 self._n_cols_goals, 

+

440 squeeze=False, 

+

441 sharex=True) 

+

442 self.fig_goal.suptitle(name + ": Goals") 

+

443 
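A sketch of the near-square grid sizing above (invented plot count):

import numpy as np

num_plots = 5
n_cols = int(np.floor(np.sqrt(num_plots)))  # 2
n_rows = int(np.ceil(num_plots / n_cols))   # 3 -> a 3x2 grid holds 5 plots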

+

444 def calibration_callback_func(self, xk, obj, verbose_information, penalty=None): 

+

445 """ 

+

446 Logs the current values of the objective function. 

+

447 

+

448 :param np.array xk: 

+

449 Array with the current values of the calibration 

+

450 :param float obj: 

+

451 Current objective value. 

+

452 :param dict verbose_information: 

+

453 A dict with the difference values for all goals and the

+

454 corresponding weightings 

+

455 """ 

+

456 # Call the logger function to print and log 

+

457 super().calibration_callback_func(xk, obj, verbose_information, penalty) 

+

458 # Plot the current objective value 

+

459 self.ax_obj.plot(self._counter_calibration, obj, "ro") 

+

460 

+

461 # Plot the tuner parameters 

+

462 self._plot_tuner_parameters(xk=xk) 

+

463 

+

464 # Plot the measured and simulated data 

+

465 if self.goals is not None and self.create_tsd_plot: 

+

466 self._plot_goals() 

+

467 

+

468 self._show_plot() 

+

469 

+

470 def validation_callback_func(self, obj): 

+

471 """ 

+

472 Log the validation result information. 

+

473 Also plot if selected. 

+

474 

+

475 :param float obj: 

+

476 Objective value of validation. 

+

477 """ 

+

478 super().validation_callback_func(obj=obj) 

+

479 # Plot the measured and simulated data 

+

480 if self.goals is not None and self.create_tsd_plot: 

+

481 self._plot_goals(at_validation=True) 

+

482 

+

483 self._show_plot(for_validation=True) 

+

484 

+

485 def _show_plot(self, for_validation=False): 

+

486 """Show plot if activated""" 

+

487 if not self.show_plot: 

+

488 return 

+

489 plt.draw() 

+

490 if self.create_tsd_plot: 

+

491 self.fig_goal.canvas.draw_idle() 

+

492 if not for_validation: 

+

493 self.fig_obj.canvas.draw_idle() 

+

494 self.fig_tuner.canvas.draw_idle() 

+

495 plt.pause(self.show_plot_pause_time) 

+

496 else: 

+

497 plt.show() 

+

498 

+

499 def save_calibration_result(self, best_iterate, model_name, **kwargs): 

+

500 """ 

+

501 Process the result, re-run the simulation and generate 

+

502 a logFile for the minimal quality measurement 

+

503 

+

504 :param scipy.optimize.minimize.result best_iterate: 

+

505 Result object of the minimization 

+

506 :param str model_name: 

+

507 Name of the model being calibrated 

+

508 """ 

+

509 if "Iterate" not in best_iterate: 

+

510 self.logger.error("No best iterate. Can't save result") 

+

511 return 

+

512 super().save_calibration_result(best_iterate, model_name, **kwargs) 

+

513 itercount = kwargs["itercount"] 

+

514 duration = kwargs["duration"] 

+

515 

+

516 # Extract file paths

+

517 iterpath = os.path.join(self.cd, f'Iteration_{itercount}') 

+

518 if not os.path.exists(iterpath): 

+

519 os.mkdir(iterpath) 

+

520 

+

521 filepath_tuner = os.path.join(iterpath, "tuner_parameter_plot.%s" % self.file_type) 

+

522 filepath_obj = os.path.join(iterpath, "objective_plot.%s" % self.file_type) 

+

523 if self.save_tsd_plot: 

+

524 bestgoal = os.path.join(self.cd, 

+

525 self.goals_dir, 

+

526 str(best_iterate["Iterate"]) + f"_goals.{self.file_type}") 

+

527 # Copy best goals figure 

+

528 copyfile(bestgoal, os.path.join(iterpath, f"best_goals.{self.file_type}"))

+

529 

+

530 # Save calibration results as csv 

+

531 res_dict = dict(best_iterate['Parameters']) 

+

532 res_dict['Objective'] = best_iterate["Objective"] 

+

533 res_dict['Duration'] = duration 

+

534 res_csv = os.path.join(iterpath,

+

535 f'RESULTS_{self.calibration_class.name}_iteration{itercount}.csv')

+

536 with open(res_csv, 'w') as rescsv: 

+

537 writer = csv.DictWriter(rescsv, res_dict.keys()) 

+

538 writer.writeheader() 

+

539 writer.writerow(res_dict) 

+

540 

+

541 # Save figures & close plots 

+

542 self.fig_tuner.savefig(filepath_tuner) 

+

543 self.fig_obj.savefig(filepath_obj) 

+

544 plt.close("all") 

+

545 

+

546 if best_iterate['better_current_result'] and self.save_tsd_plot: 

+

547 # save improvement of recalibration ("best goals df" as csv) 

+

548 best_iterate['Goals'].get_goals_data().to_csv( 

+

549 os.path.join(iterpath, 'goals_df.csv'), 

+

550 sep=",", 

+

551 decimal="." 

+

552 ) 

+

553 

+

554 def log_intersection_of_tuners(self, intersected_tuner_parameters, **kwargs): 

+

555 """ 

+

556 If an intersection for multiple classes occurs, information about

+

557 the statistics of the dataset has to be provided. 

+

558 

+

559 :param dict intersected_tuner_parameters: 

+

560 Dict with keys being the name of the tuner parameter and the

+

561 value being the list with all the different "best" values for 

+

562 the tuner parameter. 

+

563 """ 

+

564 super().log_intersection_of_tuners(intersected_tuner_parameters, **kwargs) 

+

565 x_labels = intersected_tuner_parameters.keys() 

+

566 data = list(intersected_tuner_parameters.values()) 

+

567 fig_intersection, ax_intersection = plt.subplots(1, len(x_labels), squeeze=False) 

+

568 for i, x_label in enumerate(x_labels): 

+

569 # Remove name of record (modelica) for visualization 

+

570 x_label_vis = x_label.replace('TunerParameter.', '') 

+

571 cur_ax = ax_intersection[0][i] 

+

572 cur_ax.violinplot(data[i], showmeans=True, showmedians=False, 

+

573 showextrema=True) 

+

574 

+

575 cur_ax.plot([1] * len(data[i]), data[i], "ro", label="Results")

+

576 

+

577 cur_ax.get_xaxis().set_tick_params(direction='out') 

+

578 cur_ax.xaxis.set_ticks_position('bottom') 

+

579 cur_ax.set_xticks(np.arange(1, 2)) 

+

580 cur_ax.set_xlim(0.25, 1.75) 

+

581 cur_ax.set_xticklabels([x_label_vis]) 

+

582 cur_ax.legend(loc="upper right") 

+

583 

+

584 # Always store in the parent directory as this info is relevant for all classes

+

585 

+

586 fig_intersection.suptitle("Intersection of Tuner Parameters")

+

587 path_intersections = os.path.join(os.path.dirname(self.cd), "tunerintersections") 

+

588 if not os.path.exists(path_intersections): 

+

589 os.makedirs(path_intersections) 

+

590 if "itercount" in kwargs: 

+

591 fig_intersection.savefig( 

+

592 os.path.join( 

+

593 path_intersections, 

+

594 f'tuner_parameter_intersection_plot_it{kwargs["itercount"]}.{self.file_type}') 

+

595 ) 

+

596 else: 

+

597 fig_intersection.savefig( 

+

598 os.path.join(path_intersections, 

+

599 f'tuner_parameter_intersection_plot.{self.file_type}') 

+

600 ) 

+

601 

+

602 if self.show_plot: 

+

603 plt.draw() 

+

604 plt.pause(15) 

+

605 

+

606 def _plot_tuner_parameters(self, xk=None, for_setup=False): 

+

607 """ 

+

608 Plot the tuner parameter values history for better user interaction 

+

609 

+

610 :param np.array xk: 

+

611 current iterate, scaled. 

+

612 :param bool for_setup: 

+

613 True if the function is called to initialize the calibration 

+

614 """ 

+

615 tuner_counter = 0 

+

616 for row in range(self._n_rows_tuner): 

+

617 for col in range(self._n_cols_tuner): 

+

618 cur_ax = self.ax_tuner[row][col] 

+

619 tuner_names_vis = self.tuner_paras.get_names() 

+

620 # Remove name of record (modelica) 

+

621 for i, name in enumerate(tuner_names_vis): 

+

622 tuner_names_vis[i] = name.replace('TunerParameter.', '') 

+

623 if tuner_counter >= len(self.tuner_paras.get_names()): 

+

624 cur_ax.axis("off") 

+

625 else: 

+

626 tuner_para_name = self.tuner_paras.get_names()[tuner_counter] 

+

627 if for_setup: 

+

628 cur_ax.set_ylabel(tuner_names_vis[tuner_counter]) 

+

629 max_value = self.tuner_paras.get_value(tuner_para_name, "max") 

+

630 min_value = self.tuner_paras.get_value(tuner_para_name, "min") 

+

631 ini_val = self.tuner_paras.get_value(tuner_para_name, "initial_value") 

+

632 cur_ax.axhline(max_value, color="r") 

+

633 cur_ax.axhline(min_value, color="r") 

+

634 cur_ax.plot(self._counter_calibration, ini_val, "bo") 

+

635 if xk is not None: 

+

636 cur_val = self.tuner_paras.descale(xk)[tuner_counter] 

+

637 cur_ax.plot(self._counter_calibration, cur_val, "bo") 

+

638 tuner_counter += 1 

+

639 

+

640 def _plot_goals(self, at_validation=False): 

+

641 """Plot the measured and simulated data for the current iterate""" 

+

642 

+

643 # Get information on the relevant-intervals of the calibration: 

+

644 rel_intervals = self.calibration_class.relevant_intervals 

+

645 

+

646 _goals_df = self.goals.get_goals_data() 

+

647 _goals_names = self.goals.get_goals_list() 

+

648 goal_counter = 0 

+

649 for row in range(self._n_rows_goals): 

+

650 for col in range(self._n_cols_goals): 

+

651 cur_ax = self.ax_goal[row][col] 

+

652 cur_ax.cla() 

+

653 if goal_counter >= self._num_goals: 

+

654 cur_ax.axis("off") 

+

655 else: 

+

656 cur_goal = _goals_names[goal_counter] 

+

657 cur_ax.plot(_goals_df[cur_goal, self.goals.sim_tag_str], 

+

658 label=cur_goal + f"_{self.goals.sim_tag_str}", 

+

659 linestyle="--", color="r") 

+

660 cur_ax.plot(_goals_df[cur_goal, self.goals.meas_tag_str], 

+

661 label=cur_goal + f"_{self.goals.meas_tag_str}", 

+

662 color="b") 

+

663 # Mark the disregarded intervals in grey 

+

664 _start = self.calibration_class.start_time 

+

665 _first = True # Only create one label 

+

666 for interval in rel_intervals: 

+

667 _end = interval[0] 

+

668 if _first: 

+

669 cur_ax.axvspan(_start, _end, 

+

670 facecolor="grey", 

+

671 alpha=0.7, 

+

672 label="Disregarded Interval") 

+

673 _first = False 

+

674 # Only create one label 

+

675 else: 

+

676 cur_ax.axvspan(_start, _end, 

+

677 facecolor="grey", alpha=0.7) 

+

678 _start = interval[1] 

+

679 # Final plot of grey 

+

680 cur_ax.axvspan(rel_intervals[-1][-1], 

+

681 self.calibration_class.stop_time, 

+

682 facecolor="grey", 

+

683 alpha=0.5) 

+

684 

+

685 cur_ax.legend(loc="lower right") 

+

686 cur_ax.set_xlabel("Time / s") 

+

687 goal_counter += 1 

+

688 

+

689 if at_validation: 

+

690 name_id = "Validation" 

+

691 else: 

+

692 name_id = self._counter_calibration 

+

693 

+

694 if self.save_tsd_plot: 

+

695 _savedir = os.path.join(self.cd, self.goals_dir) 

+

696 if not os.path.exists(_savedir): 

+

697 os.makedirs(_savedir) 

+

698 self.fig_goal.savefig( 

+

699 os.path.join(_savedir, 

+

700 f"{name_id}_goals.{self.file_type}")) 

+
diff --git a/docs/master/coverage/d_e6a0ac89e88ff9fe___init___py.html b/docs/master/coverage/d_e6a0ac89e88ff9fe___init___py.html new file mode 100644 index 00000000..ffc74f97 --- /dev/null +++ b/docs/master/coverage/d_e6a0ac89e88ff9fe___init___py.html @@ -0,0 +1,107 @@
+ Coverage for aixcalibuha/__init__.py: 100% (4 statements)
+ coverage.py v7.4.1, created at 2024-01-27 10:48 +0000

1""" 

+

2Python package to calibrate models created in Modelica or possible 

+

3other simulation software. 

+

4""" 

+

5from .data_types import CalibrationClass, TunerParas, Goals 

+

6from .calibration import Calibrator, MultipleClassCalibrator 

+

7from .sensitivity_analysis import SobolAnalyzer, MorrisAnalyzer, FASTAnalyzer, PAWNAnalyzer, \ 

+

8 plotting 

+

9 

+

10__version__ = "1.0.0" 

+
diff --git a/docs/master/coverage/d_e6a0ac89e88ff9fe_data_types_py.html b/docs/master/coverage/d_e6a0ac89e88ff9fe_data_types_py.html new file mode 100644 index 00000000..c46346d2 --- /dev/null +++ b/docs/master/coverage/d_e6a0ac89e88ff9fe_data_types_py.html @@ -0,0 +1,822 @@
+ Coverage for aixcalibuha/data_types.py: 86% (272 statements)
+ coverage.py v7.4.1, created at 2024-01-27 10:48 +0000

1""" 

+

2Module containing data types to enable an automatic usage of 

+

3different other modules in the Python package. 

+

4""" 

+

5import warnings 

+

6import logging 

+

7from typing import Union, Callable 

+

8from copy import deepcopy 

+

9import pandas as pd 

+

10import numpy as np 

+

11from ebcpy import TimeSeriesData 

+

12from ebcpy.utils.statistics_analyzer import StatisticsAnalyzer 

+

13from ebcpy.preprocessing import convert_datetime_index_to_float_index 

+

14 

+

15# pylint: disable=I1101 

+

16 

+

17logger = logging.getLogger(__name__) 

+

18 

+

19 

+

20class Goals: 

+

21 """ 

+

22 Class for one or multiple goals. Used to evaluate the 

+

23 difference between current simulation and measured data 

+

24 

+

25 :param (ebcpy.data_types.TimeSeriesData, pd.DataFrame) meas_target_data: 

+

26 The dataset of the measurement. It acts as a point of reference 

+

27 for the simulation output. If the dimensions of the given DataFrame and later 

+

28 added simulation-data are not equal, an error is raised. 

+

29 Has to hold all variables listed under the MEASUREMENT_NAME variable in the 

+

30 variable_names dict. 

+

31 :param dict variable_names: 

+

32 A dictionary to construct the goals-DataFrame using pandas MultiIndex-Functionality. 

+

33 The dict has to follow the structure:

+

34 ``variable_names = {VARIABLE_NAME: [MEASUREMENT_NAME, SIMULATION_NAME]}`` 

+

35 

+

36 - VARIABLE_NAME: A string which holds the actual name 

+

37 of the variable you use as a goal. 

+

38 E.g.: ``VARIABLE_NAME="Temperature_Condenser_Outflow"`` 

+

39 - MEASUREMENT_NAME: Is either a string or a tuple. Hold the name the variable 

+

40 has inside the given meas_target_data. If you want to specify a tag you have 

+

41 to pass a tuple, like: ``(MEASUREMENT_NAME, TAG_NAME)``. Else just pass a string. 

+

42 E.g.: ``MEASUREMENT_NAME="HydraulicBench[4].T_Out"`` or 

+

43 ``MEASUREMENT_NAME=("HydraulicBench[4].T_Out", "preprocessed")`` 

+

44 - SIMULATION_NAME is either a string or a tuple, just like MEASUREMENT_NAME. 

+

45 E.g. (for Modelica): ``SIMULATION_NAME="HeatPump.Condenser.Vol.T"`` 

+

46 

+

47 You may use a tuple instead of a list OR a dict 

+

48 with key "meas" for measurement and key "sim" for simulation. These options may be 

+

49 relevant for your own code readability. 

+

50 E.g. ``variable_names = 

+

51 {VARIABLE_NAME: {"meas":MEASUREMENT_NAME, "sim": SIMULATION_NAME}}`` 

+

52 :param str statistical_measure: 

+

53 Measure to calculate the scalar of the objective, 

+

54 One of the supported methods in 

+

55 ebcpy.utils.statistics_analyzer.StatisticsAnalyzer 

+

56 e.g. RMSE, MAE, NRMSE 

+

57 :param list weightings: 

+

58 Values between 0 and 1 to account for multiple Goals to be evaluated. 

+

59 If multiple goals are selected, and weightings is None, each 

+

60 weighting will be equal to 1/(Number of goals). 

+

61 The weighting is scaled so that the sum will equal 1. 

+

62 """ 

+

63 
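A concrete sketch of the variable_names structure described above, reusing the docstring's own example names:

variable_names = {
    "Temperature_Condenser_Outflow": {
        "meas": ("HydraulicBench[4].T_Out", "preprocessed"),
        "sim": "HeatPump.Condenser.Vol.T",
    }
}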

+

64 # Set default string for measurement reference 

+

65 meas_tag_str = "meas" 

+

66 sim_tag_str = "sim" 

+

67 

+

68 def __init__(self, 

+

69 meas_target_data: Union[TimeSeriesData, pd.DataFrame], 

+

70 variable_names: dict, 

+

71 statistical_measure: str, 

+

72 weightings: list = None): 

+

73 """Initialize class-objects and check correct input.""" 

+

74 

+

75 # Open the meas target data: 

+

76 if not isinstance(meas_target_data, (TimeSeriesData, pd.DataFrame)): 

+

77 raise TypeError(f"Given meas_target_data is of type {type(meas_target_data).__name__} " 

+

78 "but TimeSeriesData is required.") 

+

79 

+

80 if not isinstance(variable_names, dict): 

+

81 raise TypeError(f"Given variable_names is of type {type(variable_names).__name__} " 

+

82 f"but a dict is required.") 

+

83 

+

84 # Extract the measurement-information out of the dict. 

+

85 self.variable_names = variable_names 

+

86 

+

87 # Used to speed up the frequently used set_sim_target_data function 

+

88 self._sim_var_matcher = {} 

+

89 _columns = [] # Used to extract relevant part of df 

+

90 

+

91 _rename_cols_dict = {} 

+

92 for var_name, meas_sim_info in self.variable_names.items(): 

+

93 # First extract the information about the measurement out of the dict 

+

94 if isinstance(meas_sim_info, dict): 

+

95 meas_info = meas_sim_info[self.meas_tag_str] 

+

96 self._sim_var_matcher[var_name] = meas_sim_info[self.sim_tag_str] 

+

97 elif isinstance(meas_sim_info, (list, tuple)): 

+

98 meas_info = meas_sim_info[0] 

+

99 self._sim_var_matcher[var_name] = meas_sim_info[1] 

+

100 else: 

+

101 raise TypeError(f"Variable {var_name} of variable_names has a value" 

+

102 "neither being a dict, list or tuple.") 

+

103 # Now get the info to extract the values out of the given tsd 

+

104 # Convert the string into a list of tuples containing the relevant tag.

+

105 # If multiple tags exist, and the default tag (self.meas_tag_str)

+

106 # is not present, an error is raised. 

+

107 if isinstance(meas_info, str): 

+

108 if isinstance(meas_target_data[meas_info], pd.Series): 

+

109 raise TypeError("Given meas_target_data contains columns without a tag." 

+

110 "Please only pass MultiIndex-DataFrame objects.") 

+

111 tags = meas_target_data[meas_info].columns 

+

112 _rename_cols_dict[meas_info] = var_name 

+

113 if len(tags) != 1 and self.meas_tag_str not in tags: 

+

114 raise TypeError("Not able to automatically select variables and tags. " 

+

115 f"Variable {meas_info} has mutliple tags, none of which " 

+

116 f"is specified as {self.meas_tag_str}.") 

+

117 if self.meas_tag_str in tags: 

+

118 _columns.append((meas_info, self.meas_tag_str)) 

+

119 else: 

+

120 _columns.append((meas_info, tags[0])) 

+

121 elif isinstance(meas_info, tuple): 

+

122 _rename_cols_dict[meas_info[0]] = var_name 

+

123 _columns.append(meas_info) 

+

124 else: 

+

125 raise TypeError(f"Measurement Info on variable {var_name} is " 

+

126 "neither of type string or tuple.") 

+

127 

+

128 # Take the subset of the given tsd based on var_names and tags. 

+

129 self._tsd = meas_target_data[_columns].copy() 

+

130 

+

131 # Rename all variables to the given var_name (key of self.variable_names) 

+

132 self._tsd = self._tsd.rename(columns=_rename_cols_dict, level=0) 

+

133 

+

134 # Rename all tags to the default measurement name for consistency. 

+

135 tags = dict(zip(self._tsd.columns.levels[1], 

+

136 [self.meas_tag_str for _ in range(len(_columns))])) 

+

137 self._tsd = self._tsd.rename(columns=tags, level=1) 

+

138 

+

139 # Save the tsd to a tsd_ref object 

+

140 # Used to never lose the original dataframe. 

+

141 # _tsd may be altered by relevant intervals, this object never! 

+

142 self._tsd_ref = self._tsd.copy() 

+

143 

+

144 # Set the statistical analyzer: 

+

145 self.statistical_measure = statistical_measure 

+

146 

+

147 # Set the weightings, if not specified. 

+

148 self._num_goals = len(_columns) 

+

149 if weightings is None: 

+

150 self.weightings = np.array([1 / self._num_goals 

+

151 for i in range(self._num_goals)]) 

+

152 else: 

+

153 if not isinstance(weightings, (list, np.ndarray)): 

+

154 raise TypeError(f"weightings is of type {type(weightings).__name__} " 

+

155 f"but should be of type list.") 

+

156 if len(weightings) != self._num_goals: 

+

157 raise IndexError(f"The given number of weightings ({len(weightings)}) " 

+

158 f"does not match the number of " 

+

159 f"goals ({self._num_goals})") 

+

160 self.weightings = np.array(weightings) / sum(weightings) 

+

161 
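A sketch of the weighting normalization above (invented numbers):

import numpy as np

weightings = np.array([2, 1, 1]) / sum([2, 1, 1])
print(weightings)  # [0.5  0.25 0.25] -> always sums to 1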

+

162 def __str__(self): 

+

163 """Overwrite string method to present the Goals-Object more 

+

164 nicely.""" 

+

165 return str(self._tsd) 

+

166 

+

167 @property 

+

168 def statistical_measure(self): 

+

169 """The statistical measure of this Goal instance""" 

+

170 return self._stat_meas 

+

171 

+

172 @statistical_measure.setter 

+

173 def statistical_measure(self, statistical_measure: Union[str, Callable]): 

+

174 """ 

+

175 Set the new statistical measure. The value must be 

+

176 supported by the method argument in the 

+

177 ``StatisticsAnalyzer`` class of ``ebcpy``. 

+

178 """ 

+

179 self._stat_analyzer = StatisticsAnalyzer(method=statistical_measure) 

+

180 if callable(statistical_measure): 

+

181 self._stat_meas = statistical_measure.__name__ 

+

182 else: 

+

183 self._stat_meas = statistical_measure 

+

184 
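Per the setter above, a callable can also be passed; its __name__ is then used for logging. A hypothetical custom measure (assuming ebcpy's StatisticsAnalyzer accepts callables, as the check above implies):

def max_abs_error(meas, sim):
    # hypothetical user-defined measure over two pandas Series
    return float(abs(meas - sim).max())

goals.statistical_measure = max_abs_error  # stored/logged as "max_abs_error"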

+

185 def eval_difference(self, verbose=False, penaltyfactor=1): 

+

186 """ 

+

187 Evaluate the difference of the measurement and simulated data based on the 

+

188 chosen statistical_measure. 

+

189 

+

190 :param boolean verbose: 

+

191 If True, a dict with difference-values of for all goals and the 

+

192 corresponding weightings is returned together with the total difference. 

+

193 This can be useful to better understand which goals is performing 

+

194 well in an optimization and which goals needs further is not performing well. 

+

195 :param float penaltyfactor: 

+

196 Muliplty result with this factor to account for 

+

197 penatlies of some sort. 

+

198 :return: float total_difference 

+

199 Weighted output for all goals.

+

200 """ 

+

201 total_difference = 0 

+

202 _verbose_calculation = {} 

+

203 

+

204 for i, goal_name in enumerate(self.variable_names.keys()): 

+

205 if self._tsd.isnull().values.any(): 

+

206 raise ValueError("There are not valid values in the " 

+

207 "simulated target data. Probably the time " 

+

208 "interval of measured and simulated data " 

+

209 "are not equal. \nPlease check the frequencies " 

+

210 "in the toml file (output_interval & frequency).") 

+

211 _diff = self._stat_analyzer.calc( 

+

212 meas=self._tsd[(goal_name, self.meas_tag_str)], 

+

213 sim=self._tsd[(goal_name, self.sim_tag_str)] 

+

214 ) 

+

215 # Apply penalty function 

+

216 _diff = _diff * penaltyfactor 

+

217 _verbose_calculation[goal_name] = (self.weightings[i], _diff) 

+

218 total_difference += self.weightings[i] * _diff 

+

219 if verbose: 

+

220 return total_difference, _verbose_calculation 

+

221 return total_difference 

+

222 
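A numeric sketch (invented values) of the weighted total computed above: two goals with weightings 0.7/0.3 and per-goal differences 1.2 and 0.4:

weightings = [0.7, 0.3]
diffs = [1.2, 0.4]
total_difference = sum(w * d for w, d in zip(weightings, diffs))
print(total_difference)  # 0.96 (up to floating-point rounding)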

+

223 def set_sim_target_data(self, sim_target_data): 

+

224 """Alter the object with new simulation data 

+

225 self._sim_target_data based on the given dataframe 

+

226 sim_target_data. 

+

227 

+

228 :param TimeSeriesData sim_target_data: 

+

229 Object with simulation target data. This data should be 

+

230 the output of a simulation, hence "sim"-target-data. 

+

231 """ 

+

232 # Start with the base 

+

233 self._tsd = self._tsd_ref.copy() 

+

234 # Check index type 

+

235 if not isinstance(sim_target_data.index, type(self._tsd.index)): 

+

236 raise IndexError( 

+

237 f"Given sim_target_data is using {type(sim_target_data.index).__name__}" 

+

238 f" as an index, but the reference results (measured-data) was declared" 

+

239 f" using the {type(self._tsd_ref.index).__name__}. Convert your" 

+

240 f" measured-data index to solve this error." 

+

241 ) 

+

242 # Three critical cases may occur: 

+

243 # 1. sim_target_data is bigger (in len) than _tsd 

+

244 # --> Only the first part is accepted 

+

245 # 2. sim_target_data is smaller than _tsd 

+

246 # --> Missing values become NaN, which is fine. If no other function eliminates 

+

247 # the NaNs, an error is raised when doing eval_difference(). 

+

248 # 3. The index differs: 

+

249 # --> All new values are NaN. However, this should raise an error, as an error 

+

250 # in eval_difference would not lead back to this function. 

+

251 # Check if index matches in relevant intersection: 

+

252 sta = max(self._tsd.index[0], sim_target_data.index[0]) 

+

253 sto = min(self._tsd.index[-1], sim_target_data.index[-1]) 

+

254 if len(self._tsd.loc[sta:sto].index) != len(sim_target_data.loc[sta:sto].index): 

+

255 raise ValueError(f"Given indexes have different lengths " 

+

256 f"({len(self._tsd.loc[sta:sto].index)} vs " 

+

257 f"{len(sim_target_data.loc[sta:sto].index)}). " 

+

258 f"Can't compare them. ") 

+

259 mask = self._tsd.loc[sta:sto].index != sim_target_data.loc[sta:sto].index 

+

260 if np.any(mask): 

+

261 diff = self._tsd.loc[sta:sto].index - sim_target_data.loc[sta:sto].index 

+

262 raise IndexError(f"Measured and simulated data differ on {np.count_nonzero(mask)}" 

+

263 f" index points. Affected index part: {diff[mask]}. " 

+

264 f"This will lead to errors in evaluation, " 

+

265 f"hence we raise the error already here. " 

+

266 f"Check output_interval, equidistant_output and " 

+

267 f"frequency of measured data to find the reason for " 

+

268 f"this error. The have to match.") 

+

269 

+

270 # Resize simulation data to match to meas data 

+

271 for goal_name in self.variable_names.keys(): 

+

272 _tsd_sim = sim_target_data.loc[sta:sto, self._sim_var_matcher[goal_name]] 

+

273 if len(_tsd_sim.columns) > 1: 

+

274 raise ValueError("Given sim_target_data contains multiple tags for variable " 

+

275 f"{self._sim_var_matcher[goal_name]}. " 

+

276 "Can't select one automatically.") 

+

277 self._tsd.loc[sta:sto, (goal_name, self.sim_tag_str)] = _tsd_sim.values 

+

278 # Sort the index for better visualisation 

+

279 self._tsd = self._tsd.sort_index(axis=1) 

+
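The three cases above reduce to comparing the index on the relevant intersection. A minimal sketch of the sta/sto logic with plain pandas Series (the data is made up):

    import pandas as pd

    meas = pd.Series(range(5), index=[0.0, 10.0, 20.0, 30.0, 40.0])
    sim = pd.Series(range(6), index=[0.0, 10.0, 20.0, 30.0, 40.0, 50.0])
    sta = max(meas.index[0], sim.index[0])    # 0.0
    sto = min(meas.index[-1], sim.index[-1])  # 40.0
    # The longer simulation (case 1) is cut off; only the overlap is compared.
    assert (meas.loc[sta:sto].index == sim.loc[sta:sto].index).all()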

280 

+

281 def set_relevant_time_intervals(self, intervals): 

+

282 """ 

+

283 For many calibration use cases, different time-intervals of the measured

+

284 and simulated data are relevant. Set the interval to be used with this function. 

+

285 This will change both measured and simulated data. Therefore, the eval_difference 

+

286 function can be called at any time.

+

287 

+

288 :param list intervals: 

+

289 List with time-intervals. Each list element has to be a tuple 

+

290 with the first element being the start_time as float or int and 

+

291 the second item being the end_time of the interval as float or int. 

+

292 E.g: 

+

293 [(0, 100), (150, 200), (500, 600)]

+

294 """ 

+

295 _df_ref = self._tsd.copy() 

+

296 # Create initial False mask 

+

297 _mask = np.full(_df_ref.index.shape, False) 

+

298 # Dynamically make mask for multiple possible time-intervals 

+

299 for _start_time, _end_time in intervals: 

+

300 _mask = _mask | ((_df_ref.index >= _start_time) & (_df_ref.index <= _end_time)) 

+

301 self._tsd = _df_ref.loc[_mask] 

+
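A minimal sketch of the mask logic above on a plain float-indexed DataFrame (the index values are made up):

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"y": range(8)}, index=[0, 50, 100, 150, 200, 500, 600, 700])
    intervals = [(0, 100), (150, 200), (500, 600)]
    mask = np.full(df.index.shape, False)
    for start, end in intervals:
        mask |= (df.index >= start) & (df.index <= end)
    print(df.loc[mask].index.tolist())  # [0, 50, 100, 150, 200, 500, 600]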

302 

+

303 def get_goals_list(self): 

+

304 """Get the internal list containing all goals.""" 

+

305 return list(self.variable_names.keys()) 

+

306 

+

307 def get_goals_data(self): 

+

308 """Get the current time-series-data object.""" 

+

309 return self._tsd.copy() 

+

310 

+

311 def get_sim_var_names(self): 

+

312 """Get the names of the simulation variables. 

+

313 

+

314 :returns list sim_var_names: 

+

315 Names of the simulation variables as a list 

+

316 """ 

+

317 return list(self._sim_var_matcher.values()) 

+

318 

+

319 def get_meas_frequency(self): 

+

320 """ 

+

321 Get the frequency of the measurement data. 

+

322 

+

323 :returns: 

+

324 float: Mean frequency of the index 

+

325 """ 

+

326 mean, std = self._tsd_ref.frequency 

+

327 if std >= 1e-8: 

+

328 logger.critical("The index of your measurement data is not " 

+

329 "equally sampled. The standard deviation is %s." 

+

330 "The may lead to errors when mapping measurements to simulation " 

+

331 "results.", mean.std()) 

+

332 return mean 

+

333 

+

334 

+

335class TunerParas: 

+

336 """ 

+

337 Class for tuner parameters. 

+

338 Tuner parameters are parameters of a model which are 

+

339 constant during simulation but are varied during calibration 

+

340 or other analysis. 

+

341 

+

342 :param list names: 

+

343 List of names of the tuner parameters 

+

344 :param float,int initial_values: 

+

345 Initial values for optimization. 

+

346 Even though some optimization methods don't require an 

+

347 initial guess, specifying an initial guess based on

+

348 expected values or experience is helpful to better 

+

349 check the results of the calibration 

+

350 :param list,tuple bounds: 

+

351 Tuple or list of float or ints for lower and upper bound to the tuner parameter. 

+

352 The bounds object is optional, but highly recommended

+

353 for calibration or optimization in general. As soon as you 

+

354 tune parameters with different units, such as capacity and

+

355 heat conductivity, the solver will fail to find good solutions. 

+

356 

+

357 

+

358 Example: 

+

359 

+

360 >>> tuner_paras = TunerParas(names=["C", "m_flow_2", "heatConv_a"], 

+

361 >>> initial_values=[5000, 0.02, 200], 

+

362 >>> bounds=[(4000, 6000), (0.01, 0.1), (10, 300)]) 

+

363 >>> print(tuner_paras) 

+

364 initial_value min max scale 

+

365 names 

+

366 C 5000.00 4000.00 6000.0 2000.00 

+

367 m_flow_2 0.02 0.01 0.1 0.09 

+

368 heatConv_a 200.00 10.00 300.0 290.00 

+

369 """ 

+

370 

+

371 def __init__(self, names, initial_values, bounds=None): 

+

372 """Initialize class-objects and check correct input.""" 

+

373 # Check if the given input-parameters are of correct format. If not, raise an error. 

+

374 for name in names: 

+

375 if not isinstance(name, str): 

+

376 raise TypeError(f"Given name is of type {type(name).__name__} " 

+

377 "and not of type str.") 

+

378 # Check if all names are unique: 

+

379 if len(names) != len(set(names)): 

+

380 raise ValueError("Given names contain duplicates. " 

+

381 "This will yield errors in later stages" 

+

382 "such as calibration of sensitivity analysis.") 

+

383 try: 

+

384 # Calculate the sum, as this will fail if the elements are not float or int. 

+

385 sum(initial_values) 

+

386 except TypeError as err: 

+

387 raise TypeError("initial_values contains other " 

+

388 "instances than float or int.") from err 

+

389 if len(names) != len(initial_values): 

+

390 raise ValueError(f"shape mismatch: names has length {len(names)}" 

+

391 f" and initial_values {len(initial_values)}.") 

+

392 self._bounds = bounds 

+

393 if bounds is None: 

+

394 _bound_min = -np.inf 

+

395 _bound_max = np.inf 

+

396 else: 

+

397 if len(bounds) != len(names): 

+

398 raise ValueError(f"shape mismatch: bounds has length {len(bounds)} " 

+

399 f"and names {len(names)}.") 

+

400 _bound_min, _bound_max = [], [] 

+

401 for bound in bounds: 

+

402 _bound_min.append(bound[0]) 

+

403 _bound_max.append(bound[1]) 

+

404 

+

405 self._df = pd.DataFrame({"names": names, 

+

406 "initial_value": initial_values, 

+

407 "min": _bound_min, 

+

408 "max": _bound_max}) 

+

409 self._df = self._df.set_index("names") 

+

410 self._set_scale() 

+

411 

+

412 def __str__(self): 

+

413 """Overwrite string method to present the TunerParas-Object more 

+

414 nicely.""" 

+

415 return str(self._df) 

+

416 

+

417 def scale(self, descaled): 

+

418 """ 

+

419 Scales the given value to the bounds of the tuner parameter between 0 and 1 

+

420 

+

421 :param np.array,list descaled: 

+

422 Value to be scaled 

+

423 :return: np.array scaled: 

+

424 Scaled value between 0 and 1 

+

425 """ 

+

426 # If no bounds are given, scaling is not possible --> descaled = scaled

+

427 if self._bounds is None: 

+

428 return descaled 

+

429 _scaled = (descaled - self._df["min"]) / self._df["scale"] 

+

430 if not all((_scaled >= 0) & (_scaled <= 1)): 

+

431 warnings.warn("Given descaled values are outside " 

+

432 "of bounds. Automatically limiting " 

+

433 "the values with respect to the bounds.") 

+

434 return np.clip(_scaled, a_min=0, a_max=1) 

+

435 

+

436 def descale(self, scaled): 

+

437 """ 

+

438 Converts the given scaled value to a descaled one.

+

439 

+

440 :param np.array,list scaled: 

+

441 Scaled input value between 0 and 1 

+

442 :return: np.array descaled: 

+

443 descaled value based on bounds. 

+

444 """ 

+

445 # If no bounds are given, scaling is not possible --> descaled = scaled

+

446 if not self._bounds: 

+

447 return scaled 

+

448 _scaled = np.array(scaled) 

+

449 if not all((_scaled >= 0 - 1e-4) & (_scaled <= 1 + 1e-4)):

+

450 warnings.warn("Given scaled values are outside of bounds. " 

+

451 "Automatically limiting the values with " 

+

452 "respect to the bounds.") 

+

453 _scaled = np.clip(_scaled, a_min=0, a_max=1) 

+

454 return _scaled * self._df["scale"] + self._df["min"] 

+
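scale and descale are inverse min-max transformations over the bounds. A minimal standalone sketch with plain numpy, using the bounds from the class example above:

    import numpy as np

    bound_min = np.array([4000.0, 0.01])
    bound_max = np.array([6000.0, 0.1])
    scale = bound_max - bound_min            # the "scale" column
    values = np.array([5000.0, 0.02])
    scaled = (values - bound_min) / scale    # roughly [0.5, 0.111]
    descaled = scaled * scale + bound_min    # back to the original values
    assert np.allclose(descaled, values)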

455 

+

456 @property 

+

457 def bounds(self): 

+

458 """Get property bounds""" 

+

459 return self._bounds 

+

460 

+

461 def get_names(self): 

+

462 """Return the names of the tuner parameters""" 

+

463 return list(self._df.index) 

+

464 

+

465 def get_initial_values(self): 

+

466 """Return the initial values of the tuner parameters""" 

+

467 return self._df["initial_value"].values 

+

468 

+

469 def get_bounds(self): 

+

470 """Return the bound-values of the tuner parameters""" 

+

471 return self._df["min"].values, self._df["max"].values 

+

472 

+

473 def get_value(self, name, col): 

+

474 """Function to get a value of a specific tuner parameter""" 

+

475 return self._df.loc[name, col] 

+

476 

+

477 def set_value(self, name, col, value): 

+

478 """Function to set a value of a specific tuner parameter""" 

+

479 if not isinstance(value, (float, int)): 

+

480 raise ValueError(f"Given value is of type {type(value).__name__} " 

+

481 "but float or int is required") 

+

482 if col not in ["max", "min", "initial_value"]: 

+

483 raise KeyError("Can only alter max, min and initial_value") 

+

484 self._df.loc[name, col] = value 

+

485 self._set_scale() 

+

486 

+

487 def remove_names(self, names): 

+

488 """ 

+

489 Remove the given list of names from the tuner parameters.

+

490 

+

491 :param list names: 

+

492 List with names inside of the TunerParas-dataframe 

+

493 """ 

+

494 self._df = self._df.drop(names) 

+

495 

+

496 def _set_scale(self): 

+

497 self._df["scale"] = self._df["max"] - self._df["min"] 

+

498 if not self._df[self._df["scale"] <= 0].empty: 

+

499 raise ValueError( 

+

500 "The given lower bounds are greater equal " 

+

501 "than the upper bounds, resulting in a " 

+

502 f"negative scale: \n{str(self._df['scale'])}" 

+

503 ) 

+

504 

+

505 

+

506class CalibrationClass: 

+

507 """ 

+

508 Class used for calibration of time-series data. 

+

509 

+

510 :param str name: 

+

511 Name of the class, e.g. 'device on' 

+

512 :param float,int start_time: 

+

513 Time at which the class starts 

+

514 :param float,int stop_time: 

+

515 Time at which the class ends 

+

516 :param Goals goals: 

+

517 Goals parameters which are relevant in this class. 

+

518 As this class may be used in the classifier, a Goals-Class 

+

519 may not be available at all times and can be added later. 

+

520 :param TunerParas tuner_paras: 

+

521 As this class may be used in the classifier, a TunerParas-Class 

+

522 may not be available at all times and can be added later. 

+

523 :param list relevant_intervals: 

+

524 List with time-intervals relevant for the calibration. 

+

525 Each list element has to be a tuple with the first element being 

+

526 the start-time as float/int and the second item being the end-time 

+

527 of the interval as float/int. 

+

528 E.g: 

+

529 For a class with start_time=0 and stop_time=1000, given following intervals 

+

530 [(0, 100), (150, 200), (500, 600)]

+

531 will only evaluate the data between 0-100, 150-200 and 500-600. 

+

532 The given intervals may overlap. Furthermore, the intervals do not need

+

533 to be in an ascending order or be limited to 

+

534 the start_time and end_time parameters. 

+

535 :keyword (pd.DataFrame, ebcpy.data_types.TimeSeriesData) inputs: 

+

536 TimeSeriesData or DataFrame that holds 

+

537 input data for the simulation to run. 

+

538 The time-index should be float index and match the overall 

+

539 ranges set by start- and stop-time. 

+

540 :keyword dict input_kwargs: 

+

541 If inputs are provided, additional input keyword-args passed to the 

+

542 simulation API can be specified. 

+

543 Using FMUs, you don't need to specify anything. 

+

544 Using DymolaAPI, you have to specify 'table_name' and 'file_name' 

+

545 """ 

+

546 

+

547 def __init__(self, name, start_time, stop_time, goals=None, 

+

548 tuner_paras=None, relevant_intervals=None, **kwargs): 

+

549 """Initialize class-objects and check correct input.""" 

+

550 self.name = name 

+

551 self._start_time = start_time 

+

552 self.stop_time = stop_time 

+

553 self._goals = None 

+

554 self._tuner_paras = None 

+

555 if goals is not None: 

+

556 self.goals = goals 

+

557 if tuner_paras is not None: 

+

558 self.tuner_paras = tuner_paras 

+

559 if relevant_intervals is not None: 

+

560 self.relevant_intervals = relevant_intervals 

+

561 else: 

+

562 # Then all is relevant 

+

563 self.relevant_intervals = [(start_time, stop_time)] 

+

564 self._inputs = None 

+

565 inputs = kwargs.get('inputs', None) 

+

566 if inputs is not None: 

+

567 self.inputs = inputs # Trigger the property setter 

+

568 self.input_kwargs = kwargs.get('input_kwargs', {}) 

+

569 

+

570 @property 

+

571 def name(self): 

+

572 """Get name of calibration class""" 

+

573 return self._name 

+

574 

+

575 @name.setter 

+

576 def name(self, name: str): 

+

577 """Set name of calibration class""" 

+

578 if not isinstance(name, str): 

+

579 raise TypeError(f"Name of CalibrationClass is {type(name)} " 

+

580 f"but has to be of type str") 

+

581 self._name = name 

+

582 

+

583 @property 

+

584 def start_time(self) -> Union[float, int]: 

+

585 """Get start time of calibration class""" 

+

586 return self._start_time 

+

587 

+

588 @start_time.setter 

+

589 def start_time(self, start_time: Union[float, int]): 

+

590 """Set start time of calibration class""" 

+

591 if not start_time <= self.stop_time: 

+

592 raise ValueError("The given start-time is " 

+

593 "higher than the stop-time.") 

+

594 self._start_time = start_time 

+

595 

+

596 @property 

+

597 def stop_time(self) -> Union[float, int]: 

+

598 """Get stop time of calibration class""" 

+

599 return self._stop_time 

+

600 

+

601 @stop_time.setter 

+

602 def stop_time(self, stop_time: Union[float, int]): 

+

603 """Set stop time of calibration class""" 

+

604 if not self.start_time <= stop_time: 

+

605 raise ValueError("The given stop-time is " 

+

606 "lower than the start-time.") 

+

607 self._stop_time = stop_time 

+

608 

+

609 @property 

+

610 def tuner_paras(self) -> TunerParas: 

+

611 """Get the tuner parameters of the calibration-class""" 

+

612 return self._tuner_paras 

+

613 

+

614 @tuner_paras.setter 

+

615 def tuner_paras(self, tuner_paras): 

+

616 """ 

+

617 Set the tuner parameters for the calibration-class. 

+

618 

+

619 :param tuner_paras: TunerParas 

+

620 """ 

+

621 if not isinstance(tuner_paras, TunerParas): 

+

622 raise TypeError(f"Given tuner_paras is of type {type(tuner_paras).__name__} " 

+

623 "but should be type TunerParas") 

+

624 self._tuner_paras = deepcopy(tuner_paras) 

+

625 

+

626 @property 

+

627 def goals(self) -> Goals: 

+

628 """Get current goals instance""" 

+

629 return self._goals 

+

630 

+

631 @goals.setter 

+

632 def goals(self, goals: Goals): 

+

633 """ 

+

634 Set the goals object for the calibration-class. 

+

635 

+

636 :param Goals goals: 

+

637 Goals-data-type 

+

638 """ 

+

639 if not isinstance(goals, Goals): 

+

640 raise TypeError(f"Given goals parameter is of type {type(goals).__name__} " 

+

641 "but should be type Goals") 

+

642 self._goals = deepcopy(goals) 

+

643 

+

644 @property 

+

645 def relevant_intervals(self) -> list: 

+

646 """Get current relevant_intervals""" 

+

647 return self._relevant_intervals 

+

648 

+

649 @relevant_intervals.setter 

+

650 def relevant_intervals(self, relevant_intervals: list): 

+

651 """Set current relevant_intervals""" 

+

652 self._relevant_intervals = relevant_intervals 

+

653 

+

654 @property 

+

655 def inputs(self) -> Union[TimeSeriesData, pd.DataFrame]: 

+

656 """Get the inputs for this calibration class""" 

+

657 return self._inputs 

+

658 

+

659 @inputs.setter 

+

660 def inputs(self, inputs: Union[TimeSeriesData, pd.DataFrame]): 

+

661 """Set the inputs for this calibration class""" 

+

662 # Check correct index: 

+

663 if not isinstance(inputs, (TimeSeriesData, pd.DataFrame)): 

+

664 raise TypeError(f"Inputs need to be either TimeSeriesData " 

+

665 f"or pd.DataFrame, but you passed {type(inputs)}") 

+

666 if isinstance(inputs.index, pd.DatetimeIndex): 

+

667 inputs = convert_datetime_index_to_float_index(inputs) 

+

668 

+

669 self._inputs = inputs 

+

670 

+

671 

+

672def merge_calibration_classes(calibration_classes): 

+

673 """ 

+

674 Given a list of multiple calibration-classes, this function merges given 

+

675 objects by the "name" attribute. Relevant intervals are set, in order 

+

676 to maintain the start and stop-time info. 

+

677 

+

678 :param list calibration_classes: 

+

679 List containing multiple CalibrationClass-Objects 

+

680 :return: list cal_classes_merged: 

+

681 A list containing one CalibrationClass-Object for each different 

+

682 "name" of class. 

+

683 

+

684 Example: 

+

685 >>> cal_classes = [CalibrationClass("on", 0, 100), 

+

686 >>> CalibrationClass("off", 100, 200), 

+

687 >>> CalibrationClass("on", 200, 300)] 

+

688 >>> merged_classes = merge_calibration_classes(cal_classes) 

+

689 Is equal to: 

+

690 >>> merged_classes = [CalibrationClass("on", 0, 300, 

+

691 >>> relevant_intervals=[(0,100), (200,300)]), 

+

692 >>> CalibrationClass("off", 100, 200)] 

+

693 

+

694 """ 

+

695 # Use a dict for easy name-access 

+

696 temp_merged = {} 

+

697 for cal_class in calibration_classes: 

+

698 _name = cal_class.name 

+

699 # First create dictionary with all calibration classes 

+

700 if _name in temp_merged: 

+

701 temp_merged[_name]["intervals"] += cal_class.relevant_intervals 

+

702 else: 

+

703 temp_merged[_name] = {"goals": cal_class.goals, 

+

704 "tuner_paras": cal_class.tuner_paras, 

+

705 "intervals": deepcopy(cal_class.relevant_intervals), 

+

706 "inputs": deepcopy(cal_class.inputs), 

+

707 "input_kwargs": deepcopy(cal_class.input_kwargs) 

+

708 } 

+

709 

+

710 # Convert dict to actual calibration-classes 

+

711 cal_classes_merged = [] 

+

712 for _name, values in temp_merged.items(): 

+

713 # Flatten the list of tuples and get the start- and stop-values 

+

714 start_time = min(sum(values["intervals"], ())) 

+

715 stop_time = max(sum(values["intervals"], ())) 

+

716 cal_classes_merged.append(CalibrationClass( 

+

717 _name, start_time, stop_time, 

+

718 goals=values["goals"], 

+

719 tuner_paras=values["tuner_paras"], 

+

720 relevant_intervals=values["intervals"], 

+

721 inputs=values["inputs"], 

+

722 input_kwargs=values["input_kwargs"]) 

+

723 ) 

+

724 

+

725 return cal_classes_merged 

+
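The start- and stop-time extraction above relies on flattening the interval tuples. A short standalone sketch:

    intervals = [(0, 100), (200, 300)]
    # sum(list_of_tuples, ()) concatenates the tuples into one flat tuple
    flat = sum(intervals, ())                     # (0, 100, 200, 300)
    start_time, stop_time = min(flat), max(flat)  # 0, 300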
diff --git a/docs/master/coverage/d_f4434ebd7e0d50e1___init___py.html b/docs/master/coverage/d_f4434ebd7e0d50e1___init___py.html
new file mode 100644
index 00000000..f66349d5
--- /dev/null
+++ b/docs/master/coverage/d_f4434ebd7e0d50e1___init___py.html
@@ -0,0 +1,108 @@
+ Coverage for aixcalibuha/sensitivity_analysis/__init__.py: 100% (6 statements)
+ coverage.py v7.4.1, created at 2024-01-27 10:48 +0000

1""" 

+

2This package contains classes to perform 

+

3sensitivity analysis with. 

+

4""" 

+

5 

+

6from aixcalibuha.sensitivity_analysis import plotting 

+

7from .sensitivity_analyzer import SenAnalyzer 

+

8from .sobol import SobolAnalyzer 

+

9from .morris import MorrisAnalyzer 

+

10from .fast import FASTAnalyzer 

+

11from .pawn import PAWNAnalyzer 

+
diff --git a/docs/master/coverage/d_f4434ebd7e0d50e1_fast_py.html b/docs/master/coverage/d_f4434ebd7e0d50e1_fast_py.html
new file mode 100644
index 00000000..1948d7d0
--- /dev/null
+++ b/docs/master/coverage/d_f4434ebd7e0d50e1_fast_py.html
@@ -0,0 +1,186 @@
+ Coverage for aixcalibuha/sensitivity_analysis/fast.py: 96% (24 statements)
+ coverage.py v7.4.1, created at 2024-01-27 10:48 +0000

1""" 

+

2Adds the FASTAnalyzer to the available 

+

3classes of sensitivity analysis. 

+

4""" 

+

5import numpy as np 

+

6from SALib.sample import fast_sampler as fast 

+

7from SALib.analyze import fast as analyze_fast 

+

8from aixcalibuha.sensitivity_analysis import SenAnalyzer 

+

9from aixcalibuha import CalibrationClass 

+

10 

+

11 

+

12class FASTAnalyzer(SenAnalyzer): 

+

13 """ 

+

14 FAST method from SALib 

+

15 https://salib.readthedocs.io/en/latest/api.html#fast-fourier-amplitude-sensitivity-test 

+

16 A variance-based method which can compute the sensitivity measures 

+

17 'S1' and 'ST'. 

+

18 

+

19 Additional arguments: 

+

20 

+

21 :keyword int M: 

+

22 Default 4, used for the fast-method 

+

23 :keyword seed: 

+

24 Used for the fast-method 

+

25 """ 

+

26 

+

27 def __init__(self, sim_api, **kwargs): 

+

28 super().__init__( 

+

29 sim_api=sim_api, 

+

30 **kwargs) 

+

31 # Set additional kwargs 

+

32 self.M = kwargs.pop("M", 4) 

+

33 self.seed = kwargs.pop("seed", None) 

+

34 

+

35 @property 

+

36 def analysis_variables(self): 

+

37 """The analysis variables of the FAST method""" 

+

38 return ['S1', 'ST'] 

+

39 

+

40 def analysis_function(self, x, y): 

+

41 """ 

+

42 Use the SALib.analyze.fast method to analyze the simulation results. 

+

43 

+

44 :param np.array x: 

+

45 placeholder for the `X` parameter of the morris method, not used for fast

+

46 :param np.array y: 

+

47 The NumPy array containing the model outputs 

+

48 :return: 

+

49 returns the result of the SALib.analyze.fast method (from the documentation: 

+

50 Returns a dictionary with keys 'S1' and 'ST', where each entry is a list of 

+

51 size D (the number of parameters) containing the indices in the same order 

+

52 as the parameter file.) 

+

53 """ 

+

54 return analyze_fast.analyze(self.problem, y, 

+

55 M=self.M) 

+

56 

+

57 def create_sampler_demand(self): 

+

58 """ 

+

59 Function to create the sampler parameters for the fast method 

+

60 """ 

+

61 return {'M': self.M} 

+

62 

+

63 def generate_samples(self): 

+

64 """ 

+

65 Run the sampler for fast and return the results. 

+

66 

+

67 :return: 

+

68 The list of samples generated as a NumPy array with one row per sample 

+

69 and each row containing one value for each variable name in `problem['names']`. 

+

70 :rtype: np.ndarray 

+

71 """ 

+

72 return fast.sample(self.problem, 

+

73 N=self.num_samples, 

+

74 **self.create_sampler_demand()) 

+

75 

+

76 def _get_res_dict(self, result: dict, cal_class: CalibrationClass, analysis_variable: str): 

+

77 """ 

+

78 Convert the result object to a dict with the key 

+

79 being the variable name and the value being the result 

+

80 associated to self.analysis_variable. 

+

81 """ 

+

82 names = self.create_problem(cal_class.tuner_paras)['names'] 

+

83 if result is None: 

+

84 return {var_name: np.abs(res_val) 

+

85 for var_name, res_val in zip(names, 

+

86 np.zeros(len(names)))} 

+

87 return {var_name: np.abs(res_val) 

+

88 for var_name, res_val in zip(names, 

+

89 result[analysis_variable])} 

+
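The class above is a thin wrapper around the plain SALib FAST workflow. A minimal standalone sketch with a made-up problem definition and toy model (both are illustrative assumptions):

    import numpy as np
    from SALib.sample import fast_sampler
    from SALib.analyze import fast as analyze_fast

    problem = {"num_vars": 2, "names": ["C", "heatConv_a"],
               "bounds": [[4000, 6000], [10, 300]]}
    # FAST needs N > 4 * M**2 samples per parameter (> 64 for M=4)
    x = fast_sampler.sample(problem, N=65, M=4)
    y = np.array([xi[0] * 1e-4 + xi[1] for xi in x])  # toy model output
    res = analyze_fast.analyze(problem, y, M=4)
    print(dict(zip(problem["names"], res["S1"])))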
diff --git a/docs/master/coverage/d_f4434ebd7e0d50e1_morris_py.html b/docs/master/coverage/d_f4434ebd7e0d50e1_morris_py.html
new file mode 100644
index 00000000..d6400eb6
--- /dev/null
+++ b/docs/master/coverage/d_f4434ebd7e0d50e1_morris_py.html
@@ -0,0 +1,185 @@
+ Coverage for aixcalibuha/sensitivity_analysis/morris.py: 100% (25 statements)
+ coverage.py v7.4.1, created at 2024-01-27 10:48 +0000

1""" 

+

2Adds the MorrisAnalyzer to the available 

+

3classes of sensitivity analysis. 

+

4""" 

+

5import numpy as np 

+

6from SALib.sample import morris 

+

7from SALib.analyze import morris as analyze_morris 

+

8from aixcalibuha.sensitivity_analysis import SenAnalyzer 

+

9from aixcalibuha import CalibrationClass 

+

10 

+

11 

+

12class MorrisAnalyzer(SenAnalyzer): 

+

13 """ 

+

14 Morris method from SALib https://salib.readthedocs.io/en/latest/api.html#method-of-morris

+

15 An elementary effects (One-At-A-Time) method which computes the sensitivity 

+

16 measures 'mu', 'mu_star' and 'sigma' with a confidence interval for mu_star. 

+

17 

+

18 Additional arguments: 

+

19 

+

20 :keyword int num_levels: 

+

21 Default num_samples, used for the morris-method 

+

22 :keyword optimal_trajectories: 

+

23 Used for the morris-method 

+

24 :keyword bool local_optimization: 

+

25 Default True, used for the morris-method 

+

26 """ 

+

27 def __init__(self, sim_api, **kwargs): 

+

28 super().__init__( 

+

29 sim_api=sim_api, 

+

30 **kwargs) 

+

31 # Set additional kwargs 

+

32 self.num_levels = kwargs.pop("num_levels", self.num_samples) 

+

33 self.optimal_trajectories = kwargs.pop("optimal_trajectories", None) 

+

34 self.local_optimization = kwargs.pop("local_optimization", True) 

+

35 

+

36 @property 

+

37 def analysis_variables(self): 

+

38 """The analysis variables of the sobol method""" 

+

39 return ['mu_star', 'mu', 'sigma', 'mu_star_conf'] 

+

40 

+

41 def analysis_function(self, x, y): 

+

42 """ 

+

43 Use the SALib.analyze.morris method to analyze the simulation results. 

+

44 

+

45 :param np.array x: 

+

46 the `X` parameter of the morris method (The NumPy matrix containing the model inputs) 

+

47 :param np.array y: 

+

48 The NumPy array containing the model outputs 

+

49 :return: 

+

50 returns the result of the SALib.analyze.morris method (from the documentation:

+

51 a dictionary with cols `mu`, `mu_star`, `sigma`, and `mu_star_conf`, where each 

+

52 entry is a list of size D (the number of parameters) containing the indices in the 

+

53 same order as the parameter file.) 

+

54 """ 

+

55 return analyze_morris.analyze(self.problem, x, y, 

+

56 num_levels=self.num_levels) 

+

57 

+

58 def create_sampler_demand(self): 

+

59 """ 

+

60 Function to create the sampler parameters for the morris method 

+

61 """ 

+

62 return {'num_levels': self.num_levels, 

+

63 'optimal_trajectories': self.optimal_trajectories, 

+

64 'local_optimization': self.local_optimization} 

+

65 

+

66 def generate_samples(self): 

+

67 """ 

+

68 Run the sampler specified for morris and return the results. 

+

69 

+

70 :return: 

+

71 The list of samples generated as a NumPy array with one row per sample 

+

72 and each row containing one value for each variable name in `problem['names']`. 

+

73 :rtype: np.ndarray 

+

74 """ 

+

75 return morris.sample(self.problem, 

+

76 N=self.num_samples, 

+

77 **self.create_sampler_demand()) 

+

78 

+

79 def _get_res_dict(self, result: dict, cal_class: CalibrationClass, analysis_variable: str): 

+

80 """ 

+

81 Convert the result object to a dict with the key 

+

82 being the variable name and the value being the result 

+

83 associated to self.analysis_variable. 

+

84 """ 

+

85 if result is None: 

+

86 names = cal_class.tuner_paras.get_names() 

+

87 return dict(zip(names, np.zeros(len(names)))) 

+

88 return dict(zip(result['names'], result[analysis_variable])) 

+
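For the Morris sampler wrapped above, each of the N trajectories evaluates the model num_vars + 1 times, which fixes the shape of the sample matrix. A short sketch (the problem definition is made up):

    from SALib.sample import morris

    problem = {"num_vars": 3, "names": ["C", "m_flow_2", "heatConv_a"],
               "bounds": [[4000, 6000], [0.01, 0.1], [10, 300]]}
    x = morris.sample(problem, N=10, num_levels=4)
    assert x.shape == (10 * (3 + 1), 3)  # one row per model evaluation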
diff --git a/docs/master/coverage/d_f4434ebd7e0d50e1_pawn_py.html b/docs/master/coverage/d_f4434ebd7e0d50e1_pawn_py.html
new file mode 100644
index 00000000..c0e3cf53
--- /dev/null
+++ b/docs/master/coverage/d_f4434ebd7e0d50e1_pawn_py.html
@@ -0,0 +1,221 @@
+ Coverage for aixcalibuha/sensitivity_analysis/pawn.py: 95% (41 statements)
+ coverage.py v7.4.1, created at 2024-01-27 10:48 +0000

1""" 

+

2Adds the PAWNAnalyzer to the available 

+

3classes of sensitivity analysis. 

+

4""" 

+

5 

+

6from SALib.sample import sobol 

+

7from SALib.sample import morris 

+

8from SALib.sample import fast_sampler as fast 

+

9from SALib.analyze import pawn as analyze_pawn 

+

10import numpy as np 

+

11from aixcalibuha.sensitivity_analysis import SenAnalyzer 

+

12from aixcalibuha import CalibrationClass 

+

13 

+

14 

+

15class PAWNAnalyzer(SenAnalyzer): 

+

16 """ 

+

17 PAWN method from SALib https://salib.readthedocs.io/en/latest/api.html#pawn-sensitivity-analysis 

+

18 Density-based method which computes the PAWN index at 'minimum', 'maximum',

+

19 'mean', 'median' and the coefficient of variation 'CV'.

+

20  

+

21 Additional arguments: 

+

22 

+

23 :keyword bool calc_second_order: 

+

24 Default True, used for the sampler of the sobol-method 

+

25 :keyword int s: 

+

26 Default 10, used for the pawn-method. 

+

27 :keyword str sampler: 

+

28 Which sampler should be used. Default sobol. 

+

29 Choose between 'sobol', 'morris' and 'fast'. 

+

30 :keyword int num_levels: 

+

31 Default num_samples, used for the sampler of the morris-method. 

+

32 :keyword optimal_trajectories: 

+

33 Used for the sampler of the morris-method. 

+

34 :keyword bool local_optimization: 

+

35 Default True, used for the sampler of the morris-method. 

+

36 :keyword int M: 

+

37 Default 4, used for the sampler of the fast-method. 

+

38 """ 

+

39 

+

40 def __init__(self, sim_api, **kwargs): 

+

41 super().__init__( 

+

42 sim_api=sim_api, 

+

43 **kwargs) 

+

44 # Set additional kwargs 

+

45 self.calc_second_order = kwargs.pop("calc_second_order", True) 

+

46 self.s = kwargs.pop("s", 10) 

+

47 self.sampler = kwargs.pop("sampler", 'sobol') 

+

48 self.num_levels = kwargs.pop("num_levels", self.num_samples) 

+

49 self.optimal_trajectories = kwargs.pop("optimal_trajectories", None) 

+

50 self.local_optimization = kwargs.pop("local_optimization", True) 

+

51 self.M = kwargs.pop("M", 4) 

+

52 

+

53 @property 

+

54 def analysis_variables(self): 

+

55 """The analysis variables of the PAWN method""" 

+

56 return ['minimum', 'mean', 'median', 'maximum', 'CV'] 

+

57 

+

58 def analysis_function(self, x, y): 

+

59 """ 

+

60 Use the SALib.analyze.pawn method to analyze the simulation results. 

+

61 

+

62 :param np.array x: 

+

63 the NumPy matrix containing the model inputs (used by the pawn method)

+

64 :param np.array y: 

+

65 The NumPy array containing the model outputs 

+

66 :return: 

+

67 returns the result of the SALib.analyze.pawn method (from the documentation: 

+

68 This implementation reports the PAWN index at the min, mean, median, and 

+

69 max across the slides/conditioning intervals as well as the coefficient of 

+

70 variation (``CV``). The median value is the typically reported value. As 

+

71 the ``CV`` is (standard deviation / mean), it indicates the level of 

+

72 variability across the slides, with values closer to zero indicating lower 

+

73 variation.) 

+

74 """ 

+

75 return analyze_pawn.analyze(self.problem, x, y, 

+

76 S=self.s) 

+

77 

+

78 def create_sampler_demand(self): 

+

79 """ 

+

80 Function to create the sampler parameters for the sampler method 

+

81 """ 

+

82 if self.sampler == 'sobol': 

+

83 return {'calc_second_order': self.calc_second_order} 

+

84 if self.sampler == 'morris': 

+

85 return {'num_levels': self.num_levels, 

+

86 'optimal_trajectories': self.optimal_trajectories, 

+

87 'local_optimization': self.local_optimization} 

+

88 if self.sampler == 'fast': 

+

89 return {'M': self.M} 

+

90 raise NotImplementedError(f'{self.sampler} is not implemented yet') 

+

91 

+

92 def generate_samples(self): 

+

93 """ 

+

94 Run the selected sampler and return the results.

+

95 

+

96 :return: 

+

97 The list of samples generated as a NumPy array with one row per sample 

+

98 and each row containing one value for each variable name in `problem['names']`. 

+

99 :rtype: np.ndarray 

+

100 """ 

+

101 if self.sampler == 'sobol': 

+

102 return sobol.sample(self.problem, 

+

103 N=self.num_samples, 

+

104 **self.create_sampler_demand()) 

+

105 if self.sampler == 'morris': 

+

106 return morris.sample(self.problem, 

+

107 N=self.num_samples, 

+

108 **self.create_sampler_demand()) 

+

109 if self.sampler == 'fast': 

+

110 return fast.sample(self.problem, 

+

111 N=self.num_samples, 

+

112 **self.create_sampler_demand()) 

+

113 raise NotImplementedError(f'{self.sampler} is not implemented yet') 

+

114 

+

115 def _get_res_dict(self, result: dict, cal_class: CalibrationClass, analysis_variable: str): 

+

116 """ 

+

117 Convert the result object to a dict with the key 

+

118 being the variable name and the value being the result 

+

119 associated to self.analysis_variable. 

+

120 """ 

+

121 if result is None: 

+

122 names = cal_class.tuner_paras.get_names() 

+

123 return dict(zip(names, np.zeros(len(names)))) 

+

124 return dict(zip(result['names'], result[analysis_variable])) 

+
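Unlike the FAST analyzer, PAWN uses both the samples X and the outputs Y. A minimal standalone sketch with a made-up problem and toy model (both are illustrative assumptions):

    import numpy as np
    from SALib.sample import sobol
    from SALib.analyze import pawn as analyze_pawn

    problem = {"num_vars": 2, "names": ["C", "heatConv_a"],
               "bounds": [[4000, 6000], [10, 300]]}
    x = sobol.sample(problem, N=64, calc_second_order=True)
    y = np.array([xi[0] * 1e-4 + xi[1] for xi in x])  # toy model output
    res = analyze_pawn.analyze(problem, x, y, S=10)
    print(res["median"])  # the typically reported PAWN index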
diff --git a/docs/master/coverage/d_f4434ebd7e0d50e1_plotting_py.html b/docs/master/coverage/d_f4434ebd7e0d50e1_plotting_py.html
new file mode 100644
index 00000000..62c298dc
--- /dev/null
+++ b/docs/master/coverage/d_f4434ebd7e0d50e1_plotting_py.html
@@ -0,0 +1,649 @@
+ Coverage for aixcalibuha/sensitivity_analysis/plotting.py: 93% (235 statements)
+ coverage.py v7.4.1, created at 2024-01-27 10:48 +0000

1""" 

+

2Module containing functions for plotting sensitivity results 

+

3""" 

+

4import sys 

+

5import pandas as pd 

+

6import numpy as np 

+

7import matplotlib 

+

8import matplotlib.pyplot as plt 

+

9from SALib.plotting.bar import plot as barplot 

+

10from aixcalibuha.utils.visualizer import short_name 

+

11 

+

12 

+

13def plot_single(result: pd.DataFrame, 

+

14 cal_classes: [str] = None, 

+

15 goals: [str] = None, 

+

16 max_name_len: int = 15, 

+

17 **kwargs): 

+

18 """ 

+

19 Plot sensitivity results of first and total order analysis variables. 

+

20 For each calibration class one figure is created, which shows for each goal an axis 

+

21 with a barplot of the values of the analysis variables. 

+

22 

+

23 :param pd.DataFrame result: 

+

24 A result from run 

+

25 :param int max_name_len: 

+

26 Default is 15. Shortens the parameter names to max_name_len characters. 

+

27 :param [str] cal_classes: 

+

28 Default are all possible calibration classes. If a list of 

+

29 names of calibration classes is given only plots for these 

+

30 classes are created. 

+

31 :param [str] goals: 

+

32 Default are all possible goal names. If a list of specific 

+

33 goal names is given only these will be plotted. 

+

34 :keyword bool show_plot: 

+

35 Default is True. If False, all created plots are not shown. 

+

36 :keyword ([fig], [ax]) figs_axes: 

+

37 Default None. Set your own figures or subfigures with corresponding axes for customization.

+

38 :keyword bool use_suffix: 

+

39 Default is False. If True, the last part after the last point

+

40 of Modelica variables is used for the x ticks. 

+

41 :return: 

+

42 Returns all created figures and axes in lists like [fig], [ax] 

+

43 with shapes (len(cal_classes)), (len(cal_classes), len(goals)) 

+

44 """ 

+

45 use_suffix = kwargs.pop('use_suffix', False) 

+

46 show_plot = kwargs.pop('show_plot', True) 

+

47 figs_axes = kwargs.pop('figs_axes', None) 

+

48 _func_name = kwargs.pop('_func_name', 'plot_single') 

+

49 

+

50 # get lists of the calibration classes and their goals in the result dataframe 

+

51 if cal_classes is None: 

+

52 cal_classes = _del_duplicates(list(result.index.get_level_values(0))) 

+

53 if goals is None: 

+

54 goals = _del_duplicates(list(result.index.get_level_values(1))) 

+

55 

+

56 result = _rename_tuner_names(result, use_suffix, max_name_len, _func_name) 

+

57 

+

58 # plotting with simple plot function of the SALib 

+

59 figs = [] 

+

60 axes = [] 

+

61 for col, cal_class in enumerate(cal_classes): 

+

62 fig, ax = _create_figs_axes(figs_axes, col, goals, f"Class: {cal_class}") 

+

63 figs.append(fig) 

+

64 axes.append(ax) 

+

65 for row, goal in enumerate(goals): 

+

66 result_df = result.loc[cal_class, goal] 

+

67 axes[col][row].grid(True, which='both', axis='y') 

+

68 barplot(result_df.T, ax=axes[col][row]) 

+

69 axes[col][row].set_title(f"Goal: {goal}") 

+

70 axes[col][row].legend() 

+

71 

+

72 if show_plot: 

+

73 plt.show() 

+

74 

+

75 return figs, axes 

+
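The result expected above is a DataFrame with a (class, goal, analysis variable) MultiIndex and one column per tuner parameter. A small sketch of that layout and the per-class/goal selection, with made-up values:

    import pandas as pd

    index = pd.MultiIndex.from_product(
        [["device on"], ["TRoom"], ["S1", "ST"]])
    result = pd.DataFrame([[0.6, 0.1], [0.7, 0.2]],
                          index=index, columns=["C", "heatConv_a"])
    # plot_single selects one frame per class/goal and hands its
    # transpose to the SALib barplot:
    print(result.loc[("device on", "TRoom")].T)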

76 

+

77 

+

78def plot_second_order(result: pd.DataFrame, 

+

79 cal_classes: [str] = None, 

+

80 goals: [str] = None, 

+

81 max_name_len: int = 15, 

+

82 **kwargs): 

+

83 """ 

+

84 Plot sensitivity results of second order analysis variables. 

+

85 For each calibration class and goal one figure of a 3d plot is created 

+

86 with the barplots of the interactions for each parameter. 

+

87 Only works for more than 2 parameters.

+

88 

+

89 :param pd.DataFrame result: 

+

90 A result from run 

+

91 :param int max_name_len: 

+

92 Default is 15. Shortens the parameter names to max_name_len characters. 

+

93 :param [str] cal_classes: 

+

94 Default are all possible calibration classes. If a list of 

+

95 names of calibration classes is given only plots for these 

+

96 classes are created. 

+

97 :param [str] goals: 

+

98 Default are all possible goal names. If a list of specific 

+

99 goal names is given only these will be plotted. 

+

100 :keyword bool show_plot: 

+

101 Default is True. If False, all created plots are not shown. 

+

102 :keyword bool use_suffix: 

+

103 Default is False. If True, the last part after the last point

+

104 of Modelica variables is used for the x ticks. 

+

105 :keyword [[fig]] figs: 

+

106 Default None. Set your own figures or subfigures for customization.

+

107 Shape (len(cal_classes), len(goals)) 

+

108 :return: 

+

109 Returns all created figures and axes in lists like [fig], [ax] 

+

110 """ 

+

111 use_suffix = kwargs.pop('use_suffix', False) 

+

112 show_plot = kwargs.pop('show_plot', True) 

+

113 figs = kwargs.pop('figs', None) 

+

114 result = result.fillna(0) 

+

115 if cal_classes is None: 

+

116 cal_classes = _del_duplicates(list(result.index.get_level_values(0))) 

+

117 if goals is None: 

+

118 goals = _del_duplicates(list(result.index.get_level_values(1))) 

+

119 

+

120 result = _rename_tuner_names(result, use_suffix, max_name_len, "plot_second_order") 

+

121 

+

122 tuner_names = result.columns 

+

123 if len(tuner_names) < 2: 

+

124 return None 

+

125 xticks = np.arange(len(tuner_names)) 

+

126 

+

127 # when the index is not sorted pandas throws a performance warning 

+

128 result = result.sort_index() 

+

129 

+

130 # plot of S2 without S2_conf 

+

131 all_figs = [] 

+

132 all_axes = [] 

+

133 for class_idx, cal_class in enumerate(cal_classes): 

+

134 class_figs = [] 

+

135 class_axes = [] 

+

136 for goal_idx, goal in enumerate(goals): 

+

137 if figs is None: 

+

138 fig = plt.figure() 

+

139 else: 

+

140 fig = figs[class_idx][goal_idx] 

+

141 ax = fig.add_subplot(projection='3d') 

+

142 for idx, name in enumerate(tuner_names): 

+

143 ax.bar(tuner_names, 

+

144 result.loc[cal_class, goal, 'S2', name].to_numpy(), 

+

145 zs=idx, zdir='y', alpha=0.8) 

+

146 ax.set_title(f"Class: {cal_class} Goal: {goal}") 

+

147 ax.set_zlabel('S2 [-]') 

+

148 ax.set_yticks(xticks) 

+

149 ax.set_yticklabels(tuner_names) 

+

150 # rotate tick labels for better readability 

+

151 plt.setp(ax.get_xticklabels(), rotation=90, ha="right", rotation_mode="anchor") 

+

152 plt.setp(ax.get_yticklabels(), rotation=90, ha="right", rotation_mode="anchor") 

+

153 class_figs.append(fig) 

+

154 class_axes.append(ax) 

+

155 all_figs.append(class_figs) 

+

156 all_axes.append(class_axes) 

+

157 if show_plot: 

+

158 plt.show() 

+

159 return all_figs, all_axes 

+

160 

+

161 

+

162def plot_single_second_order(result: pd.DataFrame, 

+

163 para_name: str, 

+

164 cal_classes: [str] = None, 

+

165 goals: [str] = None, 

+

166 max_name_len: int = 15, 

+

167 **kwargs): 

+

168 """ 

+

169 Plot the value of S2 from one parameter with all other parameters. 

+

170 

+

171 :param pd.DataFrame result: 

+

172 Second order result from run. 

+

173 :param str para_name: 

+

174 Name of the parameter of which the results should be plotted. 

+

175 :param [str] cal_classes: 

+

176 Default are all possible calibration classes. If a list of 

+

177 names of calibration classes is given only plots for these 

+

178 classes are created. 

+

179 :param [str] goals: 

+

180 Default are all possible goal names. If a list of specific 

+

181 goal names is given only these will be plotted. 

+

182 :param int max_name_len: 

+

183 Default is 15. Shortens the parameter names to max_name_len characters. 

+

184 :keyword bool show_plot: 

+

185 Default is True. If False, all created plots are not shown. 

+

186 :keyword bool use_suffix: 

+

187 Default is False. If True, the last part after the last point

+

188 of Modelica variables is used for the x ticks. 

+

189 :keyword ([fig], [ax]) figs_axes: 

+

190 Default None. Set your own figures or subfigures with corresponding axes for customization.

+

191 :return: 

+

192 Returns all created figures and axes in lists like [fig], [ax] 

+

193 with shapes (len(cal_classes)), (len(cal_classes), len(goals)) 

+

194 """ 

+

195 use_suffix = kwargs.pop('use_suffix', False) 

+

196 show_plot = kwargs.pop('show_plot', True) 

+

197 figs_axes = kwargs.pop('figs_axes', None) 

+

198 

+

199 result = result.loc[:, :, :, para_name][:].fillna(0) 

+

200 figs, axes = plot_single( 

+

201 result=result, 

+

202 show_plot=False, 

+

203 cal_classes=cal_classes, 

+

204 goals=goals, 

+

205 figs_axes=figs_axes, 

+

206 use_suffix=use_suffix, 

+

207 max_name_len=max_name_len, 

+

208 _func_name="plot_single_second_order" 

+

209 ) 

+

210 # set new title for the figures of each calibration class 

+

211 for fig in figs: 

+

212 fig.suptitle(f"Interaction of {para_name} in class {fig._suptitle.get_text()}") 

+

213 if show_plot: 

+

214 plt.show() 

+

215 return figs, axes 

+

216 

+

217 

+

218def heatmap(result: pd.DataFrame, 

+

219 cal_class: str, 

+

220 goal: str, 

+

221 ax: matplotlib.axes.Axes = None, 

+

222 max_name_len: int = 15, 

+

223 **kwargs): 

+

224 """ 

+

225 Plot S2 sensitivity results from one calibration class and goal as a heatmap. 

+

226 

+

227 :param pd.DataFrame result: 

+

228 A second order result from run 

+

229 :param str cal_class: 

+

230 Name of the class to plot S2 from. 

+

231 :param str goal: 

+

232 Name of the goal to plot S2 from. 

+

233 :param matplotlib.axes ax: 

+

234 Default is None. If an axes is given the heatmap will be plotted on it, else 

+

235 a new figure and axes is created. 

+

236 :param int max_name_len: 

+

237 Default is 15. Shortens the parameter names to max_name_len characters. 

+

238 :keyword bool show_plot: 

+

239 Default is True. If False, all created plots are not shown. 

+

240 :keyword bool use_suffix: 

+

241 Default is False. If True, only the last suffix of a Modelica variable is displayed. 

+

242 :return: 

+

243 Returns axes 

+

244 """ 

+

245 use_suffix = kwargs.pop('use_suffix', False) 

+

246 show_plot = kwargs.pop('show_plot', True) 

+

247 _func_name = kwargs.pop('_func_name', "heatmap") 

+

248 

+

249 result = _rename_tuner_names(result, use_suffix, max_name_len, _func_name) 

+

250 if ax is None: 

+

251 _, ax = plt.subplots(layout="constrained") 

+

252 data = result.sort_index().loc[cal_class, goal, 'S2'].fillna(0).reindex( 

+

253 index=result.columns) 

+

254 image = ax.imshow(data, cmap='Reds') 

+

255 ax.set_title(f'Class: {cal_class} Goal: {goal}') 

+

256 ax.set_xticks(np.arange(len(data.columns))) 

+

257 ax.set_yticks(np.arange(len(data.index))) 

+

258 ax.set_xticklabels(data.columns) 

+

259 ax.set_yticklabels(data.index) 

+

260 ax.spines[:].set_color('black') 

+

261 plt.setp(ax.get_xticklabels(), rotation=90, ha="right", rotation_mode="anchor") 

+

262 cbar = ax.figure.colorbar(image, ax=ax) 

+

263 cbar.ax.set_ylabel("S2", rotation=90) 

+

264 if show_plot: 

+

265 plt.show() 

+

266 return ax 

+

267 

+

268 

+

269def heatmaps(result: pd.DataFrame, 

+

270 cal_classes: [str] = None, 

+

271 goals: [str] = None, 

+

272 max_name_len: int = 15, 

+

273 **kwargs): 

+

274 """ 

+

275 Plot S2 sensitivity results as a heatmap for multiple 

+

276 calibration classes and goals in one figure. 

+

277 

+

278 :param pd.DataFrame result: 

+

279 A second order result from run 

+

280 :param [str] cal_classes: 

+

281 Default is a list of all calibration classes in the result. 

+

282 If a list of classes is given only these classes are plotted. 

+

283 :param [str] goals: 

+

284 Default is a list of all goals in the result. 

+

285 If a list of goals is given only these goals are plotted. 

+

286 :param int max_name_len: 

+

287 Default is 15. Shortens the parameter names to max_name_len characters. 

+

288 :keyword bool show_plot: 

+

289 Default is True. If False, all created plots are not shown. 

+

290 :keyword bool use_suffix: 

+

291 Default is False. If True, only the last suffix of a Modelica variable is displayed. 

+

292 """ 

+

293 use_suffix = kwargs.pop('use_suffix', False) 

+

294 show_plot = kwargs.pop('show_plot', True) 

+

295 

+

296 if cal_classes is None: 

+

297 cal_classes = result.index.get_level_values("Class").unique() 

+

298 if goals is None: 

+

299 goals = result.index.get_level_values("Goal").unique() 

+

300 

+

301 _, axes = plt.subplots(ncols=len(cal_classes), nrows=len(goals), sharex='all', sharey='all', 

+

302 layout="constrained") 

+

303 if len(goals) == 1: 

+

304 axes = [axes] 

+

305 if len(cal_classes) == 1: 

+

306 for idx, ax in enumerate(axes): 

+

307 axes[idx] = [ax] 

+

308 _func_name = "heatmaps" 

+

309 for col, class_name in enumerate(cal_classes): 

+

310 for row, goal_name in enumerate(goals): 

+

311 heatmap(result, 

+

312 class_name, 

+

313 goal_name, 

+

314 ax=axes[row][col], 

+

315 show_plot=False, 

+

316 use_suffix=use_suffix, 

+

317 max_name_len=max_name_len, 

+

318 _func_name=_func_name) 

+

319 _func_name = None 

+

320 if show_plot: 

+

321 plt.show() 

+

322 

+

323 

+

324def plot_time_dependent(result: pd.DataFrame, 

+

325 parameters: [str] = None, 

+

326 goals: [str] = None, 

+

327 analysis_variables: [str] = None, 

+

328 plot_conf: bool = True, 

+

329 **kwargs): 

+

330 """ 

+

331 Plot time dependent sensitivity results without interactions from run_time_dependent(). 

+

332 

+

333 For each goal one figure is created with one axes for each analysis variable. 

+

334 In these plots the time dependent sensitivity of the parameters is plotted. 

+

335 The confidence interval can also be plotted. 

+

336 

+

337 :param pd.DataFrame result: 

+

338 A result from run_time_dependent without second order results. 

+

339 :param [str] parameters: 

+

340 Default all parameters. List of parameters to plot the sensitivity. 

+

341 :param [str] analysis_variables: 

+

342 Default all analysis_variables. List of analysis variables to plot. 

+

343 :param bool plot_conf: 

+

344 Default True. If true, the confidence intervals for each parameter are plotted. 

+

345 :param [str] goals: 

+

346 Default are all possible goal names. If a list of specific 

+

347 goal names is given only these will be plotted. 

+

348 :keyword ([fig], [ax]) figs_axes: 

+

349 Default None. Optional custom figures and axes 

+

350 (see example for verbose sensitivity analysis). 

+

351 :return: 

+

352 Returns all created figures and axes in lists like [fig], [ax] 

+

353 with shapes (len(goals)), (len(goals), len(analysis_variables)) 

+

354 :keyword bool show_plot: 

+

355 Default is True. If False, all created plots are not shown. 

+

356 :keyword bool use_suffix: 

+

357 Default is False. If True, the last part after the last point

+

358 of Modelica variables is used for the x ticks. 

+

359 :keyword int max_name_len: 

+

360 Default is 50. Shortens the parameter names to max_name_len characters. 

+

361 """ 

+

362 use_suffix = kwargs.pop('use_suffix', False) 

+

363 max_name_len = kwargs.pop('max_name_len', 50) 

+

364 show_plot = kwargs.pop('show_plot', True) 

+

365 figs_axes = kwargs.pop('figs_axes', None) 

+

366 

+

367 if goals is None: 

+

368 goals = _del_duplicates(list(result.index.get_level_values(0))) 

+

369 all_analysis_variables = _del_duplicates(list(result.index.get_level_values(1))) 

+

370 if analysis_variables is None: 

+

371 analysis_variables = [av for av in all_analysis_variables if '_conf' not in av] 

+

372 if parameters is None: 

+

373 parameters = result.columns.values 

+

374 

+

375 result = _rename_tuner_names(result, use_suffix, max_name_len, "plot_time_dependent") 

+

376 

+

377 renamed_parameters = [_format_name(para, use_suffix, max_name_len) for para in parameters] 

+

378 

+

379 figs = [] 

+

380 axes = [] 

+

381 for idx_goal, goal in enumerate(goals): 

+

382 fig, ax = _create_figs_axes(figs_axes, idx_goal, analysis_variables, f"Goal: {goal}") 

+

383 figs.append(fig) 

+

384 axes.append(ax) 

+

385 for idx_av, analysis_var in enumerate(analysis_variables): 

+

386 axes[idx_goal][idx_av].plot(result.loc[goal, analysis_var][renamed_parameters]) 

+

387 axes[idx_goal][idx_av].set_ylabel(analysis_var) 

+

388 axes[idx_goal][idx_av].legend(renamed_parameters) 

+

389 if plot_conf and analysis_var + '_conf' in all_analysis_variables: 

+

390 for para in renamed_parameters: 

+

391 y = result.loc[goal, analysis_var][para] 

+

392 x = y.index.to_numpy() 

+

393 conv_int = result.loc[goal, analysis_var + '_conf'][para] 

+

394 large_values_indices = conv_int[conv_int > 1].index 

+

395 if list(large_values_indices): 

+

396 sys.stderr.write( 

+

397 f"plot_time_dependent INFO:" 

+

398 f"Confidence interval for {goal}, {analysis_var}, {para} was at the " 

+

399 f"following times {list(large_values_indices)} lager than 1 " 

+

400 f"and is smoothed out in the plot.\n") 

+

401 for idx in large_values_indices: 

+

402 prev_idx = conv_int.index.get_loc(idx) - 1 

+

403 if prev_idx >= 0: 

+

404 conv_int.iloc[conv_int.index.get_loc(idx)] = conv_int.iloc[prev_idx] 

+

405 else: 

+

406 conv_int.iloc[conv_int.index.get_loc(idx)] = 1 

+

407 axes[idx_goal][idx_av].fill_between(x, (y - conv_int), (y + conv_int), alpha=.1) 

+

408 axes[idx_goal][-1].set_xlabel('time') 

+

409 if show_plot: 

+

410 plt.show() 

+

411 return figs, axes 

+
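The smoothing step above replaces out-of-range confidence values with their predecessor (or 1 at the start). A small sketch on a plain Series with made-up values:

    import pandas as pd

    conf = pd.Series([0.2, 0.3, 5.0, 0.25], index=[0, 10, 20, 30])
    for idx in conf[conf > 1].index:
        pos = conf.index.get_loc(idx)
        conf.iloc[pos] = conf.iloc[pos - 1] if pos > 0 else 1
    print(conf.tolist())  # [0.2, 0.3, 0.3, 0.25]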

412 

+

413 

+

414def plot_parameter_verbose(parameter: str, 

+

415 single_result: pd.DataFrame, 

+

416 second_order_result: pd.DataFrame = None, 

+

417 goals: [str] = None, 

+

418 **kwargs): 

+

419 """ 

+

420 Plot all time dependent sensitivity measure for one parameter. 

+

421 For each goal an axes is created within one figure. 

+

422 

+

423 If second_order_results from SobolAnalyzer.run_time_dependent are given

+

424 the S2 results of the interaction with each other parameter are added on top 

+

425 of each other and the first order result. 

+

426 

+

427 :param str parameter: 

+

428 Parameter to plot all sensitivity results for. If use_suffix=True, then 

+

429 the name must also be only the suffix. 

+

430 :param pd.DataFrame single_result: 

+

431 First and total order result form run_time_dependent. 

+

432 :param pd.DataFrame second_order_result: 

+

433 Default None. Second order result of SobolAnalyzer.run_time_dependent. 

+

434 :param [str] goals: 

+

435 Default are all possible goal names. If a list of specific 

+

436 goal names is given only these will be plotted. 

+

437 :keyword (fig, [ax]) fig_axes: 

+

438 Default None. Optional custom figures and axes 

+

439 (see example for verbose sensitivity analysis). 

+

440 :return: 

+

441 Returns all created figures and axes in lists like fig, [ax] 

+

442 with shape (len(goals)) for the axes list 

+

443 :keyword bool show_plot: 

+

444 Default is True. If False, all created plots are not shown. 

+

445 :keyword bool use_suffix: 

+

446 Default is False. If True, the last part after the last point

+

447 of Modelica variables is used for the x ticks. 

+

448 :keyword int max_name_len: 

+

449 Default is 15. Shortens the parameter names to max_name_len characters. 

+

450 """ 

+

451 use_suffix = kwargs.pop('use_suffix', False) 

+

452 max_name_len = kwargs.pop('max_name_len', 50) 

+

453 show_plot = kwargs.pop('show_plot', True) 

+

454 fig_axes = kwargs.pop('fig_axes', None) 

+

455 

+

456 if goals is None: 

+

457 goals = _del_duplicates(list(single_result.index.get_level_values(0))) 

+

458 all_analysis_variables = _del_duplicates(list(single_result.index.get_level_values(1))) 

+

459 analysis_variables = [av for av in all_analysis_variables if '_conf' not in av] 

+

460 

+

461 renamed_parameter = _format_name(parameter, use_suffix, max_name_len) 

+

462 

+

463 single_result = _rename_tuner_names(single_result, use_suffix, max_name_len, 

+

464 "plot_parameter_verbose") 

+

465 if second_order_result is not None: 

+

466 second_order_result = _rename_tuner_names(second_order_result, use_suffix, max_name_len) 

+

467 

+

468 if fig_axes is None: 

+

469 fig, ax = plt.subplots(len(goals), sharex='all', layout="constrained") 

+

470 else: 

+

471 fig = fig_axes[0] 

+

472 ax = fig_axes[1] 

+

473 fig.suptitle(f"Parameter: {parameter}") 

+

474 if not isinstance(ax, np.ndarray): 

+

475 ax = [ax] 

+

476 for g_i, goal in enumerate(goals): 

+

477 if second_order_result is not None: 

+

478 result_2_goal = second_order_result.loc[goal, 'S2', renamed_parameter] 

+

479 mean = result_2_goal.mean().drop([renamed_parameter]) 

+

480 mean.sort_values(ascending=False, inplace=True) 

+

481 sorted_interactions = list(mean.index) 

+

482 time_ar = _del_duplicates(list(result_2_goal.index.get_level_values(0))) 

+

483 value = single_result.loc[goal, 'S1'][renamed_parameter].to_numpy() 

+

484 ax[g_i].plot(single_result.loc[goal, 'S1'][renamed_parameter], label='S1') 

+

485 ax[g_i].fill_between(time_ar, np.zeros_like(value), value, alpha=0.1) 

+

486 for para in sorted_interactions: 

+

487 value_2 = value + result_2_goal[para].to_numpy() 

+

488 ax[g_i].plot(time_ar, value_2, label='S2 ' + para) 

+

489 ax[g_i].fill_between(time_ar, value, value_2, alpha=0.1) 

+

490 value = value_2 

+

491 ax[g_i].plot(single_result.loc[goal, 'ST'][renamed_parameter], label='ST') 

+

492 legend = ['S1'] 

+

493 legend.extend(analysis_variables) 

+

494 legend.append('ST') 

+

495 ax[g_i].set_title(f"Goal: {goal}") 

+

496 ax[g_i].legend() 

+

497 else: 

+

498 for analysis_var in analysis_variables: 

+

499 ax[g_i].plot(single_result.loc[goal, analysis_var][renamed_parameter]) 

+

500 ax[g_i].legend(analysis_variables) 

+

501 if show_plot: 

+

502 plt.show() 

+

503 return fig, ax 

+
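A minimal usage sketch of this plotting function (its name, plot_parameter_verbose, is inferred from the func_name string passed to _rename_tuner_names above; the import path, the parameter name, and the CSV file names are assumptions based on the SobolAnalyzer save/load functions shown later in this report):

from aixcalibuha.sensitivity_analysis import SobolAnalyzer  # import path assumed
from aixcalibuha.sensitivity_analysis.plotting import plot_parameter_verbose

# Load first/total order and second order results saved by run_time_dependent()
result = SobolAnalyzer.load_from_csv("SobolAnalyzer_results_time.csv")
result_2 = SobolAnalyzer.load_second_order_from_csv(
    "SobolAnalyzer_results_second_order_time.csv")
# "heatPump.n" is a hypothetical tuner-parameter name
fig, ax = plot_parameter_verbose(parameter="heatPump.n",
                                 single_result=result,
                                 second_order_result=result_2,
                                 show_plot=False)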

504 

+

505 

+

506def _del_duplicates(x): 

+

507 """Helper function""" 

+

508 return list(dict.fromkeys(x)) 

+

509 

+

510 

+

511def _rename_tuner_names(result, use_suffix, max_len, func_name=None): 

+

512 """Helper function""" 

+

513 tuner_names = list(result.columns) 

+

514 renamed_names = {name: _format_name(name, use_suffix, max_len) for name in tuner_names} 

+

515 result = result.rename(columns=renamed_names, index=renamed_names) 

+

516 result = result.sort_index() 

+

517 for old, new in renamed_names.items(): 

+

518 if old != new and func_name is not None: 

+

519 sys.stderr.write(f"{func_name} INFO: parameter name {old} changed to {new}\n") 

+

520 return result 

+

521 

+

522 

+

523def _format_name(name, use_suffix, max_len): 

+

524 """ 

+

525 Format tuner names. 

+

526 """ 

+

527 if use_suffix: 

+

528 name = _get_suffix(name) 

+

529 name = short_name(name, max_len) 

+

530 return name 

+

531 

+

532 

+

533def _get_suffix(modelica_var_name): 

+

534 """Helper function""" 

+

535 index_last_dot = modelica_var_name.rfind('.') 

+

536 suffix = modelica_var_name[index_last_dot + 1:] 

+

537 return suffix 

+
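A quick sketch of what these name helpers return (illustrative values only):

_get_suffix("heatPump.con.vol.V")  # -> "V" (text after the last dot)
_get_suffix("duration")            # -> "duration" (rfind returns -1, slice keeps all)
_format_name("heatPump.con.vol.V", use_suffix=True, max_len=15)  # -> "V"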

538 

+

539 

+

540def _create_figs_axes(figs_axes, fig_index, ax_len_list, fig_title): 

+

541 """ 

+

542 Check if figs and axes are already given, if not create them. 

+

543 """ 

+

544 if figs_axes is None: 

+

545 fig, ax = plt.subplots(len(ax_len_list), sharex='all', layout="constrained") 

+

546 else: 

+

547 fig = figs_axes[0][fig_index] 

+

548 ax = figs_axes[1][fig_index] 

+

549 fig.suptitle(fig_title) 

+

550 if not isinstance(ax, np.ndarray): 

+

551 ax = [ax] 

+

552 return fig, ax 

+
+ + + diff --git a/docs/master/coverage/d_f4434ebd7e0d50e1_sensitivity_analyzer_py.html b/docs/master/coverage/d_f4434ebd7e0d50e1_sensitivity_analyzer_py.html new file mode 100644 index 00000000..7dd3dfd0 --- /dev/null +++ b/docs/master/coverage/d_f4434ebd7e0d50e1_sensitivity_analyzer_py.html @@ -0,0 +1,1013 @@ + + + + + Coverage for aixcalibuha/sensitivity_analysis/sensitivity_analyzer.py: 94% + + + + + +
+
+

+ Coverage for aixcalibuha/sensitivity_analysis/sensitivity_analyzer.py: + 94% +

+ +

+ 407 statements   + + + +

+

+ « prev     + ^ index     + » next +       + coverage.py v7.4.1, + created at 2024-01-27 10:48 +0000 +

+ +
+
+
+

1"""Package containing modules for sensitivity analysis. 

+

2The module contains the relevant base-classes.""" 

+

3import abc 

+

4import copy 

+

5import os 

+

6import pathlib 

+

7import multiprocessing as mp 

+

8from typing import List 

+

9from collections import Counter 

+

10import numpy as np 

+

11import pandas as pd 

+

12from ebcpy.utils import setup_logger 

+

13from ebcpy.utils.reproduction import CopyFile 

+

14from ebcpy.simulationapi import SimulationAPI 

+

15from aixcalibuha import CalibrationClass, data_types 

+

16from aixcalibuha import utils 

+

17from aixcalibuha.sensitivity_analysis.plotting import plot_single, plot_time_dependent 

+

18 

+

19 

+

20def _load_single_file(_filepath, parquet_engine='pyarrow'): 

+

21 """Helper function""" 

+

22 if _filepath is None: 

+

23 return None 

+

24 return data_types.TimeSeriesData(_filepath, default_tag='sim', key='simulation', 

+

25 engine=parquet_engine) 

+

26 

+

27 

+

28def _load_files(_filepaths, parquet_engine='pyarrow'): 

+

29 """Helper function""" 

+

30 results = [] 

+

31 for _filepath in _filepaths: 

+

32 results.append(_load_single_file(_filepath, parquet_engine=parquet_engine)) 

+

33 return results 

+

34 

+

35 

+

36def _restruct_verbose(list_output_verbose): 

+

37 """Helper function""" 

+

38 output_verbose = {} 

+

39 for key, val in list_output_verbose[0].items(): 

+

40 output_verbose[key] = np.array([]) 

+

41 for i in list_output_verbose: 

+

42 for key, val in i.items(): 

+

43 output_verbose[key] = np.append(output_verbose[key], np.array([val[1]])) 

+

44 return output_verbose 

+
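To illustrate the restructuring, assume two simulations and two hypothetical goals, each verbose result mapping a goal to a (weight, value) tuple:

list_output_verbose = [
    {"T_out": (0.5, 12.3), "P_el": (0.5, 4.2)},  # verbose errors of simulation 0
    {"T_out": (0.5, 11.8), "P_el": (0.5, 4.9)},  # verbose errors of simulation 1
]
_restruct_verbose(list_output_verbose)
# -> {"T_out": array([12.3, 11.8]), "P_el": array([4.2, 4.9])}
# only val[1] (the measure value) is kept; the weights are dropped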

45 

+

46 

+

47def _concat_all_sims(sim_results_list): 

+

48 """Helper function that concatenates all results in a list into one DataFrame.""" 

+

49 sim_results_list = [r.to_df() for r in sim_results_list] 

+

50 sim_results_list = pd.concat(sim_results_list, keys=range(len(sim_results_list)), 

+

51 axis='columns') 

+

52 sim_results_list = sim_results_list.swaplevel(axis=1).sort_index(axis=1) 

+

53 return sim_results_list 

+

54 

+

55 

+

56def _restruct_time_dependent(sen_time_dependent_list, time_index): 

+

57 """Helper function that restructures the time dependent sensitivity results.""" 

+

58 

+

59 def _restruct_single(sen_time_dependent_list_s, second_order=False): 

+

60 sen_time_dependent_df = pd.concat(sen_time_dependent_list_s, keys=time_index, axis=0) 

+

61 sen_time_dependent_df = sen_time_dependent_df.droplevel('Class', axis='index') 

+

62 sen_time_dependent_df = sen_time_dependent_df.swaplevel(0, 1) 

+

63 sen_time_dependent_df = sen_time_dependent_df.swaplevel(1, 2).sort_index(axis=0) 

+

64 if second_order: 

+

65 sen_time_dependent_df = sen_time_dependent_df.swaplevel(2, 3).sort_index(axis=0) 

+

66 sen_time_dependent_df.index.set_names( 

+

67 ['Goal', 'Analysis variable', 'Interaction', 'time'], inplace=True) 

+

68 else: 

+

69 sen_time_dependent_df.index.set_names(['Goal', 'Analysis variable', 'time'], 

+

70 inplace=True) 

+

71 return sen_time_dependent_df 

+

72 

+

73 if isinstance(sen_time_dependent_list[0], tuple): 

+

74 sen_time_dependent_list1, sen_time_dependent_list2 = zip(*sen_time_dependent_list) 

+

75 return _restruct_single(sen_time_dependent_list1), _restruct_single( 

+

76 sen_time_dependent_list2, True) 

+

77 return _restruct_single(sen_time_dependent_list) 

+

78 

+

79 

+

80def _divide_chunks(long_list, chunk_length): 

+

81 """Helper function that divides a list into multiple lists with a specific chunk length.""" 

+

82 for i in range(0, len(long_list), chunk_length): 

+

83 yield long_list[i:i + chunk_length] 

+
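A short usage example of this generator (illustrative input):

list(_divide_chunks([0, 1, 2, 3, 4, 5, 6], 3))
# -> [[0, 1, 2], [3, 4, 5], [6]]  (the last chunk may be shorter)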

84 

+

85 

+

86class SenAnalyzer(abc.ABC): 

+

87 """ 

+

88 Class to perform a Sensitivity Analysis. 

+

89 

+

90 :param SimulationAPI sim_api: 

+

91 Simulation-API used to simulate the samples 

+

92 :param int num_samples: 

+

93 The parameter `N` to the sampler methods of sobol and morris. NOTE: This is not the 

+

94 number of samples produced, but relates to the total number of samples produced in 

+

95 a manner dependent on the sampler method used. See the documentation of the specific  

+

96 method in SALib for more information. 

+

97 :keyword str,os.path.normpath cd: 

+

98 The path for the current working directory. 

+

99 Logger and results will be stored here. 

+

100 :keyword boolean fail_on_error: 

+

101 Default is True. If True, the calibration will stop with an error if 

+

102 the simulation fails. See also: ``ret_val_on_error`` 

+

103 :keyword float,np.NAN ret_val_on_error: 

+

104 Default is np.NAN. If ``fail_on_error`` is false, you can specify here 

+

105 which value to return in the case of a failed simulation. Possible 

+

106 options are np.NaN, np.inf or some other high numbers. Be aware that this 

+

107 may influence the solver. 

+

108 :keyword boolean save_files: 

+

109 Default False. If True, all simulation files for each iteration will be saved! 

+

110 :keyword str suffix_files: 

+

111 Default 'csv'. Specifies the data format to store the simulation files in. 

+

112 Options are 'csv', 'hdf', 'parquet'. 

+

113 :keyword str parquet_engine: 

+

114 The engine to use for the data format parquet. 

+

115 Supported options can be extracted 

+

116 from the ebcpy.TimeSeriesData.save() function. 

+

117 Default is 'pyarrow'. 

+

118 :keyword str,os.path.normpath savepath_sim: 

+

119 Default is cd. Own directory for the time series data sets of all simulations 

+

120 during the sensitivity analysis. A separate directory can be necessary for large data sets, 

+

121 because they can crash the IDE during indexing when they are in the project folder. 

+

122 

+

123 """ 

+

124 

+

125 def __init__(self, 

+

126 sim_api: SimulationAPI, 

+

127 num_samples: int, 

+

128 **kwargs): 

+

129 """Instantiate class parameters""" 

+

130 # Setup the instance attributes 

+

131 self.sim_api = sim_api 

+

132 self.num_samples = num_samples 

+

133 

+

134 # Update kwargs 

+

135 self.fail_on_error = kwargs.pop("fail_on_error", True) 

+

136 self.save_files = kwargs.pop("save_files", False) 

+

137 self.suffix_files = kwargs.pop('suffix_files', 'csv') 

+

138 self.parquet_engine = kwargs.pop('parquet_engine', 'pyarrow') 

+

139 self.ret_val_on_error = kwargs.pop("ret_val_on_error", np.NAN) 

+

140 self.cd = kwargs.pop("cd", os.getcwd()) 

+

141 self.savepath_sim = kwargs.pop('savepath_sim', self.cd) 

+

142 

+

143 if isinstance(self.cd, str): 

+

144 self.cd = pathlib.Path(self.cd) 

+

145 if isinstance(self.savepath_sim, str): 

+

146 self.savepath_sim = pathlib.Path(self.savepath_sim) 

+

147 

+

148 # Setup the logger 

+

149 self.logger = setup_logger(cd=self.cd, name=self.__class__.__name__) 

+

150 

+

151 # Setup default values 

+

152 self.problem: dict = None 

+

153 self.reproduction_files = [] 

+

154 

+

155 @property 

+

156 @abc.abstractmethod 

+

157 def analysis_variables(self) -> List[str]: 

+

158 """ 

+

159 Indicate which variables can 

+

160 be selected for analysis 

+

161 

+

162 :return: 

+

163 A list of strings 

+

164 :rtype: List[str] 

+

165 """ 

+

166 raise NotImplementedError(f'{self.__class__.__name__}.analysis_variables ' 

+

167 f'property is not defined yet') 

+

168 

+

169 @abc.abstractmethod 

+

170 def analysis_function(self, x, y): 

+

171 """ 

+

172 Use the method to analyze the simulation results. 

+

173 

+

174 :param np.array x: 

+

175 the `X` parameter of the method (The NumPy matrix containing the model inputs) 

+

176 :param np.array y: 

+

177 The NumPy array containing the model outputs 

+

178 """ 

+

179 raise NotImplementedError(f'{self.__class__.__name__}.analysis_function ' 

+

180 f'function is not defined yet') 

+

181 

+

182 @abc.abstractmethod 

+

183 def create_sampler_demand(self) -> dict: 

+

184 """ 

+

185 Return the sampler parameters 

+

186 

+

187 :return: 

+

188 dict: A dict with the sampler demand 

+

189 """ 

+

190 raise NotImplementedError(f'{self.__class__.__name__}.analysis_function ' 

+

191 f'function is not defined yet') 

+

192 

+

193 @abc.abstractmethod 

+

194 def generate_samples(self): 

+

195 """ 

+

196 Run the sampler specified by `method` and return the results. 

+

197 

+

198 :return: 

+

199 The list of samples generated as a NumPy array with one row per sample 

+

200 and each row containing one value for each variable name in `problem['names']`. 

+

201 :rtype: np.ndarray 

+

202 """ 

+

203 raise NotImplementedError(f'{self.__class__.__name__}.generate_samples ' 

+

204 f'function is not defined yet') 

+

205 

+

206 def simulate_samples(self, cal_class, **kwargs): 

+

207 """ 

+

208 Creates the samples for the calibration class and simulates them. 

+

209 

+

210 :param cal_class: 

+

211 One class for calibration. Goals and tuner_paras have to be set 

+

212 :keyword scale: 

+

213 Default is False. If True the bounds of the tuner-parameters 

+

214 will be scaled between 0 and 1. 

+

215 

+

216 :return: 

+

217 Returns two lists. First a list with the simulation results for each sample. 

+

218 If save_files is True, the list contains the filepaths to the results. 

+

219 Second a list of the samples. 

+

220 :rtype: list 

+

221 """ 

+

222 scale = kwargs.pop('scale', False) 

+

223 # Set the output interval according to the given Goals 

+

224 mean_freq = cal_class.goals.get_meas_frequency() 

+

225 self.logger.info("Setting output_interval of simulation according " 

+

226 "to measurement target data frequency: %s", mean_freq) 

+

227 self.sim_api.sim_setup.output_interval = mean_freq 

+

228 initial_names = cal_class.tuner_paras.get_names() 

+

229 self.sim_api.set_sim_setup({"start_time": cal_class.start_time, 

+

230 "stop_time": cal_class.stop_time}) 

+

231 self.sim_api.result_names = cal_class.goals.get_sim_var_names() 

+

232 

+

233 self.problem = self.create_problem(cal_class.tuner_paras, scale=scale) 

+

234 samples = self.generate_samples() 

+

235 

+

236 # create df of samples with the result_file_names as the index 

+

237 result_file_names = [f"simulation_{idx}" for idx in range(len(samples))] 

+

238 samples_df = pd.DataFrame(samples, columns=initial_names, index=result_file_names) 

+

239 samples_df.to_csv(self.cd.joinpath(f'samples_{cal_class.name}.csv')) 

+

240 

+

241 # Simulate the current values 

+

242 parameters = [] 

+

243 for initial_values in samples: 

+

244 if scale: 

+

245 initial_values = cal_class.tuner_paras.descale(initial_values) 

+

246 parameters.append(dict(zip(initial_names, initial_values))) 

+

247 

+

248 self.logger.info('Starting %s parameter variations on %s cores', 

+

249 len(samples), self.sim_api.n_cpu) 

+

250 if self.save_files: 

+

251 sim_dir = self.savepath_sim.joinpath(f'simulations_{cal_class.name}') 

+

252 os.makedirs(sim_dir, exist_ok=True) 

+

253 samples_df.to_csv(self.savepath_sim.joinpath(f'samples_{cal_class.name}.csv')) 

+

254 self.logger.info(f'Saving simulation files in: {sim_dir}') 

+

255 _filepaths = self.sim_api.simulate( 

+

256 parameters=parameters, 

+

257 return_option="savepath", 

+

258 savepath=sim_dir, 

+

259 result_file_name=result_file_names, 

+

260 result_file_suffix=self.suffix_files, 

+

261 parquet_engine=self.parquet_engine, 

+

262 fail_on_error=self.fail_on_error, 

+

263 inputs=cal_class.inputs, 

+

264 **cal_class.input_kwargs 

+

265 ) 

+

266 self.reproduction_files.extend(_filepaths) 

+

267 results = _filepaths 

+

268 else: 

+

269 results = self.sim_api.simulate( 

+

270 parameters=parameters, 

+

271 inputs=cal_class.inputs, 

+

272 fail_on_error=self.fail_on_error, 

+

273 **cal_class.input_kwargs 

+

274 ) 

+

275 self.logger.info('Finished %s simulations', len(samples)) 

+

276 return results, samples 

+

277 

+

278 def _check_index(self, tsd: data_types.TimeSeriesData, sim_num=None): 

+

279 freq = tsd.frequency 

+

280 if sim_num is None: 

+

281 sim_num = tsd.filepath.name 

+

282 if freq[0] != self.sim_api.sim_setup.output_interval: 

+

283 self.logger.info( 

+

284 f'The mean value of the frequency from {sim_num} does not match the output ' 

+

285 'interval; the index will be cleaned and spaced equally') 

+

286 tsd.to_datetime_index() 

+

287 tsd.clean_and_space_equally(f'{str(self.sim_api.sim_setup.output_interval * 1000)}ms') 

+

288 tsd.to_float_index() 

+

289 freq = tsd.frequency 

+

290 if freq[1] > 0.0: 

+

291 self.logger.info(f'The standard deviation of the frequency from {sim_num} is too high ' 

+

292 f'and will be rounded to the accuracy of the output interval') 

+

293 tsd.index = np.round(tsd.index.astype("float64"), 

+

294 str(self.sim_api.sim_setup.output_interval)[::-1].find('.')) 

+

295 return tsd 

+
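The reversed-string expression in the rounding step above computes the number of decimal places of the output interval; a small worked example with illustrative values:

interval = 0.25
str(interval)[::-1]            # "52.0"
str(interval)[::-1].find('.')  # -> 2, i.e. round the index to 2 decimal places
np.round(np.array([0.2501, 0.4999]), 2)  # -> array([0.25, 0.5])
# caveat: for an interval without a fractional part, e.g. 1 -> "1",
# find('.') returns -1 and np.round would round to the nearest ten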

296 

+

297 def _single_eval_statistical_measure(self, kwargs_eval): 

+

298 """Evaluates statistical measure of one result""" 

+

299 cal_class = kwargs_eval.pop('cal_class') 

+

300 result = kwargs_eval.pop('result') 

+

301 num_sim = kwargs_eval.pop('sim_num', None) 

+

302 if result is None: 

+

303 verbose_error = {} 

+

304 for goal, weight in zip(cal_class.goals.get_goals_list(), cal_class.goals.weightings): 

+

305 verbose_error[goal] = (weight, self.ret_val_on_error) 

+

306 return self.ret_val_on_error, verbose_error 

+

307 result = self._check_index(result, num_sim) 

+

308 cal_class.goals.set_sim_target_data(result) 

+

309 cal_class.goals.set_relevant_time_intervals(cal_class.relevant_intervals) 

+

310 # Evaluate the current objective 

+

311 total_res, verbose_calculation = cal_class.goals.eval_difference(verbose=True) 

+

312 return total_res, verbose_calculation 

+

313 

+

314 def eval_statistical_measure(self, cal_class, results, verbose=True): 

+

315 """Evaluates statistical measures of results on single core""" 

+

316 self.logger.info('Starting evaluation of statistical measure') 

+

317 output = [] 

+

318 list_output_verbose = [] 

+

319 for i, result in enumerate(results): 

+

320 total_res, verbose_calculation = self._single_eval_statistical_measure( 

+

321 {'cal_class': cal_class, 'result': result, 'sim_num': f'simulation_{i}'} 

+

322 ) 

+

323 output.append(total_res) 

+

324 list_output_verbose.append(verbose_calculation) 

+

325 if verbose: 

+

326 # restructure output_verbose 

+

327 output_verbose = _restruct_verbose(list_output_verbose) 

+

328 return np.asarray(output), output_verbose 

+

329 return np.asarray(output) 

+

330 

+

331 def _single_load_eval_file(self, kwargs_load_eval): 

+

332 """For multiprocessing""" 

+

333 filepath = kwargs_load_eval.pop('filepath') 

+

334 _result = _load_single_file(filepath, self.parquet_engine) 

+

335 kwargs_load_eval.update({'result': _result}) 

+

336 total_res, verbose_calculation = self._single_eval_statistical_measure(kwargs_load_eval) 

+

337 return total_res, verbose_calculation 

+

338 

+

339 def _mp_load_eval(self, _filepaths, cal_class, n_cpu): 

+

340 """ 

+

341 Loading and evaluating the statistical measure of saved simulation files on multiple cores 

+

342 """ 

+

343 self.logger.info(f'Load files and evaluate statistical measure on {n_cpu} processes.') 

+

344 kwargs_load_eval = [] 

+

345 for filepath in _filepaths: 

+

346 kwargs_load_eval.append({'filepath': filepath, 'cal_class': cal_class}) 

+

347 output_array = [] 

+

348 list_output_verbose = [] 

+

349 with mp.Pool(processes=n_cpu) as pool: 

+

350 for total, verbose in pool.imap(self._single_load_eval_file, kwargs_load_eval): 

+

351 output_array.append(total) 

+

352 list_output_verbose.append(verbose) 

+

353 output_array = np.asarray(output_array) 

+

354 output_verbose = _restruct_verbose(list_output_verbose) 

+

355 return output_array, output_verbose 

+

356 

+

357 def _load_eval(self, _filepaths, cal_class, n_cpu): 

+

358 """ 

+

359 Loading and evaluating the statistical measure of saved simulation files. 

+

360 Single- or multiprocessing possible with definition of n_cpu. 

+

361 """ 

+

362 if n_cpu == 1: 

+

363 results = _load_files(_filepaths, self.parquet_engine) 

+

364 output_array, output_verbose = self.eval_statistical_measure( 

+

365 cal_class=cal_class, 

+

366 results=results 

+

367 ) 

+

368 return output_array, output_verbose 

+

369 output_array, output_verbose = self._mp_load_eval(_filepaths, cal_class, n_cpu) 

+

370 return output_array, output_verbose 

+

371 

+

372 def run(self, calibration_classes, merge_multiple_classes=True, **kwargs): 

+

373 """ 

+

374 Execute the sensitivity analysis for each class and 

+

375 return the result. 

+

376 

+

377 :param CalibrationClass,list calibration_classes: 

+

378 Either one or multiple classes for calibration with same tuner-parameters. 

+

379 :param bool merge_multiple_classes: 

+

380 Default True. If False, the given list of calibration-classes 

+

381 is handled as-is. This means if you pass two CalibrationClass objects 

+

382 with the same name (e.g. "device on"), the calibration process will run 

+

383 for both these classes stand-alone. 

+

384 This will automatically yield an intersection of tuner-parameters, however it may 

+

385 have advantages in some cases. 

+

386 :keyword bool verbose: 

+

387 Default False. If True, in addition to the combined Goals of the Classes 

+

388 (saved under index Goal: all), the sensitivity measures of the individual 

+

389 Goals will also be calculated and returned. 

+

390 :keyword scale: 

+

391 Default is False. If True the bounds of the tuner-parameters 

+

392 will be scaled between 0 and 1. 

+

393 :keyword bool use_first_sim: 

+

394 Default False. If True, the simulations of the first calibration class will be used for 

+

395 all other calibration classes with their relevant time intervals. 

+

396 The simulations must be stored on a hard-drive, so it must be used with 

+

397 either save_files or load_files. 

+

398 :keyword int n_cpu: 

+

399 Default is 1. The number of processes to use for the evaluation of the statistical 

+

400 measure. For n_cpu > 1 only one simulation file is loaded at once in a process and 

+

401 dumped directly after the evaluation of the statistical measure, 

+

402 so that only minimal memory is used. 

+

403 Use this option for large analyses. 

+

404 Only implemented for save_files=True or load_sim_files=True. 

+

405 :keyword bool load_sim_files: 

+

406 Default False. If True, no new simulations are done and old simulations are loaded. 

+

407 The simulations and corresponding samples will be loaded from self.savepath_sim like 

+

408 they were saved from self.save_files. Currently, the name of the sim folder must be 

+

409 "simulations_CAL_CLASS_NAME" and for the samples "samples_CAL_CLASS_NAME". 

+

410 The usage of the same simulations for different 

+

411 calibration classes is not supported yet. 

+

412 :keyword bool save_results: 

+

413 Default True. If True, all results are saved as a csv in cd. 

+

414 (samples, statistical measures and analysis variables). 

+

415 :keyword bool plot_result: 

+

416 Default True. If True, the results will be plotted. 

+

417 :return: 

+

418 Returns a pandas.DataFrame. The DataFrame has a Multiindex with the 

+

419 levels Class, Goal and Analysis variable. The Goal name of combined goals is 'all'. 

+

420 The variables are the tuner-parameters. 

+

421 For the Sobol Method and calc_second_order returns a tuple of DataFrames (df_1, df_2) 

+

422 where df_2 contains the second order analysis variables and has an extra index level 

+

423 Interaction, which also contains the variables. 

+

424 :rtype: pandas.DataFrame 

+

425 """ 

+

426 verbose = kwargs.pop('verbose', False) 

+

427 scale = kwargs.pop('scale', False) 

+

428 use_first_sim = kwargs.pop('use_first_sim', False) 

+

429 n_cpu = kwargs.pop('n_cpu', 1) 

+

430 save_results = kwargs.pop('save_results', True) 

+

431 plot_result = kwargs.pop('plot_result', True) 

+

432 load_sim_files = kwargs.pop('load_sim_files', False) 

+

433 # Check correct input 

+

434 calibration_classes = utils.validate_cal_class_input(calibration_classes) 

+

435 # Merge the classes for avoiding possible intersection of tuner-parameters 

+

436 if merge_multiple_classes: 

+

437 calibration_classes = data_types.merge_calibration_classes(calibration_classes) 

+

438 

+

439 # Check n_cpu 

+

440 if n_cpu > mp.cpu_count(): 

+

441 raise ValueError(f"Given n_cpu '{n_cpu}' is greater " 

+

442 "than the available number of " 

+

443 f"cpus on your machine '{mp.cpu_count()}'") 

+

444 

+

445 # Check if the usage of the simulations from the first calibration class for all is possible 

+

446 if use_first_sim: 

+

447 if not self.save_files and not load_sim_files: 

+

448 raise AttributeError('To use the simulations of the first calibration class ' 

+

449 'for all classes the simulation files must be saved. ' 

+

450 'Either set save_files=True or load already existing files ' 

+

451 'with load_sim_files=True.') 

+

452 start_time = 0 

+

453 stop_time = 0 

+

454 for idx, cal_class in enumerate(calibration_classes): 

+

455 if idx == 0: 

+

456 start_time = cal_class.start_time 

+

457 stop_time = cal_class.stop_time 

+

458 continue 

+

459 if start_time > cal_class.start_time or stop_time < cal_class.stop_time: 

+

460 raise ValueError(f'To use the simulations of the first calibration class ' 

+

461 f'for all classes the start and stop times of the other ' 

+

462 f'classes must be in the interval [{start_time}, {stop_time}] ' 

+

463 f'of the first calibration class.') 

+

464 

+

465 all_results = [] 

+

466 for idx, cal_class in enumerate(calibration_classes): 

+

467 

+

468 self.logger.info('Start sensitivity analysis of class: %s, ' 

+

469 'Time-Interval: %s-%s s', cal_class.name, 

+

470 cal_class.start_time, cal_class.stop_time) 

+

471 

+

472 # Generate list with metrics of every parameter variation 

+

473 results_goals = {} 

+

474 if load_sim_files: 

+

475 self.problem = self.create_problem(cal_class.tuner_paras, scale=scale) 

+

476 if use_first_sim: 

+

477 class_name = calibration_classes[0].name 

+

478 else: 

+

479 class_name = cal_class.name 

+

480 sim_dir = self.savepath_sim.joinpath(f'simulations_{class_name}') 

+

481 samples_path = self.savepath_sim.joinpath(f'samples_{class_name}.csv') 

+

482 self.logger.info(f'Loading samples from {samples_path}') 

+

483 samples = pd.read_csv(samples_path, 

+

484 header=0, 

+

485 index_col=0) 

+

486 samples = samples.to_numpy() 

+

487 result_file_names = [f"simulation_{idx}.{self.suffix_files}" for idx in 

+

488 range(len(samples))] 

+

489 _filepaths = [sim_dir.joinpath(result_file_name) for result_file_name in 

+

490 result_file_names] 

+

491 self.logger.info(f'Loading simulation files from {sim_dir}') 

+

492 output_array, output_verbose = self._load_eval(_filepaths, cal_class, n_cpu) 

+

493 else: 

+

494 results, samples = self.simulate_samples( 

+

495 cal_class=cal_class, 

+

496 scale=scale 

+

497 ) 

+

498 if self.save_files: 

+

499 output_array, output_verbose = self._load_eval(results, cal_class, n_cpu) 

+

500 else: 

+

501 output_array, output_verbose = self.eval_statistical_measure( 

+

502 cal_class=cal_class, 

+

503 results=results 

+

504 ) 

+

505 if use_first_sim: 

+

506 load_sim_files = True 

+

507 

+

508 # combine output_array and output_verbose 

+

509 # set key for output_array depending on one or multiple goals 

+

510 stat_mea = {'all': output_array} 

+

511 if len(output_verbose) == 1: 

+

512 stat_mea = output_verbose 

+

513 if len(output_verbose) > 1 and verbose: 

+

514 stat_mea.update(output_verbose) 

+

515 

+

516 # save statistical measure and corresponding samples for each cal_class in cd 

+

517 if save_results: 

+

518 result_file_names = [f"simulation_{idx}" for idx in range(len(output_array))] 

+

519 stat_mea_df = pd.DataFrame(stat_mea, index=result_file_names) 

+

520 savepath_stat_mea = self.cd.joinpath( 

+

521 f'{cal_class.goals.statistical_measure}_{cal_class.name}.csv') 

+

522 stat_mea_df.to_csv(savepath_stat_mea) 

+

523 self.reproduction_files.append(savepath_stat_mea) 

+

524 samples_df = pd.DataFrame(samples, columns=cal_class.tuner_paras.get_names(), 

+

525 index=result_file_names) 

+

526 savepath_samples = self.cd.joinpath(f'samples_{cal_class.name}.csv') 

+

527 samples_df.to_csv(savepath_samples) 

+

528 self.reproduction_files.append(savepath_samples) 

+

529 

+

530 self.logger.info('Starting calculation of analysis variables') 

+

531 for key, val in stat_mea.items(): 

+

532 result_goal = self.analysis_function( 

+

533 x=samples, 

+

534 y=val 

+

535 ) 

+

536 results_goals[key] = result_goal 

+

537 all_results.append(results_goals) 

+

538 self.logger.info('Finished sensitivity analysis of class: %s, ' 

+

539 'Time-Interval: %s-%s s', cal_class.name, 

+

540 cal_class.start_time, cal_class.stop_time) 

+

541 result = self._conv_local_results(results=all_results, 

+

542 local_classes=calibration_classes) 

+

543 if save_results: 

+

544 self._save(result) 

+

545 if plot_result: 

+

546 self.plot(result) 

+

547 return result, calibration_classes 

+
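A minimal end-to-end sketch of calling run() (sim_api and cal_class are assumed to be configured elsewhere; SobolAnalyzer is one concrete subclass, shown further down in this report, and its import path is an assumption):

from aixcalibuha.sensitivity_analysis import SobolAnalyzer  # import path assumed

sen_analyzer = SobolAnalyzer(
    sim_api=sim_api,   # an already configured ebcpy SimulationAPI (assumption)
    num_samples=8,
    save_files=True,
)
result, classes = sen_analyzer.run(
    calibration_classes=cal_class,  # a single CalibrationClass or a list
    verbose=True,
    plot_result=False,
)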

548 

+

549 def _save(self, result: pd.DataFrame, time_dependent: bool = False): 

+

550 """ 

+

551 Saves the result DataFrame of run and run_time_dependent. 

+

552 Needs to be overwritten for Sobol results. 

+

553 """ 

+

554 if time_dependent: 

+

555 savepath_result = self.cd.joinpath(f'{self.__class__.__name__}_results_time.csv') 

+

556 else: 

+

557 savepath_result = self.cd.joinpath(f'{self.__class__.__name__}_results.csv') 

+

558 result.to_csv(savepath_result) 

+

559 self.reproduction_files.append(savepath_result) 

+

560 

+

561 @staticmethod 

+

562 def create_problem(tuner_paras, scale=False) -> dict: 

+

563 """Create function for later access if multiple calibration-classes are used.""" 

+

564 num_vars = len(tuner_paras.get_names()) 

+

565 bounds = np.array(tuner_paras.get_bounds()) 

+

566 if scale: 

+

567 bounds = [np.zeros_like(bounds[0]), np.ones_like(bounds[1])] 

+

568 problem = {'num_vars': num_vars, 

+

569 'names': tuner_paras.get_names(), 

+

570 'bounds': np.transpose(bounds)} 

+

571 return problem 

+
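A sketch of the SALib problem dict this builds, assuming tuner_paras.get_bounds() returns a (lower_bounds, upper_bounds) pair and using two hypothetical parameter names:

import numpy as np

# names ["heatPump.n", "vol.V"] with bounds ([0, 1], [10, 5]) would yield:
problem = {
    "num_vars": 2,
    "names": ["heatPump.n", "vol.V"],
    "bounds": np.transpose([[0, 1], [10, 5]]),  # -> [[0, 10], [1, 5]]
}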

572 

+

573 @staticmethod 

+

574 def select_by_threshold(calibration_classes, result, analysis_variable, threshold): 

+

575 """ 

+

576 Automatically select sensitive tuner parameters based on a given threshold 

+

577 of a given analysis variable from a sensitivity result. 

+

578 Uses only the combined goals. 

+

579 

+

580 :param list calibration_classes: 

+

581 List of aixcalibuha.data_types.CalibrationClass objects that you want to 

+

582 automatically select sensitive tuner-parameters. 

+

583 :param pd.DataFrame result: 

+

584 Result object of sensitivity analysis run 

+

585 :param str analysis_variable: 

+

586 Analysis variable to use for the selection 

+

587 :param float threshold: 

+

588 Minimal required value of given key 

+

589 :return: list calibration_classes 

+

590 """ 

+

591 for cal_class in calibration_classes: 

+

592 first_goal = result.index.get_level_values(1)[0] 

+

593 class_result = result.loc[cal_class.name, first_goal, analysis_variable] 

+

594 tuner_paras = copy.deepcopy(cal_class.tuner_paras) 

+

595 select_names = class_result[class_result < threshold].index.values 

+

596 tuner_paras.remove_names(select_names) 

+

597 if not tuner_paras.get_names(): 

+

598 raise ValueError( 

+

599 'Automatic selection removed all tuner parameter ' 

+

600 f'from class {cal_class.name} after ' 

+

601 'SensitivityAnalysis was done. Please adjust the ' 

+

602 'threshold in json or manually choose tuner ' 

+

603 'parameters for the calibration.') 

+

604 # cal_class.set_tuner_paras(tuner_paras) 

+

605 cal_class.tuner_paras = tuner_paras 

+

606 return calibration_classes 

+

607 

+

608 @staticmethod 

+

609 def select_by_threshold_verbose(calibration_class: CalibrationClass, 

+

610 result: pd.DataFrame, 

+

611 analysis_variable: str, 

+

612 threshold: float, 

+

613 calc_names_for_selection: List[str] = None): 

+

614 """ 

+

615 Select tuner-parameters of single calibration class with verbose sensitivity results. 

+

616 This function selects tuner-parameters if their sensitivity is equal to or greater 

+

617 than the threshold in just one target value of one calibration class in the 

+

618 sensitivity result. This can be more robust because a small sensitivity in one target 

+

619 value or state of the system can mean that the parameter can still be calibrated in 

+

620 a global calibration class which calibrates multiple states and target values at 

+

621 the same time, where it does not directly show the same sensitivity as in the isolated 

+

622 view of a calibration class for only one state. 

+

623 

+

624 :param CalibrationClass calibration_class: 

+

625 The calibration class from which the tuner parameters will be selected. 

+

626 :param pd.DataFrame result: 

+

627 Sensitivity results to use for the selection. Can include multiple classes. 

+

628 :param str analysis_variable: 

+

629 The analysis variable to use for the selection. 

+

630 :param float threshold: 

+

631 Minimal required value of given analysis variable. 

+

632 :param List[str] calc_names_for_selection: 

+

633 Specifies which calibration classes in the sensitivity results will be used for 

+

634 the selection. Default are all classes. 

+

635 """ 

+

636 if Counter(calibration_class.tuner_paras.get_names()) != Counter(list(result.columns)): 

+

637 raise NameError("The tuner-parameters of the calibration class do not " 

+

638 "match the tuner-parameters in the sensitivity result. " 

+

639 "They have to match.") 

+

640 

+

641 result = result.loc[:, :, analysis_variable] 

+

642 calc_names_results = result.index.get_level_values("Class").unique() 

+

643 if calc_names_for_selection: 

+

644 for cal_class in calc_names_for_selection: 

+

645 if cal_class not in calc_names_results: 

+

646 raise NameError(f"The calibration class name {cal_class} " 

+

647 f"does not match any class name " 

+

648 f"in the given sensitivity result.") 

+

649 result = result.loc[calc_names_for_selection, :, :] 

+

650 

+

651 selected_tuners = (result >= threshold).any() 

+

652 

+

653 remove_tuners = [] 

+

654 for tuner, selected in selected_tuners.items(): 

+

655 if not selected: 

+

656 remove_tuners.append(tuner) 

+

657 tuner_paras = copy.deepcopy(calibration_class.tuner_paras) 

+

658 tuner_paras.remove_names(remove_tuners) 

+

659 if not tuner_paras.get_names(): 

+

660 raise ValueError("Threshold too small. All tuner-parameters would be removed.") 

+

661 calibration_class.tuner_paras = tuner_paras 

+

662 return calibration_class 

+
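A minimal numeric sketch of the core selection step above, with invented sensitivity values:

import pandas as pd

# Hypothetical S1 results: rows are (Class, Goal), columns are tuner-parameters
result = pd.DataFrame(
    {"heatPump.n": [0.60, 0.02], "vol.V": [0.01, 0.03]},
    index=pd.MultiIndex.from_tuples([("on", "T_out"), ("on", "P_el")]),
)
selected_tuners = (result >= 0.05).any()
# heatPump.n ->  True  (kept: sensitive in at least one goal)
# vol.V      ->  False (handed to tuner_paras.remove_names)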

663 

+

664 def run_time_dependent(self, cal_class: CalibrationClass, **kwargs): 

+

665 """ 

+

666 Calculate the time dependent sensitivity for all the single goals in the calibration class. 

+

667 

+

668 :param CalibrationClass cal_class: 

+

669 Calibration class with tuner-parameters to calculate sensitivity for. 

+

670 Can include dummy target data. 

+

671 :keyword scale: 

+

672 Default is False. If True the bounds of the tuner-parameters 

+

673 will be scaled between 0 and 1. 

+

674 :keyword bool load_sim_files: 

+

675 Default False. If True, no new simulations are done and old simulations are loaded. 

+

676 The simulations and corresponding samples will be loaded from self.savepath_sim like 

+

677 they were saved from self.save_files. Currently, the name of the sim folder must be 

+

678 "simulations_CAL_CLASS_NAME" and for the samples "samples_CAL_CLASS_NAME". 

+

679 :keyword bool save_results: 

+

680 Default True. If True, all results are saved as a csv in cd. 

+

681 (samples and analysis variables). 

+

682 :keyword int n_steps: 

+

683 Default is all time steps. If the problem is large, the evaluation of all time steps 

+

684 at once can cause a memory error. Then n_steps defines how many time_steps 

+

685 are evaluated at once in chunks. This increases the needed computation time and 

+

686 the simulation files must be saved. 

+

687 :keyword bool plot_result: 

+

688 Default True. If True, the results will be plotted. 

+

689 :return: 

+

690 Returns a pandas.DataFrame. 

+

691 :rtype: pandas.DataFrame 

+

692 """ 

+

693 scale = kwargs.pop('scale', False) 

+

694 save_results = kwargs.pop('save_results', True) 

+

695 plot_result = kwargs.pop('plot_result', True) 

+

696 load_sim_files = kwargs.pop('load_sim_files', False) 

+

697 n_steps = kwargs.pop('n_steps', 'all') 

+

698 

+

699 self.logger.info("Start time dependent sensitivity analysis.") 

+

700 if load_sim_files: 

+

701 self.problem = self.create_problem(cal_class.tuner_paras, scale=scale) 

+

702 sim_dir = self.savepath_sim.joinpath(f'simulations_{cal_class.name}') 

+

703 samples_path = self.savepath_sim.joinpath(f'samples_{cal_class.name}.csv') 

+

704 samples = pd.read_csv(samples_path, 

+

705 header=0, 

+

706 index_col=0) 

+

707 samples = samples.to_numpy() 

+

708 result_file_names = [f"simulation_{idx}.{self.suffix_files}" for idx in 

+

709 range(len(samples))] 

+

710 _filepaths = [sim_dir.joinpath(result_file_name) for result_file_name in 

+

711 result_file_names] 

+

712 

+

713 sen_time_dependent_list, time_index = self._load_analyze_tsteps(_filepaths=_filepaths, 

+

714 samples=samples, 

+

715 n_steps=n_steps, 

+

716 cal_class=cal_class) 

+

717 sen_time_dependent_df = _restruct_time_dependent(sen_time_dependent_list, time_index) 

+

718 else: 

+

719 results, samples = self.simulate_samples( 

+

720 cal_class=cal_class, 

+

721 scale=scale 

+

722 ) 

+

723 if self.save_files: 

+

724 sen_time_dependent_list, time_index = self._load_analyze_tsteps(_filepaths=results, 

+

725 samples=samples, 

+

726 n_steps=n_steps, 

+

727 cal_class=cal_class) 

+

728 sen_time_dependent_df = _restruct_time_dependent(sen_time_dependent_list, 

+

729 time_index) 

+

730 else: 

+

731 variables = results[0].get_variable_names() 

+

732 time_index = results[0].index.to_numpy() 

+

733 total_result = _concat_all_sims(results) 

+

734 sen_time_dependent_list = [] 

+

735 for time_step in time_index: 

+

736 result_df_tstep = self._analyze_tstep_df(time_step=time_step, 

+

737 tsteps_sim_results=total_result, 

+

738 variables=variables, 

+

739 samples=samples, 

+

740 cal_class=cal_class) 

+

741 sen_time_dependent_list.append(result_df_tstep) 

+

742 sen_time_dependent_df = _restruct_time_dependent(sen_time_dependent_list, 

+

743 time_index) 

+

744 self.logger.info("Finished time dependent sensitivity analysis.") 

+

745 if save_results: 

+

746 self._save(sen_time_dependent_df, time_dependent=True) 

+

747 if plot_result: 

+

748 if isinstance(sen_time_dependent_df, pd.DataFrame): 

+

749 plot_time_dependent(sen_time_dependent_df) 

+

750 else: 

+

751 plot_time_dependent(sen_time_dependent_df[0]) 

+

752 return sen_time_dependent_df 

+

753 

+

754 def _analyze_tstep_df(self, time_step, tsteps_sim_results, variables, samples, cal_class): 

+

755 """Analyze the sensitivity at a single time step.""" 

+

756 result_dict_tstep = {} 

+

757 for var in variables: 

+

758 result_tstep_var = tsteps_sim_results[var].loc[time_step].to_numpy() 

+

759 if np.all(result_tstep_var == result_tstep_var[0]): 

+

760 sen_tstep_var = None 

+

761 else: 

+

762 sen_tstep_var = self.analysis_function( 

+

763 x=samples, 

+

764 y=result_tstep_var 

+

765 ) 

+

766 result_dict_tstep[var] = sen_tstep_var 

+

767 result_df_tstep = self._conv_local_results(results=[result_dict_tstep], 

+

768 local_classes=[cal_class]) 

+

769 return result_df_tstep 

+

770 

+

771 def _load_tsteps_df(self, tsteps, _filepaths): 

+

772 """ 

+

773 Load all simulations and extract and concat the sim results of the time steps in tsteps. 

+

774 """ 

+

775 self.logger.info( 

+

776 f"Loading time steps from {tsteps[0]} to {tsteps[-1]} of the simulation files.") 

+

777 tsteps_sim_results = [] 

+

778 for _filepath in _filepaths: 

+

779 sim = _load_single_file(_filepath) 

+

780 tsteps_sim_results.append(sim.loc[tsteps[0]:tsteps[-1]]) 

+

781 tsteps_sim_results = _concat_all_sims(tsteps_sim_results) 

+

782 return tsteps_sim_results 

+

783 

+

784 def _load_analyze_tsteps(self, _filepaths, samples, n_steps, cal_class): 

+

785 """ 

+

786 Load and analyze all time steps in chunks with n_steps time steps. 

+

787 """ 

+

788 sim1 = _load_single_file(_filepaths[0]) 

+

789 time_index = sim1.index.to_numpy() 

+

790 variables = sim1.get_variable_names() 

+

791 sen_time_dependent_list = [] 

+

792 if n_steps == 'all': 

+

793 list_tsteps = [time_index] 

+

794 elif isinstance(n_steps, int) and not (n_steps <= 0 or n_steps > len(time_index)): 

+

795 list_tsteps = _divide_chunks(time_index, n_steps) 

+

796 else: 

+

797 raise ValueError( 

+

798 f"n_steps can only be between 1 and {len(time_index)} or the string all.") 

+

799 

+

800 for tsteps in list_tsteps: 

+

801 tsteps_sim_results = self._load_tsteps_df(tsteps=tsteps, _filepaths=_filepaths) 

+

802 self.logger.info("Analyzing these time steps.") 

+

803 for tstep in tsteps: 

+

804 result_df_tstep = self._analyze_tstep_df(time_step=tstep, 

+

805 tsteps_sim_results=tsteps_sim_results, 

+

806 variables=variables, 

+

807 samples=samples, 

+

808 cal_class=cal_class) 

+

809 sen_time_dependent_list.append(result_df_tstep) 

+

810 return sen_time_dependent_list, time_index 

+

811 

+

812 def _conv_global_result(self, result: dict, cal_class: CalibrationClass, 

+

813 analysis_variable: str): 

+

814 glo_res_dict = self._get_res_dict(result=result, cal_class=cal_class, 

+

815 analysis_variable=analysis_variable) 

+

816 return pd.DataFrame(glo_res_dict, index=['global']) 

+

817 

+

818 def _conv_local_results(self, results: list, local_classes: list): 

+

819 """ 

+

820 Convert the result dictionaries from SALib of each class and goal into one DataFrame. 

+

821 Overwritten for Sobol. 

+

822 """ 

+

823 _conv_results = [] 

+

824 tuples = [] 

+

825 for class_results, local_class in zip(results, local_classes): 

+

826 for goal, goal_results in class_results.items(): 

+

827 for analysis_var in self.analysis_variables: 

+

828 _conv_results.append(self._get_res_dict(result=goal_results, 

+

829 cal_class=local_class, 

+

830 analysis_variable=analysis_var)) 

+

831 tuples.append((local_class.name, goal, analysis_var)) 

+

832 index = pd.MultiIndex.from_tuples(tuples=tuples, 

+

833 names=['Class', 'Goal', 'Analysis variable']) 

+

834 df = pd.DataFrame(_conv_results, index=index) 

+

835 return df 

+

836 

+

837 @abc.abstractmethod 

+

838 def _get_res_dict(self, result: dict, cal_class: CalibrationClass, analysis_variable: str): 

+

839 """ 

+

840 Convert the result object to a dict with the key 

+

841 being the variable name and the value being the result 

+

842 associated to analysis_variable. 

+

843 """ 

+

844 raise NotImplementedError 

+

845 

+

846 def plot(self, result): 

+

847 """ 

+

848 Plot the results of the sensitivity analysis method from run(). 

+

849 

+

850 :param pd.DataFrame result: 

+

851 Dataframe of the results like from the run() function. 

+

852 :return tuple of matplotlib objects (fig, ax): 

+

853 """ 

+

854 plot_single(result=result) 

+

855 

+

856 @staticmethod 

+

857 def load_from_csv(path): 

+

858 """ 

+

859 Load sensitivity results which were saved with the run() or run_time_dependent() function. 

+

860 

+

861 For second order results use the load_second_order_from_csv() function of the SobolAnalyzer. 

+

862 """ 

+

863 result = pd.read_csv(path, index_col=[0, 1, 2]) 

+

864 return result 

+

865 

+

866 def save_for_reproduction(self, 

+

867 title: str, 

+

868 path: pathlib.Path = None, 

+

869 files: list = None, 

+

870 exclude_sim_files: bool = False, 

+

871 remove_saved_files: bool = False, 

+

872 **kwargs): 

+

873 """ 

+

874 Save the settings of the SenAnalyzer and SimApi in order to 

+

875 reproduce the simulations and sensitivity analysis method. 

+

876 All saved results will be also saved in the reproduction 

+

877 archive. The simulations can be excluded from saving. 

+

878 

+

879 :param str title: 

+

880 Title of the study 

+

881 :param pathlib.Path path: 

+

882 Where to store the .zip file. If not given, self.cd is used. 

+

883 :param list files: 

+

884 List of files to save along the standard ones. 

+

885 Examples would be plots, tables etc. 

+

886 :param bool exclude_sim_files: 

+

887 Default False. If True, the simulation files will not be saved in 

+

888 the reproduction archive. 

+

889 :param bool remove_saved_files: 

+

890 Default False. If True, the result and simulation files will be moved 

+

891 instead of just copied. 

+

892 :param dict kwargs: 

+

893 All keyword arguments except title, files, and path of the function 

+

894 `save_reproduction_archive`. Most importantly, `log_message` may be 

+

895 specified to avoid input during execution. 

+

896 """ 

+

897 if files is None: 

+

898 files = [] 

+

899 

+

900 for file_path in self.reproduction_files: 

+

901 if exclude_sim_files: 

+

902 if 'simulation' in str(file_path): 

+

903 continue 

+

904 filename = "SenAnalyzer" + str(file_path).rsplit(self.cd.name, maxsplit=1)[-1] 

+

905 files.append(CopyFile( 

+

906 sourcepath=file_path, 

+

907 filename=filename, 

+

908 remove=remove_saved_files 

+

909 )) 

+

910 

+

911 return self.sim_api.save_for_reproduction( 

+

912 title=title, 

+

913 path=path, 

+

914 files=files, 

+

915 **kwargs 

+

916 ) 

+
+ + + diff --git a/docs/master/coverage/d_f4434ebd7e0d50e1_sobol_py.html b/docs/master/coverage/d_f4434ebd7e0d50e1_sobol_py.html new file mode 100644 index 00000000..e08b9a0f --- /dev/null +++ b/docs/master/coverage/d_f4434ebd7e0d50e1_sobol_py.html @@ -0,0 +1,290 @@ + + + + + Coverage for aixcalibuha/sensitivity_analysis/sobol.py: 98% + + + + + +
+
+

+ Coverage for aixcalibuha/sensitivity_analysis/sobol.py: + 98% +

+ +

+ 82 statements   + + + +

+

+ « prev     + ^ index     + » next +       + coverage.py v7.4.1, + created at 2024-01-27 10:48 +0000 +

+ +
+
+
+

1""" 

+

2Adds the SobolAnalyzer to the available 

+

3classes of sensitivity analysis. 

+

4""" 

+

5import pandas as pd 

+

6from SALib.sample import sobol 

+

7from SALib.analyze import sobol as analyze_sobol 

+

8import numpy as np 

+

9from aixcalibuha.sensitivity_analysis import SenAnalyzer 

+

10from aixcalibuha import CalibrationClass 

+

11from aixcalibuha.sensitivity_analysis.plotting import plot_single, heatmaps 

+

12 

+

13 

+

14class SobolAnalyzer(SenAnalyzer): 

+

15 """ 

+

16 Sobol method from SALib 

+

17 https://salib.readthedocs.io/en/latest/api.html#sobol-sensitivity-analysis 

+

18 A variance-based method which can compute the sensitivity measures 

+

19 'S1', 'ST' and 'S2' with their confidence intervals. 

+

20 

+

21 Additional arguments: 

+

22 

+

23 :keyword bool calc_second_order: 

+

24 Default True, used for the sobol-method 

+

25 :keyword int seed: 

+

26 Used for the sobol-method 

+

27 """ 

+

28 __analysis_variables = ['S1', 'ST', 'S1_conf', 'ST_conf'] 

+

29 __analysis_variables_1 = ['S1', 'ST', 'S1_conf', 'ST_conf'] 

+

30 __analysis_variables_2 = ['S2', 'S2_conf'] 

+

31 

+

32 def __init__(self, sim_api, **kwargs): 

+

33 # Additional kwarg which changes the possible analysis_variables 

+

34 self.calc_second_order = kwargs.get("calc_second_order", True) 

+

35 if self.calc_second_order: 

+

36 self.__analysis_variables = ['S1', 'ST', 'S1_conf', 'ST_conf', 'S2', 'S2_conf'] 

+

37 # separately store first and total order (1) and second order (2) analysis variables 

+

38 self.av_1_selected = [] 

+

39 self.av_2_selected = [] 

+

40 # Set additional kwargs 

+

41 self.seed = kwargs.pop("seed", None) 

+

42 

+

43 super().__init__( 

+

44 sim_api=sim_api, 

+

45 **kwargs) 

+

46 

+

47 @property 

+

48 def analysis_variables(self): 

+

49 """The analysis variables of the sobol method""" 

+

50 return self.__analysis_variables 

+

51 

+

52 def analysis_function(self, x, y): 

+

53 """ 

+

54 Use the SALib.analyze.sobol method to analyze the simulation results. 

+

55 

+

56 :param np.array x: 

+

57 placeholder for the `X` parameter of the morris method; not used for sobol 

+

58 :param np.array y: 

+

59 The NumPy array containing the model outputs 

+

60 :return: 

+

61 returns the result of the SALib.analyze.sobol method (from the documentation: 

+

62 a dictionary with cols `S1`, `S1_conf`, `ST`, and `ST_conf`, where each entry 

+

63 is a list of size D (the number of parameters) containing the indices in the same 

+

64 order as the parameter file. If calc_second_order is True, the dictionary also 

+

65 contains cols `S2` and `S2_conf`.) 

+

66 """ 

+

67 return analyze_sobol.analyze(self.problem, y, 

+

68 calc_second_order=self.calc_second_order) 

+

69 

+

70 def create_sampler_demand(self): 

+

71 """ 

+

72 Function to create the sampler parameters for the sobol method 

+

73 """ 

+

74 return {'calc_second_order': self.calc_second_order} 

+

75 

+

76 def generate_samples(self): 

+

77 """ 

+

78 Run the sampler for sobol and return the results. 

+

79 

+

80 :return: 

+

81 The list of samples generated as a NumPy array with one row per sample 

+

82 and each row containing one value for each variable name in `problem['names']`. 

+

83 :rtype: np.ndarray 

+

84 """ 

+

85 return sobol.sample(self.problem, 

+

86 N=self.num_samples, 

+

87 **self.create_sampler_demand()) 

+
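How num_samples relates to the number of simulations actually run (this follows SALib's Saltelli/Sobol sampling scheme; treat the exact formula as an assumption if your SALib version differs):

# With D = problem["num_vars"] parameters and N = num_samples:
#   calc_second_order=True  -> N * (2 * D + 2) rows
#   calc_second_order=False -> N * (D + 2)     rows
# e.g. N = 8, D = 3, second order: 8 * (2 * 3 + 2) = 64 simulations
# SALib also expects N to be a power of 2 for this sampler.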

88 

+

89 def _save(self, result: tuple, time_dependent: bool = False): 

+

90 """ 

+

91 Save the results of the run and run_time_dependent function of the SobolAnalyzer. 

+

92 """ 

+

93 if not result[0].empty: 

+

94 super()._save(result=result[0], time_dependent=time_dependent) 

+

95 if time_dependent: 

+

96 savepath_result_2 = self.cd.joinpath( 

+

97 f'{self.__class__.__name__}_results_second_order_time.csv') 

+

98 else: 

+

99 savepath_result_2 = self.cd.joinpath( 

+

100 f'{self.__class__.__name__}_results_second_order.csv') 

+

101 if not result[1].empty: 

+

102 result[1].to_csv(savepath_result_2) 

+

103 self.reproduction_files.append(savepath_result_2) 

+

104 

+

105 def _conv_local_results(self, results: list, local_classes: list): 

+

106 """ 

+

107 Convert the result dictionaries from SALib 

+

108 of each class and goal into a tuple of two DataFrames. 

+

109 First is the single order and second is the second order result. 

+

110 If one of the results is not computed, an empty DataFrame is returned. 

+

111 """ 

+

112 _conv_results = [] 

+

113 _conv_results_2 = [] 

+

114 tuples = [] 

+

115 tuples_2 = [] 

+

116 for class_results, local_class in zip(results, local_classes): 

+

117 for goal, goal_results in class_results.items(): 

+

118 for analysis_var in self.analysis_variables: 

+

119 res_dict = self._get_res_dict(result=goal_results, 

+

120 cal_class=local_class, 

+

121 analysis_variable=analysis_var) 

+

122 if analysis_var in self.__analysis_variables_1: 

+

123 _conv_results.append(res_dict) 

+

124 tuples.append((local_class.name, goal, analysis_var)) 

+

125 elif analysis_var in self.__analysis_variables_2: 

+

126 for tuner_para, res_dict in res_dict.items(): 

+

127 _conv_results_2.append(res_dict) 

+

128 tuples_2.append((local_class.name, goal, analysis_var, tuner_para)) 

+

129 index = pd.MultiIndex.from_tuples(tuples=tuples, 

+

130 names=['Class', 'Goal', 'Analysis variable']) 

+

131 index_2 = pd.MultiIndex.from_tuples(tuples=tuples_2, 

+

132 names=['Class', 'Goal', 'Analysis variable', 

+

133 'Interaction']) 

+

134 df = pd.DataFrame(_conv_results, index=index) 

+

135 df_2 = pd.DataFrame(_conv_results_2, index=index_2) 

+

136 return df, df_2 

+

137 

+

138 def _get_res_dict(self, result: dict, cal_class: CalibrationClass, analysis_variable: str): 

+

139 """ 

+

140 Convert the result object to a dict with the key 

+

141 being the variable name and the value being the result 

+

142 associated to analysis_variable. 

+

143 For second order analysis variables the result is converted to a 

+

144 dict with the key being the variable name and the value being another dict 

+

145 with the variable names as the keys and the result associated to analysis_variable 

+

146 from the interaction between the two variables. 

+

147 """ 

+

148 names = self.create_problem(cal_class.tuner_paras)['names'] 

+

149 if analysis_variable in self.__analysis_variables_1: 

+

150 if result is None: 

+

151 res_dict_1 = {var_name: np.abs(res_val) 

+

152 for var_name, res_val in zip(names, 

+

153 np.zeros(len(names)))} 

+

154 else: 

+

155 res_dict_1 = {var_name: np.abs(res_val) 

+

156 for var_name, res_val in zip(names, 

+

157 result[analysis_variable])} 

+

158 return res_dict_1 

+

159 if analysis_variable in self.__analysis_variables_2: 

+

160 if result is None: 

+

161 res_dict_2 = {var_name: dict(zip(names, np.abs(res_val))) 

+

162 for var_name, res_val in zip(names, 

+

163 np.zeros((len(names), len(names))))} 

+

164 else: 

+

165 result_av = result[analysis_variable] 

+

166 for i, _ in enumerate(result_av): 

+

167 for j, _ in enumerate(result_av): 

+

168 if i > j: 

+

169 result_av[i][j] = result_av[j][i] 

+

170 res_dict_2 = {var_name: dict(zip(names, np.abs(res_val))) 

+

171 for var_name, res_val in zip(names, 

+

172 result_av)} 

+

173 return res_dict_2 

+
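The i > j loop above mirrors SALib's S2 matrix, of which only the upper triangle is filled (the remaining entries are left as NaN), so the resulting dicts are symmetric; a small numeric sketch:

import numpy as np

S2 = np.array([[np.nan, 0.10, 0.05],
               [np.nan, np.nan, 0.20],
               [np.nan, np.nan, np.nan]])
for i in range(len(S2)):
    for j in range(len(S2)):
        if i > j:
            S2[i][j] = S2[j][i]
# S2 is now symmetric: S2[1][0] == 0.10, S2[2][0] == 0.05, S2[2][1] == 0.20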

174 

+

175 def plot(self, result): 

+

176 """ 

+

177 Plot the results of the sensitivity analysis method from run(). 

+

178 

+

179 :param pd.DataFrame result: 

+

180 Dataframe of the results like from the run() function. 

+

181 :return tuple of matplotlib objects (fig, ax): 

+

182 """ 

+

183 plot_single(result=result[0]) 

+

184 heatmaps(result=result[1]) 

+

185 

+

186 @staticmethod 

+

187 def load_second_order_from_csv(path): 

+

188 """ 

+

189 Load second order sensitivity results which were saved with the run() or 

+

190 run_time_dependent() function. 

+

191 """ 

+

192 result = pd.read_csv(path, index_col=[0, 1, 2, 3]) 

+

193 return result 

+
+ + + diff --git a/docs/master/coverage/favicon_32.png b/docs/master/coverage/favicon_32.png new file mode 100644 index 00000000..8649f047 Binary files /dev/null and b/docs/master/coverage/favicon_32.png differ diff --git a/docs/master/coverage/index.html b/docs/master/coverage/index.html new file mode 100644 index 00000000..a46bf8e4 --- /dev/null +++ b/docs/master/coverage/index.html @@ -0,0 +1,200 @@ + + + + + Coverage report + + + + + +
+
+

Coverage report: + 86% +

+ +
+ +
+

+ coverage.py v7.4.1, + created at 2024-01-27 10:48 +0000 +

+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Modulestatementsmissingexcludedcoverage
aixcalibuha/__init__.py400100%
aixcalibuha/calibration/__init__.py200100%
aixcalibuha/calibration/calibrator.py22698057%
aixcalibuha/calibration/multi_class_calibrator.py9811089%
aixcalibuha/data_types.py27237086%
aixcalibuha/sensitivity_analysis/__init__.py600100%
aixcalibuha/sensitivity_analysis/fast.py241096%
aixcalibuha/sensitivity_analysis/morris.py2500100%
aixcalibuha/sensitivity_analysis/pawn.py412295%
aixcalibuha/sensitivity_analysis/plotting.py23516793%
aixcalibuha/sensitivity_analysis/sensitivity_analyzer.py40725594%
aixcalibuha/sensitivity_analysis/sobol.py822098%
aixcalibuha/utils/__init__.py122083%
aixcalibuha/utils/configuration.py5316070%
aixcalibuha/utils/visualizer.py30541187%
Total17922511586%
+

+ No items found using the specified filter. +

+
+ + + diff --git a/docs/master/coverage/keybd_closed.png b/docs/master/coverage/keybd_closed.png new file mode 100644 index 00000000..ba119c47 Binary files /dev/null and b/docs/master/coverage/keybd_closed.png differ diff --git a/docs/master/coverage/keybd_open.png b/docs/master/coverage/keybd_open.png new file mode 100644 index 00000000..a8bac6c9 Binary files /dev/null and b/docs/master/coverage/keybd_open.png differ diff --git a/docs/master/coverage/status.json b/docs/master/coverage/status.json new file mode 100644 index 00000000..327170e6 --- /dev/null +++ b/docs/master/coverage/status.json @@ -0,0 +1 @@ +{"note":"This file is an internal implementation detail to speed up HTML report generation. Its format can change at any time. You might be looking for the JSON report: https://coverage.rtfd.io/cmd.html#cmd-json","format":2,"version":"7.4.1","globals":"781dbb5175afcf9fabf311a56dd15a07","files":{"d_e6a0ac89e88ff9fe___init___py":{"hash":"1cd662932db3b4adf60ae3baabb527b3","index":{"nums":[0,1,4,0,0,0,0,0],"html_filename":"d_e6a0ac89e88ff9fe___init___py.html","relative_filename":"aixcalibuha/__init__.py"}},"d_88c635a12c52f913___init___py":{"hash":"18d8e6adfcbbae5ef2c2a5ee0bd59a37","index":{"nums":[0,1,2,0,0,0,0,0],"html_filename":"d_88c635a12c52f913___init___py.html","relative_filename":"aixcalibuha/calibration/__init__.py"}},"d_88c635a12c52f913_calibrator_py":{"hash":"c43e3a01b250257c2b32b15e1fe7c8d8","index":{"nums":[0,1,226,0,98,0,0,0],"html_filename":"d_88c635a12c52f913_calibrator_py.html","relative_filename":"aixcalibuha/calibration/calibrator.py"}},"d_88c635a12c52f913_multi_class_calibrator_py":{"hash":"42413cd9b7dd504e0b949e1e63983e81","index":{"nums":[0,1,98,0,11,0,0,0],"html_filename":"d_88c635a12c52f913_multi_class_calibrator_py.html","relative_filename":"aixcalibuha/calibration/multi_class_calibrator.py"}},"d_e6a0ac89e88ff9fe_data_types_py":{"hash":"e5a6794e4ab2831e78a0848a9e62e08d","index":{"nums":[0,1,272,0,37,0,0,0],"html_filename":"d_e6a0ac89e88ff9fe_data_types_py.html","relative_filename":"aixcalibuha/data_types.py"}},"d_f4434ebd7e0d50e1___init___py":{"hash":"b2ba495c8784526dffc17857bd06bc72","index":{"nums":[0,1,6,0,0,0,0,0],"html_filename":"d_f4434ebd7e0d50e1___init___py.html","relative_filename":"aixcalibuha/sensitivity_analysis/__init__.py"}},"d_f4434ebd7e0d50e1_fast_py":{"hash":"ea795c2c72a7646431cd00e698421f11","index":{"nums":[0,1,24,0,1,0,0,0],"html_filename":"d_f4434ebd7e0d50e1_fast_py.html","relative_filename":"aixcalibuha/sensitivity_analysis/fast.py"}},"d_f4434ebd7e0d50e1_morris_py":{"hash":"1dfd6076f6224e644798a994d000e6d3","index":{"nums":[0,1,25,0,0,0,0,0],"html_filename":"d_f4434ebd7e0d50e1_morris_py.html","relative_filename":"aixcalibuha/sensitivity_analysis/morris.py"}},"d_f4434ebd7e0d50e1_pawn_py":{"hash":"410cdc67a1ef842886d1b9005b83c344","index":{"nums":[0,1,41,2,2,0,0,0],"html_filename":"d_f4434ebd7e0d50e1_pawn_py.html","relative_filename":"aixcalibuha/sensitivity_analysis/pawn.py"}},"d_f4434ebd7e0d50e1_plotting_py":{"hash":"16399d7e25a3791feca0609cb0326482","index":{"nums":[0,1,235,7,16,0,0,0],"html_filename":"d_f4434ebd7e0d50e1_plotting_py.html","relative_filename":"aixcalibuha/sensitivity_analysis/plotting.py"}},"d_f4434ebd7e0d50e1_sensitivity_analyzer_py":{"hash":"ac696b8d108408239ce75b5b9ba1287c","index":{"nums":[0,1,407,5,25,0,0,0],"html_filename":"d_f4434ebd7e0d50e1_sensitivity_analyzer_py.html","relative_filename":"aixcalibuha/sensitivity_analysis/sensitivity_analyzer.py"}},"d_f4434ebd7e0d50e1_sobol_py":{"hash":"1106a47848637dc2e786953008e657ed
","index":{"nums":[0,1,82,0,2,0,0,0],"html_filename":"d_f4434ebd7e0d50e1_sobol_py.html","relative_filename":"aixcalibuha/sensitivity_analysis/sobol.py"}},"d_c1d4b9f643948c74___init___py":{"hash":"d5ad29b020628935d31b8a25ed2a90cd","index":{"nums":[0,1,12,0,2,0,0,0],"html_filename":"d_c1d4b9f643948c74___init___py.html","relative_filename":"aixcalibuha/utils/__init__.py"}},"d_c1d4b9f643948c74_configuration_py":{"hash":"6c36c49ca6fd82ae5e250b748cf786e6","index":{"nums":[0,1,53,0,16,0,0,0],"html_filename":"d_c1d4b9f643948c74_configuration_py.html","relative_filename":"aixcalibuha/utils/configuration.py"}},"d_c1d4b9f643948c74_visualizer_py":{"hash":"1248db525c512105269dcd94f7a2f4e6","index":{"nums":[0,1,305,1,41,0,0,0],"html_filename":"d_c1d4b9f643948c74_visualizer_py.html","relative_filename":"aixcalibuha/utils/visualizer.py"}}}} \ No newline at end of file diff --git a/docs/master/coverage/style.css b/docs/master/coverage/style.css new file mode 100644 index 00000000..2555fdfe --- /dev/null +++ b/docs/master/coverage/style.css @@ -0,0 +1,309 @@ +@charset "UTF-8"; +/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */ +/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */ +/* Don't edit this .css file. Edit the .scss file instead! */ +html, body, h1, h2, h3, p, table, td, th { margin: 0; padding: 0; border: 0; font-weight: inherit; font-style: inherit; font-size: 100%; font-family: inherit; vertical-align: baseline; } + +body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-size: 1em; background: #fff; color: #000; } + +@media (prefers-color-scheme: dark) { body { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { body { color: #eee; } } + +html > body { font-size: 16px; } + +a:active, a:focus { outline: 2px dashed #007acc; } + +p { font-size: .875em; line-height: 1.4em; } + +table { border-collapse: collapse; } + +td { vertical-align: top; } + +table tr.hidden { display: none !important; } + +p#no_rows { display: none; font-size: 1.2em; } + +a.nav { text-decoration: none; color: inherit; } + +a.nav:hover { text-decoration: underline; color: inherit; } + +.hidden { display: none; } + +header { background: #f8f8f8; width: 100%; z-index: 2; border-bottom: 1px solid #ccc; } + +@media (prefers-color-scheme: dark) { header { background: black; } } + +@media (prefers-color-scheme: dark) { header { border-color: #333; } } + +header .content { padding: 1rem 3.5rem; } + +header h2 { margin-top: .5em; font-size: 1em; } + +header p.text { margin: .5em 0 -.5em; color: #666; font-style: italic; } + +@media (prefers-color-scheme: dark) { header p.text { color: #aaa; } } + +header.sticky { position: fixed; left: 0; right: 0; height: 2.5em; } + +header.sticky .text { display: none; } + +header.sticky h1, header.sticky h2 { font-size: 1em; margin-top: 0; display: inline-block; } + +header.sticky .content { padding: 0.5rem 3.5rem; } + +header.sticky .content p { font-size: 1em; } + +header.sticky ~ #source { padding-top: 6.5em; } + +main { position: relative; z-index: 1; } + +footer { margin: 1rem 3.5rem; } + +footer .content { padding: 0; color: #666; font-style: italic; } + +@media (prefers-color-scheme: dark) { footer .content { color: #aaa; } } + +#index { margin: 1rem 0 0 3.5rem; } + +h1 { font-size: 1.25em; display: inline-block; } + +#filter_container { float: right; margin: 0 2em 0 0; } + +#filter_container input { width: 10em; padding: 0.2em 0.5em; border: 2px 
solid #ccc; background: #fff; color: #000; } + +@media (prefers-color-scheme: dark) { #filter_container input { border-color: #444; } } + +@media (prefers-color-scheme: dark) { #filter_container input { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { #filter_container input { color: #eee; } } + +#filter_container input:focus { border-color: #007acc; } + +header button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; color: inherit; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; } + +@media (prefers-color-scheme: dark) { header button { border-color: #444; } } + +header button:active, header button:focus { outline: 2px dashed #007acc; } + +header button.run { background: #eeffee; } + +@media (prefers-color-scheme: dark) { header button.run { background: #373d29; } } + +header button.run.show_run { background: #dfd; border: 2px solid #00dd00; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.run.show_run { background: #373d29; } } + +header button.mis { background: #ffeeee; } + +@media (prefers-color-scheme: dark) { header button.mis { background: #4b1818; } } + +header button.mis.show_mis { background: #fdd; border: 2px solid #ff0000; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.mis.show_mis { background: #4b1818; } } + +header button.exc { background: #f7f7f7; } + +@media (prefers-color-scheme: dark) { header button.exc { background: #333; } } + +header button.exc.show_exc { background: #eee; border: 2px solid #808080; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.exc.show_exc { background: #333; } } + +header button.par { background: #ffffd5; } + +@media (prefers-color-scheme: dark) { header button.par { background: #650; } } + +header button.par.show_par { background: #ffa; border: 2px solid #bbbb00; margin: 0 .1em; } + +@media (prefers-color-scheme: dark) { header button.par.show_par { background: #650; } } + +#help_panel, #source p .annotate.long { display: none; position: absolute; z-index: 999; background: #ffffcc; border: 1px solid #888; border-radius: .2em; color: #333; padding: .25em .5em; } + +#source p .annotate.long { white-space: normal; float: right; top: 1.75em; right: 1em; height: auto; } + +#help_panel_wrapper { float: right; position: relative; } + +#keyboard_icon { margin: 5px; } + +#help_panel_state { display: none; } + +#help_panel { top: 25px; right: 0; padding: .75em; border: 1px solid #883; color: #333; } + +#help_panel .keyhelp p { margin-top: .75em; } + +#help_panel .legend { font-style: italic; margin-bottom: 1em; } + +.indexfile #help_panel { width: 25em; } + +.pyfile #help_panel { width: 18em; } + +#help_panel_state:checked ~ #help_panel { display: block; } + +kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em .35em; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-weight: bold; background: #eee; border-radius: 3px; } + +#source { padding: 1em 0 1em 3.5rem; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; } + +#source p { position: relative; white-space: pre; } + +#source p * { box-sizing: border-box; } + +#source p .n { float: left; text-align: right; width: 3.5rem; box-sizing: border-box; margin-left: -3.5rem; padding-right: 1em; color: #999; user-select: none; } + +@media (prefers-color-scheme: dark) { #source p .n { color: #777; } } + +#source p .n.highlight { background: #ffdd00; } + +#source p .n a { margin-top: -4em; 
padding-top: 4em; text-decoration: none; color: #999; } + +@media (prefers-color-scheme: dark) { #source p .n a { color: #777; } } + +#source p .n a:hover { text-decoration: underline; color: #999; } + +@media (prefers-color-scheme: dark) { #source p .n a:hover { color: #777; } } + +#source p .t { display: inline-block; width: 100%; box-sizing: border-box; margin-left: -.5em; padding-left: 0.3em; border-left: 0.2em solid #fff; } + +@media (prefers-color-scheme: dark) { #source p .t { border-color: #1e1e1e; } } + +#source p .t:hover { background: #f2f2f2; } + +@media (prefers-color-scheme: dark) { #source p .t:hover { background: #282828; } } + +#source p .t:hover ~ .r .annotate.long { display: block; } + +#source p .t .com { color: #008000; font-style: italic; line-height: 1px; } + +@media (prefers-color-scheme: dark) { #source p .t .com { color: #6a9955; } } + +#source p .t .key { font-weight: bold; line-height: 1px; } + +#source p .t .str { color: #0451a5; } + +@media (prefers-color-scheme: dark) { #source p .t .str { color: #9cdcfe; } } + +#source p.mis .t { border-left: 0.2em solid #ff0000; } + +#source p.mis.show_mis .t { background: #fdd; } + +@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t { background: #4b1818; } } + +#source p.mis.show_mis .t:hover { background: #f2d2d2; } + +@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t:hover { background: #532323; } } + +#source p.run .t { border-left: 0.2em solid #00dd00; } + +#source p.run.show_run .t { background: #dfd; } + +@media (prefers-color-scheme: dark) { #source p.run.show_run .t { background: #373d29; } } + +#source p.run.show_run .t:hover { background: #d2f2d2; } + +@media (prefers-color-scheme: dark) { #source p.run.show_run .t:hover { background: #404633; } } + +#source p.exc .t { border-left: 0.2em solid #808080; } + +#source p.exc.show_exc .t { background: #eee; } + +@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t { background: #333; } } + +#source p.exc.show_exc .t:hover { background: #e2e2e2; } + +@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t:hover { background: #3c3c3c; } } + +#source p.par .t { border-left: 0.2em solid #bbbb00; } + +#source p.par.show_par .t { background: #ffa; } + +@media (prefers-color-scheme: dark) { #source p.par.show_par .t { background: #650; } } + +#source p.par.show_par .t:hover { background: #f2f2a2; } + +@media (prefers-color-scheme: dark) { #source p.par.show_par .t:hover { background: #6d5d0c; } } + +#source p .r { position: absolute; top: 0; right: 2.5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; } + +#source p .annotate { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; color: #666; padding-right: .5em; } + +@media (prefers-color-scheme: dark) { #source p .annotate { color: #ddd; } } + +#source p .annotate.short:hover ~ .long { display: block; } + +#source p .annotate.long { width: 30em; right: 2.5em; } + +#source p input { display: none; } + +#source p input ~ .r label.ctx { cursor: pointer; border-radius: .25em; } + +#source p input ~ .r label.ctx::before { content: "▶ "; } + +#source p input ~ .r label.ctx:hover { background: #e8f4ff; color: #666; } + +@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { background: #0f3a42; } } + +@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { color: #aaa; } } + +#source p input:checked ~ .r 
label.ctx { background: #d0e8ff; color: #666; border-radius: .75em .75em 0 0; padding: 0 .5em; margin: -.25em 0; } + +@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { background: #056; } } + +@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { color: #aaa; } } + +#source p input:checked ~ .r label.ctx::before { content: "▼ "; } + +#source p input:checked ~ .ctxs { padding: .25em .5em; overflow-y: scroll; max-height: 10.5em; } + +#source p label.ctx { color: #999; display: inline-block; padding: 0 .5em; font-size: .8333em; } + +@media (prefers-color-scheme: dark) { #source p label.ctx { color: #777; } } + +#source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #d0e8ff; border-radius: .25em; margin-right: 1.75em; text-align: right; } + +@media (prefers-color-scheme: dark) { #source p .ctxs { background: #056; } } + +#index { font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-size: 0.875em; } + +#index table.index { margin-left: -.5em; } + +#index td, #index th { text-align: right; width: 5em; padding: .25em .5em; border-bottom: 1px solid #eee; } + +@media (prefers-color-scheme: dark) { #index td, #index th { border-color: #333; } } + +#index td.name, #index th.name { text-align: left; width: auto; } + +#index th { font-style: italic; color: #333; cursor: pointer; } + +@media (prefers-color-scheme: dark) { #index th { color: #ddd; } } + +#index th:hover { background: #eee; } + +@media (prefers-color-scheme: dark) { #index th:hover { background: #333; } } + +#index th[aria-sort="ascending"], #index th[aria-sort="descending"] { white-space: nowrap; background: #eee; padding-left: .5em; } + +@media (prefers-color-scheme: dark) { #index th[aria-sort="ascending"], #index th[aria-sort="descending"] { background: #333; } } + +#index th[aria-sort="ascending"]::after { font-family: sans-serif; content: " ↑"; } + +#index th[aria-sort="descending"]::after { font-family: sans-serif; content: " ↓"; } + +#index td.name a { text-decoration: none; color: inherit; } + +#index tr.total td, #index tr.total_dynamic td { font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; } + +#index tr.file:hover { background: #eee; } + +@media (prefers-color-scheme: dark) { #index tr.file:hover { background: #333; } } + +#index tr.file:hover td.name { text-decoration: underline; color: inherit; } + +#scroll_marker { position: fixed; z-index: 3; right: 0; top: 0; width: 16px; height: 100%; background: #fff; border-left: 1px solid #eee; will-change: transform; } + +@media (prefers-color-scheme: dark) { #scroll_marker { background: #1e1e1e; } } + +@media (prefers-color-scheme: dark) { #scroll_marker { border-color: #333; } } + +#scroll_marker .marker { background: #ccc; position: absolute; min-height: 3px; width: 100%; } + +@media (prefers-color-scheme: dark) { #scroll_marker .marker { background: #444; } } diff --git a/docs/master/docs/.buildinfo b/docs/master/docs/.buildinfo index 0cd5a485..79b4de15 100644 --- a/docs/master/docs/.buildinfo +++ b/docs/master/docs/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 
-config: a3fdd33f9c18161ba41c81ab15dfb061 +config: 9c0bb4d7edb15f4b816c6614dd4fb700 tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/master/docs/.doctrees/code/aixcalibuha.calibration.doctree b/docs/master/docs/.doctrees/code/aixcalibuha.calibration.doctree index 35448d95..3413ea62 100644 Binary files a/docs/master/docs/.doctrees/code/aixcalibuha.calibration.doctree and b/docs/master/docs/.doctrees/code/aixcalibuha.calibration.doctree differ diff --git a/docs/master/docs/.doctrees/code/aixcalibuha.sensitivity_analysis.doctree b/docs/master/docs/.doctrees/code/aixcalibuha.sensitivity_analysis.doctree index 4ccc00d9..a088bf03 100644 Binary files a/docs/master/docs/.doctrees/code/aixcalibuha.sensitivity_analysis.doctree and b/docs/master/docs/.doctrees/code/aixcalibuha.sensitivity_analysis.doctree differ diff --git a/docs/master/docs/.doctrees/code/aixcalibuha.utils.doctree b/docs/master/docs/.doctrees/code/aixcalibuha.utils.doctree index f7e2cc93..4d27069b 100644 Binary files a/docs/master/docs/.doctrees/code/aixcalibuha.utils.doctree and b/docs/master/docs/.doctrees/code/aixcalibuha.utils.doctree differ diff --git a/docs/master/docs/.doctrees/environment.pickle b/docs/master/docs/.doctrees/environment.pickle index 98136837..39b94ff4 100644 Binary files a/docs/master/docs/.doctrees/environment.pickle and b/docs/master/docs/.doctrees/environment.pickle differ diff --git a/docs/master/docs/.doctrees/examples.doctree b/docs/master/docs/.doctrees/examples.doctree index 7f66bbba..0808e91f 100644 Binary files a/docs/master/docs/.doctrees/examples.doctree and b/docs/master/docs/.doctrees/examples.doctree differ diff --git a/docs/master/docs/.doctrees/index.doctree b/docs/master/docs/.doctrees/index.doctree index 4788a1ff..e67808e7 100644 Binary files a/docs/master/docs/.doctrees/index.doctree and b/docs/master/docs/.doctrees/index.doctree differ diff --git a/docs/master/docs/.doctrees/version_his.doctree b/docs/master/docs/.doctrees/version_his.doctree index f6149b8c..413650d5 100644 Binary files a/docs/master/docs/.doctrees/version_his.doctree and b/docs/master/docs/.doctrees/version_his.doctree differ diff --git a/docs/master/docs/_modules/aixcalibuha/calibration/calibrator.html b/docs/master/docs/_modules/aixcalibuha/calibration/calibrator.html index e53551f6..277bf690 100644 --- a/docs/master/docs/_modules/aixcalibuha/calibration/calibrator.html +++ b/docs/master/docs/_modules/aixcalibuha/calibration/calibrator.html @@ -3,7 +3,7 @@ - aixcalibuha.calibration.calibrator — AixCaliBuHa 0.3.1 documentation + aixcalibuha.calibration.calibrator — AixCaliBuHa 1.0.0 documentation @@ -34,7 +34,7 @@ AixCaliBuHa
- 0.3 + 1.0
@@ -91,6 +91,7 @@

Source code for aixcalibuha.calibration.calibrator

from typing import Dict from copy import copy import numpy as np +import pandas as pd from ebcpy import data_types, Optimizer from ebcpy.simulationapi import SimulationAPI from aixcalibuha.utils import visualizer, MaxIterationsReached @@ -176,7 +177,7 @@

Source code for aixcalibuha.calibration.calibrator

calibration_class: CalibrationClass, **kwargs): """Instantiate instance attributes""" - #%% Kwargs + # %% Kwargs # Initialize supported keywords with default value # Pop the items so they won't be added when calling the # __init__ of the parent class. Always pop with a default value in case @@ -212,7 +213,7 @@

Source code for aixcalibuha.calibration.calibrator

raise TypeError(f"Given {bool_keyword} is of type " f"{type(keyword_value).__name__} but should be type bool") - #%% Initialize all public parameters + # %% Initialize all public parameters super().__init__(cd, **kwargs) # Set sim_api self.sim_api = sim_api @@ -234,7 +235,7 @@

Source code for aixcalibuha.calibration.calibrator

"stop_time": self.calibration_class.stop_time} ) - #%% Setup the logger + # %% Setup the logger # De-register the logger setup in the optimization class: if self.verbose_logging: self.logger = visualizer.CalibrationVisualizer( @@ -324,13 +325,13 @@

Source code for aixcalibuha.calibration.calibrator

) return self.ret_val_on_error - total_res = self._kpi_and_logging_calculation( + total_res, unweighted_objective = self._kpi_and_logging_calculation( xk_descaled=xk_descaled, counter=self._counter, results=sim_target_data ) - return total_res
+ return total_res, unweighted_objective
[docs] def mp_obj(self, x, *args): # Initialize list for results @@ -349,12 +350,14 @@

Source code for aixcalibuha.calibration.calibrator

xk_descaled_list.append(xk_descaled) # Update Parameters parameter_copy = parameters.copy() - parameter_copy.update({name: value for name, value in zip(initial_names, xk_descaled.values)}) + parameter_copy.update( + {name: value for name, value in zip(initial_names, xk_descaled.values)}) parameter_list.append(parameter_copy) # Simulate if self.save_files: - result_file_names = [f"simulation_{self._counter + idx}" for idx in range(len(parameter_list))] + result_file_names = [f"simulation_{self._counter + idx}" for idx in + range(len(parameter_list))] _filepaths = self.sim_api.simulate( parameters=parameter_list, return_option="savepath", @@ -385,7 +388,7 @@

Source code for aixcalibuha.calibration.calibrator

if result is None: total_res_list[idx] = self.ret_val_on_error continue - total_res = self._kpi_and_logging_calculation( + total_res, unweighted_objective = self._kpi_and_logging_calculation( xk_descaled=xk_descaled_list[idx], counter=self._counter, results=result @@ -451,7 +454,7 @@

Source code for aixcalibuha.calibration.calibrator

f"of iterations {self.max_itercount} has been reached." ) - return total_res + return total_res, unweighted_objective
[docs] def calibrate(self, framework, method=None, **kwargs) -> dict: """ @@ -461,7 +464,7 @@

Source code for aixcalibuha.calibration.calibrator

arguments in Optimizer.optimize(). Look at the docstring in ebcpy to know which options are available. """ - #%% Start Calibration: + # %% Start Calibration: self.at_calibration = True self.logger.log(f"Start calibration of model: {self.sim_api.model_name}" f" with framework-class {self.__class__.__name__}") @@ -498,7 +501,7 @@

Source code for aixcalibuha.calibration.calibrator

"Can't save or return any results." ) - #%% Save the relevant results. + # %% Save the relevant results. self.logger.save_calibration_result(self._current_best_iterate, self.sim_api.model_name, duration=t_cal, @@ -567,7 +570,7 @@

Source code for aixcalibuha.calibration.calibrator

with open(s_path, 'w') as json_file: json.dump(parameter_values, json_file, indent=4)
-
[docs] def validate(self, validation_class: CalibrationClass, calibration_result: Dict): +
[docs] def validate(self, validation_class: CalibrationClass, calibration_result: Dict, verbose=False): """ Validate the given calibration class based on the given values for tuner_parameters. @@ -582,10 +585,10 @@

Source code for aixcalibuha.calibration.calibrator

self.logger.log(f"Start validation of model: {self.sim_api.model_name} with " f"framework-class {self.__class__.__name__}") # Use start-time of calibration class + self.calibration_class = validation_class start_time = self._apply_start_time_method( start_time=self.calibration_class.start_time ) - self.calibration_class = validation_class old_tuner_paras = copy(self.calibration_class.tuner_paras) tuner_values = list(calibration_result.values()) self.calibration_class.tuner_paras = TunerParas( @@ -607,12 +610,29 @@

Source code for aixcalibuha.calibration.calibrator

# Scale the tuner parameters xk = self.tuner_paras.scale(tuner_values) # Evaluate objective - obj = self.obj(xk=xk) + obj, unweighted_objective = self.obj(xk=xk) self.logger.validation_callback_func( obj=obj ) # Reset tuner_parameters to avoid unwanted behaviour self.calibration_class.tuner_paras = old_tuner_paras + if verbose: + weights = [1] + objectives = [obj] + goals = ['all'] + for goal, val in unweighted_objective.items(): + weights.append(val[0]) + objectives.append(val[1]) + goals.append(goal) + index = pd.MultiIndex.from_product( + [[validation_class.name], goals], + names=['Class', 'Goal'] + ) + obj_verbos = pd.DataFrame( + {'weight': weights, validation_class.goals.statistical_measure: objectives}, + index=index + ) + return obj_verbos return obj
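The verbose validation added here returns a pandas DataFrame instead of a single float. A minimal usage sketch, assuming a configured calibrator instance `cal` and a CalibrationClass `validation_class` (both hypothetical names); the framework and method strings are purely illustrative:

    # Run a calibration, then validate it with the new verbose output.
    result = cal.calibrate(framework="scipy_differential_evolution", method="best1bin")
    obj_df = cal.validate(
        validation_class=validation_class,
        calibration_result=result,
        verbose=True,  # returns a (Class, Goal)-indexed DataFrame instead of a float
    )
    # The 'all' row holds the weighted total; the remaining rows hold the
    # per-goal weight and the unweighted statistical measure.
    print(obj_df.loc[(validation_class.name, 'all')])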
def _handle_error(self, error): diff --git a/docs/master/docs/_modules/aixcalibuha/calibration/multi_class_calibrator.html b/docs/master/docs/_modules/aixcalibuha/calibration/multi_class_calibrator.html index aa0a11ab..65f48903 100644 --- a/docs/master/docs/_modules/aixcalibuha/calibration/multi_class_calibrator.html +++ b/docs/master/docs/_modules/aixcalibuha/calibration/multi_class_calibrator.html @@ -3,7 +3,7 @@ - aixcalibuha.calibration.multi_class_calibrator — AixCaliBuHa 0.3.1 documentation + aixcalibuha.calibration.multi_class_calibrator — AixCaliBuHa 1.0.0 documentation @@ -34,7 +34,7 @@ AixCaliBuHa
- 0.3 + 1.0
@@ -257,6 +257,8 @@

Source code for aixcalibuha.calibration.multi_class_calibrator

for cal_run in self._cal_history: for p_name in cal_run['res']['Parameters'].index: parameter_values[p_name] = cal_run['res']['Parameters'][p_name] + for p_name, res_intersection in res_tuner.items(): + parameter_values[p_name] = res_intersection self.save_results(parameter_values=parameter_values, filename='MultiClassCalibrationResult') diff --git a/docs/master/docs/_modules/aixcalibuha/data_types.html b/docs/master/docs/_modules/aixcalibuha/data_types.html index 29dad095..a1d1988b 100644 --- a/docs/master/docs/_modules/aixcalibuha/data_types.html +++ b/docs/master/docs/_modules/aixcalibuha/data_types.html @@ -3,7 +3,7 @@ - aixcalibuha.data_types — AixCaliBuHa 0.3.1 documentation + aixcalibuha.data_types — AixCaliBuHa 1.0.0 documentation @@ -34,7 +34,7 @@ AixCaliBuHa
- 0.3 + 1.0
@@ -92,6 +92,7 @@

Source code for aixcalibuha.data_types

 from ebcpy import TimeSeriesData
 from ebcpy.utils.statistics_analyzer import StatisticsAnalyzer
 from ebcpy.preprocessing import convert_datetime_index_to_float_index
+
 # pylint: disable=I1101
 
 logger = logging.getLogger(__name__)
@@ -227,8 +228,8 @@ 

Source code for aixcalibuha.data_types

         # Set the weightings, if not specified.
         self._num_goals = len(_columns)
         if weightings is None:
-            self._weightings = np.array([1/self._num_goals
-                                         for i in range(self._num_goals)])
+            self.weightings = np.array([1 / self._num_goals
+                                        for i in range(self._num_goals)])
         else:
             if not isinstance(weightings, (list, np.ndarray)):
                 raise TypeError(f"weightings is of type {type(weightings).__name__} "
@@ -237,7 +238,7 @@ 

Source code for aixcalibuha.data_types

                 raise IndexError(f"The given number of weightings ({len(weightings)}) "
                                  f"does not match the number of "
                                  f"goals ({self._num_goals})")
-            self._weightings = np.array(weightings) / sum(weightings)
+            self.weightings = np.array(weightings) / sum(weightings)
 
     def __str__(self):
         """Overwrite string method to present the Goals-Object more
@@ -294,10 +295,8 @@ 

Source code for aixcalibuha.data_types

             )
             # Apply penalty function
             _diff = _diff * penaltyfactor
-
-            _verbose_calculation[self._weightings[i]] = _diff
-            total_difference += self._weightings[i] * _diff
-
+            _verbose_calculation[goal_name] = (self.weightings[i], _diff)
+            total_difference += self.weightings[i] * _diff
         if verbose:
             return total_difference, _verbose_calculation
         return total_difference
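With this change, the verbose output of the goal evaluation maps each goal name to a (weight, difference) tuple, so the weighted total can be recomputed from it. A sketch under the assumption that the enclosing method is Goals.eval_difference (`goals` being a Goals instance):

    # goal_res maps goal name -> (weight, difference)
    total, goal_res = goals.eval_difference(verbose=True)
    # The weighted sum of the per-goal differences reproduces the total.
    recomputed = sum(weight * diff for weight, diff in goal_res.values())
    assert abs(total - recomputed) < 1e-10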
@@ -449,6 +448,7 @@

Source code for aixcalibuha.data_types

     m_flow_2             0.02     0.01     0.1     0.09
     heatConv_a         200.00    10.00   300.0   290.00
     """
+
     def __init__(self, names, initial_values, bounds=None):
         """Initialize class-objects and check correct input."""
         # Check if the given input-parameters are of correct format. If not, raise an error.
@@ -507,7 +507,7 @@ 

Source code for aixcalibuha.data_types

         # If no bounds are given, scaling is not possible--> descaled = scaled
         if self._bounds is None:
             return descaled
-        _scaled = (descaled - self._df["min"])/self._df["scale"]
+        _scaled = (descaled - self._df["min"]) / self._df["scale"]
         if not all((_scaled >= 0) & (_scaled <= 1)):
             warnings.warn("Given descaled values are outside "
                           "of bounds. Automatically limiting "
@@ -527,12 +527,12 @@ 

Source code for aixcalibuha.data_types

         if not self._bounds:
             return scaled
         _scaled = np.array(scaled)
-        if not all((_scaled >= 0-1e4) & (_scaled <= 1+1e4)):
+        if not all((_scaled >= 0 - 1e4) & (_scaled <= 1 + 1e4)):
             warnings.warn("Given scaled values are outside of bounds. "
                           "Automatically limiting the values with "
                           "respect to the bounds.")
         _scaled = np.clip(_scaled, a_min=0, a_max=1)
-        return _scaled*self._df["scale"] + self._df["min"]
+ return _scaled * self._df["scale"] + self._df["min"]
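scale and descale are inverses over the bounds: scale maps a value to (value - min) / (max - min) in [0, 1], and descale maps it back. A small numeric sketch, using the TunerParas constructor shown above:

    import numpy as np
    from aixcalibuha import TunerParas

    tuner = TunerParas(names=["heatConv_a"], initial_values=[200.0],
                       bounds=[(10.0, 300.0)])
    scaled = tuner.scale([200.0])     # (200 - 10) / (300 - 10) ~ 0.655
    descaled = tuner.descale(scaled)  # maps back to 200.0
    assert np.isclose(descaled, 200.0).all()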
@property def bounds(self): @@ -787,7 +787,7 @@

Source code for aixcalibuha.data_types

                                   "inputs": deepcopy(cal_class.inputs),
                                   "input_kwargs": deepcopy(cal_class.input_kwargs)
                                   }
-                                  
+
     # Convert dict to actual calibration-classes
     cal_classes_merged = []
     for _name, values in temp_merged.items():
diff --git a/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/fast.html b/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/fast.html
new file mode 100644
index 00000000..9755762b
--- /dev/null
+++ b/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/fast.html
@@ -0,0 +1,200 @@
+  aixcalibuha.sensitivity_analysis.fast — AixCaliBuHa 1.0.0 documentation

Source code for aixcalibuha.sensitivity_analysis.fast

+"""
+Adds the FASTAnalyzer to the available
+classes of sensitivity analysis.
+"""
+import numpy as np
+from SALib.sample import fast_sampler as fast
+from SALib.analyze import fast as analyze_fast
+from aixcalibuha.sensitivity_analysis import SenAnalyzer
+from aixcalibuha import CalibrationClass
+
+
+
[docs]class FASTAnalyzer(SenAnalyzer): + """ + FAST method from SALib + https://salib.readthedocs.io/en/latest/api.html#fast-fourier-amplitude-sensitivity-test + A variance-based method which can compute the sensitivity measures + 'S1' and 'ST'. + + Additional arguments: + + :keyword int M: + Default 4, used for the fast-method + :keyword seed: + Used for the fast-method + """ + + def __init__(self, sim_api, **kwargs): + super().__init__( + sim_api=sim_api, + **kwargs) + # Set additional kwargs + self.M = kwargs.pop("M", 4) + self.seed = kwargs.pop("seed", None) + + @property + def analysis_variables(self): + """The analysis variables of the FAST method""" + return ['S1', 'ST'] + +
[docs] def analysis_function(self, x, y): + """ + Use the SALib.analyze.fast method to analyze the simulation results. + + :param np.array x: + placeholder for the `X` parameter of the morris method; not used for fast + :param np.array y: + The NumPy array containing the model outputs + :return: + returns the result of the SALib.analyze.fast method (from the documentation: + Returns a dictionary with keys 'S1' and 'ST', where each entry is a list of + size D (the number of parameters) containing the indices in the same order + as the parameter file.) + """ + return analyze_fast.analyze(self.problem, y, + M=self.M)
+ +
[docs] def create_sampler_demand(self): + """ + Function to create the sampler parameters for the fast method + """ + return {'M': self.M}
+ +
[docs] def generate_samples(self): + """ + Run the sampler for fast and return the results. + + :return: + The list of samples generated as a NumPy array with one row per sample + and each row containing one value for each variable name in `problem['names']`. + :rtype: np.ndarray + """ + return fast.sample(self.problem, + N=self.num_samples, + **self.create_sampler_demand())
+ + def _get_res_dict(self, result: dict, cal_class: CalibrationClass, analysis_variable: str): + """ + Convert the result object to a dict with the key + being the variable name and the value being the result + associated to self.analysis_variable. + """ + names = self.create_problem(cal_class.tuner_paras)['names'] + if result is None: + return {var_name: np.abs(res_val) + for var_name, res_val in zip(names, + np.zeros(len(names)))} + return {var_name: np.abs(res_val) + for var_name, res_val in zip(names, + result[analysis_variable])}
+
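A minimal usage sketch for the class above; `sim_api` (an ebcpy SimulationAPI) and `cal_class` (a CalibrationClass) are assumed to be configured elsewhere, and the call follows the common SenAnalyzer run interface:

    from aixcalibuha.sensitivity_analysis import FASTAnalyzer

    fast_analyzer = FASTAnalyzer(
        sim_api=sim_api,   # hypothetical, configured elsewhere
        num_samples=1024,  # base sample size N handed to fast.sample
        M=4,               # interference parameter of the FAST method
    )
    result = fast_analyzer.run(calibration_classes=[cal_class])
    # result contains the 'S1' and 'ST' measures per tuner parameter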
+ + + + \ No newline at end of file diff --git a/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/morris.html b/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/morris.html index aa6c6c1c..d9dd7c74 100644 --- a/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/morris.html +++ b/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/morris.html @@ -3,7 +3,7 @@ - aixcalibuha.sensitivity_analysis.morris — AixCaliBuHa 0.3.1 documentation + aixcalibuha.sensitivity_analysis.morris — AixCaliBuHa 1.0.0 documentation @@ -34,7 +34,7 @@ AixCaliBuHa
- 0.3 + 1.0
@@ -83,6 +83,7 @@

Source code for aixcalibuha.sensitivity_analysis.morris

Adds the MorrisAnalyzer to the available classes of sensitivity analysis. """ +import numpy as np from SALib.sample import morris from SALib.analyze import morris as analyze_morris from aixcalibuha.sensitivity_analysis import SenAnalyzer @@ -91,10 +92,14 @@

Source code for aixcalibuha.sensitivity_analysis.morris

[docs]class MorrisAnalyzer(SenAnalyzer): """ + Morris method from SALib https://salib.readthedocs.io/en/latest/api.html#method-of-morris + An elementary effects (One-At-A-Time) method which computes the sensitivity + measures 'mu', 'mu_star' and 'sigma' with a confidence interval for mu_star. + Additional arguments: :keyword int num_levels: - Default 4, used for the morris-method + Default num_samples, used for the morris-method :keyword optimal_trajectories: Used for the morris-method :keyword bool local_optimization: @@ -105,14 +110,14 @@

Source code for aixcalibuha.sensitivity_analysis.morris

sim_api=sim_api, **kwargs) # Set additional kwargs - self.num_levels = kwargs.pop("num_levels", 4) + self.num_levels = kwargs.pop("num_levels", self.num_samples) self.optimal_trajectories = kwargs.pop("optimal_trajectories", None) self.local_optimization = kwargs.pop("local_optimization", True) @property def analysis_variables(self): """The analysis variables of the morris method""" - return ['mu_star', 'sigma', 'mu_star_conf'] + return ['mu_star', 'mu', 'sigma', 'mu_star_conf']
[docs] def analysis_function(self, x, y): """ @@ -152,13 +157,16 @@

Source code for aixcalibuha.sensitivity_analysis.morris

N=self.num_samples, **self.create_sampler_demand())
- def _get_res_dict(self, result: dict, cal_class: CalibrationClass): + def _get_res_dict(self, result: dict, cal_class: CalibrationClass, analysis_variable: str): """ Convert the result object to a dict with the key being the variable name and the value being the result associated to self.analysis_variable. """ - return dict(zip(result['names'], result[self.analysis_variable]))
+ if result is None: + names = cal_class.tuner_paras.get_names() + return dict(zip(names, np.zeros(len(names)))) + return dict(zip(result['names'], result[analysis_variable]))
diff --git a/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/pawn.html b/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/pawn.html new file mode 100644 index 00000000..06244479 --- /dev/null +++ b/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/pawn.html @@ -0,0 +1,235 @@ + + + + + + aixcalibuha.sensitivity_analysis.pawn — AixCaliBuHa 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for aixcalibuha.sensitivity_analysis.pawn

+"""
+Adds the PAWNAnalyzer to the available
+classes of sensitivity analysis.
+"""
+
+from SALib.sample import sobol
+from SALib.sample import morris
+from SALib.sample import fast_sampler as fast
+from SALib.analyze import pawn as analyze_pawn
+import numpy as np
+from aixcalibuha.sensitivity_analysis import SenAnalyzer
+from aixcalibuha import CalibrationClass
+
+
+
[docs]class PAWNAnalyzer(SenAnalyzer): + """ + PAWN method from SALib https://salib.readthedocs.io/en/latest/api.html#pawn-sensitivity-analysis + Density-based method which computes the PAWN index at 'min', 'max', 'mean', + 'median' and coefficient of variation 'cv'. + + Additional arguments: + + :keyword bool calc_second_order: + Default True, used for the sampler of the sobol-method + :keyword int s: + Default 10, used for the pawn-method. + :keyword str sampler: + Which sampler should be used. Default sobol. + Choose between 'sobol', 'morris' and 'fast'. + :keyword int num_levels: + Default num_samples, used for the sampler of the morris-method. + :keyword optimal_trajectories: + Used for the sampler of the morris-method. + :keyword bool local_optimization: + Default True, used for the sampler of the morris-method. + :keyword int M: + Default 4, used for the sampler of the fast-method. + """ + + def __init__(self, sim_api, **kwargs): + super().__init__( + sim_api=sim_api, + **kwargs) + # Set additional kwargs + self.calc_second_order = kwargs.pop("calc_second_order", True) + self.s = kwargs.pop("s", 10) + self.sampler = kwargs.pop("sampler", 'sobol') + self.num_levels = kwargs.pop("num_levels", self.num_samples) + self.optimal_trajectories = kwargs.pop("optimal_trajectories", None) + self.local_optimization = kwargs.pop("local_optimization", True) + self.M = kwargs.pop("M", 4) + + @property + def analysis_variables(self): + """The analysis variables of the PAWN method""" + return ['minimum', 'mean', 'median', 'maximum', 'CV'] + +
[docs] def analysis_function(self, x, y): + """ + Use the SALib.analyze.pawn method to analyze the simulation results. + + :param np.array x: + The NumPy array containing the model inputs (unlike sobol and fast, + the pawn method uses `X`) + :param np.array y: + The NumPy array containing the model outputs + :return: + returns the result of the SALib.analyze.pawn method (from the documentation: + This implementation reports the PAWN index at the min, mean, median, and + max across the slides/conditioning intervals as well as the coefficient of + variation (``CV``). The median value is the typically reported value. As + the ``CV`` is (standard deviation / mean), it indicates the level of + variability across the slides, with values closer to zero indicating lower + variation.) + """ + return analyze_pawn.analyze(self.problem, x, y, + S=self.s)
+ +
[docs] def create_sampler_demand(self): + """ + Function to create the sampler parameters for the sampler method + """ + if self.sampler == 'sobol': + return {'calc_second_order': self.calc_second_order} + if self.sampler == 'morris': + return {'num_levels': self.num_levels, + 'optimal_trajectories': self.optimal_trajectories, + 'local_optimization': self.local_optimization} + if self.sampler == 'fast': + return {'M': self.M} + raise NotImplementedError(f'{self.sampler} is not implemented yet')
+ +
[docs] def generate_samples(self): + """ + Run the sampler for the selected sampler and return the results. + + :return: + The list of samples generated as a NumPy array with one row per sample + and each row containing one value for each variable name in `problem['names']`. + :rtype: np.ndarray + """ + if self.sampler == 'sobol': + return sobol.sample(self.problem, + N=self.num_samples, + **self.create_sampler_demand()) + if self.sampler == 'morris': + return morris.sample(self.problem, + N=self.num_samples, + **self.create_sampler_demand()) + if self.sampler == 'fast': + return fast.sample(self.problem, + N=self.num_samples, + **self.create_sampler_demand()) + raise NotImplementedError(f'{self.sampler} is not implemented yet')
+ + def _get_res_dict(self, result: dict, cal_class: CalibrationClass, analysis_variable: str): + """ + Convert the result object to a dict with the key + being the variable name and the value being the result + associated to self.analysis_variable. + """ + if result is None: + names = cal_class.tuner_paras.get_names() + return dict(zip(names, np.zeros(len(names)))) + return dict(zip(result['names'], result[analysis_variable]))
+
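Since PAWN only analyzes precomputed samples, the class delegates sampling to one of the other methods through the sampler keyword. A sketch selecting the morris sampler, reusing the hypothetical `sim_api` from the FAST example:

    from aixcalibuha.sensitivity_analysis import PAWNAnalyzer

    pawn_analyzer = PAWNAnalyzer(
        sim_api=sim_api,   # hypothetical, configured elsewhere
        sampler='morris',  # alternatives: 'sobol' (default) and 'fast'
        num_samples=100,
        s=10,              # number of conditioning intervals for PAWN
    )
    samples = pawn_analyzer.generate_samples()  # calls morris.sample(...) internally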
+ + + + \ No newline at end of file diff --git a/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/plotting.html b/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/plotting.html new file mode 100644 index 00000000..0500db09 --- /dev/null +++ b/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/plotting.html @@ -0,0 +1,663 @@ + + + + + + aixcalibuha.sensitivity_analysis.plotting — AixCaliBuHa 1.0.0 documentation + + + + + + + + + + + + + + + + + +

Source code for aixcalibuha.sensitivity_analysis.plotting

+"""
+Module containing functions for plotting sensitivity results
+"""
+import sys
+import pandas as pd
+import numpy as np
+import matplotlib
+import matplotlib.pyplot as plt
+from SALib.plotting.bar import plot as barplot
+from aixcalibuha.utils.visualizer import short_name
+
+
+
[docs]def plot_single(result: pd.DataFrame, + cal_classes: [str] = None, + goals: [str] = None, + max_name_len: int = 15, + **kwargs): + """ + Plot sensitivity results of first and total order analysis variables. + For each calibration class one figure is created, which shows for each goal an axis + with a barplot of the values of the analysis variables. + + :param pd.DataFrame result: + A result from run + :param int max_name_len: + Default is 15. Shortens the parameter names to max_name_len characters. + :param [str] cal_classes: + Default are all possible calibration classes. If a list of + names of calibration classes is given only plots for these + classes are created. + :param [str] goals: + Default are all possible goal names. If a list of specific + goal names is given only these will be plotted. + :keyword bool show_plot: + Default is True. If False, all created plots are not shown. + :keyword ([fig], [ax]) figs_axes: + Default None. Set own figures of subfigures with corresponding axes for customization. + :keyword bool use_suffix: + Default is False. If True, the last part after the last point + of Modelica variables is used for the x ticks. + :return: + Returns all created figures and axes in lists like [fig], [ax] + with shapes (len(cal_classes)), (len(cal_classes), len(goals)) + """ + use_suffix = kwargs.pop('use_suffix', False) + show_plot = kwargs.pop('show_plot', True) + figs_axes = kwargs.pop('figs_axes', None) + _func_name = kwargs.pop('_func_name', 'plot_single') + + # get lists of the calibration classes and their goals in the result dataframe + if cal_classes is None: + cal_classes = _del_duplicates(list(result.index.get_level_values(0))) + if goals is None: + goals = _del_duplicates(list(result.index.get_level_values(1))) + + result = _rename_tuner_names(result, use_suffix, max_name_len, _func_name) + + # plotting with simple plot function of the SALib + figs = [] + axes = [] + for col, cal_class in enumerate(cal_classes): + fig, ax = _create_figs_axes(figs_axes, col, goals, f"Class: {cal_class}") + figs.append(fig) + axes.append(ax) + for row, goal in enumerate(goals): + result_df = result.loc[cal_class, goal] + axes[col][row].grid(True, which='both', axis='y') + barplot(result_df.T, ax=axes[col][row]) + axes[col][row].set_title(f"Goal: {goal}") + axes[col][row].legend() + + if show_plot: + plt.show() + + return figs, axes
+ + +
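Because plot_single accepts pre-built figures via figs_axes, results can be drawn into custom layouts. A sketch assuming `result` is a single-class, two-goal DataFrame from SenAnalyzer.run:

    import matplotlib.pyplot as plt
    from aixcalibuha.sensitivity_analysis.plotting import plot_single

    # One figure per calibration class, one axes per goal (here: 1 class, 2 goals).
    fig, axs = plt.subplots(2, sharex='all', layout="constrained")
    figs, axes = plot_single(
        result=result,             # hypothetical result from SenAnalyzer.run
        figs_axes=([fig], [axs]),  # shapes: (n_classes), (n_classes, n_goals)
        show_plot=False,
    )
    figs[0].savefig("sensitivity_single.png")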
[docs]def plot_second_order(result: pd.DataFrame, + cal_classes: [str] = None, + goals: [str] = None, + max_name_len: int = 15, + **kwargs): + """ + Plot sensitivity results of second order analysis variables. + For each calibration class and goal one figure of a 3d plot is created + with the barplots of the interactions for each parameter. + Only works for more than two parameters. + + :param pd.DataFrame result: + A result from run + :param int max_name_len: + Default is 15. Shortens the parameter names to max_name_len characters. + :param [str] cal_classes: + Default are all possible calibration classes. If a list of + names of calibration classes is given only plots for these + classes are created. + :param [str] goals: + Default are all possible goal names. If a list of specific + goal names is given only these will be plotted. + :keyword bool show_plot: + Default is True. If False, all created plots are not shown. + :keyword bool use_suffix: + Default is False. If True, the last part after the last point + of Modelica variables is used for the x ticks. + :keyword [[fig]] figs: + Default None. Set own figures of subfigures for customization. + Shape (len(cal_classes), len(goals)) + :return: + Returns all created figures and axes in lists like [fig], [ax] + """ + use_suffix = kwargs.pop('use_suffix', False) + show_plot = kwargs.pop('show_plot', True) + figs = kwargs.pop('figs', None) + result = result.fillna(0) + if cal_classes is None: + cal_classes = _del_duplicates(list(result.index.get_level_values(0))) + if goals is None: + goals = _del_duplicates(list(result.index.get_level_values(1))) + + result = _rename_tuner_names(result, use_suffix, max_name_len, "plot_second_order") + + tuner_names = result.columns + if len(tuner_names) < 2: + return None + xticks = np.arange(len(tuner_names)) + + # when the index is not sorted pandas throws a performance warning + result = result.sort_index() + + # plot of S2 without S2_conf + all_figs = [] + all_axes = [] + for class_idx, cal_class in enumerate(cal_classes): + class_figs = [] + class_axes = [] + for goal_idx, goal in enumerate(goals): + if figs is None: + fig = plt.figure() + else: + fig = figs[class_idx][goal_idx] + ax = fig.add_subplot(projection='3d') + for idx, name in enumerate(tuner_names): + ax.bar(tuner_names, + result.loc[cal_class, goal, 'S2', name].to_numpy(), + zs=idx, zdir='y', alpha=0.8) + ax.set_title(f"Class: {cal_class} Goal: {goal}") + ax.set_zlabel('S2 [-]') + ax.set_yticks(xticks) + ax.set_yticklabels(tuner_names) + # rotate tick labels for better readability + plt.setp(ax.get_xticklabels(), rotation=90, ha="right", rotation_mode="anchor") + plt.setp(ax.get_yticklabels(), rotation=90, ha="right", rotation_mode="anchor") + class_figs.append(fig) + class_axes.append(ax) + all_figs.append(class_figs) + all_axes.append(class_axes) + if show_plot: + plt.show() + return all_figs, all_axes
+ + +
[docs]def plot_single_second_order(result: pd.DataFrame, + para_name: str, + cal_classes: [str] = None, + goals: [str] = None, + max_name_len: int = 15, + **kwargs): + """ + Plot the value of S2 from one parameter with all other parameters. + + :param pd.DataFrame result: + Second order result from run. + :param str para_name: + Name of the parameter of which the results should be plotted. + :param [str] cal_classes: + Default are all possible calibration classes. If a list of + names of calibration classes is given only plots for these + classes are created. + :param [str] goals: + Default are all possible goal names. If a list of specific + goal names is given only these will be plotted. + :param int max_name_len: + Default is 15. Shortens the parameter names to max_name_len characters. + :keyword bool show_plot: + Default is True. If False, all created plots are not shown. + :keyword bool use_suffix: + Default is False. If True, the last part after the last point + of Modelica variables is used for the x ticks. + :keyword ([fig], [ax]) figs_axes: + Default None. Set own figures of subfigures with corresponding axes for customization. + :return: + Returns all created figures and axes in lists like [fig], [ax] + with shapes (len(cal_classes)), (len(cal_classes), len(goals)) + """ + use_suffix = kwargs.pop('use_suffix', False) + show_plot = kwargs.pop('show_plot', True) + figs_axes = kwargs.pop('figs_axes', None) + + result = result.loc[:, :, :, para_name][:].fillna(0) + figs, axes = plot_single( + result=result, + show_plot=False, + cal_classes=cal_classes, + goals=goals, + figs_axes=figs_axes, + use_suffix=use_suffix, + max_name_len=max_name_len, + _func_name="plot_single_second_order" + ) + # set new title for the figures of each calibration class + for fig in figs: + fig.suptitle(f"Interaction of {para_name} in class {fig._suptitle.get_text()}") + if show_plot: + plt.show() + return figs, axes
+ + +
[docs]def heatmap(result: pd.DataFrame, + cal_class: str, + goal: str, + ax: matplotlib.axes.Axes = None, + max_name_len: int = 15, + **kwargs): + """ + Plot S2 sensitivity results from one calibration class and goal as a heatmap. + + :param pd.DataFrame result: + A second order result from run + :param str cal_class: + Name of the class to plot S2 from. + :param str goal: + Name of the goal to plot S2 from. + :param matplotlib.axes ax: + Default is None. If an axes is given the heatmap will be plotted on it, else + a new figure and axes is created. + :param int max_name_len: + Default is 15. Shortens the parameter names to max_name_len characters. + :keyword bool show_plot: + Default is True. If False, all created plots are not shown. + :keyword bool use_suffix: + Default is False. If True, only the last suffix of a Modelica variable is displayed. + :return: + Returns axes + """ + use_suffix = kwargs.pop('use_suffix', False) + show_plot = kwargs.pop('show_plot', True) + _func_name = kwargs.pop('_func_name', "heatmap") + + result = _rename_tuner_names(result, use_suffix, max_name_len, _func_name) + if ax is None: + _, ax = plt.subplots(layout="constrained") + data = result.sort_index().loc[cal_class, goal, 'S2'].fillna(0).reindex( + index=result.columns) + image = ax.imshow(data, cmap='Reds') + ax.set_title(f'Class: {cal_class} Goal: {goal}') + ax.set_xticks(np.arange(len(data.columns))) + ax.set_yticks(np.arange(len(data.index))) + ax.set_xticklabels(data.columns) + ax.set_yticklabels(data.index) + ax.spines[:].set_color('black') + plt.setp(ax.get_xticklabels(), rotation=90, ha="right", rotation_mode="anchor") + cbar = ax.figure.colorbar(image, ax=ax) + cbar.ax.set_ylabel("S2", rotation=90) + if show_plot: + plt.show() + return ax
+ + +
[docs]def heatmaps(result: pd.DataFrame, + cal_classes: [str] = None, + goals: [str] = None, + max_name_len: int = 15, + **kwargs): + """ + Plot S2 sensitivity results as a heatmap for multiple + calibration classes and goals in one figure. + + :param pd.DataFrame result: + A second order result from run + :param [str] cal_classes: + Default is a list of all calibration classes in the result. + If a list of classes is given only these classes are plotted. + :param [str] goals: + Default is a list of all goals in the result. + If a list of goals is given only these goals are plotted. + :param int max_name_len: + Default is 15. Shortens the parameter names to max_name_len characters. + :keyword bool show_plot: + Default is True. If False, all created plots are not shown. + :keyword bool use_suffix: + Default is False. If True, only the last suffix of a Modelica variable is displayed. + """ + use_suffix = kwargs.pop('use_suffix', False) + show_plot = kwargs.pop('show_plot', True) + + if cal_classes is None: + cal_classes = result.index.get_level_values("Class").unique() + if goals is None: + goals = result.index.get_level_values("Goal").unique() + + _, axes = plt.subplots(ncols=len(cal_classes), nrows=len(goals), sharex='all', sharey='all', + layout="constrained") + if len(goals) == 1: + axes = [axes] + if len(cal_classes) == 1: + for idx, ax in enumerate(axes): + axes[idx] = [ax] + _func_name = "heatmaps" + for col, class_name in enumerate(cal_classes): + for row, goal_name in enumerate(goals): + heatmap(result, + class_name, + goal_name, + ax=axes[row][col], + show_plot=False, + use_suffix=use_suffix, + max_name_len=max_name_len, + _func_name=_func_name) + _func_name = None + if show_plot: + plt.show()
+ + +
[docs]def plot_time_dependent(result: pd.DataFrame, + parameters: [str] = None, + goals: [str] = None, + analysis_variables: [str] = None, + plot_conf: bool = True, + **kwargs): + """ + Plot time dependent sensitivity results without interactions from run_time_dependent(). + + For each goal one figure is created with one axes for each analysis variable. + In these plots the time dependent sensitivity of the parameters is plotted. + The confidence interval can also be plotted. + + :param pd.DataFrame result: + A result from run_time_dependent without second order results. + :param [str] parameters: + Default all parameters. List of parameters to plot the sensitivity. + :param [str] analysis_variables: + Default all analysis_variables. List of analysis variables to plot. + :param bool plot_conf: + Default True. If true, the confidence intervals for each parameter are plotted. + :param [str] goals: + Default are all possible goal names. If a list of specific + goal names is given only these will be plotted. + :keyword ([fig], [ax]) figs_axes: + Default None. Optional custom figures and axes + (see example for verbose sensitivity analysis). + :return: + Returns all created figures and axes in lists like [fig], [ax] + with shapes (len(goals)), (len(goals), len(analysis_variables)) + :keyword bool show_plot: + Default is True. If False, all created plots are not shown. + :keyword bool use_suffix: + Default is False. If True, the last part after the last point + of Modelica variables is used for the x ticks. + :keyword int max_name_len: + Default is 50. Shortens the parameter names to max_name_len characters. + """ + use_suffix = kwargs.pop('use_suffix', False) + max_name_len = kwargs.pop('max_name_len', 50) + show_plot = kwargs.pop('show_plot', True) + figs_axes = kwargs.pop('figs_axes', None) + + if goals is None: + goals = _del_duplicates(list(result.index.get_level_values(0))) + all_analysis_variables = _del_duplicates(list(result.index.get_level_values(1))) + if analysis_variables is None: + analysis_variables = [av for av in all_analysis_variables if '_conf' not in av] + if parameters is None: + parameters = result.columns.values + + result = _rename_tuner_names(result, use_suffix, max_name_len, "plot_time_dependent") + + renamed_parameters = [_format_name(para, use_suffix, max_name_len) for para in parameters] + + figs = [] + axes = [] + for idx_goal, goal in enumerate(goals): + fig, ax = _create_figs_axes(figs_axes, idx_goal, analysis_variables, f"Goal: {goal}") + figs.append(fig) + axes.append(ax) + for idx_av, analysis_var in enumerate(analysis_variables): + axes[idx_goal][idx_av].plot(result.loc[goal, analysis_var][renamed_parameters]) + axes[idx_goal][idx_av].set_ylabel(analysis_var) + axes[idx_goal][idx_av].legend(renamed_parameters) + if plot_conf and analysis_var + '_conf' in all_analysis_variables: + for para in renamed_parameters: + y = result.loc[goal, analysis_var][para] + x = y.index.to_numpy() + conv_int = result.loc[goal, analysis_var + '_conf'][para] + large_values_indices = conv_int[conv_int > 1].index + if list(large_values_indices): + sys.stderr.write( + f"plot_time_dependent INFO:" + f"Confidence interval for {goal}, {analysis_var}, {para} was at the " + f"following times {list(large_values_indices)} larger than 1 " + f"and is smoothed out in the plot.\n") + for idx in large_values_indices: + prev_idx = conv_int.index.get_loc(idx) - 1 + if prev_idx >= 0: + conv_int.iloc[conv_int.index.get_loc(idx)] = conv_int.iloc[prev_idx] + else: + 
conv_int.iloc[conv_int.index.get_loc(idx)] = 1 + axes[idx_goal][idx_av].fill_between(x, (y - conv_int), (y + conv_int), alpha=.1) + axes[idx_goal][-1].set_xlabel('time') + if show_plot: + plt.show() + return figs, axes
+ + +
[docs]def plot_parameter_verbose(parameter: str, + single_result: pd.DataFrame, + second_order_result: pd.DataFrame = None, + goals: [str] = None, + **kwargs): + """ + Plot all time dependent sensitivity measures for one parameter. + For each goal an axes is created within one figure. + + If second_order_results from SobolAnalyzer.run_time_dependent are given + the S2 results of the interaction with each other parameter are added on top + of each other and the first order result. + + :param str parameter: + Parameter to plot all sensitivity results for. If use_suffix=True, then + the name must also be only the suffix. + :param pd.DataFrame single_result: + First and total order result from run_time_dependent. + :param pd.DataFrame second_order_result: + Default None. Second order result of SobolAnalyzer.run_time_dependent. + :param [str] goals: + Default are all possible goal names. If a list of specific + goal names is given only these will be plotted. + :keyword (fig, [ax]) fig_axes: + Default None. Optional custom figures and axes + (see example for verbose sensitivity analysis). + :return: + Returns all created figures and axes in lists like fig, [ax] + with shape (len(goals)) for the axes list + :keyword bool show_plot: + Default is True. If False, all created plots are not shown. + :keyword bool use_suffix: + Default is False. If True, the last part after the last point + of Modelica variables is used for the x ticks. + :keyword int max_name_len: + Default is 50. Shortens the parameter names to max_name_len characters. + """ + use_suffix = kwargs.pop('use_suffix', False) + max_name_len = kwargs.pop('max_name_len', 50) + show_plot = kwargs.pop('show_plot', True) + fig_axes = kwargs.pop('fig_axes', None) + + if goals is None: + goals = _del_duplicates(list(single_result.index.get_level_values(0))) + all_analysis_variables = _del_duplicates(list(single_result.index.get_level_values(1))) + analysis_variables = [av for av in all_analysis_variables if '_conf' not in av] + + renamed_parameter = _format_name(parameter, use_suffix, max_name_len) + + single_result = _rename_tuner_names(single_result, use_suffix, max_name_len, + "plot_parameter_verbose") + if second_order_result is not None: + second_order_result = _rename_tuner_names(second_order_result, use_suffix, max_name_len) + + if fig_axes is None: + fig, ax = plt.subplots(len(goals), sharex='all', layout="constrained") + else: + fig = fig_axes[0] + ax = fig_axes[1] + fig.suptitle(f"Parameter: {parameter}") + if not isinstance(ax, np.ndarray): + ax = [ax] + for g_i, goal in enumerate(goals): + if second_order_result is not None: + result_2_goal = second_order_result.loc[goal, 'S2', renamed_parameter] + mean = result_2_goal.mean().drop([renamed_parameter]) + mean.sort_values(ascending=False, inplace=True) + sorted_interactions = list(mean.index) + time_ar = _del_duplicates(list(result_2_goal.index.get_level_values(0))) + value = single_result.loc[goal, 'S1'][renamed_parameter].to_numpy() + ax[g_i].plot(single_result.loc[goal, 'S1'][renamed_parameter], label='S1') + ax[g_i].fill_between(time_ar, np.zeros_like(value), value, alpha=0.1) + for para in sorted_interactions: + value_2 = value + result_2_goal[para].to_numpy() + ax[g_i].plot(time_ar, value_2, label='S2 ' + para) + ax[g_i].fill_between(time_ar, value, value_2, alpha=0.1) + value = value_2 + ax[g_i].plot(single_result.loc[goal, 'ST'][renamed_parameter], label='ST') + legend = ['S1'] + legend.extend(analysis_variables) + legend.append('ST') + ax[g_i].set_title(f"Goal: {goal}") + 
ax[g_i].legend() + else: + for analysis_var in analysis_variables: + ax[g_i].plot(single_result.loc[goal, analysis_var][renamed_parameter]) + ax[g_i].legend(analysis_variables) + if show_plot: + plt.show() + return fig, ax
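A minimal usage sketch of plot_parameter_verbose; single_result and second_order_result are assumed to come from SobolAnalyzer.run_time_dependent, and the parameter and goal names below are hypothetical:

# Sketch only: illustrative names, not part of the library source.
fig, axes = plot_parameter_verbose(
    parameter="heatPump.scalingFactor",
    single_result=single_result,              # first and total order result
    second_order_result=second_order_result,  # optional S2 result
    goals=["Power", "T_room"],
    show_plot=False,
)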
+ + +def _del_duplicates(x): + """Helper function""" + return list(dict.fromkeys(x)) + + +def _rename_tuner_names(result, use_suffix, max_len, func_name=None): + """Helper function""" + tuner_names = list(result.columns) + renamed_names = {name: _format_name(name, use_suffix, max_len) for name in tuner_names} + result = result.rename(columns=renamed_names, index=renamed_names) + result = result.sort_index() + for old, new in renamed_names.items(): + if old != new and func_name is not None: + sys.stderr.write(f"{func_name} INFO: parameter name {old} changed to {new}\n") + return result + + +def _format_name(name, use_suffix, max_len): + """ + Format tuner names. + """ + if use_suffix: + name = _get_suffix(name) + name = short_name(name, max_len) + return name + + +def _get_suffix(modelica_var_name): + """Helper function""" + index_last_dot = modelica_var_name.rfind('.') + suffix = modelica_var_name[index_last_dot + 1:] + return suffix + + +def _create_figs_axes(figs_axes, fig_index, ax_len_list, fig_title): + """ + Check if figs and axes are already given, if not create them. + """ + if figs_axes is None: + fig, ax = plt.subplots(len(ax_len_list), sharex='all', layout="constrained") + else: + fig = figs_axes[0][fig_index] + ax = figs_axes[1][fig_index] + fig.suptitle(fig_title) + if not isinstance(ax, np.ndarray): + ax = [ax] + return fig, ax +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/sensitivity_analyzer.html b/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/sensitivity_analyzer.html index 48a28284..684151ee 100644 --- a/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/sensitivity_analyzer.html +++ b/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/sensitivity_analyzer.html @@ -3,7 +3,7 @@ - aixcalibuha.sensitivity_analysis.sensitivity_analyzer — AixCaliBuHa 0.3.1 documentation + aixcalibuha.sensitivity_analysis.sensitivity_analyzer — AixCaliBuHa 1.0.0 documentation @@ -34,7 +34,7 @@ AixCaliBuHa
- 0.3 + 1.0
@@ -84,15 +84,84 @@

Source code for aixcalibuha.sensitivity_analysis.sensitivity_analyzer

import abc import copy import os -import time +import pathlib +import multiprocessing as mp from typing import List +from collections import Counter import numpy as np import pandas as pd from ebcpy.utils import setup_logger -from ebcpy import data_types +from ebcpy.utils.reproduction import CopyFile from ebcpy.simulationapi import SimulationAPI from aixcalibuha import CalibrationClass, data_types from aixcalibuha import utils +from aixcalibuha.sensitivity_analysis.plotting import plot_single, plot_time_dependent + + +def _load_single_file(_filepath, parquet_engine='pyarrow'): + """Helper function""" + if _filepath is None: + return None + return data_types.TimeSeriesData(_filepath, default_tag='sim', key='simulation', + engine=parquet_engine) + + +def _load_files(_filepaths, parquet_engine='pyarrow'): + """Helper function""" + results = [] + for _filepath in _filepaths: + results.append(_load_single_file(_filepath, parquet_engine=parquet_engine)) + return results + + +def _restruct_verbose(list_output_verbose): + """Helper function""" + output_verbose = {} + for key, val in list_output_verbose[0].items(): + output_verbose[key] = np.array([]) + for i in list_output_verbose: + for key, val in i.items(): + output_verbose[key] = np.append(output_verbose[key], np.array([val[1]])) + return output_verbose + + +def _concat_all_sims(sim_results_list): + """Helper function that concatenates all results in a list into one DataFrame.""" + sim_results_list = [r.to_df() for r in sim_results_list] + sim_results_list = pd.concat(sim_results_list, keys=range(len(sim_results_list)), + axis='columns') + sim_results_list = sim_results_list.swaplevel(axis=1).sort_index(axis=1) + return sim_results_list + + +def _restruct_time_dependent(sen_time_dependent_list, time_index): + """Helper function that restructures the time dependent sensitivity results.""" + + def _restruct_single(sen_time_dependent_list_s, second_order=False): + sen_time_dependent_df = pd.concat(sen_time_dependent_list_s, keys=time_index, axis=0) + sen_time_dependent_df = sen_time_dependent_df.droplevel('Class', axis='index') + sen_time_dependent_df = sen_time_dependent_df.swaplevel(0, 1) + sen_time_dependent_df = sen_time_dependent_df.swaplevel(1, 2).sort_index(axis=0) + if second_order: + sen_time_dependent_df = sen_time_dependent_df.swaplevel(2, 3).sort_index(axis=0) + sen_time_dependent_df.index.set_names( + ['Goal', 'Analysis variable', 'Interaction', 'time'], inplace=True) + else: + sen_time_dependent_df.index.set_names(['Goal', 'Analysis variable', 'time'], + inplace=True) + return sen_time_dependent_df + + if isinstance(sen_time_dependent_list[0], tuple): + sen_time_dependent_list1, sen_time_dependent_list2 = zip(*sen_time_dependent_list) + return _restruct_single(sen_time_dependent_list1), _restruct_single( + sen_time_dependent_list2, True) + return _restruct_single(sen_time_dependent_list) + + +def _divide_chunks(long_list, chunk_length): + """Helper function that divides a list into multiple lists with a specific chunk length.""" + for i in range(0, len(long_list), chunk_length): + yield long_list[i:i + chunk_length]
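For illustration, _divide_chunks is a plain generator over consecutive slices, so the last chunk may be shorter than chunk_length:

# e.g. list(_divide_chunks([0, 1, 2, 3, 4], 2)) returns [[0, 1], [2, 3], [4]]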
[docs]class SenAnalyzer(abc.ABC): @@ -103,13 +172,12 @@

Source code for aixcalibuha.sensitivity_analysis.sensitivity_analyzer

Simulation-API used to simulate the samples :param int num_samples: The parameter `N` to the sampler methods of sobol and morris. NOTE: This is not the - the number of samples produced, but relates to the total number of samples produced in - a manner dependent on the sampler method used. See the documentation of sobol and - morris in the SALib for more information. - :keyword str analysis_function: - Used to automatically select result values. + number of samples produced, but relates to the total number of samples produced in + a manner dependent on the sampler method used. See the documentation of the specific + method in the SALib for more information. :keyword str,os.path.normpath cd: The path for the current working directory. + Logger and results will be stored here. :keyword boolean fail_on_error: Default is True. If True, the calibration will stop with an error if the simulation fails. See also: ``ret_val_on_error`` @@ -119,7 +187,19 @@

Source code for aixcalibuha.sensitivity_analysis.sensitivity_analyzer

options are np.NaN, np.inf or some other high numbers. Be aware that this may influence the solver. :keyword boolean save_files: - If true, all simulation files for each iteration will be saved! + Default False. If True, all simulation files for each iteration will be saved! + :keyword str suffix_files: + Default 'csv'. Specifies the data format to store the simulation files in. + Options are 'csv', 'hdf', 'parquet'. + :keyword str parquet_engine: + The engine to use for the data format parquet. + Supported options can be extracted + from the ebcpy.TimeSeriesData.save() function. + Default is 'pyarrow'. + :keyword str,os.path.normpath savepath_sim: + Default is cd. Separate directory for the time series data sets of all simulations + during the sensitivity analysis. A separate directory can be necessary for large data sets, + because they can crash the IDE during indexing when they are in the project folder. """ @@ -135,20 +215,23 @@

Source code for aixcalibuha.sensitivity_analysis.sensitivity_analyzer

# Update kwargs self.fail_on_error = kwargs.pop("fail_on_error", True) self.save_files = kwargs.pop("save_files", False) + self.suffix_files = kwargs.pop('suffix_files', 'csv') + self.parquet_engine = kwargs.pop('parquet_engine', 'pyarrow') self.ret_val_on_error = kwargs.pop("ret_val_on_error", np.NAN) self.cd = kwargs.pop("cd", os.getcwd()) - self.analysis_variable = kwargs.pop('analysis_variable', - self.analysis_variables[0]) - if self.analysis_variable not in self.analysis_variables: - raise TypeError(f'Given analysis_variable "{self.analysis_variable}" not ' - f'supported for class {self.__class__.__name__}. ' - f'Supported options are: {", ".join(self.analysis_variables)}.') + self.savepath_sim = kwargs.pop('savepath_sim', self.cd) + + if isinstance(self.cd, str): + self.cd = pathlib.Path(self.cd) + if isinstance(self.savepath_sim, str): + self.savepath_sim = pathlib.Path(self.savepath_sim) # Setup the logger self.logger = setup_logger(cd=self.cd, name=self.__class__.__name__) # Setup default values self.problem: dict = None + self.reproduction_files = [] @property @abc.abstractmethod @@ -201,19 +284,23 @@

Source code for aixcalibuha.sensitivity_analysis.sensitivity_analyzer

raise NotImplementedError(f'{self.__class__.__name__}.generate_samples ' f'function is not defined yet')
-
[docs] def simulate_samples(self, samples, cal_class): +
[docs] def simulate_samples(self, cal_class, **kwargs): """ - Put the parameters in the model and simulate it. + Creates the samples for the calibration class and simulates them. - :param Union[list, np.ndarray] samples: - Output variables in dymola :param cal_class: One class for calibration. Goals and tuner_paras have to be set + :keyword scale: + Default is False. If True, the bounds of the tuner-parameters + will be scaled between 0 and 1. - :returns: np.array - An array containing the evaluated differences for each sample + :return: + Returns two lists. First, a list with the simulation results for each sample; + if save_files is True, the list contains the filepaths to the results. + Second, a list of the samples. + :rtype: list """ - output = [] + scale = kwargs.pop('scale', False) # Set the output interval according to the given Goals mean_freq = cal_class.goals.get_meas_frequency() self.logger.info("Setting output_interval of simulation according " @@ -223,31 +310,42 @@

Source code for aixcalibuha.sensitivity_analysis.sensitivity_analyzer

self.sim_api.set_sim_setup({"start_time": cal_class.start_time, "stop_time": cal_class.stop_time}) self.sim_api.result_names = cal_class.goals.get_sim_var_names() - self.logger.info('Starting %s parameter variations on %s cores', - len(samples), self.sim_api.n_cpu) + + self.problem = self.create_problem(cal_class.tuner_paras, scale=scale) + samples = self.generate_samples() + + # create df of samples with the result_file_names as the index + result_file_names = [f"simulation_{idx}" for idx in range(len(samples))] + samples_df = pd.DataFrame(samples, columns=initial_names, index=result_file_names) + samples_df.to_csv(self.cd.joinpath(f'samples_{cal_class.name}.csv')) + # Simulate the current values parameters = [] - for i, initial_values in enumerate(samples): - parameters.append({name: value for name, value in zip(initial_names, initial_values)}) + for initial_values in samples: + if scale: + initial_values = cal_class.tuner_paras.descale(initial_values) + parameters.append(dict(zip(initial_names, initial_values))) + self.logger.info('Starting %s parameter variations on %s cores', + len(samples), self.sim_api.n_cpu) if self.save_files: - result_file_names = [f"simulation_{idx + 1}" for idx in range(len(parameters))] + sim_dir = self.savepath_sim.joinpath(f'simulations_{cal_class.name}') + os.makedirs(sim_dir, exist_ok=True) + samples_df.to_csv(self.savepath_sim.joinpath(f'samples_{cal_class.name}.csv')) + self.logger.info(f'Saving simulation files in: {sim_dir}') _filepaths = self.sim_api.simulate( parameters=parameters, return_option="savepath", - savepath=self.sim_api.cd, + savepath=sim_dir, result_file_name=result_file_names, + result_file_suffix=self.suffix_files, + parquet_engine=self.parquet_engine, fail_on_error=self.fail_on_error, inputs=cal_class.inputs, **cal_class.input_kwargs ) - # Load results - results = [] - for _filepath in _filepaths: - if _filepath is None: - results.append(None) - else: - results.append(data_types.TimeSeriesData(_filepath)) + self.reproduction_files.extend(_filepaths) + results = _filepaths else: results = self.sim_api.simulate( parameters=parameters, @@ -256,95 +354,324 @@

Source code for aixcalibuha.sensitivity_analysis.sensitivity_analyzer

**cal_class.input_kwargs ) self.logger.info('Finished %s simulations', len(samples)) - for i, result in enumerate(results): - if result is None: - output.append(self.ret_val_on_error) - else: - cal_class.goals.set_sim_target_data(result) - cal_class.goals.set_relevant_time_intervals(cal_class.relevant_intervals) - # Evaluate the current objective - total_res = cal_class.goals.eval_difference() - output.append(total_res) + return results, samples
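A usage sketch (sen_analyzer and cal_class are placeholders for a configured SenAnalyzer subclass and a CalibrationClass):

# Sketch: with save_files=True the returned `results` are file paths,
# otherwise TimeSeriesData objects; `samples` is the sample array.
results, samples = sen_analyzer.simulate_samples(cal_class=cal_class, scale=False)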
+ + def _check_index(self, tsd: data_types.TimeSeriesData, sim_num=None): + freq = tsd.frequency + if sim_num is None: + sim_num = tsd.filepath.name + if freq[0] != self.sim_api.sim_setup.output_interval: + self.logger.info( + f'The mean value of the frequency from {sim_num} does not match the output ' + 'interval. The index will be cleaned and spaced equally.') + tsd.to_datetime_index() + tsd.clean_and_space_equally(f'{str(self.sim_api.sim_setup.output_interval * 1000)}ms') + tsd.to_float_index() + freq = tsd.frequency + if freq[1] > 0.0: + self.logger.info(f'The standard deviation of the frequency from {sim_num} is too high ' + f'and will be rounded to the accuracy of the output interval') + tsd.index = np.round(tsd.index.astype("float64"), + str(self.sim_api.sim_setup.output_interval)[::-1].find('.')) + return tsd + + def _single_eval_statistical_measure(self, kwargs_eval): + """Evaluates the statistical measure of one result""" + cal_class = kwargs_eval.pop('cal_class') + result = kwargs_eval.pop('result') + num_sim = kwargs_eval.pop('sim_num', None) + if result is None: + verbose_error = {} + for goal, weight in zip(cal_class.goals.get_goals_list(), cal_class.goals.weightings): + verbose_error[goal] = (weight, self.ret_val_on_error) + return self.ret_val_on_error, verbose_error + result = self._check_index(result, num_sim) + cal_class.goals.set_sim_target_data(result) + cal_class.goals.set_relevant_time_intervals(cal_class.relevant_intervals) + # Evaluate the current objective + total_res, verbose_calculation = cal_class.goals.eval_difference(verbose=True) + return total_res, verbose_calculation +
[docs] def eval_statistical_measure(self, cal_class, results, verbose=True): + """Evaluates statistical measures of results on single core""" + self.logger.info('Starting evaluation of statistical measure') + output = [] + list_output_verbose = [] + for i, result in enumerate(results): + total_res, verbose_calculation = self._single_eval_statistical_measure( + {'cal_class': cal_class, 'result': result, 'sim_num': f'simulation_{i}'} + ) + output.append(total_res) + list_output_verbose.append(verbose_calculation) + if verbose: + # restructure output_verbose + output_verbose = _restruct_verbose(list_output_verbose) + return np.asarray(output), output_verbose return np.asarray(output)
-
[docs] def run(self, calibration_classes, merge_multiple_classes=True): + def _single_load_eval_file(self, kwargs_load_eval): + """For multiprocessing""" + filepath = kwargs_load_eval.pop('filepath') + _result = _load_single_file(filepath, self.parquet_engine) + kwargs_load_eval.update({'result': _result}) + total_res, verbose_calculation = self._single_eval_statistical_measure(kwargs_load_eval) + return total_res, verbose_calculation + + def _mp_load_eval(self, _filepaths, cal_class, n_cpu): + """ + Loading and evaluating the statistical measure of saved simulation files on multiple cores + """ + self.logger.info(f'Load files and evaluate statistical measure on {n_cpu} processes.') + kwargs_load_eval = [] + for filepath in _filepaths: + kwargs_load_eval.append({'filepath': filepath, 'cal_class': cal_class}) + output_array = [] + list_output_verbose = [] + with mp.Pool(processes=n_cpu) as pool: + for total, verbose in pool.imap(self._single_load_eval_file, kwargs_load_eval): + output_array.append(total) + list_output_verbose.append(verbose) + output_array = np.asarray(output_array) + output_verbose = _restruct_verbose(list_output_verbose) + return output_array, output_verbose + + def _load_eval(self, _filepaths, cal_class, n_cpu): + """ + Loading and evaluating the statistical measure of saved simulation files. + Single- or multiprocessing possible with definition of n_cpu. + """ + if n_cpu == 1: + results = _load_files(_filepaths, self.parquet_engine) + output_array, output_verbose = self.eval_statistical_measure( + cal_class=cal_class, + results=results + ) + return output_array, output_verbose + output_array, output_verbose = self._mp_load_eval(_filepaths, cal_class, n_cpu) + return output_array, output_verbose + +
[docs] def run(self, calibration_classes, merge_multiple_classes=True, **kwargs): """ Execute the sensitivity analysis for each class and return the result. :param CalibrationClass,list calibration_classes: - Either one or multiple classes for calibration + Either one or multiple classes for calibration with the same tuner-parameters. :param bool merge_multiple_classes: Default True. If False, the given list of calibration-classes - is handeled as-is. This means if you pass two CalibrationClass objects + is handled as-is. This means if you pass two CalibrationClass objects with the same name (e.g. "device on"), the calibration process will run for both these classes stand-alone. This will automatically yield an intersection of tuner-parameters, however may have advantages in some cases. + :keyword bool verbose: + Default False. If True, in addition to the combined Goals of the Classes + (saved under index Goal: all), the sensitivity measures of the individual + Goals will also be calculated and returned. + :keyword scale: + Default is False. If True, the bounds of the tuner-parameters + will be scaled between 0 and 1. + :keyword bool use_first_sim: + Default False. If True, the simulations of the first calibration class will be used for + all other calibration classes with their relevant time intervals. + The simulations must be stored on a hard-drive, so it must be used with + either save_files or load_sim_files. + :keyword int n_cpu: + Default is 1. The number of processes to use for the evaluation of the statistical + measure. For n_cpu > 1 only one simulation file is loaded at once in a process and + dumped directly after the evaluation of the statistical measure, + so that only minimal memory is used. + Use this option for large analyses. + Only implemented for save_files=True or load_sim_files=True. + :keyword bool load_sim_files: + Default False. If True, no new simulations are done and old simulations are loaded. + The simulations and corresponding samples will be loaded from self.savepath_sim like + they were saved from self.save_files. Currently, the name of the sim folder must be + "simulations_CAL_CLASS_NAME" and for the samples "samples_CAL_CLASS_NAME". + The usage of the same simulations for different + calibration classes is not supported yet. + :keyword bool save_results: + Default True. If True, all results are saved as a csv in cd + (samples, statistical measures and analysis variables). + :keyword bool plot_result: + Default True. If True, the results will be plotted. :return: - Returns a list of dictionaries. One dict is the SALib-result for - one calibration-class. The order is based on the order of the - calibration-class list. - :rtype: list + Returns a pandas.DataFrame. The DataFrame has a Multiindex with the + levels Class, Goal and Analysis variable. The Goal name of combined goals is 'all'. + The variables are the tuner-parameters. + For the Sobol Method and calc_second_order returns a tuple of DataFrames (df_1, df_2) + where df_2 contains the second order analysis variables and has an extra index level + Interaction, which also contains the variables. 
+ :rtype: pandas.DataFrame """ + verbose = kwargs.pop('verbose', False) + scale = kwargs.pop('scale', False) + use_first_sim = kwargs.pop('use_first_sim', False) + n_cpu = kwargs.pop('n_cpu', 1) + save_results = kwargs.pop('save_results', True) + plot_result = kwargs.pop('plot_result', True) + load_sim_files = kwargs.pop('load_sim_files', False) # Check correct input calibration_classes = utils.validate_cal_class_input(calibration_classes) # Merge the classes for avoiding possible intersection of tuner-parameters if merge_multiple_classes: calibration_classes = data_types.merge_calibration_classes(calibration_classes) + # Check n_cpu + if n_cpu > mp.cpu_count(): + raise ValueError(f"Given n_cpu '{n_cpu}' is greater " + "than the available number of " + f"cpus on your machine '{mp.cpu_count()}'") + + # Check if the usage of the simulations from the first calibration class for all is possible + if use_first_sim: + if not self.save_files and not load_sim_files: + raise AttributeError('To use the simulations of the first calibration class ' + 'for all classes the simulation files must be saved. ' + 'Either set save_files=True or load already existing files ' + 'with load_sim_files=True.') + start_time = 0 + stop_time = 0 + for idx, cal_class in enumerate(calibration_classes): + if idx == 0: + start_time = cal_class.start_time + stop_time = cal_class.stop_time + continue + if start_time > cal_class.start_time or stop_time < cal_class.stop_time: + raise ValueError(f'To use the simulations of the first calibration class ' + f'for all classes the start and stop times of the other ' + f'classes must be in the interval [{start_time}, {stop_time}] ' + f'of the first calibration class.') + all_results = [] - for cal_class in calibration_classes: - t_sen_start = time.time() + for idx, cal_class in enumerate(calibration_classes): + self.logger.info('Start sensitivity analysis of class: %s, ' 'Time-Interval: %s-%s s', cal_class.name, cal_class.start_time, cal_class.stop_time) - self.problem = self.create_problem(cal_class.tuner_paras) - samples = self.generate_samples() # Generate list with metrics of every parameter variation - output_array = self.simulate_samples( - samples=samples, - cal_class=cal_class) - result = self.analysis_function( - x=samples, - y=output_array - ) - t_sen_stop = time.time() - result['duration[s]'] = t_sen_stop - t_sen_start - all_results.append(result) + results_goals = {} + if load_sim_files: + self.problem = self.create_problem(cal_class.tuner_paras, scale=scale) + if use_first_sim: + class_name = calibration_classes[0].name + else: + class_name = cal_class.name + sim_dir = self.savepath_sim.joinpath(f'simulations_{class_name}') + samples_path = self.savepath_sim.joinpath(f'samples_{class_name}.csv') + self.logger.info(f'Loading samples from {samples_path}') + samples = pd.read_csv(samples_path, + header=0, + index_col=0) + samples = samples.to_numpy() + result_file_names = [f"simulation_{idx}.{self.suffix_files}" for idx in + range(len(samples))] + _filepaths = [sim_dir.joinpath(result_file_name) for result_file_name in + result_file_names] + self.logger.info(f'Loading simulation files from {sim_dir}') + output_array, output_verbose = self._load_eval(_filepaths, cal_class, n_cpu) + else: + results, samples = self.simulate_samples( + cal_class=cal_class, + scale=scale + ) + if self.save_files: + output_array, output_verbose = self._load_eval(results, cal_class, n_cpu) + else: + output_array, output_verbose = self.eval_statistical_measure( + cal_class=cal_class, + 
results=results + ) + if use_first_sim: + load_sim_files = True + + # combine output_array and output_verbose + # set key for output_array depending on one or multiple goals + stat_mea = {'all': output_array} + if len(output_verbose) == 1: + stat_mea = output_verbose + if len(output_verbose) > 1 and verbose: + stat_mea.update(output_verbose) + + # save statistical measure and corresponding samples for each cal_class in cd + if save_results: + result_file_names = [f"simulation_{idx}" for idx in range(len(output_array))] + stat_mea_df = pd.DataFrame(stat_mea, index=result_file_names) + savepath_stat_mea = self.cd.joinpath( + f'{cal_class.goals.statistical_measure}_{cal_class.name}.csv') + stat_mea_df.to_csv(savepath_stat_mea) + self.reproduction_files.append(savepath_stat_mea) + samples_df = pd.DataFrame(samples, columns=cal_class.tuner_paras.get_names(), + index=result_file_names) + savepath_samples = self.cd.joinpath(f'samples_{cal_class.name}.csv') + samples_df.to_csv(savepath_samples) + self.reproduction_files.append(savepath_samples) + + self.logger.info('Starting calculation of analysis variables') + for key, val in stat_mea.items(): + result_goal = self.analysis_function( + x=samples, + y=val + ) + results_goals[key] = result_goal + all_results.append(results_goals) + self.logger.info('Finished sensitivity analysis of class: %s, ' + 'Time-Interval: %s-%s s', cal_class.name, + cal_class.start_time, cal_class.stop_time) result = self._conv_local_results(results=all_results, local_classes=calibration_classes) + if save_results: + self._save(result) + if plot_result: + self.plot(result) return result, calibration_classes
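A sketch of a complete analysis run, assuming a configured SimulationAPI sim_api and a CalibrationClass cal_class (placeholder names). For the SobolAnalyzer with calc_second_order=True, result is a tuple of two DataFrames:

sen_analyzer = SobolAnalyzer(sim_api=sim_api, num_samples=8, save_files=True)
result, classes = sen_analyzer.run(
    [cal_class],
    verbose=True,      # also analyze each individual goal
    n_cpu=2,           # needs save_files=True or load_sim_files=True
    plot_result=False,
)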
+ def _save(self, result: pd.DataFrame, time_dependent: bool = False): + """ + Saves the result DataFrame of run and run_time_dependent. + Needs to be overwritten for Sobol results. + """ + if time_dependent: + savepath_result = self.cd.joinpath(f'{self.__class__.__name__}_results_time.csv') + else: + savepath_result = self.cd.joinpath(f'{self.__class__.__name__}_results.csv') + result.to_csv(savepath_result) + self.reproduction_files.append(savepath_result) +
[docs] @staticmethod - def create_problem(tuner_paras) -> dict: + def create_problem(tuner_paras, scale=False) -> dict: """Create function for later access if multiple calibration-classes are used.""" num_vars = len(tuner_paras.get_names()) bounds = np.array(tuner_paras.get_bounds()) + if scale: + bounds = [np.zeros_like(bounds[0]), np.ones_like(bounds[1])] problem = {'num_vars': num_vars, 'names': tuner_paras.get_names(), 'bounds': np.transpose(bounds)} return problem
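For illustration, with scale=True the SALib problem bounds collapse to [0, 1] for every tuner parameter (cal_class is a placeholder):

problem = SenAnalyzer.create_problem(cal_class.tuner_paras, scale=True)
# problem == {'num_vars': n, 'names': [...], 'bounds': n x 2 array of [0., 1.]}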
[docs] @staticmethod - def select_by_threshold(calibration_classes, result, threshold): + def select_by_threshold(calibration_classes, result, analysis_variable, threshold): """ Automatically select sensitive tuner parameters based on a given threshold - and a key-word of the result. + of a given analysis variable from a sensitivity result. + Uses only the combined goals. :param list calibration_classes: List of aixcalibuha.data_types.CalibrationClass objects for which you want to automatically select sensitive tuner-parameters. :param pd.DataFrame result: Result object of sensitivity analysis run + :param str analysis_variable: + Analysis variable to use for the selection :param float threshold: Minimal required value of the given analysis variable :return: list calibration_classes """ - for num_class, cal_class in enumerate(calibration_classes): - class_result = result.loc[cal_class.name] + for cal_class in calibration_classes: + first_goal = result.index.get_level_values(1)[0] + class_result = result.loc[cal_class.name, first_goal, analysis_variable] tuner_paras = copy.deepcopy(cal_class.tuner_paras) select_names = class_result[class_result < threshold].index.values tuner_paras.remove_names(select_names) @@ -359,25 +686,315 @@

Source code for aixcalibuha.sensitivity_analysis.sensitivity_analyzer

cal_class.tuner_paras = tuner_paras return calibration_classes
- def _conv_global_result(self, result: dict, cal_class: CalibrationClass): - glo_res_dict = self._get_res_dict(result=result, cal_class=cal_class) +
[docs] @staticmethod + def select_by_threshold_verbose(calibration_class: CalibrationClass, + result: pd.DataFrame, + analysis_variable: str, + threshold: float, + calc_names_for_selection: List[str] = None): + """ + Select tuner-parameters of a single calibration class with verbose sensitivity results. + This function keeps a tuner-parameter if its sensitivity is equal to or greater + than the threshold in at least one target value of one calibration class in the + sensitivity result. This can be more robust because a parameter with a small + sensitivity in one target value or system state can still be relevant in a global + calibration class, which calibrates multiple states and target values at the same + time and therefore does not show the same sensitivity as the isolated view of a + single state. + + :param CalibrationClass calibration_class: + The calibration class from which the tuner parameters will be selected. + :param pd.DataFrame result: + Sensitivity results to use for the selection. Can include multiple classes. + :param str analysis_variable: + The analysis variable to use for the selection. + :param float threshold: + Minimal required value of the given analysis variable. + :param List[str] calc_names_for_selection: + Specifies which calibration classes in the sensitivity results will be used for + the selection. Default are all classes. + """ + if Counter(calibration_class.tuner_paras.get_names()) != Counter(list(result.columns)): + raise NameError("The tuner-parameters of the calibration class do not " + "match the tuner-parameters in the sensitivity result. " + "They have to match.") + + result = result.loc[:, :, analysis_variable] + calc_names_results = result.index.get_level_values("Class").unique() + if calc_names_for_selection: + for cal_class in calc_names_for_selection: + if cal_class not in calc_names_results: + raise NameError(f"The calibration class name {cal_class} " + f"does not match any class name " + f"in the given sensitivity result.") + result = result.loc[calc_names_for_selection, :, :] + + selected_tuners = (result >= threshold).any() + + remove_tuners = [] + for tuner, selected in selected_tuners.items(): + if not selected: + remove_tuners.append(tuner) + tuner_paras = copy.deepcopy(calibration_class.tuner_paras) + tuner_paras.remove_names(remove_tuners) + if not tuner_paras.get_names(): + raise ValueError("Threshold too small. All tuner-parameters would be removed.") + calibration_class.tuner_paras = tuner_paras + return calibration_class
+ +
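A usage sketch of the verbose selection; result_first_total stands for the first/total order DataFrame returned by run() (e.g. result[0] for the SobolAnalyzer), and the threshold value is illustrative:

cal_class = SenAnalyzer.select_by_threshold_verbose(
    calibration_class=cal_class,
    result=result_first_total,
    analysis_variable='ST',
    threshold=0.01,
)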
[docs] def run_time_dependent(self, cal_class: CalibrationClass, **kwargs): + """ + Calculate the time dependent sensitivity for all the single goals in the calibration class. + + :param CalibrationClass cal_class: + Calibration class with tuner-parameters to calculate sensitivity for. + Can include dummy target data. + :keyword scale: + Default is False. If True, the bounds of the tuner-parameters + will be scaled between 0 and 1. + :keyword bool load_sim_files: + Default False. If True, no new simulations are done and old simulations are loaded. + The simulations and corresponding samples will be loaded from self.savepath_sim like + they were saved from self.save_files. Currently, the name of the sim folder must be + "simulations_CAL_CLASS_NAME" and for the samples "samples_CAL_CLASS_NAME". + :keyword bool save_results: + Default True. If True, all results are saved as a csv in cd + (samples and analysis variables). + :keyword int n_steps: + Default is all time steps. If the problem is large, the evaluation of all time steps + at once can cause a memory error. Then n_steps defines how many time_steps + are evaluated at once in chunks. This increases the needed time exponentially and + the simulation files must be saved. + :keyword bool plot_result: + Default True. If True, the results will be plotted. + :return: + Returns a pandas.DataFrame. + :rtype: pandas.DataFrame + """ + scale = kwargs.pop('scale', False) + save_results = kwargs.pop('save_results', True) + plot_result = kwargs.pop('plot_result', True) + load_sim_files = kwargs.pop('load_sim_files', False) + n_steps = kwargs.pop('n_steps', 'all') + + self.logger.info("Start time dependent sensitivity analysis.") + if load_sim_files: + self.problem = self.create_problem(cal_class.tuner_paras, scale=scale) + sim_dir = self.savepath_sim.joinpath(f'simulations_{cal_class.name}') + samples_path = self.savepath_sim.joinpath(f'samples_{cal_class.name}.csv') + samples = pd.read_csv(samples_path, + header=0, + index_col=0) + samples = samples.to_numpy() + result_file_names = [f"simulation_{idx}.{self.suffix_files}" for idx in + range(len(samples))] + _filepaths = [sim_dir.joinpath(result_file_name) for result_file_name in + result_file_names] + + sen_time_dependent_list, time_index = self._load_analyze_tsteps(_filepaths=_filepaths, + samples=samples, + n_steps=n_steps, + cal_class=cal_class) + sen_time_dependent_df = _restruct_time_dependent(sen_time_dependent_list, time_index) + else: + results, samples = self.simulate_samples( + cal_class=cal_class, + scale=scale + ) + if self.save_files: + sen_time_dependent_list, time_index = self._load_analyze_tsteps(_filepaths=results, + samples=samples, + n_steps=n_steps, + cal_class=cal_class) + sen_time_dependent_df = _restruct_time_dependent(sen_time_dependent_list, + time_index) + else: + variables = results[0].get_variable_names() + time_index = results[0].index.to_numpy() + total_result = _concat_all_sims(results) + sen_time_dependent_list = [] + for time_step in time_index: + result_df_tstep = self._analyze_tstep_df(time_step=time_step, + tsteps_sim_results=total_result, + variables=variables, + samples=samples, + cal_class=cal_class) + sen_time_dependent_list.append(result_df_tstep) + sen_time_dependent_df = _restruct_time_dependent(sen_time_dependent_list, + time_index) + self.logger.info("Finished time dependent sensitivity analysis.") + if save_results: + self._save(sen_time_dependent_df, time_dependent=True) + if plot_result: + if isinstance(sen_time_dependent_df, pd.DataFrame): + 
plot_time_dependent(sen_time_dependent_df) + else: + plot_time_dependent(sen_time_dependent_df[0]) + return sen_time_dependent_df
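A sketch of a time dependent run that reuses simulations saved by run() and evaluates the indices in chunks to bound memory (sen_analyzer and cal_class are placeholders; values are illustrative):

sen_time = sen_analyzer.run_time_dependent(
    cal_class,
    load_sim_files=True,
    n_steps=1000,
    plot_result=False,
)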
+ + def _analyze_tstep_df(self, time_step, tsteps_sim_results, variables, samples, cal_class): + """Analyze the sensitivity at a single time step.""" + result_dict_tstep = {} + for var in variables: + result_tstep_var = tsteps_sim_results[var].loc[time_step].to_numpy() + if np.all(result_tstep_var == result_tstep_var[0]): + sen_tstep_var = None + else: + sen_tstep_var = self.analysis_function( + x=samples, + y=result_tstep_var + ) + result_dict_tstep[var] = sen_tstep_var + result_df_tstep = self._conv_local_results(results=[result_dict_tstep], + local_classes=[cal_class]) + return result_df_tstep + + def _load_tsteps_df(self, tsteps, _filepaths): + """ + Load all simulations and extract and concatenate the sim results of the time steps in tsteps. + """ + self.logger.info( + f"Loading time steps from {tsteps[0]} to {tsteps[-1]} of the simulation files.") + tsteps_sim_results = [] + for _filepath in _filepaths: + sim = _load_single_file(_filepath) + tsteps_sim_results.append(sim.loc[tsteps[0]:tsteps[-1]]) + tsteps_sim_results = _concat_all_sims(tsteps_sim_results) + return tsteps_sim_results + + def _load_analyze_tsteps(self, _filepaths, samples, n_steps, cal_class): + """ + Load and analyze all time steps in chunks with n_steps time steps. + """ + sim1 = _load_single_file(_filepaths[0]) + time_index = sim1.index.to_numpy() + variables = sim1.get_variable_names() + sen_time_dependent_list = [] + if n_steps == 'all': + list_tsteps = [time_index] + elif isinstance(n_steps, int) and not (n_steps <= 0 or n_steps > len(time_index)): + list_tsteps = _divide_chunks(time_index, n_steps) + else: + raise ValueError( + f"n_steps can only be between 1 and {len(time_index)} or the string all.") + + for tsteps in list_tsteps: + tsteps_sim_results = self._load_tsteps_df(tsteps=tsteps, _filepaths=_filepaths) + self.logger.info("Analyzing these time steps.") + for tstep in tsteps: + result_df_tstep = self._analyze_tstep_df(time_step=tstep, + tsteps_sim_results=tsteps_sim_results, + variables=variables, + samples=samples, + cal_class=cal_class) + sen_time_dependent_list.append(result_df_tstep) + return sen_time_dependent_list, time_index + + def _conv_global_result(self, result: dict, cal_class: CalibrationClass, + analysis_variable: str): + glo_res_dict = self._get_res_dict(result=result, cal_class=cal_class, + analysis_variable=analysis_variable) return pd.DataFrame(glo_res_dict, index=['global']) def _conv_local_results(self, results: list, local_classes: list): - _conv_results = [self._get_res_dict(result=result, cal_class=local_class) - for result, local_class in zip(results, local_classes)] - df = pd.DataFrame(_conv_results) - df.index = [c.name for c in local_classes] + """ + Convert the result dictionaries from SALib of each class and goal into one DataFrame. + Overwritten for Sobol. 
+ """ + _conv_results = [] + tuples = [] + for class_results, local_class in zip(results, local_classes): + for goal, goal_results in class_results.items(): + for analysis_var in self.analysis_variables: + _conv_results.append(self._get_res_dict(result=goal_results, + cal_class=local_class, + analysis_variable=analysis_var)) + tuples.append((local_class.name, goal, analysis_var)) + index = pd.MultiIndex.from_tuples(tuples=tuples, + names=['Class', 'Goal', 'Analysis variable']) + df = pd.DataFrame(_conv_results, index=index) return df @abc.abstractmethod - def _get_res_dict(self, result: dict, cal_class: CalibrationClass): + def _get_res_dict(self, result: dict, cal_class: CalibrationClass, analysis_variable: str): """ Convert the result object to a dict with the key being the variable name and the value being the result - associated to self.analysis_variable. + associated to analysis_variable. """ - raise NotImplementedError
+ raise NotImplementedError + +
[docs] def plot(self, result): + """ + Plot the results of the sensitivity analysis method from run(). + + :param pd.DataFrame result: + Dataframe of the results like from the run() function. + :return tuple of matplotlib objects (fig, ax): + """ + plot_single(result=result)
+ +
[docs] @staticmethod + def load_from_csv(path): + """ + Load sensitivity results which were saved with the run() or run_time_dependent() function. + + For second order results use the load_second_order_from_csv() function of the SobolAnalyzer. + """ + result = pd.read_csv(path, index_col=[0, 1, 2]) + return result
+ +
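A sketch of reloading saved results; the file name follows the _save convention above and the index levels are Class, Goal and Analysis variable:

result = SenAnalyzer.load_from_csv('SobolAnalyzer_results.csv')
s1 = result.loc['device on', 'all', 'S1']  # one class, combined goals, S1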
[docs] def save_for_reproduction(self, + title: str, + path: pathlib.Path = None, + files: list = None, + exclude_sim_files: bool = False, + remove_saved_files: bool = False, + **kwargs): + """ + Save the settings of the SenAnalyzer and SimApi in order to + reproduce the simulations and the sensitivity analysis method. + All saved results will also be saved in the reproduction + archive. The simulations can be excluded from saving. + + :param str title: + Title of the study + :param pathlib.Path path: + Where to store the .zip file. If not given, self.cd is used. + :param list files: + List of files to save along with the standard ones. + Examples would be plots, tables etc. + :param bool exclude_sim_files: + Default False. If True, the simulation files will not be saved in + the reproduction archive. + :param bool remove_saved_files: + Default False. If True, the result and simulation files will be moved + instead of just copied. + :param dict kwargs: + All keyword arguments except title, files, and path of the function + `save_reproduction_archive`. Most importantly, `log_message` may be + specified to avoid input during execution. + """ + if files is None: + files = [] + + for file_path in self.reproduction_files: + if exclude_sim_files: + if 'simulation' in str(file_path): + continue + filename = "SenAnalyzer" + str(file_path).rsplit(self.cd.name, maxsplit=1)[-1] + files.append(CopyFile( + sourcepath=file_path, + filename=filename, + remove=remove_saved_files + )) + + return self.sim_api.save_for_reproduction( + title=title, + path=path, + files=files, + **kwargs + )
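A usage sketch that archives the study without the potentially large simulation files; passing log_message avoids the interactive prompt mentioned above:

sen_analyzer.save_for_reproduction(
    title='sensitivity_study',
    exclude_sim_files=True,
    log_message='Sensitivity analysis study',
)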
diff --git a/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/sobol.html b/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/sobol.html index 40d6eb32..c62792a3 100644 --- a/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/sobol.html +++ b/docs/master/docs/_modules/aixcalibuha/sensitivity_analysis/sobol.html @@ -3,7 +3,7 @@ - aixcalibuha.sensitivity_analysis.sobol — AixCaliBuHa 0.3.1 documentation + aixcalibuha.sensitivity_analysis.sobol — AixCaliBuHa 1.0.0 documentation @@ -34,7 +34,7 @@ AixCaliBuHa
- 0.3 + 1.0
@@ -83,40 +83,58 @@

Source code for aixcalibuha.sensitivity_analysis.sobol

Adds the SobolAnalyzer to the available classes of sensitivity analysis. """ -from SALib.sample import saltelli as sobol +import pandas as pd +from SALib.sample import sobol from SALib.analyze import sobol as analyze_sobol import numpy as np from aixcalibuha.sensitivity_analysis import SenAnalyzer from aixcalibuha import CalibrationClass +from aixcalibuha.sensitivity_analysis.plotting import plot_single, heatmaps
[docs]class SobolAnalyzer(SenAnalyzer): """ + Sobol method from SALib + https://salib.readthedocs.io/en/latest/api.html#sobol-sensitivity-analysis + A variance-based method which can compute the sensitivity measures + 'S1', 'ST' and 'S2' with their confidence intervals. + Additional arguments: :keyword bool calc_second_order: Default True, used for the sobol-method - :keyword seed: + :keyword int seed: Used for the sobol-method """ + __analysis_variables = ['S1', 'ST', 'S1_conf', 'ST_conf'] + __analysis_variables_1 = ['S1', 'ST', 'S1_conf', 'ST_conf'] + __analysis_variables_2 = ['S2', 'S2_conf'] + def __init__(self, sim_api, **kwargs): + # Additional kwarg which changes the possible analysis_variables + self.calc_second_order = kwargs.get("calc_second_order", True) + if self.calc_second_order: + self.__analysis_variables = ['S1', 'ST', 'S1_conf', 'ST_conf', 'S2', 'S2_conf'] + # separately store first and total order (1) and second order (2) analysis variables + self.av_1_selected = [] + self.av_2_selected = [] + # Set additional kwargs + self.seed = kwargs.pop("seed", None) + super().__init__( sim_api=sim_api, **kwargs) - # Set additional kwargs - self.calc_second_order = kwargs.pop("calc_second_order", True) - self.seed = kwargs.pop("seed", None) @property def analysis_variables(self): """The analysis variables of the sobol method""" - return ['S1', 'ST', 'S1_conf', 'ST_conf'] + return self.__analysis_variables
[docs] def analysis_function(self, x, y): """ Use the SALib.analyze.sobol method to analyze the simulation results. - :param np.array: x + :param np.array x: placeholder for the `X` parameter of the morris method; not used for sobol :param np.array y: The NumPy array containing the model outputs @@ -149,16 +167,111 @@

Source code for aixcalibuha.sensitivity_analysis.sobol

N=self.num_samples, **self.create_sampler_demand())
- def _get_res_dict(self, result: dict, cal_class: CalibrationClass): + def _save(self, result: tuple, time_dependent: bool = False): + """ + Save the results of the run and run_time_dependent function of the SobolAnalyzer. + """ + if not result[0].empty: + super()._save(result=result[0], time_dependent=time_dependent) + if time_dependent: + savepath_result_2 = self.cd.joinpath( + f'{self.__class__.__name__}_results_second_order_time.csv') + else: + savepath_result_2 = self.cd.joinpath( + f'{self.__class__.__name__}_results_second_order.csv') + if not result[1].empty: + result[1].to_csv(savepath_result_2) + self.reproduction_files.append(savepath_result_2) + + def _conv_local_results(self, results: list, local_classes: list): + """ + Convert the result dictionaries from SALib + of each class and goal into a tuple of two DataFrames. + The first contains the first and total order results and + the second the second order results. + If one of the results is not computed, an empty DataFrame is returned. + """ + _conv_results = [] + _conv_results_2 = [] + tuples = [] + tuples_2 = [] + for class_results, local_class in zip(results, local_classes): + for goal, goal_results in class_results.items(): + for analysis_var in self.analysis_variables: + res_dict = self._get_res_dict(result=goal_results, + cal_class=local_class, + analysis_variable=analysis_var) + if analysis_var in self.__analysis_variables_1: + _conv_results.append(res_dict) + tuples.append((local_class.name, goal, analysis_var)) + elif analysis_var in self.__analysis_variables_2: + for tuner_para, res_dict in res_dict.items(): + _conv_results_2.append(res_dict) + tuples_2.append((local_class.name, goal, analysis_var, tuner_para)) + index = pd.MultiIndex.from_tuples(tuples=tuples, + names=['Class', 'Goal', 'Analysis variable']) + index_2 = pd.MultiIndex.from_tuples(tuples=tuples_2, + names=['Class', 'Goal', 'Analysis variable', + 'Interaction']) + df = pd.DataFrame(_conv_results, index=index) + df_2 = pd.DataFrame(_conv_results_2, index=index_2) + return df, df_2 + + def _get_res_dict(self, result: dict, cal_class: CalibrationClass, analysis_variable: str): """ Convert the result object to a dict with the key being the variable name and the value being the result - associated to self.analysis_variable. + associated to analysis_variable. + For second order analysis variables the result is converted to a + dict with the key being the variable name and the value being another dict + with the variable names as the keys and the result associated to analysis_variable + from the interaction between the two variables. """ names = self.create_problem(cal_class.tuner_paras)['names'] - return {var_name: np.abs(res_val) - for var_name, res_val in zip(names, - result[self.analysis_variable])} + if analysis_variable in self.__analysis_variables_1: + if result is None: + res_dict_1 = {var_name: np.abs(res_val) + for var_name, res_val in zip(names, + np.zeros(len(names)))} + else: + res_dict_1 = {var_name: np.abs(res_val) + for var_name, res_val in zip(names, + result[analysis_variable])} + return res_dict_1 + if analysis_variable in self.__analysis_variables_2: + if result is None: + res_dict_2 = {var_name: dict(zip(names, np.abs(res_val))) + for var_name, res_val in zip(names, + np.zeros((len(names), len(names))))} + else: + result_av = result[analysis_variable] + for i, _ in enumerate(result_av): + for j, _ in enumerate(result_av): + if i > j: + result_av[i][j] = result_av[j][i] + res_dict_2 = {var_name: dict(zip(names, np.abs(res_val))) + for var_name, res_val in zip(names, + result_av)} + return res_dict_2 +
+ if analysis_variable in self.__analysis_variables_1: + if result is None: + res_dict_1 = {var_name: np.abs(res_val) + for var_name, res_val in zip(names, + np.zeros(len(names)))} + else: + res_dict_1 = {var_name: np.abs(res_val) + for var_name, res_val in zip(names, + result[analysis_variable])} + return res_dict_1 + if analysis_variable in self.__analysis_variables_2: + if result is None: + res_dict_2 = {var_name: dict(zip(names, np.abs(res_val))) + for var_name, res_val in zip(names, + np.zeros((len(names), len(names))))} + else: + result_av = result[analysis_variable] + for i, _ in enumerate(result_av): + for j, _ in enumerate(result_av): + if i > j: + result_av[i][j] = result_av[j][i] + res_dict_2 = {var_name: dict(zip(names, np.abs(res_val))) + for var_name, res_val in zip(names, + result_av)} + return res_dict_2 + +
[docs] def plot(self, result): + """ + Plot the results of the sensitivity analysis method from run(). + + :param pd.DataFrame result: + Dataframe of the results like from the run() function. + :return tuple of matplotlib objects (fig, ax): + """ + plot_single(result=result[0]) + heatmaps(result=result[1])
+ +
[docs] @staticmethod + def load_second_order_from_csv(path): + """ + Load second order sensitivity results which were saved with the run() or + run_time_dependent() function. + """ + result = pd.read_csv(path, index_col=[0, 1, 2, 3]) + return result
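A sketch of reloading the saved second order results, which carry the extra index level Interaction (file name per the _save convention above):

result_2 = SobolAnalyzer.load_second_order_from_csv(
    'SobolAnalyzer_results_second_order.csv')
s2 = result_2.loc['device on', 'all', 'S2']  # S2 interactions for one class and goal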
diff --git a/docs/master/docs/_modules/aixcalibuha/utils.html b/docs/master/docs/_modules/aixcalibuha/utils.html index ef49c160..30fdd811 100644 --- a/docs/master/docs/_modules/aixcalibuha/utils.html +++ b/docs/master/docs/_modules/aixcalibuha/utils.html @@ -3,7 +3,7 @@ - aixcalibuha.utils — AixCaliBuHa 0.3.1 documentation + aixcalibuha.utils — AixCaliBuHa 1.0.0 documentation @@ -34,7 +34,7 @@ AixCaliBuHa
- 0.3 + 1.0
diff --git a/docs/master/docs/_modules/aixcalibuha/utils/configuration.html b/docs/master/docs/_modules/aixcalibuha/utils/configuration.html index e91f1c01..b5158046 100644 --- a/docs/master/docs/_modules/aixcalibuha/utils/configuration.html +++ b/docs/master/docs/_modules/aixcalibuha/utils/configuration.html @@ -3,7 +3,7 @@ - aixcalibuha.utils.configuration — AixCaliBuHa 0.3.1 documentation + aixcalibuha.utils.configuration — AixCaliBuHa 1.0.0 documentation @@ -34,7 +34,7 @@ AixCaliBuHa
- 0.3 + 1.0
@@ -90,7 +90,6 @@

Source code for aixcalibuha.utils.configuration

from ebcpy import data_types from aixcalibuha import Goals, CalibrationClass, TunerParas - tsd_config = {"data": "TODO: Specify the path to the target values measured", "key": None, "sheet_name": None, @@ -131,13 +130,13 @@

Source code for aixcalibuha.utils.configuration

"TODO: Specify the boundaries of the tuner parameters as a list of tuples"}} default_calibration_config = { - "statistical_measure": "TODO: Specify the statistical " - "measure for calibration (RMSE, MAE, etc.)", - "calibration_classes": [default_cal_class_config], - "start_time_method": 'fixstart', - "settings": kwargs_calibrator, - "settings multiple classes": kwargs_multiple_classes - } + "statistical_measure": "TODO: Specify the statistical " + "measure for calibration (RMSE, MAE, etc.)", + "calibration_classes": [default_cal_class_config], + "start_time_method": 'fixstart', + "settings": kwargs_calibrator, + "settings multiple classes": kwargs_multiple_classes +} kwargs_scipy_dif_evo = {"maxiter": 30, "popsize": 5, @@ -182,7 +181,7 @@

Source code for aixcalibuha.utils.configuration

"Optimization": default_optimization_config, "Input Data": default_input_config, "Calibration": default_calibration_config - } +}
[docs]def get_goals_from_config(config): diff --git a/docs/master/docs/_modules/aixcalibuha/utils/visualizer.html b/docs/master/docs/_modules/aixcalibuha/utils/visualizer.html index 8672e7a9..89906543 100644 --- a/docs/master/docs/_modules/aixcalibuha/utils/visualizer.html +++ b/docs/master/docs/_modules/aixcalibuha/utils/visualizer.html @@ -3,7 +3,7 @@ - aixcalibuha.utils.visualizer — AixCaliBuHa 0.3.1 documentation + aixcalibuha.utils.visualizer — AixCaliBuHa 1.0.0 documentation @@ -34,7 +34,7 @@ AixCaliBuHa
- 0.3 + 1.0
@@ -97,6 +97,28 @@

Source code for aixcalibuha.utils.visualizer

import aixcalibuha
 
 
+
[docs]def short_name(ini_name: str, max_len: int): + """ + Shortens long strings to a max length from the front. + long_string_name => ...ing_name with len(new_name) = max_len + + :param str ini_name: + Long string to shorten. + :param int max_len: + Max length of the new string. + :return: str + The shortened string. + """ + if len(ini_name) > max_len: + num_dots = len(ini_name) - max_len + if num_dots > 3: + num_dots = 3 + formatted_name = "." * num_dots + ini_name[-(max_len - num_dots):] + else: + formatted_name = ini_name + return formatted_name
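A quick example of the shortening: len('heatPump.compressor.speed') is 25, so for max_len=10 three dots plus the last seven characters remain:

short_name('heatPump.compressor.speed', 10)  # returns '...r.speed'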
+ +
[docs]class CalibrationLogger: """Base class for showing the process of functions in this Framework with print-statements and saving everything @@ -341,16 +363,11 @@

Source code for aixcalibuha.utils.visualizer

for ini_name in initial_names:
             # Limit string length to a certain amount.
             # The full name has to be displayed somewhere else
-            if len(ini_name) > self._width:
-                num_dots = len(ini_name) - self._width
-                if num_dots > 3:
-                    num_dots = 3
-                formatted_name = "."*num_dots + ini_name[-(self._width-num_dots):]
-            else:
-                formatted_name = ini_name
+            formatted_name = short_name(ini_name=ini_name, max_len=self._width)
             info_string += "   {0:{width}s}".format(formatted_name, width=self._width)
         # Add string for qualitative measurement used (e.g. NRMSE, MEA etc.)
-        info_string += "     {0:{width}s}".format(self.goals.statistical_measure, width=self._width)
+        info_string += "     {0:{width}s}".format(self.goals.statistical_measure,
+                                                  width=self._width)
         info_string += "penaltyfactor"
         info_string += f"   Unweighted {self.goals.statistical_measure}"
         return info_string
@@ -388,11 +405,12 @@ 

Source code for aixcalibuha.utils.visualizer

prec=self._prec)
         if penalty:
             info_string += "   {0:{width}.{prec}f}".format(penalty, width=self._width,
-                                                           prec=self._prec-3)
+                                                           prec=self._prec - 3)
         else:
             info_string += "        {}".format("-")
-        _verbose_info = "= " + " + ".join(["{0:.{prec}}*{1:.{prec}}".format(weight, val, prec=4)
-                                           for weight, val in unweighted_objective.items()])
+        _verbose_info = "= " + " + ".join(["{0:.{prec}}*{1:.{prec}}".format(val[0],
+                                                                            val[1], prec=4)
+                                           for goal, val in unweighted_objective.items()])
         info_string += f"          {_verbose_info}"
 
         return info_string
@@ -544,18 +562,21 @@

Source code for aixcalibuha.utils.visualizer

if self.goals is not None and self.create_tsd_plot:
             self._plot_goals(at_validation=True)
 
-        self._show_plot()
+ self._show_plot(for_validation=True)
- def _show_plot(self): + def _show_plot(self, for_validation=False): """Show plot if activated""" if not self.show_plot: return plt.draw() - self.fig_obj.canvas.draw_idle() - self.fig_tuner.canvas.draw_idle() if self.create_tsd_plot: self.fig_goal.canvas.draw_idle() - plt.pause(self.show_plot_pause_time) + if not for_validation: + self.fig_obj.canvas.draw_idle() + self.fig_tuner.canvas.draw_idle() + plt.pause(self.show_plot_pause_time) + else: + plt.show()
[docs] def save_calibration_result(self, best_iterate, model_name, **kwargs): """ @@ -650,8 +671,9 @@

Source code for aixcalibuha.utils.visualizer

os.makedirs(path_intersections)
         if "itercount" in kwargs:
             fig_intersection.savefig(
-                os.path.join(path_intersections,
-                             f'tuner_parameter_intersection_plot_it{kwargs["itercount"]}.{self.file_type}')
+                os.path.join(
+                    path_intersections,
+                    f'tuner_parameter_intersection_plot_it{kwargs["itercount"]}.{self.file_type}')
             )
         else:
             fig_intersection.savefig(
diff --git a/docs/master/docs/_modules/index.html b/docs/master/docs/_modules/index.html
index 8f215de2..79766d13 100644
--- a/docs/master/docs/_modules/index.html
+++ b/docs/master/docs/_modules/index.html
@@ -3,7 +3,7 @@
 
   
   
-  Overview: module code — AixCaliBuHa 0.3.1 documentation
+  Overview: module code — AixCaliBuHa 1.0.0 documentation
       
       
 
@@ -34,7 +34,7 @@
             AixCaliBuHa
           
               
- 0.3 + 1.0
@@ -81,7 +81,10 @@

All modules for which code is available

diff --git a/docs/master/docs/code/aixcalibuha.sensitivity_analysis.html b/docs/master/docs/code/aixcalibuha.sensitivity_analysis.html index 354962f4..058ded93 100644 --- a/docs/master/docs/code/aixcalibuha.sensitivity_analysis.html +++ b/docs/master/docs/code/aixcalibuha.sensitivity_analysis.html @@ -4,7 +4,7 @@ - aixcalibuha.sensitivity_analysis package — AixCaliBuHa 0.3.1 documentation + aixcalibuha.sensitivity_analysis package — AixCaliBuHa 1.0.0 documentation @@ -37,7 +37,7 @@ AixCaliBuHa
- 0.3 + 1.0
@@ -102,6 +102,77 @@ sensitivity analysis with.

Submodules

+
+
+

aixcalibuha.sensitivity_analysis.fast module

+

Adds the FASTAnalyzer to the available +classes of sensitivity analysis.

+
+
+class aixcalibuha.sensitivity_analysis.fast.FASTAnalyzer(sim_api, **kwargs)[source]
+

Bases: SenAnalyzer

+

FAST method from SALib +https://salib.readthedocs.io/en/latest/api.html#fast-fourier-amplitude-sensitivity-test +A variance-based method which can compute the sensitivity measures +‘S1’ and ‘ST’.

+

Additional arguments:

+
+
Parameters:
+
    +
  • M (int) – Default 4, used for the fast-method

  • +
  • seed – Used for the fast-method

  • +
+
+
+
+
+analysis_function(x, y)[source]
+

Use the SALib.analyze.fast method to analyze the simulation results.

+
+
Parameters:
+
    +
  • x (np.array) – placeholder for the X parameter of the morris method not used for sobol

  • +
  • y (np.array) – The NumPy array containing the model outputs

  • +
+
+
Returns:
+

returns the result of the SALib.analyze.fast method (from the documentation: +Returns a dictionary with keys ‘S1’ and ‘ST’, where each entry is a list of +size D (the number of parameters) containing the indices in the same order +as the parameter file.)

+
+
+
+ +
+
+property analysis_variables
+

The analysis variables of the FAST method

+
+ +
+
+create_sampler_demand()[source]
+

Function to create the sampler parameters for the fast method

+
+ +
+
+generate_samples()[source]
+

Run the sampler for fast and return the results.

+
+
Returns:
+

The list of samples generated as a NumPy array with one row per sample +and each row containing one value for each variable name in problem[‘names’].

+
+
Return type:
+

np.ndarray

+
+
+
+ +
+

aixcalibuha.sensitivity_analysis.morris module

@@ -111,11 +182,14 @@

Submodules class aixcalibuha.sensitivity_analysis.morris.MorrisAnalyzer(sim_api, **kwargs)[source]

Bases: SenAnalyzer

+

Morris method from SALib https://salib.readthedocs.io/en/latest/api.html#method-of-morris +An elementary effects (One-At-A-Time) method which computes the sensitivity +measures ‘mu’, ‘mu_star’ and ‘sigma’ with a confidence interval for mu_star.

Additional arguments:

Parameters:
    -
  • num_levels (int) – Default 4, used for the morris-method

  • +
  • num_levels (int) – Default num_samples, used for the morris-method

  • optimal_trajectories – Used for the morris-method

  • local_optimization (bool) – Default True, used for the morris-method
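An analogous sketch for the Morris method; sim_api and cal_class are again assumed to exist and the keyword values are illustrative:

    from aixcalibuha.sensitivity_analysis.morris import MorrisAnalyzer

    morris_analyzer = MorrisAnalyzer(
        sim_api=sim_api,
        num_samples=64,            # N for the morris sampler
        num_levels=64,             # defaults to num_samples if omitted
        optimal_trajectories=None,
        local_optimization=True,
    )
    # Result contains 'mu', 'mu_star', 'sigma' and the mu_star confidence interval.
    result = morris_analyzer.run(cal_class)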

@@ -170,6 +244,283 @@
 aixcalibuha.sensitivity_analysis.pawn module

+Adds the PAWNAnalyzer to the available classes of sensitivity analysis.
+
+class aixcalibuha.sensitivity_analysis.pawn.PAWNAnalyzer(sim_api, **kwargs)[source]
+Bases: SenAnalyzer
+
+PAWN method from SALib
+https://salib.readthedocs.io/en/latest/api.html#pawn-sensitivity-analysis
+Density-based method which computes the PAWN index at ‘min’, ‘max’, ‘mean’,
+‘median’ and coefficient of variation ‘cv’.
+
+Additional arguments:
+
+Parameters:
+  • calc_second_order (bool) – Default True, used for the sampler of the sobol-method
+  • s (int) – Default 10, used for the pawn-method.
+  • sampler (str) – Which sampler should be used. Default sobol.
+    Choose between ‘sobol’, ‘morris’ and ‘fast’.
+  • num_levels (int) – Default num_samples, used for the sampler of the morris-method.
+  • optimal_trajectories – Used for the sampler of the morris-method.
+  • local_optimization (bool) – Default True, used for the sampler of the morris-method.
+  • M (int) – Default 4, used for the sampler of the fast-method.
+
+analysis_function(x, y)[source]
+Use the SALib.analyze.pawn method to analyze the simulation results.
+
+Parameters:
+  • x (np.array) – placeholder for the X parameter of the morris method not used for sobol
+  • y (np.array) – The NumPy array containing the model outputs
+
+Returns:
+Returns the result of the SALib.analyze.pawn method (from the documentation:
+This implementation reports the PAWN index at the min, mean, median, and
+max across the slides/conditioning intervals as well as the coefficient of
+variation (CV). The median value is the typically reported value. As
+the CV is (standard deviation / mean), it indicates the level of
+variability across the slides, with values closer to zero indicating lower
+variation.)
+
+property analysis_variables
+The analysis variables of the PAWN method
+
+create_sampler_demand()[source]
+Function to create the sampler parameters for the sampler method
+
+generate_samples()[source]
+Run the sampler for the selected sampler and return the results.
+
+Returns:
+The list of samples generated as a NumPy array with one row per sample
+and each row containing one value for each variable name in problem[‘names’].
+
+Return type:
+np.ndarray
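A sketch of the PAWNAnalyzer with the morris sampler; objects and values are assumptions, only the documented keywords are used:

    from aixcalibuha.sensitivity_analysis.pawn import PAWNAnalyzer

    pawn_analyzer = PAWNAnalyzer(
        sim_api=sim_api,
        num_samples=512,
        s=10,              # number of conditioning intervals, default 10
        sampler="morris",  # default is 'sobol'; 'fast' is also possible
        num_levels=512,
    )
    result = pawn_analyzer.run(cal_class)  # PAWN index at min/max/mean/median/cv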


aixcalibuha.sensitivity_analysis.plotting module

+Module containing functions for plotting sensitivity results
+
+aixcalibuha.sensitivity_analysis.plotting.heatmap(result: DataFrame, cal_class: str, goal: str, ax: Axes | None = None, max_name_len: int = 15, **kwargs)[source]
+Plot S2 sensitivity results from one calibration class and goal as a heatmap.
+
+Parameters:
+  • result (pd.DataFrame) – A second order result from run
+  • cal_class (str) – Name of the class to plot S2 from.
+  • goal (str) – Name of the goal to plot S2 from.
+  • ax (matplotlib.axes) – Default is None. If an axes is given the heatmap will be plotted on it, else
+    a new figure and axes is created.
+  • max_name_len (int) – Default is 15. Shortens the parameter names to max_name_len characters.
+  • show_plot (bool) – Default is True. If False, all created plots are not shown.
+  • use_suffix (bool) – Default is False. If True, only the last suffix of a Modelica variable is displayed.
+
+Returns:
+Returns axes
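For instance, plotting the parameter interactions of one goal; the class and goal names are placeholders and result_2 is assumed to be a second order result from SobolAnalyzer.run:

    from aixcalibuha.sensitivity_analysis import plotting

    ax = plotting.heatmap(
        result_2,
        cal_class="heat up",  # hypothetical calibration class name
        goal="TRoom",         # hypothetical goal name
        max_name_len=15,
        show_plot=True,
    )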
+
+aixcalibuha.sensitivity_analysis.plotting.heatmaps(result: ~pandas.core.frame.DataFrame, cal_classes: [<class 'str'>] = None, goals: [<class 'str'>] = None, max_name_len: int = 15, **kwargs)[source]
+Plot S2 sensitivity results as a heatmap for multiple
+calibration classes and goals in one figure.
+
+Parameters:
+  • result (pd.DataFrame) – A second order result from run
+  • cal_classes ([str]) – Default is a list of all calibration classes in the result.
+    If a list of classes is given only these classes are plotted.
+  • goals ([str]) – Default is a list of all goals in the result.
+    If a list of goals is given only these goals are plotted.
+  • max_name_len (int) – Default is 15. Shortens the parameter names to max_name_len characters.
+  • show_plot (bool) – Default is True. If False, all created plots are not shown.
+  • use_suffix (bool) – Default is False. If True, only the last suffix of a Modelica variable is displayed.
+aixcalibuha.sensitivity_analysis.plotting.plot_parameter_verbose(parameter: str, single_result: ~pandas.core.frame.DataFrame, second_order_result: ~pandas.core.frame.DataFrame = None, goals: [<class 'str'>] = None, **kwargs)[source]
+Plot all time dependent sensitivity measures for one parameter.
+For each goal an axes is created within one figure.
+
+If second_order_results from SobolAnalyzer.run_time_dependent are given,
+the S2 results of the interaction with each other parameter are added on top
+of each other and the first order result.
+
+Parameters:
+  • parameter (str) – Parameter to plot all sensitivity results for. If use_suffix=True, then
+    the name must also be only the suffix.
+  • single_result (pd.DataFrame) – First and total order result from run_time_dependent.
+  • second_order_result (pd.DataFrame) – Default None. Second order result of SobolAnalyzer.run_time_dependent.
+  • goals ([str]) – Default are all possible goal names. If a list of specific
+    goal names is given only these will be plotted.
+  • fig_axes ((fig, [ax])) – Default None. Optional custom figures and axes
+    (see example for verbose sensitivity analysis).
+  • show_plot (bool) – Default is True. If False, all created plots are not shown.
+  • use_suffix (bool) – Default is True. If True, the last part after the last point
+    of Modelica variables is used for the x ticks.
+  • max_name_len (int) – Default is 15. Shortens the parameter names to max_name_len characters.
+
+Returns:
+Returns all created figures and axes in lists like fig, [ax]
+with shape (len(goals)) for the axes list
+aixcalibuha.sensitivity_analysis.plotting.plot_second_order(result: ~pandas.core.frame.DataFrame, cal_classes: [<class 'str'>] = None, goals: [<class 'str'>] = None, max_name_len: int = 15, **kwargs)[source]
+Plot sensitivity results of second order analysis variables.
+For each calibration class and goal one figure of a 3d plot is created
+with the barplots of the interactions for each parameter.
+Only works with more than 2 parameters.
+
+Parameters:
+  • result (pd.DataFrame) – A result from run
+  • max_name_len (int) – Default is 15. Shortens the parameter names to max_name_len characters.
+  • cal_classes ([str]) – Default are all possible calibration classes. If a list of
+    names of calibration classes is given only plots for these
+    classes are created.
+  • goals ([str]) – Default are all possible goal names. If a list of specific
+    goal names is given only these will be plotted.
+  • show_plot (bool) – Default is True. If False, all created plots are not shown.
+  • use_suffix (bool) – Default is True. If True, the last part after the last point
+    of Modelica variables is used for the x ticks.
+  • figs ([[fig]]) – Default None. Set own figures of subfigures for customization.
+    Shape (len(cal_classes), len(goals))
+
+Returns:
+Returns all created figures and axes in lists like [fig], [ax]
+aixcalibuha.sensitivity_analysis.plotting.plot_single(result: ~pandas.core.frame.DataFrame, cal_classes: [<class 'str'>] = None, goals: [<class 'str'>] = None, max_name_len: int = 15, **kwargs)[source]
+Plot sensitivity results of first and total order analysis variables.
+For each calibration class one figure is created, which shows for each goal an axis
+with a barplot of the values of the analysis variables.
+
+Parameters:
+  • result (pd.DataFrame) – A result from run
+  • max_name_len (int) – Default is 15. Shortens the parameter names to max_name_len characters.
+  • cal_classes ([str]) – Default are all possible calibration classes. If a list of
+    names of calibration classes is given only plots for these
+    classes are created.
+  • goals ([str]) – Default are all possible goal names. If a list of specific
+    goal names is given only these will be plotted.
+  • show_plot (bool) – Default is True. If False, all created plots are not shown.
+  • figs_axes (([fig], [ax])) – Default None. Set own figures of subfigures with corresponding axes for customization.
+  • use_suffix (bool) – Default is True. If True, the last part after the last point
+    of Modelica variables is used for the x ticks.
+
+Returns:
+Returns all created figures and axes in lists like [fig], [ax]
+with shapes (len(cal_classes)), (len(cal_classes), len(goals))
+aixcalibuha.sensitivity_analysis.plotting.plot_single_second_order(result: ~pandas.core.frame.DataFrame, para_name: str, cal_classes: [<class 'str'>] = None, goals: [<class 'str'>] = None, max_name_len: int = 15, **kwargs)[source]
+Plot the value of S2 from one parameter with all other parameters.
+
+Parameters:
+  • result (pd.DataFrame) – Second order result from run.
+  • para_name (str) – Name of the parameter of which the results should be plotted.
+  • cal_classes ([str]) – Default are all possible calibration classes. If a list of
+    names of calibration classes is given only plots for these
+    classes are created.
+  • goals ([str]) – Default are all possible goal names. If a list of specific
+    goal names is given only these will be plotted.
+  • max_name_len (int) – Default is 15. Shortens the parameter names to max_name_len characters.
+  • show_plot (bool) – Default is True. If False, all created plots are not shown.
+  • use_suffix (bool) – Default is True. If True, the last part after the last point
+    of Modelica variables is used for the x ticks.
+  • figs_axes (([fig], [ax])) – Default None. Set own figures of subfigures with corresponding axes for customization.
+
+Returns:
+Returns all created figures and axes in lists like [fig], [ax]
+with shapes (len(cal_classes)), (len(cal_classes), len(goals))
+aixcalibuha.sensitivity_analysis.plotting.plot_time_dependent(result: ~pandas.core.frame.DataFrame, parameters: [<class 'str'>] = None, goals: [<class 'str'>] = None, analysis_variables: [<class 'str'>] = None, plot_conf: bool = True, **kwargs)[source]
+Plot time dependent sensitivity results without interactions from run_time_dependent().
+
+For each goal one figure is created with one axes for each analysis variable.
+In these plots the time dependent sensitivity of the parameters is plotted.
+The confidence interval can also be plotted.
+
+Parameters:
+  • result (pd.DataFrame) – A result from run_time_dependent without second order results.
+  • parameters ([str]) – Default all parameters. List of parameters to plot the sensitivity.
+  • analysis_variables ([str]) – Default all analysis_variables. List of analysis variables to plot.
+  • plot_conf (bool) – Default True. If true, the confidence intervals for each parameter are plotted.
+  • goals ([str]) – Default are all possible goal names. If a list of specific
+    goal names is given only these will be plotted.
+  • figs_axes (([fig], [ax])) – Default None. Optional custom figures and axes
+    (see example for verbose sensitivity analysis).
+  • show_plot (bool) – Default is True. If False, all created plots are not shown.
+  • use_suffix (bool) – Default is True. If True, the last part after the last point
+    of Modelica variables is used for the x ticks.
+  • max_name_len (int) – Default is 50. Shortens the parameter names to max_name_len characters.
+
+Returns:
+Returns all created figures and axes in lists like [fig], [ax]
+with shapes (len(goals)), (len(goals), len(analysis_variables))
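A sketch with hypothetical parameter names; time_result is assumed to come from run_time_dependent():

    from aixcalibuha.sensitivity_analysis import plotting

    figs, axes = plotting.plot_time_dependent(
        time_result,
        parameters=["heatConv_a", "heatConv_b"],  # hypothetical tuner parameters
        analysis_variables=["S1", "ST"],
        plot_conf=True,   # also draw the confidence intervals
        show_plot=True,
    )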

aixcalibuha.sensitivity_analysis.sensitivity_analyzer module

@@ -185,18 +536,27 @@
   • sim_api (SimulationAPI) – Simulation-API used to simulate the samples
   • num_samples (int) – The parameter N to the sampler methods of sobol and morris. NOTE: This is not the
-    the number of samples produced, but relates to the total number of samples produced in
-    a manner dependent on the sampler method used. See the documentation of sobol and
-    morris in the SALib for more information.
+    number of samples produced, but relates to the total number of samples produced in
+    a manner dependent on the sampler method used. See the documentation of the specific
+    method in the SALib for more information.
   • analysis_function (str) – Used to automatically select result values.
-  • cd (str,os.path.normpath) – The path for the current working directory.
+  • cd (str,os.path.normpath) – The path for the current working directory.
+    Logger and results will be stored here.
   • fail_on_error (boolean) – Default is False. If True, the calibration will stop with an error if
    the simulation fails. See also: ret_val_on_error
   • ret_val_on_error (float,np.NAN) – Default is np.NAN. If fail_on_error is false, you can specify here
    which value to return in the case of a failed simulation. Possible options are np.NaN, np.inf or some
    other high number. Be aware that this may influence the solver.
-  • save_files (boolean) – If true, all simulation files for each iteration will be saved!
+  • save_files (boolean) – Default False. If true, all simulation files for each iteration will be saved!
+  • suffix_files (str) – Default ‘csv’. Specifies the data format to store the simulation files in.
+    Options are ‘csv’, ‘hdf’, ‘parquet’.
+  • parquet_engine (str) – The engine to use for the data format parquet.
+    Supported options can be extracted from the ebcpy.TimeSeriesData.save() function.
+    Default is ‘pyarrow’.
+  • savepath_sim (str,os.path.normpath) – Default is cd. Own directory for the time series data sets of all
+    simulations during the sensitivity analysis. An own directory can be necessary for large data sets,
+    because they can crash the IDE during indexing when they are in the project folder.
@@ -231,7 +591,7 @@
-static create_problem(tuner_paras) → dict[source]
+static create_problem(tuner_paras, scale=False) → dict[source]

 Create function for later access if multiple calibration-classes are used.

@@ -246,6 +606,12 @@
+eval_statistical_measure(cal_class, results, verbose=True)[source]
+Evaluates statistical measures of results on a single core
+
abstract generate_samples()[source]
@@ -261,45 +627,153 @@

+static load_from_csv(path)[source]
+Load sensitivity results which were saved with the run() or run_time_dependent() function.
+
+For second order results use the load_second_order_from_csv() function of the SobolAnalyzer.
+
+plot(result)[source]
+Plot the results of the sensitivity analysis method from run().
+
+Parameters:
+result (pd.DataFrame) – Dataframe of the results like from the run() function.
+
+Return tuple of matplotlib objects (fig, ax):
+
-run(calibration_classes, merge_multiple_classes=True)[source]
+run(calibration_classes, merge_multiple_classes=True, **kwargs)[source]

 Execute the sensitivity analysis for each class and return the result.

 Parameters:
-  • calibration_classes (CalibrationClass,list) – Either one or multiple classes for calibration
+  • calibration_classes (CalibrationClass,list) – Either one or multiple classes for calibration with the same tuner-parameters.
   • merge_multiple_classes (bool) – Default True. If False, the given list of calibration-classes
-    is handeled as-is. This means if you pass two CalibrationClass objects
+    is handled as-is. This means if you pass two CalibrationClass objects
     with the same name (e.g. “device on”), the calibration process will run
     for both these classes stand-alone. This will automatically yield an
     intersection of tuner-parameters, however may have advantages in some cases.
+  • verbose (bool) – Default False. If True, in addition to the combined Goals of the Classes
+    (saved under index Goal: all), the sensitivity measures of the individual
+    Goals will also be calculated and returned.
+  • scale – Default is False. If True the bounds of the tuner-parameters
+    will be scaled between 0 and 1.
+  • use_fist_sim (bool) – Default False. If True, the simulations of the first calibration class will be used for
+    all other calibration classes with their relevant time intervals.
+    The simulations must be stored on a hard-drive, so it must be used with
+    either save_files or load_files.
+  • n_cpu (int) – Default is 1. The number of processes to use for the evaluation of the statistical
+    measure. For n_cpu > 1 only one simulation file is loaded at once in a process and
+    dumped directly after the evaluation of the statistical measure,
+    so that only minimal memory is used.
+    Use this option for large analyses.
+    Only implemented for save_files=True or load_sim_files=True.
+  • load_sim_files (bool) – Default False. If True, no new simulations are done and old simulations are loaded.
+    The simulations and corresponding samples will be loaded from self.savepath_sim like
+    they were saved from self.save_files. Currently, the name of the sim folder must be
+    “simulations_CAL_CLASS_NAME” and for the samples “samples_CAL_CLASS_NAME”.
+    The usage of the same simulations for different
+    calibration classes is not supported yet.
+  • save_results (bool) – Default True. If True, all results are saved as a csv in cd
+    (samples, statistical measures and analysis variables).
+  • plot_result (bool) – Default True. If True, the results will be plotted.

 Returns:
-Returns a list of dictionaries. One dict is the SALib-result for
-one calibration-class. The order is based on the order of the
-calibration-class list.
+Returns a pandas.DataFrame. The DataFrame has a MultiIndex with the
+levels Class, Goal and Analysis variable. The Goal name of combined goals is ‘all’.
+The variables are the tuner-parameters.
+For the Sobol method and calc_second_order, a tuple of DataFrames (df_1, df_2)
+is returned, where df_2 contains the second order analysis variables and has an
+extra index level Interaction, which also contains the variables.

 Return type:
-list
+pandas.DataFrame
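A sketch of a typical call; sen_analyzer is a concrete subclass instance such as a SobolAnalyzer and calibration_classes a list of CalibrationClass objects, both assumed to exist:

    # With the Sobol method and calc_second_order=True a tuple is returned.
    result, result_2 = sen_analyzer.run(
        calibration_classes,
        verbose=True,       # also compute the measures of the individual goals
        save_results=True,  # csv files are written to cd
        plot_result=True,
    )
    # Access one measure via the MultiIndex (the index values are hypothetical):
    s1 = result.loc[("device on", "all", "S1")]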
+
+run_time_dependent(cal_class: CalibrationClass, **kwargs)[source]
+Calculate the time dependent sensitivity for all the single goals in the calibration class.
+
+Parameters:
+  • cal_class (CalibrationClass) – Calibration class with tuner-parameters to calculate the sensitivity for.
+    Can include dummy target data.
+  • scale – Default is False. If True the bounds of the tuner-parameters
+    will be scaled between 0 and 1.
+  • load_sim_files (bool) – Default False. If True, no new simulations are done and old simulations are loaded.
+    The simulations and corresponding samples will be loaded from self.savepath_sim like
+    they were saved from self.save_files. Currently, the name of the sim folder must be
+    “simulations_CAL_CLASS_NAME” and for the samples “samples_CAL_CLASS_NAME”.
+  • save_results (bool) – Default True. If True, all results are saved as a csv in cd
+    (samples and analysis variables).
+  • n_steps (int) – Default is all time steps. If the problem is large, the evaluation of all time steps
+    at once can cause a memory error. Then n_steps defines how many time steps
+    are evaluated at once in chunks. This increases the needed time exponentially and
+    the simulation files must be saved.
+  • plot_result (bool) – Default True. If True, the results will be plotted.
+
+Returns:
+Returns a pandas.DataFrame.
+
+Return type:
+pandas.DataFrame
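A sketch reusing stored simulations; the keyword values are illustrative:

    time_result = sen_analyzer.run_time_dependent(
        cal_class,
        load_sim_files=True,  # reuse simulations saved by an earlier run()
        n_steps=1000,         # hypothetical chunk size to limit memory use
        plot_result=False,
    )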
+
+save_for_reproduction(title: str, path: Path | None = None, files: list | None = None, exclude_sim_files: bool = False, remove_saved_files: bool = False, **kwargs)[source]
+Save the settings of the SenAnalyzer and SimApi in order to
+reproduce the simulations and the sensitivity analysis method.
+All saved results will also be saved in the reproduction
+archive. The simulations can be excluded from saving.
+
+Parameters:
+  • title (str) – Title of the study
+  • path (pathlib.Path) – Where to store the .zip file. If not given, self.cd is used.
+  • files (list) – List of files to save along the standard ones.
+    Examples would be plots, tables etc.
+  • exclude_sim_files (bool) – Default False. If True, the simulation files will not be saved in
+    the reproduction archive.
+  • remove_saved_files (bool) – Default False. If True, the result and simulation files will be moved
+    instead of just copied.
+  • kwargs (dict) – All keyword arguments except title, files, and path of the function
+    save_reproduction_archive. Most importantly, log_message may be
+    specified to avoid input during execution.
-static select_by_threshold(calibration_classes, result, threshold)[source]
+static select_by_threshold(calibration_classes, result, analysis_variable, threshold)[source]

 Automatically select sensitive tuner parameters based on a given threshold
-and a key-word of the result.
+of a given analysis variable from a sensitivity result.
+Uses only the combined goals.

 Parameters:
   • calibration_classes (list) – List of aixcalibuha.data_types.CalibrationClass objects for which
     sensitive tuner-parameters should be selected automatically.
   • result (pd.DataFrame) – Result object of sensitivity analysis run
+  • analysis_variable (str) – Analysis variable to use for the selection
   • threshold (float) – Minimal required value of the given analysis variable
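For example, keeping only tuner parameters whose total order index reaches 1 %; the variable and threshold are illustrative:

    from aixcalibuha.sensitivity_analysis.sobol import SobolAnalyzer

    calibration_classes = SobolAnalyzer.select_by_threshold(
        calibration_classes=calibration_classes,
        result=result,            # first/total order DataFrame from run()
        analysis_variable="ST",
        threshold=0.01,
    )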
@@ -309,20 +783,50 @@
+static select_by_threshold_verbose(calibration_class: CalibrationClass, result: DataFrame, analysis_variable: str, threshold: float, calc_names_for_selection: List[str] | None = None)[source]
+Select tuner-parameters of a single calibration class with verbose sensitivity results.
+This function selects a tuner-parameter if its sensitivity is equal to or greater
+than the threshold for just one target value of one calibration class in the
+sensitivity result. This can be more robust: a parameter with a small sensitivity
+in only one target value and state of the system may still be worth calibrating in
+a global calibration class that calibrates multiple states and target values at
+the same time, where it does not directly have the same sensitivity as in the
+isolated view of a calibration class for only one state.
+
+Parameters:
+  • calibration_class (CalibrationClass) – The calibration class from which the tuner parameters will be selected.
+  • result (pd.DataFrame) – Sensitivity results to use for the selection. Can include multiple classes.
+  • analysis_variable (str) – The analysis variable to use for the selection.
+  • threshold (float) – Minimal required value of the given analysis variable.
+  • calc_names_for_selection (List[str]) – Specifies which calibration classes in the sensitivity results
+    will be used for the selection. Default are all classes.
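A sketch of the verbose selection; the class names and threshold are placeholders, verbose_result is assumed to come from run(..., verbose=True), and the return value (the class with the reduced tuner-parameters) is an assumption:

    from aixcalibuha.sensitivity_analysis.sobol import SobolAnalyzer

    selected_class = SobolAnalyzer.select_by_threshold_verbose(
        calibration_class=global_class,   # hypothetical global CalibrationClass
        result=verbose_result,
        analysis_variable="ST",
        threshold=0.001,
        calc_names_for_selection=["device on", "device off"],  # hypothetical
    )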
-simulate_samples(samples, cal_class)[source]
-Put the parameters in the model and simulate it.
+simulate_samples(cal_class, **kwargs)[source]
+Creates the samples for the calibration class and simulates them.

 Parameters:
-  • samples (Union[list, np.ndarray]) – Output variables in dymola
   • cal_class – One class for calibration. Goals and tuner_paras have to be set
+  • scale – Default is False. If True the bounds of the tuner-parameters
+    will be scaled between 0 and 1.

 Returns:
-np.array
-An array containing the evaluated differences for each sample
+Returns two lists. The first contains the simulation results for each sample;
+if save_files is set, it contains the filepaths to the results instead.
+The second is a list of the samples.

 Return type:
 list
@@ -338,12 +842,16 @@
 class aixcalibuha.sensitivity_analysis.sobol.SobolAnalyzer(sim_api, **kwargs)[source]
 Bases: SenAnalyzer

+Sobol method from SALib
+https://salib.readthedocs.io/en/latest/api.html#sobol-sensitivity-analysis
+A variance-based method which can compute the sensitivity measures
+‘S1’, ‘ST’ and ‘S2’ with their confidence intervals.

 Additional arguments:

 Parameters:
   • calc_second_order (bool) – Default True, used for the sobol-method
-  • seed – Used for the sobol-method
+  • seed (int) – Used for the sobol-method

@@ -354,8 +862,7 @@
 Parameters:
-  • np.array – x
-    placeholder for the X parameter of the morris method not used for sobol
+  • x (np.array) – placeholder for the X parameter of the morris method not used for sobol
   • y (np.array) – The NumPy array containing the model outputs

@@ -396,6 +903,26 @@

+static load_second_order_from_csv(path)[source]
+Load second order sensitivity results which were saved with the run() or
+run_time_dependent() function.
+
+plot(result)[source]
+Plot the results of the sensitivity analysis method from run().
+
+Parameters:
+result (pd.DataFrame) – Dataframe of the results like from the run() function.
+
+Return tuple of matplotlib objects (fig, ax):
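The stored csv results can be reloaded later, e.g. for plotting; the file names here are placeholders:

    from aixcalibuha.sensitivity_analysis.sobol import SobolAnalyzer

    result = SobolAnalyzer.load_from_csv("SobolAnalyzer_results.csv")
    result_2 = SobolAnalyzer.load_second_order_from_csv(
        "SobolAnalyzer_results_second_order.csv"
    )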

diff --git a/docs/master/docs/code/aixcalibuha.utils.html b/docs/master/docs/code/aixcalibuha.utils.html
index 58bed2dc..9458ff48 100644
--- a/docs/master/docs/code/aixcalibuha.utils.html
+++ b/docs/master/docs/code/aixcalibuha.utils.html
@@ -4,7 +4,7 @@
- aixcalibuha.utils package — AixCaliBuHa 0.3.1 documentation
+ aixcalibuha.utils package — AixCaliBuHa 1.0.0 documentation
@@ -37,7 +37,7 @@
 AixCaliBuHa
- 0.3
+ 1.0
@@ -467,6 +467,25 @@
+aixcalibuha.utils.visualizer.short_name(ini_name: str, max_len: int)[source]
+Shortens long strings to a maximum length from the front.
+long_string_name => …ing_name with len(new_name) = max_len
+
+Parameters:
+  • ini_name (str) – Long string to shorten.
+  • max_len (int) – Maximum length of the new string.
+
+Returns:
+str
+The shortened string.
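A minimal sketch following the docstring example:

    from aixcalibuha.utils.visualizer import short_name

    short_name("long_string_name", 9)  # '…ing_name', i.e. len(new_name) == 9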
diff --git a/docs/master/docs/code/modules.html b/docs/master/docs/code/modules.html
index 2402f62b..5256a031 100644
--- a/docs/master/docs/code/modules.html
+++ b/docs/master/docs/code/modules.html
@@ -4,7 +4,7 @@
- aixcalibuha — AixCaliBuHa 0.3.1 documentation
+ aixcalibuha — AixCaliBuHa 1.0.0 documentation
@@ -37,7 +37,7 @@
 AixCaliBuHa
- 0.3
+ 1.0
@@ -98,7 +98,10 @@
 aixcalibuha
+  aixcalibuha.sensitivity_analysis package

diff --git a/docs/master/docs/contribution.html b/docs/master/docs/contribution.html
index 0236c1ea..ad3e7bcf 100644
--- a/docs/master/docs/contribution.html
+++ b/docs/master/docs/contribution.html
@@ -4,7 +4,7 @@
- Contribute as a user — AixCaliBuHa 0.3.1 documentation
+ Contribute as a user — AixCaliBuHa 1.0.0 documentation
@@ -37,7 +37,7 @@
 AixCaliBuHa
- 0.3
+ 1.0
diff --git a/docs/master/docs/examples.html b/docs/master/docs/examples.html
index 477a0f64..6a2aa514 100644
--- a/docs/master/docs/examples.html
+++ b/docs/master/docs/examples.html
@@ -4,7 +4,7 @@
- Examples — AixCaliBuHa 0.3.1 documentation
+ Examples — AixCaliBuHa 1.0.0 documentation
@@ -37,7 +37,7 @@
 AixCaliBuHa
- 0.3
+ 1.0
@@ -54,6 +54,8 @@
   • e1_A_energy_system_analysis.py and e1_B_energy_system_analysis.py
   • e2_A_optimization_problem_definition.py and e2_B_optimization_problem_definition.py
   • e3_sensitivity_analysis_example.py
+  • e3_1_verbose_sensitivity_analysis_example.py
+  • e3_1_verbose_dymola_sensitivity_analysis_example.py
   • e4_calibration_example.py
   • e5_automated_process.py
   • e6_multiprocessing_calibration_example.py

@@ -103,7 +105,7 @@

 Getting started
-  • Create a clean environment of python 3.7 or 3.8. In Anaconda run: conda create -n py38_ebcpy python=3.8
+  • Create a clean environment of python 3.8 or 3.9. In Anaconda run: conda create -n py38_ebcpy python=3.8
   • Activate the environment in your terminal. In Anaconda run: activate py38_ebcpy
   • Clone the library using git clone --recurse-submodules https://github.com/RWTH-EBC/AixCaliBuHA
   • Install the library using pip install -e AixCaliBuHA

@@ -146,10 +148,26 @@

 e2_A_optimizati

 e3_sensitivity_analysis_example.py
-  1. Learn how to execute a sensitivity analysis
+  1. Learn how to execute a simple sensitivity analysis
   2. Learn how to automatically select sensitive tuner parameters

+ e3_1_verbose_sensitivity_analysis_example.py
+  1. Learn how to execute a verbose sensitivity analysis
+  2. Learn the meaning of the results of the analysis of your model
+  3. Learn other sensitivity methods
+  4. Learn time dependent sensitivity analysis
+  5. Learn how to save the results for reproduction

+ e3_1_verbose_dymola_sensitivity_analysis_example.py
+  1. Learn how to execute a sensitivity analysis with the dymola api

 e4_calibration_example.py

diff --git a/docs/master/docs/genindex.html b/docs/master/docs/genindex.html
index 787cadd7..eedd6cd8 100644
--- a/docs/master/docs/genindex.html
+++ b/docs/master/docs/genindex.html
@@ -3,7 +3,7 @@
- Index — AixCaliBuHa 0.3.1 documentation
+ Index — AixCaliBuHa 1.0.0 documentation
@@ -34,7 +34,7 @@
 AixCaliBuHa
- 0.3
+ 1.0
@@ -88,11 +88,13 @@
 Index
 | E | F | G
+ | H
 | I | L | M | N | O
+ | P
 | R | S | T

@@ -143,6 +145,13 @@
 A
+ aixcalibuha.sensitivity_analysis.fast

@@ -153,15 +162,29 @@
 A
- aixcalibuha.sensitivity_analysis.sensitivity_analyzer
+ aixcalibuha.sensitivity_analysis.pawn
+ aixcalibuha.sensitivity_analysis.plotting