From 25f58fc1fa975d206318de98463263ee5ad83da4 Mon Sep 17 00:00:00 2001
From: hpieper14
Date: Thu, 24 Mar 2022 20:05:06 -0400
Subject: [PATCH 1/8] initialize InverseDirichletAdaptiveLoss as
 AbstractAdaptiveLoss obj and add computation to discretize_inner_functions

---
 .DS_Store              | Bin 6148 -> 8196 bytes
 src/pinns_pde_solve.jl |  49 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 49 insertions(+)

diff --git a/.DS_Store b/.DS_Store
index 75e07537ef343327553e5f9966efde125a4fc170..e486903e52bbfdbf462210092bf943bdbf9cd846 100644
GIT binary patch
[binary .DS_Store delta omitted]

From: hpieper14
Date: Thu, 24 Mar 2022 22:23:22 -0400
Subject: [PATCH 2/8] Fix syntax errors

---
 src/pinns_pde_solve.jl | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/pinns_pde_solve.jl b/src/pinns_pde_solve.jl
index 12a54e0b07..cd7bc1c599 100644
--- a/src/pinns_pde_solve.jl
+++ b/src/pinns_pde_solve.jl
@@ -1455,6 +1455,9 @@ function discretize_inner_functions(pde_system::PDESystem, discretization::Physi
                 logvector(logger, pde_grads_std_all, "adaptive_loss/pde_grad_std_all", iteration[1])
                 logvector(logger, bc_grads_std, "adaptive_loss/bc_grad_std", iteration[1])
                 logvector(logger, adaloss.bc_loss_weights, "adaptive_loss/bc_loss_weights", iteration[1])
+            end
+            nothing
+        end
     elseif adaloss isa MiniMaxAdaptiveLoss
         pde_max_optimiser = adaloss.pde_max_optimiser
         bc_max_optimiser = adaloss.bc_max_optimiser

From 7c3a7a95269c8bf5bd451c572214ad2fab1da350 Mon Sep 17 00:00:00 2001
From: Christopher Rackauckas
Date: Fri, 25 Mar 2022 09:22:49 -0400
Subject: [PATCH 3/8] Update src/pinns_pde_solve.jl

---
 src/pinns_pde_solve.jl | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/pinns_pde_solve.jl b/src/pinns_pde_solve.jl
index cd7bc1c599..2e9d38ed67 100644
--- a/src/pinns_pde_solve.jl
+++ b/src/pinns_pde_solve.jl
@@ -315,7 +315,6 @@
 https://iopscience.iop.org/article/10.1088/2632-2153/ac3712/pdf with code reference
 https://github.com/mosaic-group/inverse-dirichlet-pinn
 """
-
 mutable struct InverseDirichletAdaptiveLoss{T <: Real} <: AbstractAdaptiveLoss
     reweight_every::Int64
     weight_change_inertia::T
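The rule that PATCH 4 below implements for `InverseDirichletAdaptiveLoss` can be summarized as follows. This is a sketch assembled from the patch code and the paper linked in the docstring above, with λ_k the weight of loss k and α the `weight_change_inertia`; note that the TODO in PATCH 4 flags the exact numerator/denominator as still under discussion:

```math
\hat{\lambda}_k = \frac{\max_i \operatorname{std}\left(\nabla_\theta \mathcal{L}_i\right)}{\operatorname{std}\left(\nabla_\theta \mathcal{L}_k\right)},
\qquad
\lambda_k \leftarrow \alpha \lambda_k + (1 - \alpha)\hat{\lambda}_k
```

The max runs over the gradient standard deviations of all PDE and BC losses, so losses whose gradients have the smallest spread receive the largest proposed weights, damped by the inertia term α.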
From 05978296b06475f3905386e3aaf2380e5a8fd624 Mon Sep 17 00:00:00 2001
From: hpieper14
Date: Mon, 4 Apr 2022 10:38:08 -0400
Subject: [PATCH 4/8] Fix typos, add adaptive reweighting for PDE loss function in addition to BC loss function

---
 src/pinns_pde_solve.jl | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/src/pinns_pde_solve.jl b/src/pinns_pde_solve.jl
index 2e9d38ed67..63881018a8 100644
--- a/src/pinns_pde_solve.jl
+++ b/src/pinns_pde_solve.jl
@@ -1438,22 +1438,29 @@ function discretize_inner_functions(pde_system::PDESystem, discretization::Physi
             end
             nothing
         end
-    if adaloss isa InverseDirichletAdaptiveLoss
+    elseif adaloss isa InverseDirichletAdaptiveLoss
+        # TODO I think the numerator and denominator are not quite right here.
         weight_change_inertia = discretization.adaptive_loss.weight_change_inertia
-        function run_loss_inverse_dirichlet_adaptive_loss(0)
+        function run_loss_inverse_dirichlet_adaptive_loss(θ)
             if iteration[1] % adaloss.reweight_every == 0
-                pde_grads_std_all = [std(Zygote.gradient(pde_loss_function, 0)[1]) for pde_loss_function in pde_loss_function]
-                pde_grads_std_max = maximum(pde_grads_std_all)
-                bc_grads_std = [std(Zygote.gradient(bc_loss_function, 0)[1]) for bc_loss_function in bc_loss_funcitons]
+                pde_grads_std = [std(Zygote.gradient(pde_loss_function, θ)[1]) for pde_loss_function in pde_loss_functions]
+                bc_grads_std = [std(Zygote.gradient(bc_loss_function, θ)[1]) for bc_loss_function in bc_loss_functions]
+                pde_grads_std_max = maximum(pde_grads_std)
+                bc_grads_std_max = maximum(bc_grads_std)
+                grads_std_max = max(pde_grads_std_max, bc_grads_std_max)

                 nonzero_divisor_eps = adaloss_T isa Float64 ? Float64(1e-11) : convert(adaloss_T, 1e-7)
-                bc_loss_weights_proposed = pde_grad_std_max ./ (bc_grads_std .+ nonzero_divisor_eps)
-                adaloss.bc_loss_weights .= weight_change_intertia .* adaloss.bc_loss_weights .+ (1 .- weight_change_inertia) .* bc_loss_weights_proposed
+                bc_loss_weights_proposed = grads_std_max ./ (bc_grads_std .+ nonzero_divisor_eps)
+                adaloss.bc_loss_weights .= weight_change_inertia .* adaloss.bc_loss_weights .+ (1 .- weight_change_inertia) .* bc_loss_weights_proposed
+
+                pde_loss_weights_proposed = grads_std_max ./ (pde_grads_std .+ nonzero_divisor_eps)
+                adaloss.pde_loss_weights .= weight_change_inertia .* adaloss.pde_loss_weights .+ (1 .- weight_change_inertia) .* pde_loss_weights_proposed

-                logscalar(logger, pde_grads_std_max, "adaptive_loss/pde_grad_std_max", iteration[1])
-                logvector(logger, pde_grads_std_all, "adaptive_loss/pde_grad_std_all", iteration[1])
+                logscalar(logger, grads_std_max, "adaptive_loss/grads_std_max", iteration[1])
+                logvector(logger, pde_grads_std, "adaptive_loss/pde_grad_std", iteration[1])
                 logvector(logger, bc_grads_std, "adaptive_loss/bc_grad_std", iteration[1])
                 logvector(logger, adaloss.bc_loss_weights, "adaptive_loss/bc_loss_weights", iteration[1])
+                logvector(logger, adaloss.pde_loss_weights, "adaptive_loss/pde_loss_weights", iteration[1])
             end
             nothing
         end
@@ -1539,6 +1546,7 @@ function discretize_inner_functions(pde_system::PDESystem, discretization::Physi
                  inner_pde_loss_functions=_pde_loss_functions,
                  inner_bc_loss_functions=_bc_loss_functions)
 end
+
 # Convert a PDE problem into an OptimizationProblem
 function SciMLBase.discretize(pde_system::PDESystem, discretization::PhysicsInformedNN)
     discretized_functions = discretize_inner_functions(pde_system, discretization)
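To make the reweighting step above concrete, here is a self-contained numerical toy of a single update. The gradient standard deviations are made-up stand-ins for the `std(Zygote.gradient(loss, θ)[1])` calls; the weight updates themselves mirror the patch:

```julia
# One inverse-Dirichlet reweighting step with dummy gradient-spread values.
weight_change_inertia = 0.5
pde_loss_weights = [1.0]
bc_loss_weights = [1.0, 1.0]

pde_grads_std = [0.8]        # stand-in for std of the PDE loss gradient
bc_grads_std = [0.05, 0.2]   # stand-ins for stds of two BC loss gradients

grads_std_max = max(maximum(pde_grads_std), maximum(bc_grads_std))   # 0.8
nonzero_divisor_eps = 1e-11

bc_loss_weights_proposed = grads_std_max ./ (bc_grads_std .+ nonzero_divisor_eps)    # ~[16.0, 4.0]
pde_loss_weights_proposed = grads_std_max ./ (pde_grads_std .+ nonzero_divisor_eps)  # ~[1.0]

bc_loss_weights .= weight_change_inertia .* bc_loss_weights .+
                   (1 .- weight_change_inertia) .* bc_loss_weights_proposed
pde_loss_weights .= weight_change_inertia .* pde_loss_weights .+
                    (1 .- weight_change_inertia) .* pde_loss_weights_proposed

@show bc_loss_weights    # ~[8.5, 2.5]: flat-gradient BC losses are upweighted
@show pde_loss_weights   # ~[1.0]: the loss with the largest spread stays near 1
```

Losses whose gradients vary least receive the largest proposed weights, and the inertia term keeps the weights from jumping to the proposal in one step.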
From cec87ef6828f5a410c9e652ea309d6800d7a1de9 Mon Sep 17 00:00:00 2001
From: hpieper14
Date: Mon, 4 Apr 2022 10:38:47 -0400
Subject: [PATCH 5/8] change weight_change_inertia to value from paper

---
 src/pinns_pde_solve.jl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/pinns_pde_solve.jl b/src/pinns_pde_solve.jl
index 63881018a8..971fdf4636 100644
--- a/src/pinns_pde_solve.jl
+++ b/src/pinns_pde_solve.jl
@@ -321,12 +321,12 @@ mutable struct InverseDirichletAdaptiveLoss{T <: Real} <: AbstractAdaptiveLoss
     pde_loss_weights::Vector{T}
     bc_loss_weights::Vector{T}
     additional_loss_weights::Vector{T}
-    SciMLBase.@add_kwonly function InverseDirichletAdaptiveLoss{T}(reweight_every; weight_change_inertia=0.9, pde_loss_weights=1, bc_loss_weights=1, additional_loss_weights=1) where T <: Real
+    SciMLBase.@add_kwonly function InverseDirichletAdaptiveLoss{T}(reweight_every; weight_change_inertia=0.5, pde_loss_weights=1, bc_loss_weights=1, additional_loss_weights=1) where T <: Real
         new(convert(Int64, reweight_every), convert(T, weight_change_inertia), vectorify(pde_loss_weights, T), vectorify(bc_loss_weights, T), vectorify(additional_loss_weights, T))
     end
 end
 # default to Float64
-SciMLBase.@add_kwonly function InverseDirichletAdaptiveLoss(reweight_every; weight_change_inertia=0.9, pde_loss_weights=1, bc_loss_weights=1, additional_loss_weights=1)
+SciMLBase.@add_kwonly function InverseDirichletAdaptiveLoss(reweight_every; weight_change_inertia=0.5, pde_loss_weights=1, bc_loss_weights=1, additional_loss_weights=1)
     InverseDirichletAdaptiveLoss{Float64}(reweight_every; weight_change_inertia=weight_change_inertia,
         pde_loss_weights=pde_loss_weights, bc_loss_weights=bc_loss_weights, additional_loss_weights=additional_loss_weights)
 end

From 165e3d0d0d3feb4f18cce3441542f856fa8c13cd Mon Sep 17 00:00:00 2001
From: hpieper14
Date: Mon, 4 Apr 2022 10:40:53 -0400
Subject: [PATCH 6/8] add inverse dirichlet loss to test file

---
 test/adaptive_loss_tests.jl | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/test/adaptive_loss_tests.jl b/test/adaptive_loss_tests.jl
index 93ab213210..f25719d5ba 100644
--- a/test/adaptive_loss_tests.jl
+++ b/test/adaptive_loss_tests.jl
@@ -18,7 +18,9 @@ using Random
 nonadaptive_loss = NeuralPDE.NonAdaptiveLoss(pde_loss_weights=1, bc_loss_weights=1)
 gradnormadaptive_loss = NeuralPDE.GradientScaleAdaptiveLoss(100, pde_loss_weights=1e3, bc_loss_weights=1)
 adaptive_loss = NeuralPDE.MiniMaxAdaptiveLoss(100; pde_loss_weights=1, bc_loss_weights=1)
-adaptive_losses = [nonadaptive_loss, gradnormadaptive_loss,adaptive_loss]
+invdirichletadaptive_loss = NeuralPDE.InverseDirichletAdaptiveLoss(100, pde_loss_weights=1e3, bc_loss_weights=1)
+adaptive_losses = [nonadaptive_loss, gradnormadaptive_loss, adaptive_loss, invdirichletadaptive_loss]
+
 maxiters=4000
 seed=60
@@ -96,12 +98,16 @@ error_results_no_logs = map(test_2d_poisson_equation_adaptive_loss_no_logs_run_s
 @show error_results_no_logs[1][:total_diff_rel]
 @show error_results_no_logs[2][:total_diff_rel]
 @show error_results_no_logs[3][:total_diff_rel]
+@show error_results_no_logs[4][:total_diff_rel]
+
 # accuracy tests, these work for this specific seed but might not for others
 # note that this doesn't test that the adaptive losses are outperforming the nonadaptive loss, which is not guaranteed, and seed/arch/hyperparam/pde etc dependent
 @test error_results_no_logs[1][:total_diff_rel] < 0.4
 @test error_results_no_logs[2][:total_diff_rel] < 0.4
 @test error_results_no_logs[3][:total_diff_rel] < 0.4
+@test error_results_no_logs[4][:total_diff_rel] < 0.4
 #plots_diffs[1][:plot]
 #plots_diffs[2][:plot]
 #plots_diffs[3][:plot]
+#plots_diffs[4][:plot]
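A minimal sketch of how one of these adaptive losses is wired into a discretization, following the pattern of `test/adaptive_loss_tests.jl`. The network and training strategy here are illustrative choices, and `adaptive_loss` is the `PhysicsInformedNN` keyword that PATCH 4 reads back as `discretization.adaptive_loss`:

```julia
using NeuralPDE, Flux

# Illustrative two-input network and training strategy.
chain = Flux.Chain(Flux.Dense(2, 16, Flux.σ), Flux.Dense(16, 1))
strategy = NeuralPDE.StochasticTraining(256)

# Reweight every 100 iterations, with the paper's inertia value from PATCH 5.
invdirichletadaptive_loss = NeuralPDE.InverseDirichletAdaptiveLoss(100;
    weight_change_inertia = 0.5, pde_loss_weights = 1e3, bc_loss_weights = 1)

discretization = NeuralPDE.PhysicsInformedNN(chain, strategy;
    adaptive_loss = invdirichletadaptive_loss)
```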
From d19fd89c9d683f9d769e8a67bff93d4989509a4d Mon Sep 17 00:00:00 2001
From: hpieper14
Date: Mon, 11 Apr 2022 17:14:01 -0400
Subject: [PATCH 7/8] remove epsilon divisor

---
 src/pinns_pde_solve.jl | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/pinns_pde_solve.jl b/src/pinns_pde_solve.jl
index 971fdf4636..b52c825578 100644
--- a/src/pinns_pde_solve.jl
+++ b/src/pinns_pde_solve.jl
@@ -1449,11 +1449,10 @@ function discretize_inner_functions(pde_system::PDESystem, discretization::Physi
                 bc_grads_std_max = maximum(bc_grads_std)
                 grads_std_max = max(pde_grads_std_max, bc_grads_std_max)

-                nonzero_divisor_eps = adaloss_T isa Float64 ? Float64(1e-11) : convert(adaloss_T, 1e-7)
-                bc_loss_weights_proposed = grads_std_max ./ (bc_grads_std .+ nonzero_divisor_eps)
+                bc_loss_weights_proposed = grads_std_max ./ (bc_grads_std)
                 adaloss.bc_loss_weights .= weight_change_inertia .* adaloss.bc_loss_weights .+ (1 .- weight_change_inertia) .* bc_loss_weights_proposed

-                pde_loss_weights_proposed = grads_std_max ./ (pde_grads_std .+ nonzero_divisor_eps)
+                pde_loss_weights_proposed = grads_std_max ./ (pde_grads_std)
                 adaloss.pde_loss_weights .= weight_change_inertia .* adaloss.pde_loss_weights .+ (1 .- weight_change_inertia) .* pde_loss_weights_proposed

                 logscalar(logger, grads_std_max, "adaptive_loss/grads_std_max", iteration[1])

From dabc1d0452b71ba87675f5467f7cb908de869b74 Mon Sep 17 00:00:00 2001
From: Hannah Pieper <32441973+hpieper14@users.noreply.github.com>
Date: Sun, 24 Apr 2022 16:42:28 -0400
Subject: [PATCH 8/8] pass in generic args

Co-authored-by: Christopher Rackauckas
---
 src/pinns_pde_solve.jl | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/src/pinns_pde_solve.jl b/src/pinns_pde_solve.jl
index b52c825578..249ae3b32a 100644
--- a/src/pinns_pde_solve.jl
+++ b/src/pinns_pde_solve.jl
@@ -326,10 +326,7 @@ mutable struct InverseDirichletAdaptiveLoss{T <: Real} <: AbstractAdaptiveLoss
     end
 end
 # default to Float64
-SciMLBase.@add_kwonly function InverseDirichletAdaptiveLoss(reweight_every; weight_change_inertia=0.5, pde_loss_weights=1, bc_loss_weights=1, additional_loss_weights=1)
-    InverseDirichletAdaptiveLoss{Float64}(reweight_every; weight_change_inertia=weight_change_inertia,
-        pde_loss_weights=pde_loss_weights, bc_loss_weights=bc_loss_weights, additional_loss_weights=additional_loss_weights)
-end
+InverseDirichletAdaptiveLoss(args...; kwargs...) = InverseDirichletAdaptiveLoss{Float64}(args...; kwargs...)
 """
 A way of adaptively reweighting the components of the loss function in the total sum such that
 the loss weights are maximized by an internal optimiser, which leads to a behavior where loss functions that have not been satisfied get a greater weight,
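The forwarding definition that PATCH 8 introduces is a general Julia idiom: one untyped method defaults the type parameter and passes every positional and keyword argument through, so the keyword defaults live in a single place. A toy sketch on a hypothetical `Toy` struct, not part of the package:

```julia
# Hypothetical struct illustrating the args...; kwargs... forwarding idiom.
struct Toy{T <: Real}
    reweight_every::Int64
    weight::T
    function Toy{T}(reweight_every; weight = 1) where {T <: Real}
        new(convert(Int64, reweight_every), convert(T, weight))
    end
end
# One forwarding method replaces a second copy of the full keyword list:
Toy(args...; kwargs...) = Toy{Float64}(args...; kwargs...)

Toy(100).weight                        # 1.0, defaults to Float64
Toy{Float32}(100; weight = 2).weight   # 2.0f0
```

Any keyword later added to the inner constructor is picked up by the forwarding method without further changes.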