Skip to content

Commit d983a0a

Browse files
Merge pull request #482 from zoemcc/adaptive_loss_merge_2
Adaptive loss merge take two
2 parents 40619e6 + 8f44bde commit d983a0a

File tree

13 files changed

+677
-38
lines changed

13 files changed

+677
-38
lines changed

.github/workflows/CI.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,10 +16,12 @@ jobs:
1616
- NNPDEHan
1717
- NNPDENS
1818
- NNPDE
19+
- AdaptiveLoss
1920
- NeuralAdapter
2021
- IntegroDiff
2122
- NNSTOPPINGTIME
2223
- NNRODE
24+
- Logging
2325
- Forward
2426
version:
2527
- '1'

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,3 +4,4 @@
44
Manifest.toml
55
*.pdf
66
.vscode/settings.json
7+
*/testlogs

lib/NeuralPDELogging/Project.toml

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
name = "NeuralPDELogging"
uuid = "7c138fc3-9327-4ab8-b9a3-c864f3475625"
authors = ["Zoe McCarthy <[email protected]>"]
version = "0.1.0"

[deps]
Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
NeuralPDE = "315f7962-48a3-4962-8226-d0f33b1235f0"
TensorBoardLogger = "899adc3e-224a-11e9-021f-63837185c80f"

# Test-only dependencies.
[extras]
DiffEqFlux = "aae7a2af-3d4f-5e19-a356-7da93b79d9d0"
GalacticOptim = "a75be94c-b780-496d-a8a9-0878b188d577"
ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78"
Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
test = ["Test", "SafeTestsets", "Pkg", "DiffEqFlux", "GalacticOptim", "ModelingToolkit", "Random"]
Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
module NeuralPDELogging

using NeuralPDE
using TensorBoardLogger

"""
    NeuralPDE.logvector(logger::TBLogger, vector, name, step)

Log every element of `vector` to `logger` as the scalar series `"name/j"`
(one series per index `j`) at iteration `step`.

This function overrides the empty function in NeuralPDE in order to use
TensorBoardLogger in that package. This is light type piracy but it should
be alright since this is a subpackage of NeuralPDE.
"""
function NeuralPDE.logvector(logger::TBLogger, vector::AbstractVector{R},
                             name::AbstractString, step::Integer) where {R <: Real}
    # eachindex is the idiomatic (and offset-array-safe) form of 1:length(vector)
    for j in eachindex(vector)
        log_value(logger, "$(name)/$(j)", vector[j], step = step)
    end
    return nothing
end

"""
    NeuralPDE.logscalar(logger::TBLogger, scalar, name, step)

Log `scalar` to `logger` as the series `name` at iteration `step`.

This function overrides the empty function in NeuralPDE in order to use
TensorBoardLogger in that package. This is light type piracy but it should
be alright since this is a subpackage of NeuralPDE.
"""
function NeuralPDE.logscalar(logger::TBLogger, scalar::R, name::AbstractString,
                             step::Integer) where {R <: Real}
    log_value(logger, "$(name)", scalar, step = step)
    return nothing
end

end
Lines changed: 128 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,128 @@
1+
@info "adaptive_loss_logging_tests"
2+
using DiffEqFlux
3+
using ModelingToolkit
4+
using Test, NeuralPDE
5+
using GalacticOptim
6+
import ModelingToolkit: Interval, infimum, supremum
7+
using Random
8+
#using Plots
9+
@info "Starting Soon!"
10+
11+
# Exercise all three reweighting schemes against the same Poisson problem:
# fixed weights, gradient-norm scaling, and minimax adaptation.
nonadaptive_loss = NeuralPDE.NonAdaptiveLoss(pde_loss_weights = 1, bc_loss_weights = 1)
gradnormadaptive_loss = NeuralPDE.GradientScaleAdaptiveLoss(100, pde_loss_weights = 1e3,
                                                            bc_loss_weights = 1)
adaptive_loss = NeuralPDE.MiniMaxAdaptiveLoss(100; pde_loss_weights = 1,
                                              bc_loss_weights = 1)
adaptive_losses = [nonadaptive_loss, gradnormadaptive_loss, adaptive_loss]
maxiters = 800
seed = 60
17+
18+
## 2D Poisson equation
"""
    test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, outdir, haslogger;
                                           seed=60, maxiters=800)

Train a PINN on the 2D Poisson equation with the given `adaptive_loss` scheme and
return `(error = total_diff, total_diff_rel = total_diff_rel)`, the absolute and
relative L1 error against the analytic solution. When `haslogger` is true, a
TensorBoard logger writes to `joinpath(outdir, string(run))`.
"""
function test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, outdir, haslogger;
                                                seed = 60, maxiters = 800)
    logdir = joinpath(outdir, string(run))
    # `nothing` disables logging inside NeuralPDE; only build a TBLogger on demand.
    logger = haslogger ? TBLogger(logdir) : nothing
    Random.seed!(seed)
    hid = 40
    chain_ = FastChain(FastDense(2, hid, Flux.σ), FastDense(hid, hid, Flux.σ),
                       FastDense(hid, 1))
    strategy_ = NeuralPDE.StochasticTraining(256)
    @info "adaptive reweighting test logdir: $(logdir), maxiters: $(maxiters), 2D Poisson equation, adaptive_loss: $(nameof(typeof(adaptive_loss))) "
    @parameters x y
    @variables u(..)
    Dxx = Differential(x)^2
    Dyy = Differential(y)^2

    # 2D PDE
    eq = Dxx(u(x, y)) + Dyy(u(x, y)) ~ -sin(pi * x) * sin(pi * y)

    # Initial and boundary conditions
    bcs = [u(0, y) ~ 0.0, u(1, y) ~ -sin(pi * 1) * sin(pi * y),
           u(x, 0) ~ 0.0, u(x, 1) ~ -sin(pi * x) * sin(pi * 1)]
    # Space and time domains (the `∈` operators were dropped by extraction; restored)
    domains = [x ∈ Interval(0.0, 1.0),
               y ∈ Interval(0.0, 1.0)]

    initθ = Float64.(DiffEqFlux.initial_params(chain_))
    # Shared mutable iteration counter: incremented by the callback, read by NeuralPDE.
    iteration = [0]
    discretization = NeuralPDE.PhysicsInformedNN(chain_,
                                                 strategy_;
                                                 init_params = initθ,
                                                 adaptive_loss = adaptive_loss,
                                                 logger = logger,
                                                 iteration = iteration)

    @named pde_system = PDESystem(eq, bcs, domains, [x, y], [u(x, y)])
    prob = NeuralPDE.discretize(pde_system, discretization)
    phi = discretization.phi
    sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization)

    xs, ys = [infimum(d.domain):0.01:supremum(d.domain) for d in domains]
    analytic_sol_func(x, y) = (sin(pi * x) * sin(pi * y)) / (2pi^2)
    u_real = reshape([analytic_sol_func(x, y) for x in xs for y in ys],
                     (length(xs), length(ys)))

    # Optimizer callback: prints loss every 100 iters; when logging is enabled,
    # records loss every iter and error metrics every 30 iters. Returns false so
    # the optimization always continues to maxiters.
    cb = function (p, l)
        iteration[1] += 1
        if iteration[1] % 100 == 0
            @info "Current loss is: $l, iteration is $(iteration[1])"
        end
        if haslogger
            log_value(logger, "outer_error/loss", l, step = iteration[1])
            if iteration[1] % 30 == 0
                u_predict = reshape([first(phi([x, y], p)) for x in xs for y in ys],
                                    (length(xs), length(ys)))
                diff_u = abs.(u_predict .- u_real)
                total_diff = sum(diff_u)
                log_value(logger, "outer_error/total_diff", total_diff,
                          step = iteration[1])
                total_u = sum(abs.(u_real))
                total_diff_rel = total_diff / total_u
                log_value(logger, "outer_error/total_diff_rel", total_diff_rel,
                          step = iteration[1])
                total_diff_sq = sum(diff_u .^ 2)
                log_value(logger, "outer_error/total_diff_sq", total_diff_sq,
                          step = iteration[1])
            end
        end
        return false
    end
    res = GalacticOptim.solve(prob, ADAM(0.03); maxiters = maxiters, cb = cb)

    u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys],
                        (length(xs), length(ys)))
    diff_u = abs.(u_predict .- u_real)
    total_diff = sum(diff_u)
    total_u = sum(abs.(u_real))
    total_diff_rel = total_diff / total_u

    #p1 = plot(xs, ys, u_real, linetype=:contourf,title = "analytic");
    #p2 = plot(xs, ys, u_predict, linetype=:contourf,title = "predict");
    #p3 = plot(xs, ys, diff_u,linetype=:contourf,title = "error");
    #(plot=plot(p1,p2,p3), error=total_diff, total_diff_rel=total_diff_rel)
    return (error = total_diff, total_diff_rel = total_diff_rel)
end
101+
102+
# Driver: run the Poisson test once per adaptive-loss scheme, with logging
# configured by ENV["LOG_SETTING"], then verify that TensorBoard log folders
# exist exactly when a logger was in use.
possible_logger_dir = mktempdir()
if ENV["LOG_SETTING"] == "NoImport"
    haslogger = false
    expected_log_folders = 0
elseif ENV["LOG_SETTING"] == "ImportNoUse"
    using NeuralPDELogging
    haslogger = false
    expected_log_folders = 0
elseif ENV["LOG_SETTING"] == "ImportUse"
    using NeuralPDELogging
    using TensorBoardLogger
    haslogger = true
    expected_log_folders = 3
else
    # Fail fast with a clear message instead of a later UndefVarError on `haslogger`.
    error("unrecognized LOG_SETTING: $(ENV["LOG_SETTING"])")
end

@info "has logger: $(haslogger), expected log folders: $(expected_log_folders)"

test_2d_poisson_equation_adaptive_loss_run_seediters(adaptive_loss, run) =
    test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, possible_logger_dir,
                                           haslogger; seed = seed, maxiters = maxiters)
error_results = map(test_2d_poisson_equation_adaptive_loss_run_seediters,
                    adaptive_losses, 1:length(adaptive_losses))

# One log folder per run should exist iff logging was active.
@test length(readdir(possible_logger_dir)) == expected_log_folders
if expected_log_folders > 0
    @info "dirs at $(possible_logger_dir): $(string(readdir(possible_logger_dir)))"
    for logdir in readdir(possible_logger_dir)
        # Each run's folder must actually contain event files.
        @test length(readdir(joinpath(possible_logger_dir, logdir))) > 0
    end
end
Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
using Pkg
using SafeTestsets

const GROUP = get(ENV, "GROUP", "All")
const is_APPVEYOR = Sys.iswindows() && haskey(ENV, "APPVEYOR")
const is_TRAVIS = haskey(ENV, "TRAVIS")
is_CI = haskey(ENV, "CI")

# Run the same adaptive-loss test file under three logger configurations.
# Each @safetestset evaluates in a fresh module, so the local NeuralPDE
# checkout must be re-developed inside every block.
@time begin
    if GROUP == "All" || GROUP == "Logging"
        @time @safetestset "AdaptiveLossLogNoImport" begin
            using Pkg
            neuralpde_dir = dirname(abspath(joinpath(@__DIR__, "..", "..", "..")))
            @info "loading neuralpde package at : $(neuralpde_dir)"
            Pkg.develop(Pkg.PackageSpec(path = neuralpde_dir))
            @info "making sure that there are no logs without having imported NeuralPDELogging"
            ENV["LOG_SETTING"] = "NoImport"
            include("adaptive_loss_log_tests.jl")
        end
        @time @safetestset "AdaptiveLossLogImportNoUse" begin
            using Pkg
            neuralpde_dir = dirname(abspath(joinpath(@__DIR__, "..", "..", "..")))
            @info "loading neuralpde package at : $(neuralpde_dir)"
            Pkg.develop(Pkg.PackageSpec(path = neuralpde_dir))
            @info "making sure that there are still no logs now that we have imported NeuralPDELogging"
            ENV["LOG_SETTING"] = "ImportNoUse"
            include("adaptive_loss_log_tests.jl")
        end
        @time @safetestset "AdaptiveLossLogImportUse" begin
            using Pkg
            neuralpde_dir = dirname(abspath(joinpath(@__DIR__, "..", "..", "..")))
            @info "loading neuralpde package at : $(neuralpde_dir)"
            Pkg.develop(Pkg.PackageSpec(path = neuralpde_dir))
            ENV["LOG_SETTING"] = "ImportUse"
            @info "making sure that logs are generated now if we use a logger"
            include("adaptive_loss_log_tests.jl")
        end
    end
end

src/NeuralPDE.jl

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -183,10 +183,13 @@ include("param_kolmogorov_solve.jl")
183183
export NNODE, TerminalPDEProblem, NNPDEHan, NNPDENS, NNRODE,
184184
KolmogorovPDEProblem, NNKolmogorov, NNStopping,ParamKolmogorovPDEProblem,KolmogorovParamDomain, NNParamKolmogorov,
185185
PhysicsInformedNN, discretize,
186-
GridTraining, StochasticTraining, QuadratureTraining, QuasiRandomTraining
186+
GridTraining, StochasticTraining, QuadratureTraining, QuasiRandomTraining,
187187
build_loss_function, get_loss_function,
188-
generate_training_sets, get_variables, get_argument, get_bounds
189-
get_phi, get_numeric_derivative, get_numeric_integral
190-
build_symbolic_equation, build_symbolic_loss_function, symbolic_discretize
188+
generate_training_sets, get_variables, get_argument, get_bounds,
189+
get_phi, get_numeric_derivative, get_numeric_integral,
190+
build_symbolic_equation, build_symbolic_loss_function, symbolic_discretize,
191+
AbstractAdaptiveLoss, NonAdaptiveLoss, GradientScaleAdaptiveLoss, MiniMaxAdaptiveLoss,
192+
LogOptions
193+
191194

192195
end # module

0 commit comments

Comments
 (0)