
DeepHPMs (Deep Hidden Physics Models)

Quick experience on AI Studio

# Case 1
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers_sine.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers_sine.mat --output ./datasets/burgers_sine.mat
python burgers.py DATASET_PATH=./datasets/burgers_sine.mat DATASET_PATH_SOL=./datasets/burgers_sine.mat

# Case 2
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers_sine.mat -P ./datasets/
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers_sine.mat --output ./datasets/burgers_sine.mat
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers.mat --output ./datasets/burgers.mat
python burgers.py DATASET_PATH=./datasets/burgers_sine.mat DATASET_PATH_SOL=./datasets/burgers.mat

# Case 3
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers.mat -P ./datasets/
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers_sine.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers.mat --output ./datasets/burgers.mat
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers_sine.mat --output ./datasets/burgers_sine.mat
python burgers.py DATASET_PATH=./datasets/burgers.mat DATASET_PATH_SOL=./datasets/burgers_sine.mat

# Case 4
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KdV_sine.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KdV_sine.mat --output ./datasets/KdV_sine.mat
python korteweg_de_vries.py DATASET_PATH=./datasets/KdV_sine.mat DATASET_PATH_SOL=./datasets/KdV_sine.mat

# Case 5
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KdV_sine.mat -P ./datasets/
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KdV_cos.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KdV_sine.mat --output ./datasets/KdV_sine.mat
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KdV_cos.mat --output ./datasets/KdV_cos.mat
python korteweg_de_vries.py DATASET_PATH=./datasets/KdV_sine.mat DATASET_PATH_SOL=./datasets/KdV_cos.mat

# Case 6
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KS.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KS.mat --output ./datasets/KS.mat
python kuramoto_sivashinsky.py DATASET_PATH=./datasets/KS.mat DATASET_PATH_SOL=./datasets/KS.mat

# Case 7
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/cylinder.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/cylinder.mat --output ./datasets/cylinder.mat
python navier_stokes.py DATASET_PATH=./datasets/cylinder.mat DATASET_PATH_SOL=./datasets/cylinder.mat

# Case 8
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/NLS.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/NLS.mat --output ./datasets/NLS.mat
python schrodinger.py DATASET_PATH=./datasets/NLS.mat DATASET_PATH_SOL=./datasets/NLS.mat

# Case 1
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers_sine.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers_sine.mat --output ./datasets/burgers_sine.mat
python burgers.py mode=eval DATASET_PATH=./datasets/burgers_sine.mat DATASET_PATH_SOL=./datasets/burgers_sine.mat EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/DeepHPMs/burgers_same_pretrained.pdparams

# Case 2
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers_sine.mat -P ./datasets/
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers_sine.mat --output ./datasets/burgers_sine.mat
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers.mat --output ./datasets/burgers.mat
python burgers.py mode=eval DATASET_PATH=./datasets/burgers_sine.mat DATASET_PATH_SOL=./datasets/burgers.mat EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/DeepHPMs/burgers_diff_pretrained.pdparams

# Case 3
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers.mat -P ./datasets/
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers_sine.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers.mat --output ./datasets/burgers.mat
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/burgers_sine.mat --output ./datasets/burgers_sine.mat
python burgers.py mode=eval DATASET_PATH=./datasets/burgers.mat DATASET_PATH_SOL=./datasets/burgers_sine.mat EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/DeepHPMs/burgers_diff_swap_pretrained.pdparams

# Case 4
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KdV_sine.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KdV_sine.mat --output ./datasets/KdV_sine.mat
python korteweg_de_vries.py mode=eval DATASET_PATH=./datasets/KdV_sine.mat DATASET_PATH_SOL=./datasets/KdV_sine.mat EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/DeepHPMs/kdv_same_pretrained.pdparams

# Case 5
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KdV_sine.mat -P ./datasets/
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KdV_cos.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KdV_sine.mat --output ./datasets/KdV_sine.mat
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KdV_cos.mat --output ./datasets/KdV_cos.mat
python korteweg_de_vries.py mode=eval DATASET_PATH=./datasets/KdV_sine.mat DATASET_PATH_SOL=./datasets/KdV_cos.mat EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/DeepHPMs/kdv_diff_pretrained.pdparams

# Case 6
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KS.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/KS.mat --output ./datasets/KS.mat
python kuramoto_sivashinsky.py mode=eval DATASET_PATH=./datasets/KS.mat DATASET_PATH_SOL=./datasets/KS.mat EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/DeepHPMs/ks_pretrained.pdparams

# Case 7
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/cylinder.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/cylinder.mat --output ./datasets/cylinder.mat
python navier_stokes.py mode=eval DATASET_PATH=./datasets/cylinder.mat DATASET_PATH_SOL=./datasets/cylinder.mat EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/DeepHPMs/ns_pretrained.pdparams

# Case 8
# linux
wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/NLS.mat -P ./datasets/
# windows
# curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/DeepHPMs/NLS.mat --output ./datasets/NLS.mat
python schrodinger.py mode=eval DATASET_PATH=./datasets/NLS.mat DATASET_PATH_SOL=./datasets/NLS.mat EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/DeepHPMs/schrodinger_pretrained.pdparams

| No. | Case | Stage 1 & 2 dataset | Stage 3 (eval) dataset | Pretrained model | Metric |
| :-- | :-- | :-- | :-- | :-- | :-- |
| 1 | burgers | burgers_sine.mat | burgers_sine.mat | burgers_same_pretrained.pdparams | l2 error: 0.0088 |
| 2 | burgers | burgers_sine.mat | burgers.mat | burgers_diff_pretrained.pdparams | l2 error: 0.0379 |
| 3 | burgers | burgers.mat | burgers_sine.mat | burgers_diff_swap_pretrained.pdparams | l2 error: 0.2904 |
| 4 | korteweg_de_vries | KdV_sine.mat | KdV_sine.mat | kdv_same_pretrained.pdparams | l2 error: 0.0567 |
| 5 | korteweg_de_vries | KdV_sine.mat | KdV_cos.mat | kdv_diff_pretrained.pdparams | l2 error: 0.1142 |
| 6 | kuramoto_sivashinsky | KS.mat | KS.mat | ks_pretrained.pdparams | l2 error: 0.1166 |
| 7 | navier_stokes | cylinder.mat | cylinder.mat | ns_pretrained.pdparams | l2 error: 0.0288 |
| 8 | schrodinger | NLS.mat | NLS.mat | schrodinger_pretrained.pdparams | l2 error: 0.0735 |

Note: as reported in the reference, case 3 performs relatively poorly.

1. Background

Solving partial differential equations (PDEs) is a fundamental class of physical problems. Over the past few decades, numerical methods for PDE systems such as the finite difference method (FDM), finite volume method (FVM), and finite element method (FEM) have matured. With the rapid development of artificial intelligence, solving PDEs with deep learning has become a new research direction. Physics-informed neural networks (PINNs) are deep networks with physical constraints built in; compared with purely data-driven networks, PINNs can therefore learn models that generalize better from fewer data samples. Their applications include, but are not limited to, fluid mechanics, heat conduction, electromagnetics, and quantum mechanics.

Traditional PINNs include the PDE as one term of the training loss, which requires the PDE formula to be known a priori; when the PDE formula is unknown, this approach does not work.
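
For the Burgers equation defined in the next section, for instance, such a PINNs loss typically looks like the following (a sketch; the weighting and sampling of the two terms vary by implementation):

\[ \mathcal{L} = \frac{1}{N_u} \sum_{i=1}^{N_u} \left| u_\theta(x_u^i, t_u^i) - u^i \right|^2 + \frac{1}{N_f} \sum_{j=1}^{N_f} \left| u_{\theta,t} + \lambda_1 u_\theta u_{\theta,x} - \lambda_2 u_{\theta,xx} \right|^2_{(x_f^j, t_f^j)} \]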

DeepHPMs addresses the case where the PDE formula is unknown. A deep network discovers the physical law, i.e., the nonlinear PDE, from high-dimensional experimental data and represents that PDE; this PDE network then replaces the explicit PDE formula in the traditional PINNs method to make predictions on new data.

This problem studies several PDEs, namely the Burgers, Korteweg-de Vries (KdV), Kuramoto-Sivashinsky, nonlinear Schrödinger, and Navier-Stokes equations; this document focuses on the Burgers equation.

2. Problem Definition

The Burgers equation is a nonlinear partial differential equation modeling the propagation and reflection of shock waves. It relates the solution \(u\) to the input position and time parameters \((x, t)\) by:

\[ u_t + \lambda_1 u u_x - \lambda_2 u_{xx} = 0 \]

where \(u_t\) is the partial derivative of \(u\) with respect to \(t\), \(u_x\) is the partial derivative of \(u\) with respect to \(x\), and \(u_{xx}\) is the second-order partial derivative of \(u\) with respect to \(x\).

The PDE is represented by a deep network; that is, \(u_t\) is the output of a network whose inputs are \(u, u_x, u_{xx}\):

\[ u_t = \mathcal{N}(u, u_x, u_{xx}) \]
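
To make this concrete, here is a minimal standalone sketch of such a hidden-physics network. It is illustrative only (the layer sizes and activation are assumptions), not the PaddleScience implementation used below, which is built from ppsci.arch.MLP:

import paddle
import paddle.nn as nn

class HiddenPhysicsNet(nn.Layer):
    """Sketch of N(u, u_x, u_xx) -> u_t; sizes are illustrative."""

    def __init__(self, hidden_size: int = 50, num_layers: int = 4):
        super().__init__()
        layers, in_dim = [], 3  # inputs: u, u_x, u_xx
        for _ in range(num_layers):
            layers += [nn.Linear(in_dim, hidden_size), nn.Tanh()]
            in_dim = hidden_size
        layers.append(nn.Linear(in_dim, 1))  # output: predicted u_t
        self.net = nn.Sequential(*layers)

    def forward(self, u, u_x, u_xx):
        # concatenate the PDE arguments into features and regress u_t
        return self.net(paddle.concat([u, u_x, u_xx], axis=-1))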

3. Problem Solving

This section explains, step by step, how to turn the problem into PaddleScience code and solve it with deep learning. To help you grasp PaddleScience quickly, only the key steps, such as model construction, equation construction, and computational domain construction, are described; for the remaining details please refer to the API documentation.

3.1 Dataset

The dataset is a preprocessed Burgers dataset: the simulated \(x, t, u\) under different initial conditions are stored as dictionaries in .mat files.

Before running the code, please download simulated dataset 1 and simulated dataset 2 and place them at the following paths:

DATASET_PATH: ./datasets/burgers_sine.mat
DATASET_PATH_SOL: ./datasets/burgers_sine.mat
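
To sanity-check a downloaded file before training, a quick sketch with scipy can list its contents (illustrative only; the exact key names differ per dataset, and the aliases used later, such as t_train/x_train/u_train, hint at what to expect):

import scipy.io

# inspect the .mat dictionary; keys starting with "__" are MATLAB metadata
data = scipy.io.loadmat("./datasets/burgers_sine.mat")
for key, value in data.items():
    if not key.startswith("__"):
        print(key, getattr(value, "shape", None))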

3.2 Model Construction

This problem involves 3 deep networks: a data-driven Net1, a Net2 that represents the PDE equation, and a Net3 used for inference on new data.

Net1 learns the data distribution in a data-driven way: it is trained on a small amount of randomly sampled data from simulation case 1 and then predicts the value \(u\) at all other points of that case. Its inputs are the \(x, t\) of case 1 data and its output is \(u\); it is a mapping \(f_1: \mathbb{R}^2 \to \mathbb{R}^1\) from \((x, t)\) to \(u\).

From the \(u\) obtained by Net1's forward inference, the partial derivatives \(u_t, u_x, u_{xx}\) with respect to \(x, t\) are computed, treated as true physical values, and passed to Net2 as inputs and labels to train it by optimizing the loss. Net2's inputs are the \(u\) inferred by Net1 and its derivatives \(u_x, u_{xx}\) with respect to x; its output is the PDE result \(f_{pde}\), which should be close to \(u_t\), i.e., \(u_t\) is the label of \(f_{pde}\). The mapping is \(f_2: \mathbb{R}^3 \to \mathbb{R}^1\).
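
These derivatives are obtained with PaddleScience's autodiff helpers. A minimal sketch, assuming \(u\) was computed from tensors t and x that track gradients (the stand-in u below is hypothetical; in the real pipeline it comes from Net1):

import paddle
from ppsci.autodiff import hessian, jacobian

# placeholder inputs; in the real pipeline t, x come from the dataloader
t = paddle.rand([100, 1])
x = paddle.rand([100, 1])
t.stop_gradient = False
x.stop_gradient = False
u = paddle.sin(x) * paddle.cos(t)  # stand-in for Net1's output

du_t = jacobian(u, t)   # u_t: first derivative w.r.t. t
du_x = jacobian(u, x)   # u_x: first derivative w.r.t. x
du_xx = hessian(u, x)   # u_xx: second derivative w.r.t. x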

Finally, the trained Net2 is used as the PDE formula: a small amount of data from the new simulation case 2 is taken as input and trained together with Net3 in a PINNs-like manner, yielding a Net3 that can predict case 2. Net3's inputs are the \(x, t\) of case 2 data and its output is \(u\); it is a mapping \(f_3: \mathbb{R}^2 \to \mathbb{R}^1\) from \((x, t)\) to \(u\).

Because each later training stage needs the forward predictions of the preceding stage's network, this problem uses a Model List: \(f_1, f_2, f_3\) above are each an MLP model, and the three together form a Model List, written in PaddleScience code as follows

# initialize model list
model_list = ppsci.arch.ModelList((model_idn, model_pde, model_sol))

Note that some networks' inputs are computed by earlier networks rather than being just the \((x, t)\) variables from the data, which means we need to apply a transform to those networks' inputs.

3.3 Transform Construction

For Net1, whose input is \((x, t)\), no transform would normally be needed; but because training numerically rescales the inputs according to the data's domain, a transform is needed after all. Likewise, Net3 needs the same input-rescaling transform

def transform_u(_in):
    t, x = _in["t"], _in["x"]
    t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0
    x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0
    input_trans = {"t": t, "x": x}
    return input_trans

For Net2, the inputs are \(u, u_x, u_{xx}\), where \(u\) is the output of one of the other two networks; every forward pass through Net2 therefore requires a transform, so two variants are needed. In addition, before training Net3, the transform has to be re-registered

def transform_f(input, model, out_key):
    in_idn = {"t": input["t"], "x": input["x"]}
    x = input["x"]
    u = model(in_idn)[out_key]
    du_x = jacobian(u, x)
    du_xx = hessian(u, x)
    input_trans = {"u_x": u, "du_x": du_x, "du_xx": du_xx}
    return input_trans

def transform_f_idn(_in):
    return transform_f(_in, model_idn, "u_idn")
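
The stage 3 counterpart, taken from the complete code below, reads the solution net's output instead:

def transform_f_sol(_in):
    return transform_f(_in, model_sol, "u_sol")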

After registering the transforms in turn, the 3 MLP models are assembled into a Model List

# register transform
model_idn.register_input_transform(transform_u)
model_pde.register_input_transform(transform_f_idn)
model_sol.register_input_transform(transform_u)

# initialize model list
model_list = ppsci.arch.ModelList((model_idn, model_pde, model_sol))

Note that before Net3's training starts, Net2's transform is re-registered

# re-register transform for model 2, fit for loss of stage 3
model_pde.register_input_transform(transform_f_sol)

With this we have instantiated a model list of 3 MLP models with input transforms, each MLP having 4 hidden layers of 50 neurons and using "sin" as its activation function
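
For reference, here is a hedged sketch of what the cfg.MODEL.* entries may expand to; the authoritative values live in conf/burgers.yaml, and the key names follow the transforms above:

import ppsci

# ppsci.arch.MLP(input_keys, output_keys, num_layers, hidden_size, activation)
model_idn = ppsci.arch.MLP(("t", "x"), ("u_idn",), 4, 50, "sin")
model_pde = ppsci.arch.MLP(("u_x", "du_x", "du_xx"), ("f_pde",), 4, 50, "sin")
model_sol = ppsci.arch.MLP(("t", "x"), ("u_sol",), 4, 50, "sin")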

3.4 Parameters and Hyperparameters

We need to specify problem-related parameters such as the dataset paths, the output directory, and the bounds of the domain

DATASET_PATH: ./datasets/burgers_sine.mat
DATASET_PATH_SOL: ./datasets/burgers_sine.mat

# set working condition
T_LB: 0.0
T_UB: 10.0
X_LB: -8.0
X_UB: 8.0

We also need to specify hyperparameters such as the number of training epochs and the learning rate; note the comments below: when the LBFGS optimizer is used, epochs is set to 1 and max_iter controls the number of iterations

TRAIN:
  epochs: 50000 # set 1 for LBFGS
  iters_per_epoch: 1
  max_iter: 50000  # for LBFGS
  learning_rate: 1.0e-3

3.5 Optimizer Construction

Two optimizers are provided for this problem, Adam and LBFGS. Only one is used per training run; the other must be commented out.

# initialize optimizer
# Adam
optimizer_idn = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_idn)
optimizer_pde = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_pde)
optimizer_sol = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_sol)

# LBFGS
# optimizer_idn = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)((model_idn,))
# optimizer_pde = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)((model_pde,))
# optimizer_sol = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)((model_sol,))

3.6 Constraint Construction

This problem is trained in three stages: some parts use supervised learning to constrain \(u\) directly, while others use unsupervised learning to constrain the outputs to satisfy the PDE formula.

The unsupervised parts can still use the supervised constraint SupervisedConstraint. Before defining a constraint, the data-reading configuration such as the file path must be given. Because the dataset contains no label data, some training data is reused as stand-in labels at reading time, and care must be taken never to use these "fake" labels afterwards, for example

train_dataloader_cfg_idn = {
    "dataset": {
        "name": "IterableMatDataset",
        "file_path": cfg.DATASET_PATH,
        "input_keys": ("t", "x"),
        "label_keys": ("u_idn",),
        "alias_dict": {"t": "t_train", "x": "x_train", "u_idn": "u_train"},
    },
}

Here the du_t value is read from t, so it is a "fake" label.
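
Concretely, the stage 2 dataloader configuration (from the complete code below) aliases the du_t label to t_train, making it a placeholder:

train_dataloader_cfg_pde = {
    "dataset": {
        "name": "IterableMatDataset",
        "file_path": cfg.DATASET_PATH,
        "input_keys": ("t", "x"),
        "label_keys": ("du_t",),
        # "du_t" is aliased to "t_train": a placeholder ("fake") label that is
        # never used as a target; the real du_t is recomputed via autodiff.
        "alias_dict": {"t": "t_train", "x": "x_train", "du_t": "t_train"},
    },
}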

3.6.1 Stage 1 Constraint Construction

Stage 1 trains Net1 with pure supervised learning, using the supervised constraint SupervisedConstraint

sup_constraint_idn = ppsci.constraint.SupervisedConstraint(
    train_dataloader_cfg_idn,
    ppsci.loss.MSELoss("sum"),
    {"u_idn": lambda out: out["u_idn"]},
    name="u_mse_sup",
)
constraint_idn = {sup_constraint_idn.name: sup_constraint_idn}

The first argument of SupervisedConstraint is the data-reading configuration; its "dataset" field describes the training dataset, whose fields mean:

  1. name: the dataset type; here "IterableMatDataset" means a .mat dataset read as a whole, without batching;
  2. file_path: path to the dataset file;
  3. input_keys: names of the input variables;
  4. label_keys: names of the label variables;
  5. alias_dict: variable aliases.

The second argument is the loss function; since this stage is purely data-driven, MSE is used

The third argument is the expression dict, which describes how the constraint targets are computed; the computed values are stored in the output under the given names so they are available when the loss is computed.

The fourth argument is the name of the constraint; every constraint is named so that it can be indexed later.

Once the constraint is built, it is wrapped into a dictionary keyed by the name we just gave, for convenient access later.

3.6.2 Stage 2 Constraint Construction

Stage 2 trains Net2 with unsupervised learning, yet the supervised constraint SupervisedConstraint can still be used; mind the "fake" label data mentioned above

sup_constraint_pde = ppsci.constraint.SupervisedConstraint(
    train_dataloader_cfg_pde,
    ppsci.loss.FunctionalLoss(pde_loss_func),
    {
        "du_t": lambda out: jacobian(out["u_idn"], out["t"]),
        "f_pde": lambda out: out["f_pde"],
    },
    name="f_mse_sup",
)
constraint_pde = {sup_constraint_pde.name: sup_constraint_pde}

The arguments mean the same as in Stage 1 Constraint Construction; the only difference is the second argument of this constraint, the loss function, which uses FunctionalLoss, the class PaddleScience reserves for user-defined losses. It lets you code your own loss computation instead of using existing ones such as MSE. For the custom loss function used in this constraint, see Custom loss and metric.

Once the constraint is built, it is wrapped into a dictionary keyed by the name we just gave, for convenient access later.

3.6.3 Stage 3 Constraint Construction

Stage 3 training of Net3 is more involved: it includes supervised learning on some initial points, PDE-related unsupervised learning, and boundary-condition-related unsupervised learning. The supervised constraint SupervisedConstraint is still used; again mind the "fake" labels. The arguments mean the same as above

sup_constraint_sol_f = ppsci.constraint.SupervisedConstraint(
    train_dataloader_cfg_sol_f,
    ppsci.loss.FunctionalLoss(pde_loss_func),
    {
        "f_pde": lambda out: out["f_pde"],
        "du_t": lambda out: jacobian(out["u_sol"], out["t"]),
    },
    name="f_mse_sup",
)
sup_constraint_sol_init = ppsci.constraint.SupervisedConstraint(
    train_dataloader_cfg_sol_init,
    ppsci.loss.MSELoss("sum"),
    {"u_sol": lambda out: out["u_sol"]},
    name="u0_mse_sup",
)
sup_constraint_sol_bc = ppsci.constraint.SupervisedConstraint(
    train_dataloader_cfg_sol_bc,
    ppsci.loss.FunctionalLoss(boundary_loss_func),
    {
        "x": lambda out: out["x"],
        "u_sol": lambda out: out["u_sol"],
    },
    name="ub_mse_sup",
)
constraint_sol = {
    sup_constraint_sol_f.name: sup_constraint_sol_f,
    sup_constraint_sol_init.name: sup_constraint_sol_init,
    sup_constraint_sol_bc.name: sup_constraint_sol_bc,
}

Once the constraints are built, they are wrapped into a dictionary keyed by the names we just gave, for convenient access later.

3.7 Validator Construction

As with the constraints, although parts of this problem are supervised and parts unsupervised, ppsci.validate.SupervisedValidator can still be used to build the validators. The arguments mean the same as in constraint construction; the only difference is the evaluation metric

3.7.1 Stage 1 Validator Construction

The evaluation metric is the relative L2 error, L2Rel.
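
Written out, for predictions \(u_{pred}\) and references \(u_{ref}\) the metric is

\[ \mathrm{L2Rel}(u_{pred}, u_{ref}) = \frac{\lVert u_{pred} - u_{ref} \rVert_2}{\lVert u_{ref} \rVert_2} \]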

sup_validator_idn = ppsci.validate.SupervisedValidator(
    eval_dataloader_cfg_idn,
    ppsci.loss.MSELoss("sum"),
    {"u_idn": lambda out: out["u_idn"]},
    {"l2": ppsci.metric.L2Rel()},
    name="u_L2_sup",
)
validator_idn = {sup_validator_idn.name: sup_validator_idn}

3.7.2 Stage 2 Validator Construction

The evaluation metric is FunctionalMetric, the class PaddleScience reserves for user-defined metrics; it lets you code your own metric computation instead of using existing ones such as MSE or L2. For the custom metric function code, see the next part, Custom loss and metric.

sup_validator_pde = ppsci.validate.SupervisedValidator(
    eval_dataloader_cfg_pde,
    ppsci.loss.FunctionalLoss(pde_loss_func),
    {
        "du_t": lambda out: jacobian(out["u_idn"], out["t"]),
        "f_pde": lambda out: out["f_pde"],
    },
    {"l2": ppsci.metric.FunctionalMetric(pde_l2_rel_func)},
    name="f_L2_sup",
)
validator_pde = {sup_validator_pde.name: sup_validator_pde}

3.7.3 Stage 3 Validator Construction

Stage 3 evaluation only needs to score the predicted values at the given points, not how well the boundary conditions or the PDE are satisfied, so the evaluation metric is again the relative L2 error, L2Rel

sup_validator_sol = ppsci.validate.SupervisedValidator(
    eval_dataloader_cfg_sol,
    ppsci.loss.MSELoss("sum"),
    {"u_sol": lambda out: out["u_sol"]},
    {"l2": ppsci.metric.L2Rel()},
    name="u_L2_sup",
)
validator_sol = {sup_validator_sol.name: sup_validator_sol}

3.8 Custom loss and metric

Because this problem involves unsupervised learning and the data contains no labels, the loss and metric are computed from the PDE, so custom loss and metric functions are required. The approach is to define the functions first and then pass them as arguments to FunctionalLoss and FunctionalMetric.

Note that custom loss and metric functions must keep the same input/output signature as PaddleScience built-ins such as MSE: the inputs are dict variables such as the model output output_dict; a loss function returns the loss value as a paddle.Tensor, and a metric function returns a dictionary Dict[str, paddle.Tensor].

The custom loss function related to the PDE is

def pde_loss_func(output_dict, *args):
    losses = F.mse_loss(output_dict["f_pde"], output_dict["du_t"], "sum")
    return losses

The custom metric function related to the PDE is

def pde_l2_rel_func(output_dict, *args):
    rel_l2 = paddle.norm(output_dict["du_t"] - output_dict["f_pde"]) / paddle.norm(
        output_dict["du_t"]
    )
    metric_dict = {"f_pde": rel_l2}
    return metric_dict

The custom loss function related to the boundary conditions enforces periodic boundaries, matching \(u\) and \(u_x\) at the lower and upper ends of the domain:

def boundary_loss_func(output_dict, *args):
    u_b = output_dict["u_sol"]
    u_lb, u_ub = paddle.split(u_b, 2, axis=0)

    x_b = output_dict["x"]
    du_x = jacobian(u_b, x_b)

    du_x_lb, du_x_ub = paddle.split(du_x, 2, axis=0)

    losses = F.mse_loss(u_lb, u_ub, "sum")
    losses += F.mse_loss(du_x_lb, du_x_ub, "sum")
    return losses

3.9 Model Training and Evaluation

With the above settings in place, simply pass the instantiated objects, in order, to each stage's ppsci.solver.Solver, then launch training and evaluation.

Stage 1 training and evaluation

# initialize solver
solver = ppsci.solver.Solver(
    model_list,
    constraint_idn,
    cfg.output_dir,
    optimizer_idn,
    None,
    cfg.TRAIN.epochs,
    cfg.TRAIN.iters_per_epoch,
    eval_during_train=cfg.TRAIN.eval_during_train,
    validator=validator_idn,
)

# train model
solver.train()
# evaluate after finished training
solver.eval()

Stage 2 training and evaluation

# update solver
solver = ppsci.solver.Solver(
    model_list,
    constraint_pde,
    cfg.output_dir,
    optimizer_pde,
    None,
    cfg.TRAIN.epochs,
    cfg.TRAIN.iters_per_epoch,
    eval_during_train=cfg.TRAIN.eval_during_train,
    validator=validator_pde,
)

# train model
solver.train()
# evaluate after finished training
solver.eval()

Stage 3 training and evaluation

# update solver
solver = ppsci.solver.Solver(
    model_list,
    constraint_sol,
    cfg.output_dir,
    optimizer_sol,
    None,
    cfg.TRAIN.epochs,
    cfg.TRAIN.iters_per_epoch,
    eval_during_train=cfg.TRAIN.eval_during_train,
    validator=validator_sol,
)

# train model
solver.train()
# evaluate after finished training
solver.eval()

3.10 Visualization

After training, the stage 3 network Net3 can be used in evaluation to run inference on the data of simulation case 2, producing the values \(u|_{(x,t)}\) together with the l2 error. The plotting code lives in plotting.py.

# stage 3: solution net
# load pretrained model
save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path)

# load dataset
dataset_val = reader.load_mat_file(
    cfg.DATASET_PATH_SOL,
    keys=("t", "x", "u_sol"),
    alias_dict={
        "t": "t_ori",
        "x": "x_ori",
        "u_sol": "Exact_ori",
    },
)

t_sol, x_sol = np.meshgrid(
    np.squeeze(dataset_val["t"]), np.squeeze(dataset_val["x"])
)
t_sol_flatten = paddle.to_tensor(
    t_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False
)
x_sol_flatten = paddle.to_tensor(
    x_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False
)
u_sol_pred = model_list({"t": t_sol_flatten, "x": x_sol_flatten})

# eval
l2_error = np.linalg.norm(
    dataset_val["u_sol"] - u_sol_pred["u_sol"], 2
) / np.linalg.norm(dataset_val["u_sol"], 2)
logger.info(f"l2_error: {l2_error}")

# plotting
plot_points = paddle.concat([t_sol_flatten, x_sol_flatten], axis=-1).numpy()
plot_func.draw_and_save(
    figname="burgers_sol",
    data_exact=dataset_val["u_sol"],
    data_learned=u_sol_pred["u_sol"].numpy(),
    boundary=[cfg.T_LB, cfg.T_UB, cfg.X_LB, cfg.X_UB],
    griddata_points=plot_points,
    griddata_xi=(t_sol, x_sol),
    save_path=cfg.output_dir,
)

4. Complete Code

burgers.py
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from os import path as osp

import hydra
import numpy as np
import paddle
import paddle.nn.functional as F
import plotting as plot_func
from omegaconf import DictConfig

import ppsci
from ppsci.autodiff import hessian
from ppsci.autodiff import jacobian
from ppsci.utils import logger
from ppsci.utils import reader
from ppsci.utils import save_load


def pde_loss_func(output_dict, *args):
    losses = F.mse_loss(output_dict["f_pde"], output_dict["du_t"], "sum")
    return losses


def pde_l2_rel_func(output_dict, *args):
    rel_l2 = paddle.norm(output_dict["du_t"] - output_dict["f_pde"]) / paddle.norm(
        output_dict["du_t"]
    )
    metric_dict = {"f_pde": rel_l2}
    return metric_dict


def boundary_loss_func(output_dict, *args):
    u_b = output_dict["u_sol"]
    u_lb, u_ub = paddle.split(u_b, 2, axis=0)

    x_b = output_dict["x"]
    du_x = jacobian(u_b, x_b)

    du_x_lb, du_x_ub = paddle.split(du_x, 2, axis=0)

    losses = F.mse_loss(u_lb, u_ub, "sum")
    losses += F.mse_loss(du_x_lb, du_x_ub, "sum")
    return losses


def train(cfg: DictConfig):
    ppsci.utils.misc.set_random_seed(cfg.seed)
    # initialize logger
    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")

    # initialize burgers boundaries
    t_lb = paddle.to_tensor(cfg.T_LB)
    t_ub = paddle.to_tensor(cfg.T_UB)
    x_lb = paddle.to_tensor(cfg.X_LB)
    x_ub = paddle.to_tensor(cfg.X_UB)  # fixed: was cfg.T_UB, the time upper bound

    # initialize models
    model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net)
    model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net)
    model_sol = ppsci.arch.MLP(**cfg.MODEL.sol_net)

    # initialize transform
    def transform_u(_in):
        t, x = _in["t"], _in["x"]
        t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0
        x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0
        input_trans = {"t": t, "x": x}
        return input_trans

    def transform_f(input, model, out_key):
        in_idn = {"t": input["t"], "x": input["x"]}
        x = input["x"]
        u = model(in_idn)[out_key]
        du_x = jacobian(u, x)
        du_xx = hessian(u, x)
        input_trans = {"u_x": u, "du_x": du_x, "du_xx": du_xx}
        return input_trans

    def transform_f_idn(_in):
        return transform_f(_in, model_idn, "u_idn")

    def transform_f_sol(_in):
        return transform_f(_in, model_sol, "u_sol")

    # register transform
    model_idn.register_input_transform(transform_u)
    model_pde.register_input_transform(transform_f_idn)
    model_sol.register_input_transform(transform_u)

    # initialize model list
    model_list = ppsci.arch.ModelList((model_idn, model_pde, model_sol))

    # initialize optimizer
    # Adam
    optimizer_idn = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_idn)
    optimizer_pde = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_pde)
    optimizer_sol = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_sol)

    # LBFGS
    # optimizer_idn = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)((model_idn,))
    # optimizer_pde = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)((model_pde,))
    # optimizer_sol = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)((model_sol,))

    # stage 1: training identification net
    # manually build constraint(s)
    train_dataloader_cfg_idn = {
        "dataset": {
            "name": "IterableMatDataset",
            "file_path": cfg.DATASET_PATH,
            "input_keys": ("t", "x"),
            "label_keys": ("u_idn",),
            "alias_dict": {"t": "t_train", "x": "x_train", "u_idn": "u_train"},
        },
    }

    sup_constraint_idn = ppsci.constraint.SupervisedConstraint(
        train_dataloader_cfg_idn,
        ppsci.loss.MSELoss("sum"),
        {"u_idn": lambda out: out["u_idn"]},
        name="u_mse_sup",
    )
    constraint_idn = {sup_constraint_idn.name: sup_constraint_idn}

    # manually build validator
    eval_dataloader_cfg_idn = {
        "dataset": {
            "name": "IterableMatDataset",
            "file_path": cfg.DATASET_PATH,
            "input_keys": ("t", "x"),
            "label_keys": ("u_idn",),
            "alias_dict": {"t": "t_star", "x": "x_star", "u_idn": "u_star"},
        },
    }

    sup_validator_idn = ppsci.validate.SupervisedValidator(
        eval_dataloader_cfg_idn,
        ppsci.loss.MSELoss("sum"),
        {"u_idn": lambda out: out["u_idn"]},
        {"l2": ppsci.metric.L2Rel()},
        name="u_L2_sup",
    )
    validator_idn = {sup_validator_idn.name: sup_validator_idn}

    # initialize solver
    solver = ppsci.solver.Solver(
        model_list,
        constraint_idn,
        cfg.output_dir,
        optimizer_idn,
        None,
        cfg.TRAIN.epochs,
        cfg.TRAIN.iters_per_epoch,
        eval_during_train=cfg.TRAIN.eval_during_train,
        validator=validator_idn,
    )

    # train model
    solver.train()
    # evaluate after finished training
    solver.eval()

    # stage 2: training pde net
    # manually build constraint(s)
    train_dataloader_cfg_pde = {
        "dataset": {
            "name": "IterableMatDataset",
            "file_path": cfg.DATASET_PATH,
            "input_keys": ("t", "x"),
            "label_keys": ("du_t",),
            "alias_dict": {"t": "t_train", "x": "x_train", "du_t": "t_train"},
        },
    }

    sup_constraint_pde = ppsci.constraint.SupervisedConstraint(
        train_dataloader_cfg_pde,
        ppsci.loss.FunctionalLoss(pde_loss_func),
        {
            "du_t": lambda out: jacobian(out["u_idn"], out["t"]),
            "f_pde": lambda out: out["f_pde"],
        },
        name="f_mse_sup",
    )
    constraint_pde = {sup_constraint_pde.name: sup_constraint_pde}

    # manually build validator
    eval_dataloader_cfg_pde = {
        "dataset": {
            "name": "IterableMatDataset",
            "file_path": cfg.DATASET_PATH,
            "input_keys": ("t", "x"),
            "label_keys": ("du_t",),
            "alias_dict": {"t": "t_star", "x": "x_star", "du_t": "t_star"},
        },
    }

    sup_validator_pde = ppsci.validate.SupervisedValidator(
        eval_dataloader_cfg_pde,
        ppsci.loss.FunctionalLoss(pde_loss_func),
        {
            "du_t": lambda out: jacobian(out["u_idn"], out["t"]),
            "f_pde": lambda out: out["f_pde"],
        },
        {"l2": ppsci.metric.FunctionalMetric(pde_l2_rel_func)},
        name="f_L2_sup",
    )
    validator_pde = {sup_validator_pde.name: sup_validator_pde}

    # update solver
    solver = ppsci.solver.Solver(
        model_list,
        constraint_pde,
        cfg.output_dir,
        optimizer_pde,
        None,
        cfg.TRAIN.epochs,
        cfg.TRAIN.iters_per_epoch,
        eval_during_train=cfg.TRAIN.eval_during_train,
        validator=validator_pde,
    )

    # train model
    solver.train()
    # evaluate after finished training
    solver.eval()

    # stage 3: training solution net
    # re-register transform for model 2, fit for loss of stage 3
    model_pde.register_input_transform(transform_f_sol)

    # manually build constraint(s)
    train_dataloader_cfg_sol_f = {
        "dataset": {
            "name": "IterableMatDataset",
            "file_path": cfg.DATASET_PATH_SOL,
            "input_keys": ("t", "x"),
            "label_keys": ("du_t",),
            "alias_dict": {"t": "t_f_train", "x": "x_f_train", "du_t": "t_f_train"},
        },
    }
    train_dataloader_cfg_sol_init = {
        "dataset": {
            "name": "IterableMatDataset",
            "file_path": cfg.DATASET_PATH_SOL,
            "input_keys": ("t", "x"),
            "label_keys": ("u_sol",),
            "alias_dict": {"t": "t0", "x": "x0", "u_sol": "u0"},
        },
    }
    train_dataloader_cfg_sol_bc = {
        "dataset": {
            "name": "IterableMatDataset",
            "file_path": cfg.DATASET_PATH_SOL,
            "input_keys": ("t", "x"),
            "label_keys": ("x",),
            "alias_dict": {"t": "tb", "x": "xb"},
        },
    }

    sup_constraint_sol_f = ppsci.constraint.SupervisedConstraint(
        train_dataloader_cfg_sol_f,
        ppsci.loss.FunctionalLoss(pde_loss_func),
        {
            "f_pde": lambda out: out["f_pde"],
            "du_t": lambda out: jacobian(out["u_sol"], out["t"]),
        },
        name="f_mse_sup",
    )
    sup_constraint_sol_init = ppsci.constraint.SupervisedConstraint(
        train_dataloader_cfg_sol_init,
        ppsci.loss.MSELoss("sum"),
        {"u_sol": lambda out: out["u_sol"]},
        name="u0_mse_sup",
    )
    sup_constraint_sol_bc = ppsci.constraint.SupervisedConstraint(
        train_dataloader_cfg_sol_bc,
        ppsci.loss.FunctionalLoss(boundary_loss_func),
        {
            "x": lambda out: out["x"],
            "u_sol": lambda out: out["u_sol"],
        },
        name="ub_mse_sup",
    )
    constraint_sol = {
        sup_constraint_sol_f.name: sup_constraint_sol_f,
        sup_constraint_sol_init.name: sup_constraint_sol_init,
        sup_constraint_sol_bc.name: sup_constraint_sol_bc,
    }

    # manually build validator
    eval_dataloader_cfg_sol = {
        "dataset": {
            "name": "IterableMatDataset",
            "file_path": cfg.DATASET_PATH_SOL,
            "input_keys": ("t", "x"),
            "label_keys": ("u_sol",),
            "alias_dict": {"t": "t_star", "x": "x_star", "u_sol": "u_star"},
        },
    }

    sup_validator_sol = ppsci.validate.SupervisedValidator(
        eval_dataloader_cfg_sol,
        ppsci.loss.MSELoss("sum"),
        {"u_sol": lambda out: out["u_sol"]},
        {"l2": ppsci.metric.L2Rel()},
        name="u_L2_sup",
    )
    validator_sol = {sup_validator_sol.name: sup_validator_sol}

    # update solver
    solver = ppsci.solver.Solver(
        model_list,
        constraint_sol,
        cfg.output_dir,
        optimizer_sol,
        None,
        cfg.TRAIN.epochs,
        cfg.TRAIN.iters_per_epoch,
        eval_during_train=cfg.TRAIN.eval_during_train,
        validator=validator_sol,
    )

    # train model
    solver.train()
    # evaluate after finished training
    solver.eval()


def evaluate(cfg: DictConfig):
    ppsci.utils.misc.set_random_seed(cfg.seed)
    # initialize logger
    logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info")

    # initialize burgers boundaries
    t_lb = paddle.to_tensor(cfg.T_LB)
    t_ub = paddle.to_tensor(cfg.T_UB)
    x_lb = paddle.to_tensor(cfg.X_LB)
    x_ub = paddle.to_tensor(cfg.X_UB)  # fixed: was cfg.T_UB, the time upper bound

    # initialize models
    model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net)
    model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net)
    model_sol = ppsci.arch.MLP(**cfg.MODEL.sol_net)

    # initialize transform
    def transform_u(_in):
        t, x = _in["t"], _in["x"]
        t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0
        x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0
        input_trans = {"t": t, "x": x}
        return input_trans

    def transform_f(input, model, out_key):
        in_idn = {"t": input["t"], "x": input["x"]}
        x = input["x"]
        u = model(in_idn)[out_key]
        du_x = jacobian(u, x)
        du_xx = hessian(u, x)
        input_trans = {"u_x": u, "du_x": du_x, "du_xx": du_xx}
        return input_trans

    def transform_f_sol(_in):
        return transform_f(_in, model_sol, "u_sol")

    # register transform
    model_idn.register_input_transform(transform_u)
    model_pde.register_input_transform(transform_f_sol)
    model_sol.register_input_transform(transform_u)

    # initialize model list
    model_list = ppsci.arch.ModelList((model_idn, model_pde, model_sol))

    # stage 3: solution net
    # load pretrained model
    save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path)

    # load dataset
    dataset_val = reader.load_mat_file(
        cfg.DATASET_PATH_SOL,
        keys=("t", "x", "u_sol"),
        alias_dict={
            "t": "t_ori",
            "x": "x_ori",
            "u_sol": "Exact_ori",
        },
    )

    t_sol, x_sol = np.meshgrid(
        np.squeeze(dataset_val["t"]), np.squeeze(dataset_val["x"])
    )
    t_sol_flatten = paddle.to_tensor(
        t_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False
    )
    x_sol_flatten = paddle.to_tensor(
        x_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False
    )
    u_sol_pred = model_list({"t": t_sol_flatten, "x": x_sol_flatten})

    # eval
    l2_error = np.linalg.norm(
        dataset_val["u_sol"] - u_sol_pred["u_sol"], 2
    ) / np.linalg.norm(dataset_val["u_sol"], 2)
    logger.info(f"l2_error: {l2_error}")

    # plotting
    plot_points = paddle.concat([t_sol_flatten, x_sol_flatten], axis=-1).numpy()
    plot_func.draw_and_save(
        figname="burgers_sol",
        data_exact=dataset_val["u_sol"],
        data_learned=u_sol_pred["u_sol"].numpy(),
        boundary=[cfg.T_LB, cfg.T_UB, cfg.X_LB, cfg.X_UB],
        griddata_points=plot_points,
        griddata_xi=(t_sol, x_sol),
        save_path=cfg.output_dir,
    )


@hydra.main(version_base=None, config_path="./conf", config_name="burgers.yaml")
def main(cfg: DictConfig):
    if cfg.mode == "train":
        train(cfg)
    elif cfg.mode == "eval":
        evaluate(cfg)
    else:
        raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'")


if __name__ == "__main__":
    main()
plotting.py
from os import path as osp

import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import griddata


def _draw_subplot(subfigname, figdata, fig, gs, cmap, boundary, loc):
    ax = plt.subplot(gs[:, loc])
    h = ax.imshow(
        figdata,
        interpolation="nearest",
        cmap=cmap,
        extent=boundary,  # [cfg.T_LB, cfg.T_UB, cfg.X_LB, cfg.X_UB]
        origin="lower",
        aspect="auto",
    )
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)

    fig.colorbar(h, cax=cax)
    ax.set_xlabel("$t$")
    ax.set_ylabel("$x$")
    ax.set_aspect("auto", "box")
    ax.set_title(subfigname, fontsize=10)


def draw_and_save(
    figname, data_exact, data_learned, boundary, griddata_points, griddata_xi, save_path
):
    fig = plt.figure(figname, figsize=(10, 6))
    gs = gridspec.GridSpec(1, 2)
    gs.update(top=0.8, bottom=0.2, left=0.1, right=0.9, wspace=0.5)

    # Exact p(t,x,y)
    plot_data_label = griddata(
        griddata_points, data_exact.flatten(), griddata_xi, method="cubic"
    )
    _draw_subplot("Exact Dynamics", plot_data_label, fig, gs, "jet", boundary, loc=0)
    # Predicted p(t,x,y)
    plot_data_pred = griddata(
        griddata_points, data_learned.flatten(), griddata_xi, method="cubic"
    )
    _draw_subplot("Learned Dynamics", plot_data_pred, fig, gs, "jet", boundary, loc=1)

    plt.savefig(osp.join(save_path, figname))
    plt.close()


def draw_and_save_ns(figname, data_exact, data_learned, grid_data, save_path):
    snap = 120
    nn = 200
    lb_x, lb_y = grid_data[:, 0].min(), grid_data[:, 1].min()
    ub_x, ub_y = grid_data[:, 0].max(), grid_data[:, 1].max()
    x_plot = np.linspace(lb_x, ub_x, nn)
    y_plot = np.linspace(lb_y, ub_y, nn)
    X_plot, Y_plot = np.meshgrid(x_plot, y_plot)

    fig = plt.figure(figname, figsize=(10, 6))
    gs = gridspec.GridSpec(1, 2)
    gs.update(top=0.8, bottom=0.2, left=0.1, right=0.9, wspace=0.5)
    # Exact p(t,x,y)
    plot_data_label = griddata(
        grid_data,
        data_exact[:, snap].flatten(),
        (X_plot, Y_plot),
        method="cubic",
    )
    _draw_subplot(
        "Exact Dynamics",
        plot_data_label,
        fig,
        gs,
        "seismic",
        [lb_x, lb_y, ub_x, ub_y],
        loc=0,
    )
    # Predicted p(t,x,y)
    plot_data_pred = griddata(
        grid_data,
        data_learned[:, snap].flatten(),
        (X_plot, Y_plot),
        method="cubic",
    )
    _draw_subplot(
        "Learned Dynamics",
        plot_data_pred,
        fig,
        gs,
        "seismic",
        [lb_x, lb_y, ub_x, ub_y],
        loc=1,
    )
    plt.savefig(osp.join(save_path, figname))
    plt.close()

5. Results

Following the Problem Definition, in the figures below the horizontal and vertical axes are the time and position parameters, and the color encodes the Burgers solution u, scaled by the colorbar on the right of each figure. When the Burgers equation is applied to different problems, u takes on different meanings; here it can simply be read as a velocity.

The figures show, under a given initial condition (the u values over x at t=0), how u varies with x as t grows. The true u values and the model predictions are shown below and agree closely with the traditional spectral method.

Simulated dataset 1 is burgers_sine.mat, whose initial condition is a sine function; simulated dataset 2 is burgers.mat, whose initial condition is an exp function:

Figure: burgers_diff_lbfgs, comparison of the true and predicted u values

When simulated datasets 1 and 2 are both burgers_sine.mat, the dataset whose initial condition is a sine function:

Figure: burgers_same_lbfgs, comparison of the true and predicted u values

6. References

- Raissi, M. Deep Hidden Physics Models: Deep Learning of Nonlinear Partial Differential Equations. Journal of Machine Learning Research, 19(25):1-24, 2018.


Last updated: November 22, 2023
Created: November 6, 2023