Loss.loss (Loss Function) Module

ppsci.loss

Loss

Bases: Layer

Base class for loss.

Source code in ppsci/loss/base.py
class Loss(nn.Layer):
    """Base class for loss."""

    def __init__(
        self,
        reduction: Literal["mean", "sum"],
        weight: Optional[Union[float, Dict[str, float]]] = None,
    ):
        super().__init__()
        self.reduction = reduction
        self.weight = weight

    def __str__(self):
        return f"{self.__class__.__name__}(reduction={self.reduction}, weight={self.weight})"

FunctionalLoss

Bases: Loss

Functional loss class, which allows using a custom loss-computing function, given by loss_expr, for complex computation cases.

\[ L = f(\mathbf{x}, \mathbf{y}) \]
\[ \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} \]

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| loss_expr | Callable[..., Tensor] | Function for custom loss computation. | required |
| weight | Optional[Union[float, Dict[str, float]]] | Weight for loss. Defaults to None. | None |

Examples:

>>> import paddle
>>> from ppsci.loss import FunctionalLoss
>>> import paddle.nn.functional as F
>>> def mse_sum_loss(output_dict, label_dict, weight_dict=None):
...     losses = 0
...     for key in output_dict.keys():
...         loss = F.mse_loss(output_dict[key], label_dict[key], "sum")
...         if weight_dict:
...             loss *= weight_dict[key]
...         losses += loss
...     return losses
>>> loss = FunctionalLoss(mse_sum_loss)
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
...             'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
...             'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight_dict = {'u': 0.8, 'v': 0.2}
>>> result = loss(output_dict, label_dict, weight_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       17.89600182)
Source code in ppsci/loss/func.py
class FunctionalLoss(base.Loss):
    r"""Functional loss class, which allows to use custom loss computing function from given loss_expr for complex computation cases.

    $$
    L = f(\mathbf{x}, \mathbf{y})
    $$

    $$
    \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N}
    $$

    Args:
        loss_expr (Callable[..., paddle.Tensor]): Function for custom loss computation.
        weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.

    Examples:
        >>> import paddle
        >>> from ppsci.loss import FunctionalLoss
        >>> import paddle.nn.functional as F
        >>> def mse_sum_loss(output_dict, label_dict, weight_dict=None):
        ...     losses = 0
        ...     for key in output_dict.keys():
        ...         loss = F.mse_loss(output_dict[key], label_dict[key], "sum")
        ...         if weight_dict:
        ...             loss *= weight_dict[key]
        ...         losses += loss
        ...     return losses
        >>> loss = FunctionalLoss(mse_sum_loss)
        >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
        ...             'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
        ...             'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
        >>> weight_dict = {'u': 0.8, 'v': 0.2}
        >>> result = loss(output_dict, label_dict, weight_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               17.89600182)
    """

    def __init__(
        self,
        loss_expr: Callable[..., paddle.Tensor],
        weight: Optional[Union[float, Dict[str, float]]] = None,
    ):
        super().__init__(None, weight)
        self.loss_expr = loss_expr

    def forward(self, output_dict, label_dict=None, weight_dict=None) -> paddle.Tensor:
        loss = self.loss_expr(output_dict, label_dict, weight_dict)

        assert isinstance(
            loss, (paddle.Tensor, paddle.static.Variable, paddle.pir.Value)
        ), (
            "Loss computed by custom function should be type of 'paddle.Tensor', "
            f"'paddle.static.Variable' or 'paddle.pir.Value', but got {type(loss)}."
            " Please check the return type of custom loss function."
        )

        return loss

L1Loss

Bases: Loss

Class for l1 loss.

\[ L = \Vert \mathbf{x} - \mathbf{y} \Vert_1 \]
\[ \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} \]

when reduction is set to "mean"

\[ L = MEAN \left( \Vert \mathbf{x} - \mathbf{y} \Vert_1 \right) \]

when reduction is set to "sum"

\[ L = SUM \left( \Vert \mathbf{x} - \mathbf{y} \Vert_1 \right) \]

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| reduction | Literal['mean', 'sum'] | Reduction method. Defaults to "mean". | 'mean' |
| weight | Optional[Union[float, Dict[str, float]]] | Weight for loss. Defaults to None. | None |

Examples:

>>> import paddle
>>> from ppsci.loss import L1Loss
>>> output_dict = {"u": paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
...                "v": paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {"u": paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
...               "v": paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight = {"u": 0.8, "v": 0.2}
>>> loss = L1Loss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       3.35999990)
>>> loss = L1Loss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       6.71999979)
Source code in ppsci/loss/l1.py
class L1Loss(base.Loss):
    r"""Class for l1 loss.

    $$
    L = \Vert \mathbf{x} - \mathbf{y} \Vert_1
    $$

    $$
    \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N}
    $$

    when `reduction` is set to "mean"

    $$
    L = MEAN \left( \Vert \mathbf{x} - \mathbf{y} \Vert_1 \right)
    $$

    when `reduction` is set to "sum"

    $$
    L = SUM \left( \Vert \mathbf{x} - \mathbf{y} \Vert_1 \right)
    $$

    Args:
        reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean".
        weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.

    Examples:
        >>> import paddle
        >>> from ppsci.loss import L1Loss
        >>> output_dict = {"u": paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
        ...                "v": paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
        >>> label_dict = {"u": paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
        ...               "v": paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
        >>> weight = {"u": 0.8, "v": 0.2}
        >>> loss = L1Loss(weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               3.35999990)

        >>> loss = L1Loss(reduction="sum", weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               6.71999979)
    """

    def __init__(
        self,
        reduction: Literal["mean", "sum"] = "mean",
        weight: Optional[Union[float, Dict[str, float]]] = None,
    ):
        if reduction not in ["mean", "sum"]:
            raise ValueError(
                f"reduction should be 'mean' or 'sum', but got {reduction}"
            )
        super().__init__(reduction, weight)

    def forward(self, output_dict, label_dict, weight_dict=None):
        losses = 0.0
        for key in label_dict:
            loss = F.l1_loss(output_dict[key], label_dict[key], "none")
            if weight_dict and key in weight_dict:
                loss *= weight_dict[key]

            if "area" in output_dict:
                loss *= output_dict["area"]

            loss = loss.sum(axis=1)

            if self.reduction == "sum":
                loss = loss.sum()
            elif self.reduction == "mean":
                loss = loss.mean()

            if isinstance(self.weight, (float, int)):
                loss *= self.weight
            elif isinstance(self.weight, dict) and key in self.weight:
                loss *= self.weight[key]

            losses += loss
        return losses

L2Loss

Bases: Loss

Class for l2 loss.

\[ L =\Vert \mathbf{x} - \mathbf{y} \Vert_2 \]
\[ \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} \]

when reduction is set to "mean"

\[ L = MEAN \left( \Vert \mathbf{x} - \mathbf{y} \Vert_2 \right) \]

when reduction is set to "sum"

\[ L = SUM \left( \Vert \mathbf{x} - \mathbf{y} \Vert_2 \right) \]

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| reduction | Literal['mean', 'sum'] | Reduction method. Defaults to "mean". | 'mean' |
| weight | Optional[Union[float, Dict[str, float]]] | Weight for loss. Defaults to None. | None |

Examples:

>>> import paddle
>>> from ppsci.loss import L2Loss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = L2Loss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       2.78884506)
>>> loss = L2Loss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       5.57769012)
Source code in ppsci/loss/l2.py
class L2Loss(base.Loss):
    r"""Class for l2 loss.

    $$
    L =\Vert \mathbf{x} - \mathbf{y} \Vert_2
    $$

    $$
    \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N}
    $$

    when `reduction` is set to "mean"

    $$
    L = MEAN \left( \Vert \mathbf{x} - \mathbf{y} \Vert_2 \right)
    $$

    when `reduction` is set to "sum"

    $$
    L = SUM \left( \Vert \mathbf{x} - \mathbf{y} \Vert_2 \right)
    $$

    Args:
        reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean".
        weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.

    Examples:
        >>> import paddle
        >>> from ppsci.loss import L2Loss
        >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
        ...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
        ...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
        >>> weight = {'u': 0.8, 'v': 0.2}
        >>> loss = L2Loss(weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               2.78884506)
        >>> loss = L2Loss(reduction="sum", weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               5.57769012)
    """

    def __init__(
        self,
        reduction: Literal["mean", "sum"] = "mean",
        weight: Optional[Union[float, Dict[str, float]]] = None,
    ):
        if reduction not in ["mean", "sum"]:
            raise ValueError(
                f"reduction should be 'mean' or 'sum', but got {reduction}"
            )
        super().__init__(reduction, weight)

    def forward(self, output_dict, label_dict, weight_dict=None):
        losses = 0.0
        for key in label_dict:
            loss = F.mse_loss(output_dict[key], label_dict[key], "none")
            if weight_dict and key in weight_dict:
                loss *= weight_dict[key]

            if "area" in output_dict:
                loss *= output_dict["area"]

            loss = loss.sum(axis=1).sqrt()

            if self.reduction == "sum":
                loss = loss.sum()
            elif self.reduction == "mean":
                loss = loss.mean()

            if isinstance(self.weight, (float, int)):
                loss *= self.weight
            elif isinstance(self.weight, dict) and key in self.weight:
                loss *= self.weight[key]

            losses += loss
        return losses

L2RelLoss

Bases: Loss

Class for l2 relative loss.

\[ L = \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\Vert \mathbf{y} \Vert_2} \]
\[ \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} \]

when reduction is set to "mean"

\[ L = MEAN \left( \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\Vert \mathbf{y} \Vert_2} \right) \]

when reduction is set to "sum"

\[ L = SUM \left( \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\Vert \mathbf{y} \Vert_2} \right) \]
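
For intuition, a small numeric sketch of the per-sample relative error that `rel_loss` (shown in the source below) computes before reduction; the values are illustrative:

import paddle

x = paddle.to_tensor([[1.0, 1.0]])  # prediction, flattened per sample
y = paddle.to_tensor([[2.0, 0.0]])  # reference
rel = paddle.norm(x - y, p=2, axis=1) / paddle.norm(y, p=2, axis=1)
# ||x - y||_2 = sqrt(2), ||y||_2 = 2, so rel ≈ [0.7071]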

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| reduction | Literal['mean', 'sum'] | Specifies the reduction to apply to the output: 'mean' or 'sum'. Defaults to "mean". | 'mean' |
| weight | Optional[Union[float, Dict[str, float]]] | Weight for loss. Defaults to None. | None |

Examples:

>>> import paddle
>>> from ppsci.loss import L2RelLoss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = L2RelLoss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       2.93676996)
>>> loss = L2RelLoss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       5.87353992)
Source code in ppsci/loss/l2.py
class L2RelLoss(base.Loss):
    r"""Class for l2 relative loss.

    $$
    L = \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\Vert \mathbf{y} \Vert_2}
    $$

    $$
    \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N}
    $$

    when `reduction` is set to "mean"

    $$
    L = MEAN \left( \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\Vert \mathbf{y} \Vert_2} \right)
    $$

    when `reduction` is set to "sum"

    $$
    L = SUM \left( \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\Vert \mathbf{y} \Vert_2} \right)
    $$

    Args:
        reduction (Literal["mean", "sum"], optional): Specifies the reduction to apply to the output: 'mean' | 'sum'. Defaults to "mean".
        weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.

    Examples:
        >>> import paddle
        >>> from ppsci.loss import L2RelLoss

        >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
        ...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
        ...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
        >>> weight = {'u': 0.8, 'v': 0.2}
        >>> loss = L2RelLoss(weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               2.93676996)

        >>> loss = L2RelLoss(reduction="sum", weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               5.87353992)
    """

    def __init__(
        self,
        reduction: Literal["mean", "sum"] = "mean",
        weight: Optional[Union[float, Dict[str, float]]] = None,
    ):
        if reduction not in ["mean", "sum"]:
            raise ValueError(
                f"reduction should be 'mean' or 'sum', but got {reduction}"
            )
        super().__init__(reduction, weight)

    def rel_loss(self, x, y):
        batch_size = x.shape[0]
        x_ = x.reshape((batch_size, -1))
        y_ = y.reshape((batch_size, -1))
        diff_norms = paddle.norm(x_ - y_, p=2, axis=1)
        y_norms = paddle.norm(y_, p=2, axis=1)
        return diff_norms / y_norms

    def forward(self, output_dict, label_dict, weight_dict=None):
        losses = 0
        for key in label_dict:
            loss = self.rel_loss(output_dict[key], label_dict[key])
            if weight_dict:
                loss *= weight_dict[key]

            if self.reduction == "sum":
                loss = loss.sum()
            elif self.reduction == "mean":
                loss = loss.mean()

            if isinstance(self.weight, float):
                loss *= self.weight
            elif isinstance(self.weight, dict) and key in self.weight:
                loss *= self.weight[key]

            losses += loss

        return losses

MAELoss

Bases: Loss

Class for mean absolute error loss.

\[ L = \begin{cases} \dfrac{1}{N} \Vert {\mathbf{x}-\mathbf{y}} \Vert_1, & \text{if reduction='mean'} \\ \Vert {\mathbf{x}-\mathbf{y}} \Vert_1, & \text{if reduction='sum'} \end{cases} \]
\[ \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} \]

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| reduction | Literal['mean', 'sum'] | Reduction method. Defaults to "mean". | 'mean' |
| weight | Optional[Union[float, Dict[str, float]]] | Weight for loss. Defaults to None. | None |

Examples:

>>> import paddle
>>> from ppsci.loss import MAELoss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = MAELoss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       1.67999995)
>>> loss = MAELoss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       6.71999979)
Source code in ppsci/loss/mae.py
class MAELoss(base.Loss):
    r"""Class for mean absolute error loss.

    $$
    L =
    \begin{cases}
        \dfrac{1}{N} \Vert {\mathbf{x}-\mathbf{y}} \Vert_1, & \text{if reduction='mean'} \\
        \Vert {\mathbf{x}-\mathbf{y}} \Vert_1, & \text{if reduction='sum'}
    \end{cases}
    $$

    $$
    \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N}
    $$

    Args:
        reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean".
        weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.

    Examples:
        >>> import paddle
        >>> from ppsci.loss import MAELoss

        >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
        ...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
        ...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
        >>> weight = {'u': 0.8, 'v': 0.2}
        >>> loss = MAELoss(weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               1.67999995)

        >>> loss = MAELoss(reduction="sum", weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               6.71999979)
    """

    def __init__(
        self,
        reduction: Literal["mean", "sum"] = "mean",
        weight: Optional[Union[float, Dict[str, float]]] = None,
    ):
        if reduction not in ["mean", "sum"]:
            raise ValueError(
                f"reduction should be 'mean' or 'sum', but got {reduction}"
            )
        super().__init__(reduction, weight)

    def forward(self, output_dict, label_dict, weight_dict=None):
        losses = 0.0
        for key in label_dict:
            loss = F.l1_loss(output_dict[key], label_dict[key], "none")
            if weight_dict and key in weight_dict:
                loss *= weight_dict[key]

            if "area" in output_dict:
                loss *= output_dict["area"]

            if self.reduction == "sum":
                loss = loss.sum()
            elif self.reduction == "mean":
                loss = loss.mean()
            if isinstance(self.weight, (float, int)):
                loss *= self.weight
            elif isinstance(self.weight, dict) and key in self.weight:
                loss *= self.weight[key]

            losses += loss
        return losses

MSELoss

Bases: Loss

Class for mean squared error loss.

\[ L = \begin{cases} \dfrac{1}{N} \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2, & \text{if reduction='mean'} \\ \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2, & \text{if reduction='sum'} \end{cases} \]
\[ \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} \]

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| reduction | Literal['mean', 'sum'] | Reduction method. Defaults to "mean". | 'mean' |
| weight | Optional[Union[float, Dict[str, float]]] | Weight for loss. Defaults to None. | None |

Examples:

>>> import paddle
>>> from ppsci.loss import MSELoss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = MSELoss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       4.47400045)
>>> loss = MSELoss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       17.89600182)
Source code in ppsci/loss/mse.py
class MSELoss(base.Loss):
    r"""Class for mean squared error loss.

    $$
    L =
    \begin{cases}
        \dfrac{1}{N} \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2, & \text{if reduction='mean'} \\
        \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2, & \text{if reduction='sum'}
    \end{cases}
    $$

    $$
    \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N}
    $$

    Args:
        reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean".
        weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.

    Examples:
        >>> import paddle
        >>> from ppsci.loss import MSELoss

        >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
        ...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
        ...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
        >>> weight = {'u': 0.8, 'v': 0.2}
        >>> loss = MSELoss(weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               4.47400045)

        >>> loss = MSELoss(reduction="sum", weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               17.89600182)
    """

    def __init__(
        self,
        reduction: Literal["mean", "sum"] = "mean",
        weight: Optional[Union[float, Dict[str, float]]] = None,
    ):
        if reduction not in ["mean", "sum"]:
            raise ValueError(
                f"reduction should be 'mean' or 'sum', but got {reduction}"
            )
        super().__init__(reduction, weight)

    def forward(self, output_dict, label_dict, weight_dict=None):
        losses = 0.0
        for key in label_dict:
            loss = F.mse_loss(output_dict[key], label_dict[key], "none")
            if weight_dict and key in weight_dict:
                loss *= weight_dict[key]

            if "area" in output_dict:
                loss *= output_dict["area"]

            if self.reduction == "sum":
                loss = loss.sum()
            elif self.reduction == "mean":
                loss = loss.mean()
            if isinstance(self.weight, (float, int)):
                loss *= self.weight
            elif isinstance(self.weight, dict) and key in self.weight:
                loss *= self.weight[key]

            losses += loss
        return losses

ChamferLoss

Bases: Loss

Class for Chamfer distance loss.

\[ L = \dfrac{1}{|S_1|} \sum_{x \in S_1} \min_{y \in S_2} \Vert x - y \Vert_2^2 + \dfrac{1}{|S_2|} \sum_{y \in S_2} \min_{x \in S_1} \Vert y - x \Vert_2^2 \]
\[ \text{where } S_1 \text{ and } S_2 \text{ are the coordinate matrices of the two point clouds}. \]
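
Concretely, for one pair of tiny point clouds the two directed terms can be evaluated as follows (a hedged sketch mirroring the implementation below, not library API):

import paddle

s1 = paddle.to_tensor([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]])  # [B=1, N1=2, 3]
s2 = paddle.to_tensor([[[0.0, 0.0, 1.0]]])                   # [B=1, N2=1, 3]
# pairwise squared distances, [B, N1, N2]
dist = ((s1.unsqueeze(2) - s2.unsqueeze(1)) ** 2).sum(axis=3)
chamfer = dist.min(axis=2).mean() + dist.min(axis=1).mean()
# dist = [[[1.], [2.]]], so chamfer = mean([1, 2]) + mean([1]) = 2.5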

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| weight | Optional[Union[float, Dict[str, float]]] | Weight for loss. Defaults to None. | None |

Examples:

>>> import paddle
>>> from ppsci.loss import ChamferLoss
>>> _ = paddle.seed(42)
>>> batch_point_cloud1 = paddle.rand([2, 100, 3])
>>> batch_point_cloud2 = paddle.rand([2, 50, 3])
>>> output_dict = {"s1": batch_point_cloud1}
>>> label_dict  = {"s1": batch_point_cloud2}
>>> weight = {"s1": 0.8}
>>> loss = ChamferLoss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       0.04415882)
Source code in ppsci/loss/chamfer.py
class ChamferLoss(base.Loss):
    r"""Class for Chamfe distance loss.

    $$
    L = \dfrac{1}{S_1} \sum_{x \in S_1} \min_{y \in S_2} \Vert x - y \Vert_2^2 + \dfrac{1}{S_2} \sum_{y \in S_2} \min_{x \in S_1} \Vert y - x \Vert_2^2
    $$

    $$
    \text{where } S_1 \text{ and } S_2 \text{ is the coordinate matrix of two point clouds}.
    $$

    Args:
        weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.

    Examples:
        >>> import paddle
        >>> from ppsci.loss import ChamferLoss
        >>> _ = paddle.seed(42)
        >>> batch_point_cloud1 = paddle.rand([2, 100, 3])
        >>> batch_point_cloud2 = paddle.rand([2, 50, 3])
        >>> output_dict = {"s1": batch_point_cloud1}
        >>> label_dict  = {"s1": batch_point_cloud2}
        >>> weight = {"s1": 0.8}
        >>> loss = ChamferLoss(weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               0.04415882)
    """

    def __init__(
        self,
        weight: Optional[Union[float, Dict[str, float]]] = None,
    ):
        super().__init__("mean", weight)

    def forward(self, output_dict, label_dict, weight_dict=None):
        losses = 0.0
        for key in label_dict:
            s1 = output_dict[key]
            s2 = label_dict[key]
            N1, N2 = s1.shape[1], s2.shape[1]

            # [B, N1, N2, 3]
            s1_expand = paddle.expand(s1.reshape([-1, N1, 1, 3]), shape=[-1, N1, N2, 3])
            # [B, N1, N2, 3]
            s2_expand = paddle.expand(s2.reshape([-1, 1, N2, 3]), shape=[-1, N1, N2, 3])

            dis = ((s1_expand - s2_expand) ** 2).sum(axis=3)  # [B, N1, N2]
            loss_s12 = dis.min(axis=2)  # [B, N1]
            loss_s21 = dis.min(axis=1)  # [B, N2]
            loss = loss_s12.mean() + loss_s21.mean()

            if weight_dict and key in weight_dict:
                loss *= weight_dict[key]

            if isinstance(self.weight, (float, int)):
                loss *= self.weight
            elif isinstance(self.weight, dict) and key in self.weight:
                loss *= self.weight[key]

            losses += loss
        return losses

CausalMSELoss

Bases: Loss

Class for causally weighted mean squared error loss.

\[ L = \frac{1}{M} \displaystyle\sum_{i=1}^M{w_i} \mathcal{L}_r^i, \]

where \(w_i=\exp (-\epsilon \displaystyle\sum_{k=1}^{i-1} \mathcal{L}_r^k), i=2,3, \ldots, M.\)
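
A small sketch of how these causal weights come out (illustrative values; `acc_mat` is the strictly lower-triangular ones matrix registered as a buffer in the implementation below):

import paddle

tol = 1.0
chunk_losses = paddle.to_tensor([[0.5], [0.2], [0.1]])  # per-chunk residuals, [M, 1]
acc_mat = paddle.tril(paddle.ones([3, 3]), -1)  # row i sums the losses of chunks k < i
w = paddle.exp(-tol * (acc_mat @ chunk_losses))
# w = [exp(0), exp(-0.5), exp(-0.7)] ≈ [1.0, 0.607, 0.497]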

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| n_chunks | int | \(M\), number of split time windows. | required |
| reduction | Literal['mean', 'sum'] | Reduction method. Defaults to "mean". | 'mean' |
| weight | Optional[Union[float, Dict[str, float]]] | Weight for loss. Defaults to None. | None |
| tol | float | Causal tolerance, i.e. \(\epsilon\) in the paper. Defaults to 1.0. | 1.0 |

Examples:

>>> import paddle
>>> from ppsci.loss import CausalMSELoss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9, 1.0], [1.1, -1.3, 0.0]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0, -0.1], [-0.2, 2.5, 2.0]])}
>>> loss = CausalMSELoss(n_chunks=3)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       0.96841478)
Source code in ppsci/loss/mse.py
class CausalMSELoss(base.Loss):
    r"""Class for mean squared error loss.

    $$
    L = \frac{1}{M} \displaystyle\sum_{i=1}^M{w_i} \mathcal{L}_r^i,
    $$

    where $w_i=\exp (-\epsilon \displaystyle\sum_{k=1}^{i-1} \mathcal{L}_r^k), i=2,3, \ldots, M.$

    Args:
        n_chunks (int): $M$, Number of split time windows.
        reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean".
        weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.
        tol (float, optional): Causal tolerance, i.e. $\epsilon$ in paper. Defaults to 1.0.

    Examples:
        >>> import paddle
        >>> from ppsci.loss import CausalMSELoss

        >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9, 1.0], [1.1, -1.3, 0.0]])}
        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0, -0.1], [-0.2, 2.5, 2.0]])}
        >>> loss = CausalMSELoss(n_chunks=3)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               0.96841478)
    """

    def __init__(
        self,
        n_chunks: int,
        reduction: Literal["mean", "sum"] = "mean",
        weight: Optional[Union[float, Dict[str, float]]] = None,
        tol: float = 1.0,
    ):
        if n_chunks <= 0:
            raise ValueError(f"n_chunks should be positive, but got {n_chunks}")
        if reduction not in ["mean", "sum"]:
            raise ValueError(
                f"reduction should be 'mean' or 'sum', but got {reduction}"
            )
        super().__init__(reduction, weight)
        self.n_chunks = n_chunks
        self.tol = tol
        self.register_buffer(
            "acc_mat", paddle.tril(paddle.ones([n_chunks, n_chunks]), -1)
        )

    def forward(self, output_dict, label_dict, weight_dict=None):
        losses = 0.0
        for key in label_dict:
            loss = F.mse_loss(output_dict[key], label_dict[key], "none")
            if weight_dict and key in weight_dict:
                loss *= weight_dict[key]

            if "area" in output_dict:
                loss *= output_dict["area"]

            # causal weighting
            loss_t = loss.reshape([self.n_chunks, -1])  # [nt, nx]
            weight_t = paddle.exp(
                -self.tol * (self.acc_mat @ loss_t.mean(-1, keepdim=True))
            )  # [nt, nt] x [nt, 1] ==> [nt, 1]
            assert weight_t.shape[0] == self.n_chunks
            loss = loss_t * weight_t.detach()

            if self.reduction == "sum":
                loss = loss.sum()
            elif self.reduction == "mean":
                loss = loss.mean()
            if isinstance(self.weight, (float, int)):
                loss *= self.weight
            elif isinstance(self.weight, dict) and key in self.weight:
                loss *= self.weight[key]

            losses += loss
        return losses

MSELossWithL2Decay

Bases: MSELoss

MSELoss with L2 decay.

\[ L = \begin{cases} \dfrac{1}{N} \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2 + \displaystyle\sum_{i=1}^{M}{\Vert \mathbf{K_i} \Vert_F^2}, & \text{if reduction='mean'} \\ \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2 + \displaystyle\sum_{i=1}^{M}{\Vert \mathbf{K_i} \Vert_F^2}, & \text{if reduction='sum'} \end{cases} \]
\[ \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N}, \mathbf{K_i} \in \mathcal{R}^{O_i \times P_i} \]

\(M\) is the number of outputs on which regularization is applied, i.e. the number of keys in regularization_dict.
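
Per regularized key, the added decay term is the squared Frobenius norm of that output tensor, scaled by its entry in `regularization_dict`; a sketch of just this term with made-up values:

import paddle

K = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
reg_weight = 2.0
decay = reg_weight * K.pow(2).sum()  # 2 * (1 + 4 + 9 + 16) = 60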

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| reduction | Literal['mean', 'sum'] | Specifies the reduction to apply to the output: 'mean' or 'sum'. Defaults to "mean". | 'mean' |
| regularization_dict | Optional[Dict[str, float]] | Regularization dictionary. Defaults to None. | None |
| weight | Optional[Union[float, Dict[str, float]]] | Weight for loss. Defaults to None. | None |

Raises:

| Type | Description |
| --- | --- |
| ValueError | reduction should be 'mean' or 'sum'. |

Examples:

>>> import paddle
>>> from ppsci.loss import MSELossWithL2Decay
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> regularization_dict = {'u': 2.0}
>>> loss = MSELossWithL2Decay(regularization_dict=regularization_dict, weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       12.39400005)
>>> regularization_dict = {'v': 1.0}
>>> loss = MSELossWithL2Decay(reduction="sum", regularization_dict=regularization_dict, weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       21.85600090)
Source code in ppsci/loss/mse.py
class MSELossWithL2Decay(MSELoss):
    r"""MSELoss with L2 decay.

    $$
    L =
    \begin{cases}
        \dfrac{1}{N} \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2 + \displaystyle\sum_{i=1}^{M}{\Vert \mathbf{K_i} \Vert_F^2}, & \text{if reduction='mean'} \\
         \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2 + \displaystyle\sum_{i=1}^{M}{\Vert \mathbf{K_i} \Vert_F^2}, & \text{if reduction='sum'}
    \end{cases}
    $$

    $$
    \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N}, \mathbf{K_i} \in \mathcal{R}^{O_i \times P_i}
    $$

    $M$ is the number of outputs on which regularization is applied.

    Args:
        reduction (Literal["mean", "sum"], optional): Specifies the reduction to apply to the output: 'mean' | 'sum'. Defaults to "mean".
        regularization_dict (Optional[Dict[str, float]]): Regularization dictionary. Defaults to None.
        weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.

    Raises:
        ValueError: reduction should be 'mean' or 'sum'.

    Examples:
        >>> import paddle
        >>> from ppsci.loss import MSELossWithL2Decay

        >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
        ...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
        ...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
        >>> weight = {'u': 0.8, 'v': 0.2}
        >>> regularization_dict = {'u': 2.0}
        >>> loss = MSELossWithL2Decay(regularization_dict=regularization_dict, weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               12.39400005)

        >>> regularization_dict = {'v': 1.0}
        >>> loss = MSELossWithL2Decay(reduction="sum", regularization_dict=regularization_dict, weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               21.85600090)
    """

    def __init__(
        self,
        reduction: Literal["mean", "sum"] = "mean",
        regularization_dict: Optional[Dict[str, float]] = None,
        weight: Optional[Union[float, Dict[str, float]]] = None,
    ):
        if reduction not in ["mean", "sum"]:
            raise ValueError(
                f"reduction should be 'mean' or 'sum', but got {reduction}"
            )
        super().__init__(reduction, weight)
        self.regularization_dict = regularization_dict

    def forward(self, output_dict, label_dict, weight_dict=None):
        losses = super().forward(output_dict, label_dict, weight_dict)

        if self.regularization_dict is not None:
            for reg_key, reg_weight in self.regularization_dict.items():
                loss = output_dict[reg_key].pow(2).sum()
                losses += loss * reg_weight
        return losses

IntegralLoss

Bases: Loss

Class for integral loss with the Monte-Carlo integration algorithm.

\[ L = \begin{cases} \dfrac{1}{N} \Vert \displaystyle\sum_{i=1}^{M}{\mathbf{s}_i \cdot \mathbf{x}_i} - \mathbf{y} \Vert_2^2, & \text{if reduction='mean'} \\ \Vert \displaystyle\sum_{i=1}^{M}{\mathbf{s}_i \cdot \mathbf{x}_i} - \mathbf{y} \Vert_2^2, & \text{if reduction='sum'} \end{cases} \]
\[ \mathbf{x}, \mathbf{s} \in \mathcal{R}^{M \times N}, \mathbf{y} \in \mathcal{R}^{N} \]
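
The "area" entry of the output dictionary supplies the weights \(\mathbf{s}\); the Monte-Carlo estimate compared against the label is simply the weighted sum below (illustrative values, mirroring the forward implementation):

import paddle

u = paddle.to_tensor([[0.5, 2.2, 0.9]])        # sampled values x_i
area = paddle.to_tensor([[0.01, 0.02, 0.03]])  # quadrature weights s_i
integral = (u * area).sum(axis=1)  # 0.5*0.01 + 2.2*0.02 + 0.9*0.03 = 0.076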

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| reduction | Literal['mean', 'sum'] | Reduction method. Defaults to "mean". | 'mean' |
| weight | Optional[Union[float, Dict[str, float]]] | Weight for loss. Defaults to None. | None |

Examples:

>>> import paddle
>>> from ppsci.loss import IntegralLoss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]),
...                'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]),
...                'area': paddle.to_tensor([[0.01, 0.02, 0.03], [0.01, 0.02, 0.03]])}
>>> label_dict = {'u': paddle.to_tensor([-1.8, 0.0]),
...               'v': paddle.to_tensor([0.1, 0.1])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = IntegralLoss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       1.40911996)
>>> loss = IntegralLoss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       2.81823993)
Source code in ppsci/loss/integral.py
class IntegralLoss(base.Loss):
    r"""Class for integral loss with Monte-Carlo integration algorithm.

    $$
    L =
    \begin{cases}
        \dfrac{1}{N} \Vert \displaystyle\sum_{i=1}^{M}{\mathbf{s}_i \cdot \mathbf{x}_i} - \mathbf{y} \Vert_2^2, & \text{if reduction='mean'} \\
         \Vert \displaystyle\sum_{i=1}^{M}{\mathbf{s}_i \cdot \mathbf{x}_i} - \mathbf{y} \Vert_2^2, & \text{if reduction='sum'}
    \end{cases}
    $$

    $$
    \mathbf{x}, \mathbf{s} \in \mathcal{R}^{M \times N}, \mathbf{y} \in \mathcal{R}^{N}
    $$

    Args:
        reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean".
        weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.

    Examples:
        >>> import paddle
        >>> from ppsci.loss import IntegralLoss

        >>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]),
        ...                'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]),
        ...                'area': paddle.to_tensor([[0.01, 0.02, 0.03], [0.01, 0.02, 0.03]])}
        >>> label_dict = {'u': paddle.to_tensor([-1.8, 0.0]),
        ...               'v': paddle.to_tensor([0.1, 0.1])}
        >>> weight = {'u': 0.8, 'v': 0.2}
        >>> loss = IntegralLoss(weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               1.40911996)

        >>> loss = IntegralLoss(reduction="sum", weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               2.81823993)
    """

    def __init__(
        self,
        reduction: Literal["mean", "sum"] = "mean",
        weight: Optional[Union[float, Dict[str, float]]] = None,
    ):
        if reduction not in ["mean", "sum"]:
            raise ValueError(
                f"reduction should be 'mean' or 'sum', but got {reduction}"
            )
        super().__init__(reduction, weight)

    def forward(self, output_dict, label_dict, weight_dict=None):
        losses = 0.0
        for key in label_dict:
            loss = F.mse_loss(
                (output_dict[key] * output_dict["area"]).sum(axis=1),
                label_dict[key],
                "none",
            )
            if weight_dict and key in weight_dict:
                loss *= weight_dict[key]

            if self.reduction == "sum":
                loss = loss.sum()
            elif self.reduction == "mean":
                loss = loss.mean()

            if isinstance(self.weight, (float, int)):
                loss *= self.weight
            elif isinstance(self.weight, dict) and key in self.weight:
                loss *= self.weight[key]

            losses += loss
        return losses

PeriodicL1Loss

Bases: Loss

Class for periodic l1 loss.

\[ L = \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_1 \]

\(\mathbf{x_l} \in \mathcal{R}^{N}\) is the first half of batch output, \(\mathbf{x_r} \in \mathcal{R}^{N}\) is the second half of batch output.

when reduction is set to "mean"

\[ L = MEAN \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_1 \right) \]

when reduction is set to "sum"

\[ L = SUM \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_1 \right) \]
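
Periodic losses compare the first half of the batch with the second half, so samples on the two periodic boundaries must be stacked as [left points; right points] along the batch axis. A sketch of the pairing with made-up values:

import paddle

left = paddle.to_tensor([[0.1], [0.2]])   # u on one periodic boundary
right = paddle.to_tensor([[0.1], [0.3]])  # u on the matching boundary, same ordering
u = paddle.concat([left, right], axis=0)  # shape [4, 1]; the halves are the pairs
residual = paddle.abs(u[:2] - u[2:]).sum(axis=1)  # per-pair L1 residual: [0.0, 0.1]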

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| reduction | Literal['mean', 'sum'] | Reduction method. Defaults to "mean". | 'mean' |
| weight | Optional[Union[float, Dict[str, float]]] | Weight for loss. Defaults to None. | None |

Examples:

>>> import paddle
>>> from ppsci.loss import PeriodicL1Loss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]),
...                'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 0.0, 1.0], [-0.2, 0.2, 2.5]]),
...               'v': paddle.to_tensor([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = PeriodicL1Loss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       4.19999981)
>>> loss = PeriodicL1Loss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       4.19999981)
Source code in ppsci/loss/l1.py
class PeriodicL1Loss(base.Loss):
    r"""Class for periodic l1 loss.

    $$
    L = \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_1
    $$

    $\mathbf{x_l} \in \mathcal{R}^{N}$ is the first half of batch output,
    $\mathbf{x_r} \in \mathcal{R}^{N}$ is the second half of batch output.

    when `reduction` is set to "mean"

    $$
    L = MEAN \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_1 \right)
    $$

    when `reduction` is set to "sum"

    $$
    L = SUM \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_1 \right)
    $$

    Args:
        reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean".
        weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.

    Examples:
        >>> import paddle
        >>> from ppsci.loss import PeriodicL1Loss

        >>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]),
        ...                'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]])}
        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 0.0, 1.0], [-0.2, 0.2, 2.5]]),
        ...               'v': paddle.to_tensor([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])}
        >>> weight = {'u': 0.8, 'v': 0.2}
        >>> loss = PeriodicL1Loss(weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               4.19999981)

        >>> loss = PeriodicL1Loss(reduction="sum", weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               4.19999981)
    """

    def __init__(
        self,
        reduction: Literal["mean", "sum"] = "mean",
        weight: Optional[Union[float, Dict[str, float]]] = None,
    ):
        if reduction not in ["mean", "sum"]:
            raise ValueError(
                f"reduction should be 'mean' or 'sum', but got {reduction}"
            )
        super().__init__(reduction, weight)

    def forward(self, output_dict, label_dict, weight_dict=None):
        losses = 0.0
        for key in label_dict:
            n_output = len(output_dict[key])
            if n_output % 2 > 0:
                raise ValueError(
                    f"Length of output({n_output}) of key({key}) should be even."
                )

            n_output //= 2
            loss = F.l1_loss(
                output_dict[key][:n_output], output_dict[key][n_output:], "none"
            )
            if weight_dict and key in weight_dict:
                loss *= weight_dict[key]
            if "area" in output_dict:
                loss *= output_dict["area"]

            loss = loss.sum(axis=1)

            if self.reduction == "sum":
                loss = loss.sum()
            elif self.reduction == "mean":
                loss = loss.mean()

            if isinstance(self.weight, (float, int)):
                loss *= self.weight
            elif isinstance(self.weight, dict) and key in self.weight:
                loss *= self.weight[key]

            losses += loss
        return losses

PeriodicL2Loss

Bases: Loss

Class for periodic l2 loss.

\[ L = \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2 \]

\(\mathbf{x_l} \in \mathcal{R}^{N}\) is the first half of batch output, \(\mathbf{x_r} \in \mathcal{R}^{N}\) is the second half of batch output.

when reduction is set to "mean"

\[ L = MEAN \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2 \right) \]

when reduction is set to "sum"

\[ L = SUM \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2 \right) \]

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| reduction | Literal['mean', 'sum'] | Reduction method. Defaults to "mean". | 'mean' |
| weight | Optional[Union[float, Dict[str, float]]] | Weight for loss. Defaults to None. | None |

Examples:

>>> import paddle
>>> from ppsci.loss import PeriodicL2Loss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]),
...                'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 0.0, 1.0], [-0.2, 0.2, 2.5]]),
...               'v': paddle.to_tensor([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = PeriodicL2Loss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       2.67581749)
>>> loss = PeriodicL2Loss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       2.67581749)
Source code in ppsci/loss/l2.py
class PeriodicL2Loss(base.Loss):
    r"""Class for Periodic l2 loss.

    $$
    L = \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2
    $$

    $\mathbf{x_l} \in \mathcal{R}^{N}$ is the first half of batch output,
    $\mathbf{x_r} \in \mathcal{R}^{N}$ is the second half of batch output.

    when `reduction` is set to "mean"

    $$
    L = MEAN \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2 \right)
    $$

    when `reduction` is set to "sum"

    $$
    L = SUM \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2 \right)
    $$

    Args:
        reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean".
        weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.

    Examples:
        >>> import paddle
        >>> from ppsci.loss import PeriodicL2Loss

        >>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]),
        ...                'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]])}
        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 0.0, 1.0], [-0.2, 0.2, 2.5]]),
        ...               'v': paddle.to_tensor([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])}
        >>> weight = {'u': 0.8, 'v': 0.2}
        >>> loss = PeriodicL2Loss(weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               2.67581749)

        >>> loss = PeriodicL2Loss(reduction="sum", weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               2.67581749)
    """

    def __init__(
        self,
        reduction: Literal["mean", "sum"] = "mean",
        weight: Optional[Union[float, Dict[str, float]]] = None,
    ):
        if reduction not in ["mean", "sum"]:
            raise ValueError(
                f"reduction should be 'mean' or 'sum', but got {reduction}"
            )
        super().__init__(reduction, weight)

    def forward(self, output_dict, label_dict, weight_dict=None):
        losses = 0.0
        for key in label_dict:
            n_output = len(output_dict[key])
            if n_output % 2 > 0:
                raise ValueError(
                    f"Length of output({n_output}) of key({key}) should be even."
                )
            n_output //= 2

            loss = F.mse_loss(
                output_dict[key][:n_output], output_dict[key][n_output:], "none"
            )
            if weight_dict and key in weight_dict:
                loss *= weight_dict[key]

            if "area" in output_dict:
                loss *= output_dict["area"]

            loss = loss.sum(axis=1).sqrt()

            if self.reduction == "sum":
                loss = loss.sum()
            elif self.reduction == "mean":
                loss = loss.mean()

            if isinstance(self.weight, (float, int)):
                loss *= self.weight
            elif isinstance(self.weight, dict) and key in self.weight:
                loss *= self.weight[key]

            losses += loss
        return losses

PeriodicMSELoss

Bases: Loss

Class for periodic mean squared error loss.

\[ L = \begin{cases} \dfrac{1}{N} \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2^2, & \text{if reduction='mean'} \\ \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2^2, & \text{if reduction='sum'} \end{cases} \]

\(\mathbf{x_l} \in \mathcal{R}^{N}\) is the first half of batch output, \(\mathbf{x_r} \in \mathcal{R}^{N}\) is the second half of batch output.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| reduction | Literal['mean', 'sum'] | Reduction method. Defaults to "mean". | 'mean' |
| weight | Optional[Union[float, Dict[str, float]]] | Weight for loss. Defaults to None. | None |

Examples:

>>> import paddle
>>> from ppsci.loss import PeriodicMSELoss
>>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
>>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
>>> weight = {'u': 0.8, 'v': 0.2}
>>> loss = PeriodicMSELoss(weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       2.59999967)
>>> loss = PeriodicMSELoss(reduction="sum", weight=weight)
>>> result = loss(output_dict, label_dict)
>>> print(result)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       5.19999933)
Source code in ppsci/loss/mse.py
class PeriodicMSELoss(base.Loss):
    r"""Class for periodic mean squared error loss.

    $$
    L =
    \begin{cases}
        \dfrac{1}{N} \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2^2, & \text{if reduction='mean'} \\
        \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2^2, & \text{if reduction='sum'}
    \end{cases}
    $$

    $\mathbf{x_l} \in \mathcal{R}^{N}$ is the first half of batch output,
    $\mathbf{x_r} \in \mathcal{R}^{N}$ is the second half of batch output.

    Args:
        reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean".
        weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None.

    Examples:
        >>> import paddle
        >>> from ppsci.loss import PeriodicMSELoss

        >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
        ...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
        ...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
        >>> weight = {'u': 0.8, 'v': 0.2}
        >>> loss = PeriodicMSELoss(weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               2.59999967)

        >>> loss = PeriodicMSELoss(reduction="sum", weight=weight)
        >>> result = loss(output_dict, label_dict)
        >>> print(result)
        Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
               5.19999933)
    """

    def __init__(
        self,
        reduction: Literal["mean", "sum"] = "mean",
        weight: Optional[Union[float, Dict[str, float]]] = None,
    ):
        if reduction not in ["mean", "sum"]:
            raise ValueError(
                f"reduction should be 'mean' or 'sum', but got {reduction}"
            )
        super().__init__(reduction, weight)

    def forward(self, output_dict, label_dict, weight_dict=None):
        losses = 0.0
        for key in label_dict:
            n_output = len(output_dict[key])
            if n_output % 2 > 0:
                raise ValueError(
                    f"Length of output({n_output}) of key({key}) should be even."
                )

            n_output //= 2
            loss = F.mse_loss(
                output_dict[key][:n_output], output_dict[key][n_output:], "none"
            )
            if weight_dict:
                loss *= weight_dict[key]
            if "area" in output_dict:
                loss *= output_dict["area"]

            if self.reduction == "sum":
                loss = loss.sum()
            elif self.reduction == "mean":
                loss = loss.mean()

            if isinstance(self.weight, (float, int)):
                loss *= self.weight
            elif isinstance(self.weight, dict) and key in self.weight:
                loss *= self.weight[key]

            losses += loss
        return losses