master 3b6bc307c63c cached
252 files
505.8 KB
169.5k tokens
412 symbols
1 requests
Download .txt
Showing preview only (563K chars total). Download the full file or copy to clipboard to get everything.
Repository: neuraloperator/physics_informed
Branch: master
Commit: 3b6bc307c63c
Files: 252
Total size: 505.8 KB

Directory structure:
gitextract_nppznbmv/

├── .dockerignore
├── .gitignore
├── Dockerfile
├── LICENSE
├── README.md
├── baselines/
│   ├── __init__.py
│   ├── data.py
│   ├── deepxde_deeponet.py
│   ├── loss.py
│   ├── model.py
│   ├── pinns_ns_05s.py
│   ├── pinns_ns_50s.py
│   ├── sapinns-50s.py
│   ├── sapinns.py
│   ├── test.py
│   ├── tqd_sapinns.py
│   ├── tqd_utils.py
│   ├── train_darcy.py
│   ├── train_ns.py
│   ├── unet3d.py
│   └── utils.py
├── cavity_flow.py
├── configs/
│   ├── baseline/
│   │   ├── NS-50s-LAAF.yaml
│   │   ├── NS-50s.yaml
│   │   ├── Re500-05s-deeponet.yaml
│   │   ├── Re500-pinns-05s-LAAF.yaml
│   │   ├── Re500-pinns-05s-SA.yaml
│   │   ├── Re500-pinns-05s.yaml
│   │   └── Re500-pinns.yaml
│   ├── finetune/
│   │   ├── Darcy-finetune.yaml
│   │   ├── Re100-finetune-1s.yaml
│   │   ├── Re200-finetune-1s.yaml
│   │   ├── Re250-finetune-1s.yaml
│   │   ├── Re300-finetune-1s.yaml
│   │   ├── Re350-finetune-1s.yaml
│   │   ├── Re400-finetune-1s.yaml
│   │   ├── Re500-finetune-05s-2layer.yaml
│   │   ├── Re500-finetune-05s-eqn.yaml
│   │   ├── Re500-finetune-05s4C0.yaml
│   │   ├── Re500-finetune-05s4C1.yaml
│   │   ├── Re500-finetune-05s4C4.yaml
│   │   ├── Re500-finetune-05s4k-2layer.yaml
│   │   ├── Re500-finetune-05s4k1k.yaml
│   │   ├── Re500-finetune-05s4k4-2layer.yaml
│   │   ├── Re500-finetune-05s4k4k.yaml
│   │   └── Re500-finetune-1s.yaml
│   ├── instance/
│   │   ├── Re500-1_8-FNO.yaml
│   │   ├── Re500-1_8-PINO-s.yaml
│   │   └── Re500-1_8-PINO.yaml
│   ├── ngc/
│   │   ├── Re500-1_8-dat0-PINO.yaml
│   │   ├── Re500-1_8-dat200-PINO.yaml
│   │   ├── Re500-1_8-dat40-PINO.yaml
│   │   ├── Re500-1_8-dat400-PINO.yaml
│   │   ├── Re500-1_8-dat80-PINO.yaml
│   │   ├── Re500-1_8-dat800-PINO.yaml
│   │   ├── Re500-1_8-res16-PINO.yaml
│   │   └── Re500-1_8-res32-PINO.yaml
│   ├── operator/
│   │   ├── Darcy-pretrain.yaml
│   │   ├── Re500-05s-1000-FNO.yaml
│   │   ├── Re500-05s-1000-PINO.yaml
│   │   ├── Re500-05s-3000-FNO.yaml
│   │   ├── Re500-05s-600-FNO.yaml
│   │   ├── Re500-05s-600-PINO-xl.yaml
│   │   ├── Re500-05s-600-PINO.yaml
│   │   ├── Re500-05s-FNO.yaml
│   │   ├── Re500-1_16-800-FNO-s.yaml
│   │   ├── Re500-1_16-800-PINO-s.yaml
│   │   ├── Re500-1_4-2000-FNO.yaml
│   │   ├── Re500-1_8-0-PINO-s.yaml
│   │   ├── Re500-1_8-1200-FNO.yaml
│   │   ├── Re500-1_8-1200-PINO.yaml
│   │   ├── Re500-1_8-200-FNO-s.yaml
│   │   ├── Re500-1_8-2000-FNO-s.yaml
│   │   ├── Re500-1_8-2000-FNO-xl.yaml
│   │   ├── Re500-1_8-2000-PINO.yaml
│   │   ├── Re500-1_8-2200-FNO-s.yaml
│   │   ├── Re500-1_8-2200-PINO-s.yaml
│   │   ├── Re500-1_8-800-FNO-s.yaml
│   │   ├── Re500-1_8-800-FNO-s32.yaml
│   │   ├── Re500-1_8-800-PINO-s.yaml
│   │   ├── Re500-1_8-800-PINO-s16.yaml
│   │   ├── Re500-1_8-800-PINO-s32.yaml
│   │   ├── Re500-1_8-800-UNet.yaml
│   │   ├── Re500-1_8-dat1.6k-PINO.yaml
│   │   ├── Re500-1_8-dat400-FNO.yaml
│   │   ├── Re500-1s-FNO.yaml
│   │   ├── Re500-3000-FNO.yaml
│   │   ├── Re500-3000-PINO.yaml
│   │   ├── Re500-4000-FNO.yaml
│   │   ├── Re500-FNO.yaml
│   │   └── Re500-PINO.yaml
│   ├── pretrain/
│   │   ├── Darcy-pretrain-deeponet.yaml
│   │   ├── Darcy-pretrain.yaml
│   │   ├── Re100-pretrain-1s.yaml
│   │   ├── Re200-pretrain-1s.yaml
│   │   ├── Re250-pretrain-1s.yaml
│   │   ├── Re300-pretrain-1s.yaml
│   │   ├── Re350-pretrain-1s.yaml
│   │   ├── Re400-pretrain-1s.yaml
│   │   ├── Re500-05s-deeponet.yaml
│   │   ├── Re500-FNO-1s-100.yaml
│   │   ├── Re500-FNO-1s-200.yaml
│   │   ├── Re500-FNO-1s-400.yaml
│   │   ├── Re500-PINO-1s-100-4v4.yaml
│   │   ├── Re500-PINO-1s-200-4v4.yaml
│   │   ├── Re500-PINO-1s-400-1v1.yaml
│   │   ├── Re500-pretrain-05s-4C1.yaml
│   │   ├── Re500-pretrain-05s-4C4.yaml
│   │   ├── Re500-pretrain-05s-eqn.yaml
│   │   ├── Re500-pretrain-1s.yaml
│   │   └── burgers-pretrain.yaml
│   ├── scratch/
│   │   ├── Re100-scratch-1s.yaml
│   │   ├── Re200-scratch-1s.yaml
│   │   ├── Re250-scratch-1s.yaml
│   │   ├── Re300-scratch-1s.yaml
│   │   ├── Re350-scratch-1s.yaml
│   │   ├── Re400-scratch-1s.yaml
│   │   ├── Re500-scratch-05s-new.yaml
│   │   ├── Re500-scratch-05s.yaml
│   │   ├── Re500-scratch-1s-progressive.yaml
│   │   └── Re500-scratch-1s.yaml
│   ├── test/
│   │   ├── Re500-05s-deeponet.yaml
│   │   ├── Re500-05s-test.yaml
│   │   ├── Re500-05s.yaml
│   │   ├── Re500-1s-100.yaml
│   │   ├── burgers.yaml
│   │   ├── darcy-deeponet.yaml
│   │   └── darcy.yaml
│   └── transfer/
│       ├── Re100to100-1s.yaml
│       ├── Re100to200-1s.yaml
│       ├── Re100to250-1s.yaml
│       ├── Re100to300-1s.yaml
│       ├── Re100to350-1s.yaml
│       ├── Re100to400-1s.yaml
│       ├── Re100to500-1s.yaml
│       ├── Re200to100-1s.yaml
│       ├── Re200to200-1s.yaml
│       ├── Re200to250-1s.yaml
│       ├── Re200to300-1s.yaml
│       ├── Re200to350-1s.yaml
│       ├── Re200to400-1s.yaml
│       ├── Re200to500-1s.yaml
│       ├── Re250to100-1s.yaml
│       ├── Re250to200-1s.yaml
│       ├── Re250to250-1s.yaml
│       ├── Re250to300-1s.yaml
│       ├── Re250to350-1s.yaml
│       ├── Re250to400-1s.yaml
│       ├── Re250to500-1s.yaml
│       ├── Re300to100-1s.yaml
│       ├── Re300to200-1s.yaml
│       ├── Re300to250-1s.yaml
│       ├── Re300to300-1s.yaml
│       ├── Re300to350-1s.yaml
│       ├── Re300to400-1s.yaml
│       ├── Re300to500-1s.yaml
│       ├── Re350to100-1s.yaml
│       ├── Re350to200-1s.yaml
│       ├── Re350to250-1s.yaml
│       ├── Re350to300-1s.yaml
│       ├── Re350to350-1s.yaml
│       ├── Re350to400-1s.yaml
│       ├── Re350to500-1s.yaml
│       ├── Re400to100-1s.yaml
│       ├── Re400to200-1s.yaml
│       ├── Re400to250-1s.yaml
│       ├── Re400to300-1s.yaml
│       ├── Re400to350-1s.yaml
│       ├── Re400to400-1s.yaml
│       ├── Re400to500-1s.yaml
│       ├── Re500to100-1s.yaml
│       ├── Re500to200-1s.yaml
│       ├── Re500to250-1s.yaml
│       ├── Re500to300-1s.yaml
│       ├── Re500to350-1s.yaml
│       ├── Re500to400-1s.yaml
│       ├── Re500to500-05s-new.yaml
│       ├── Re500to500-05s.yaml
│       └── Re500to500-1s.yaml
├── deeponet.py
├── download_data.py
├── eval_operator.py
├── generate_data.py
├── inference.py
├── instance_opt.py
├── inverse-darcy-foward.py
├── inverse-darcy.py
├── models/
│   ├── FCN.py
│   ├── __init__.py
│   ├── basics.py
│   ├── core.py
│   ├── fourier1d.py
│   ├── fourier2d.py
│   ├── fourier3d.py
│   ├── lowrank2d.py
│   ├── tfno.py
│   └── utils.py
├── pinns.py
├── prepare_data.py
├── profile-solver-legacy.py
├── profiler/
│   └── calmacs.py
├── run_pino2d.py
├── run_pino3d.py
├── run_solver.py
├── scripts/
│   ├── device1-finetune.sh
│   ├── device2-finetune.sh
│   ├── device3.sh
│   ├── finetune-4k-2layer.sh
│   ├── finetune-4k0.sh
│   ├── finetune-4k1-2layer.sh
│   ├── finetune-4k1.sh
│   ├── finetune-4k4-2layer.sh
│   ├── fnoRe500.sh
│   ├── ngc_submit_pino.sh
│   ├── ngc_test_submit_pino.sh
│   ├── pretrain.sh
│   ├── scratchRe500.sh
│   ├── test-opt/
│   │   └── Re500-1_8.sh
│   ├── train_dat0.sh
│   ├── train_dat200.sh
│   ├── train_dat40.sh
│   ├── train_dat400.sh
│   ├── train_dat80.sh
│   ├── train_dat800.sh
│   ├── train_res16.sh
│   └── train_res32.sh
├── solver/
│   ├── __init__.py
│   ├── kolmogorov_flow.py
│   ├── legacy_solver.py
│   ├── periodic.py
│   ├── random_fields.py
│   ├── rfsampler.py
│   └── spectrum.py
├── train_PINO3d.py
├── train_burgers.py
├── train_darcy.py
├── train_no.py
├── train_operator.py
├── train_pino.py
├── train_unet.py
└── train_utils/
    ├── __init__.py
    ├── adam.py
    ├── data_utils.py
    ├── datasets.py
    ├── distributed.py
    ├── eval_2d.py
    ├── eval_3d.py
    ├── losses.py
    ├── negadam.py
    ├── train_2d.py
    ├── train_3d.py
    └── utils.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .dockerignore
================================================
.vscode
*.py
wandb
config
docs
models
*/*.py
exp
checkpoints
*/__pycache__/**

================================================
FILE: .gitignore
================================================
data
log
.vscode
wandb
**/__pycache__/**
.idea
figs
checkpoints
.ipynb_checkpoints
*.ipynb
*.pt
*.pth
tensordiffeq
exp

================================================
FILE: Dockerfile
================================================
FROM nvcr.io/nvidia/pytorch:22.09-py3
RUN useradd -ms /bin/bash pino
USER pino
ENV PATH=/home/pino/.local/bin:$PATH
RUN pip install --user \
    wandb tqdm pyyaml

================================================
FILE: LICENSE
================================================
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.


================================================
FILE: README.md
================================================
# Physics-Informed Neural Operator for Learning Partial Differential Equations

# 📢 DEPRECATION NOTICE 📢  
----------------------------

🚨 **This repository is no longer maintained.** 🚨 The code in this repository is **deprecated** and may not work with newer dependencies or frameworks.  
For the most up-to-date implementation and continued development, please visit:  

## ➡️ **[NeuralOperator](https://github.com/neuraloperator/neuraloperator)** ⬅️

🔴 We strongly recommend using the latest version to ensure compatibility, performance, and support.🔴  

----------------------------
![PINO Diagram](docs/pino-diagram4.png)

[comment]: <> (![Results on Navier Stokes equation]&#40;docs/solver-pino.png&#41;)

<img src="docs/solver-pino-pinn.png" alt="Results on Navier Stokes equation" width="720" height="501"/>

# Paper Info


This repo contains code for experiments from the paper [Physics-Informed Neural Operator for Learning Partial Differential Equations](https://arxiv.org/abs/2111.03794) (2021) by Zongyi Li, Hongkai Zheng, Nikola Kovachki, David Jin, Haoxuan Chen, Burigede Liu, Kamyar Azizzadenesheli, and Anima Anandkumar.

Abstract: 
> Machine learning methods have recently shown promise in solving partial differential equations (PDEs). They can be classified into two broad categories: solution function approximation and operator learning. The Physics-Informed Neural Network (PINN) is an example of the former while the Fourier neural operator (FNO) is an example of the latter. Both these approaches have shortcomings. The optimization in PINN is challenging and prone to failure, especially on multi-scale dynamic systems. FNO does not suffer from this optimization issue since it carries out supervised learning on a given dataset, but obtaining such data may be too expensive or infeasible. In this work, we propose the physics-informed neural operator (PINO), where we combine the operator-learning and function-optimization frameworks, and this improves convergence rates and accuracy over both PINN and FNO models. In the operator-learning phase, PINO learns the solution operator over multiple instances of the parametric PDE family. In the test-time optimization phase, PINO optimizes the pre-trained operator ansatz for the querying instance of the PDE. Experiments show PINO outperforms previous ML methods on many popular PDE families while retaining the extraordinary speed-up of FNO compared to solvers. In particular, PINO accurately solves long temporal transient flows and  Kolmogorov flows, while PINN and other methods fail to converge.
## Requirements
- Pytorch 1.8.0 or later
- wandb
- tqdm
- scipy
- h5py
- numpy
- DeepXDE:latest
- Latest code from tensordiffeq github master branch (Not tensordiffeq 0.19)
- tensorflow 2.4.0

## Data description
### Burgers equation
[burgers_pino.mat](https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/burgers_pino.mat)

### Darcy flow 
- spatial domain: $x\in (0,1)^2$
- Data file: 
  - [piececonst_r421_N1024_smooth1.mat](https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/piececonst_r421_N1024_smooth1.mat)
  - [piececonst_r421_N1024_smooth2.mat](https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/piececonst_r421_N1024_smooth2.mat)
- Raw data shape: 1024x421x421


### Long roll out of Navier Stokes equation
- spatial domain: $x\in (0, 1)^2$
- temporal domain: $t\in \[0, 49\]$
- forcing: $0.1(\sin(2\pi(x_1+x_2)) + \cos(2\pi(x_1+x_2)))$
- viscosity = 0.001

Data file: `nv_V1e-3_N5000_T50.mat`, with shape 50 x 64 x 64 x 5000 

- train set: 0-4799
- test set: 4799-4999
### Navier Stokes with Reynolds number 500
- spatial domain: $x\in (0, 2\pi)^2$
- temporal domain: $t \in \[0, 0.5\]$
- forcing: $-4\cos(4x_2)$
- Reynolds number: 500

Train set: data of shape (N, T, X, Y) where N is the number of instances, T is temporal resolution, X, Y are spatial resolutions. 
1. [NS_fft_Re500_T4000.npy](https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fft_Re500_T4000.npy) : 4000x64x64x65
2. [NS_fine_Re500_T128_part0.npy](https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part0.npy): 100x129x128x128
3. [NS_fine_Re500_T128_part1.npy](https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part1.npy): 100x129x128x128

Test set: data of shape (N, T, X, Y) where N is the number of instances, T is temporal resolution, X, Y are spatial resolutions. 
1. [NS_Re500_s256_T100_test.npy](https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_Re500_s256_T100_test.npy): 100x129x256x256
2. [NS_fine_Re500_T128_part2.npy](https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part2.npy): 100x129x128x128

Configuration file format: see `.yaml` files under folder `configs` for detail. 

## Code for Burgers equation
### Train PINO
To run PINO for Burgers equation, use, e.g.,
```bash 
python3 train_burgers.py --config_path configs/pretrain/burgers-pretrain.yaml --mode train
```

To test PINO for burgers equation, use, e.g., 
```bash
python3 train_burgers.py --config_path configs/test/burgers.yaml --mode test
```

## Code for Darcy Flow

### Operator learning
To run PINO for Darcy Flow, use, e.g., 
```bash
python3 train_operator.py --config_path configs/pretrain/Darcy-pretrain.yaml
```
To evaluate operator for Darcy Flow, use, e.g., 
```bash
python3 eval_operator.py --config_path configs/test/darcy.yaml
```

### Test-time optimization
To do test-time optimization for Darcy Flow, use, e.g., 
```bash
python3 run_pino2d.py --config_path configs/finetune/Darcy-finetune.yaml --start [starting index] --stop [stopping index]
```

### Baseline
To run DeepONet, use, e.g., 
```bash
python3 deeponet.py --config_path configs/pretrain/Darcy-pretrain-deeponet.yaml --mode train 
```
To test DeepONet, use, e.g., 
```bash
python3 deeponet.py --config_path configs/test/darcy.yaml --mode test
```


## Code for Navier Stokes equation
### Run exp on new dataset
Train PINO with 800 low-res data and 2200 PDE. 
```bash
python3 train_pino.py --config configs/operator/Re500-1_8-800-PINO-s.yaml
```
Train FNO with 800 low-res data and 2200 PDE.
```bash
python3 train_pino.py --config configs/operator/Re500-1_8-800-FNO-s.yaml
```
Run instance-wise finetuning
```bash
python3 instance_opt.py --config configs/instance/Re500-1_8-PINO-s.yaml
```

### Train PINO for short time period
To run operator learning, use, e.g., 
```bash
python3 train_operator.py --config_path configs/pretrain/Re500-pretrain-05s-4C0.yaml
```
To evaluate trained operator, use
```bash
python3 eval_operator.py --config_path configs/test/Re500-05s.yaml
```
To run test-time optimization, use
```bash
python3 train_PINO3d.py --config_path configs/***.yaml 
```

To train Navier Stokes equations sequentially without running `train_PINO3d.py` multiple times, use

```bash
python3 run_pino3d.py --config_path configs/[configuration file name].yaml --start [index of the first data] --stop [which data to stop]
```


### Baseline for short time period
To train DeepONet, use 
```bash
python3 deeponet.py --config_path configs/[configuration file].yaml --mode train
```

To test DeepONet, use 
```bash
python3 deeponet.py --config_path configs/[configuration file].yaml --mode test
```

To train and test PINNs, use, e.g.,  
```bash
python3 pinns.py --config_path configs/baseline/Re500-pinns-05s.yaml --start [starting index] --stop [stopping index]
```

To train and test LAAF-PINN, use, e.g., 
```bash
python3 pinns.py --config_path configs/baseline/Re500-pinns-05s-LAAF.yaml --start [starting index] --stop [stopping index]
```

To train and test SA-PINNs, first copy the latest code of tensordiffeq under the working directory.
Then run: 
```bash 
DDE_BACKEND=pytorch python3 pinns.py --config_path configs/baseline/Re500-pinns-05s-SA.yaml --start [starting index] --stop [stopping index]
```

### Baseline for long roll out
To train and test PINNs, use
```bash
python3 pinns.py --config_path configs/baseline/NS-50s.yaml --start [starting index] --stop [stopping index]
```

To train and test LAAF-PINN, use, e.g., 
```bash
python3 pinns.py --config_path configs/baseline/NS-50s-LAAF.yaml --start [starting index] --stop [stopping index]
```

### Pseudospectral solver for Navier Stokes equation
To run solver, use 
```bash
python3 run_solver.py --config_path configs/Re500-0.5s.yaml
```


================================================
FILE: baselines/__init__.py
================================================


================================================
FILE: baselines/data.py
================================================
import numpy as np
import torch
from torch.utils.data import Dataset
from .utils import get_xytgrid, get_3dboundary, get_3dboundary_points
from train_utils.utils import vor2vel, torch2dgrid
import scipy.io
import h5py


class DarcyFlow(Dataset):
    '''Darcy-flow coefficient/solution pairs loaded from a MATLAB .mat file.

    Each item is a flattened (coefficient, solution) pair, spatially
    subsampled to a grid of (nx // sub) + 1 points per side.
    '''
    def __init__(self,
                 datapath,
                 nx, sub,
                 offset=0,
                 num=1):
        # Grid size after subsampling; +1 keeps the inclusive endpoint.
        self.S = int(nx // sub) + 1
        raw = scipy.io.loadmat(datapath)
        coeff, sol = raw['coeff'], raw['sol']
        window = slice(offset, offset + num)
        self.a = torch.tensor(coeff[window, ::sub, ::sub], dtype=torch.float)
        self.u = torch.tensor(sol[window, ::sub, ::sub], dtype=torch.float)
        # 2D coordinate mesh matching the subsampled resolution.
        self.mesh = torch2dgrid(self.S, self.S)

    def __len__(self):
        # Number of loaded instances.
        return self.a.shape[0]

    def __getitem__(self, item):
        coeff = self.a[item]
        sol = self.u[item]
        return coeff.reshape(-1), sol.reshape(-1)


class NSLong(object):
    '''Long-rollout Navier-Stokes vorticity data read from an HDF5 .mat file.'''

    def __init__(self,
                 datapath,
                 nx, nt,
                 time_scale,
                 offset=0,
                 num=1, vel=False):
        '''
        Load vorticity data and optionally derive velocity fields.
        Args:
            datapath: path to data file
            nx: number of points in each spatial domain
            nt: number of points in temporal domain
            time_scale: physical length of the temporal domain
            offset: index of the first instance to keep
            num: number of instances
            vel: compute velocity from vorticity if True
        '''
        self.time_scale = time_scale
        self.S = nx
        self.T = nt

        # Read the raw field and reorder axes into (N, X, Y, T).
        with h5py.File(datapath, mode='r') as file:
            raw = np.array(file['u'])
        full = torch.tensor(raw, dtype=torch.float).permute(3, 1, 2, 0)
        self.vor = full[offset: offset + num]     # num x 64 x 64 x 50
        if vel:
            # Recover velocity components from the vorticity field.
            self.vel_u, self.vel_v = vor2vel(self.vor, L=1.0)

    def get_boundary_value(self, component=0):
        '''
            Get the boundary value for the component-th output.
            Args:
                component: int, 0: velocity_u; 1: velocity_v; 2: vorticity;
            Returns:
                value: N by 1 array, boundary value of the component
        '''
        # Resolve the field lazily via attribute name so that missing
        # velocity fields (vel=False) only matter when requested.
        attr_by_component = {0: 'vel_u', 1: 'vel_v', 2: 'vor'}
        if component not in attr_by_component:
            raise ValueError(f'No component {component} ')
        field = getattr(self, attr_by_component[component])
        return get_3dboundary(field)

    def get_boundary_points(self, num_x, num_y, num_t):
        '''Sample boundary points of the space-time box [0,1]^2 x [0, time_scale].'''
        return get_3dboundary_points(num_x, num_y, num_t,
                                     bot=(0, 0, 0),
                                     top=(1, 1, self.time_scale))

    def get_test_xyt(self):
        '''
        Build the full evaluation grid and the matching field values.
        Returns:
            points: (x, y, t) array with shape (S * S * T, 3)
            values: (u, v, w) array with shape (S * S * T, 3)
        '''
        points = get_xytgrid(S=self.S, T=self.T,
                             bot=[0, 0, 0],
                             top=[1, 1, self.time_scale])
        flat = [np.ravel(field) for field in (self.vel_u, self.vel_v, self.vor)]
        values = np.stack(flat, axis=0).T
        return points, values


class NSdata(object):
    '''
    Navier-Stokes dataset holding vorticity trajectories as (N, S, S, T)
    tensors, with optional velocity fields derived from the vorticity.

    Attributes:
        S: spatial resolution per dimension after subsampling
        T: number of time steps after subsampling
        time_scale: length of the time interval each instance covers
        data: full loaded tensor of shape (N, S, S, T)
        vor: selected slice of `data`, shape (num, S, S, T), on CPU
        vel_u, vel_v: velocity components (only set when vel=True)
    '''
    def __init__(self, datapath1,
                 nx, nt,
                 offset=0, num=1,
                 datapath2=None,
                 sub=1, sub_t=1,
                 vel=False, t_interval=1.0):
        '''
        Load data from npy and reshape to (N, X, Y, T)
        Args:
            datapath1: path to data
            nx: number of points in each spatial domain
            nt: number of points in temporal domain
            offset: index of the instance
            num: number of instances
            datapath2: path to second part of data, default None
            sub: downsample interval of spatial domain
            sub_t: downsample interval of temporal domain
            vel: if True, compute (vel_u, vel_v) from the vorticity
            t_interval: fraction of the temporal horizon kept per instance
        '''
        self.S = nx // sub
        # NOTE(review): the +1 presumably keeps both time endpoints — confirm
        # against the stored trajectory layout.
        self.T = int(nt * t_interval) // sub_t + 1
        self.time_scale = t_interval
        data1 = np.load(datapath1)
        # subsample: last three axes are (time, x, y) before the permute below
        data1 = torch.tensor(data1, dtype=torch.float)[..., ::sub_t, ::sub, ::sub]

        if datapath2 is not None:
            data2 = np.load(datapath2)
            data2 = torch.tensor(data2, dtype=torch.float)[..., ::sub_t, ::sub, ::sub]
        if t_interval == 0.5:
            # split each full trajectory into overlapping half-length windows
            data1 = self.extract(data1)
            if datapath2 is not None:
                data2 = self.extract(data2)
        # transpose data into (N, S, S, T)
        part1 = data1.permute(0, 2, 3, 1)
        if datapath2 is not None:
            part2 = data2.permute(0, 2, 3, 1)
            self.data = torch.cat((part1, part2), dim=0)
        else:
            self.data = part1
        self.vor = self.data[offset: offset + num, :, :, :].cpu()
        if vel:
            self.vel_u, self.vel_v = vor2vel(self.vor)  # Compute velocity from vorticity

    def get_init_cond(self):
        '''
        Initial condition of the first selected instance.

        Returns:
            values: (S, S, 3) array stacking (u, v, w) at t=0.
                Requires the dataset to have been built with vel=True.
        '''
        values = np.stack([self.vel_u[0, :, :, 0],
                           self.vel_v[0, :, :, 0],
                           self.vor[0, :, :, 0]], axis=2)
        return values

    def get_boundary_value(self, component=0):
        '''
        Get the boundary value for component-th output
        Args:
            component: int, 0: velocity_u; 1: velocity_v; 2: vorticity;
        Returns:
            value: N by 1 array, boundary value of the component
        Raises:
            ValueError: if component is not 0, 1 or 2
        '''
        if component == 0:
            value = self.vel_u
        elif component == 1:
            value = self.vel_v
        elif component == 2:
            value = self.vor
        else:
            raise ValueError(f'No component {component} ')

        boundary = get_3dboundary(value)
        return boundary

    def get_boundary_points(self, num_x, num_y, num_t):
        '''
        Sample boundary points of the space-time domain
        [0, 2*pi]^2 x [0, time_scale].

        Args:
            num_x: number of points along x
            num_y: number of points along y
            num_t: number of points along t

        Returns:
            points: N by 3 array
        '''
        points = get_3dboundary_points(num_x, num_y, num_t,
                                       bot=(0, 0, 0),
                                       top=(2 * np.pi, 2 * np.pi, self.time_scale))
        # x_arr = np.linspace(0, 2 * np.pi, num=num_x, endpoint=False)
        # y_arr = np.linspace(0, 2 * np.pi, num=num_y, endpoint=False)
        # xx, yy = np.meshgrid(x_arr, y_arr, indexing='ij')
        # xarr = np.ravel(xx)
        # yarr = np.ravel(yy)
        # tarr = np.zeros_like(xarr)
        # point0 = np.stack([xarr, yarr, tarr], axis=0).T     # (128x128x1, 3), boundary on t=0
        #
        # # tarr = np.ones_like(xarr) * self.time_scale
        # # point1 = np.stack([xarr, yarr, tarr], axis=0).T     # (128x128x1, 3), boundary on t=0.5
        #
        # t_arr = np.linspace(0, self.time_scale, num=num_t)
        # yy, tt = np.meshgrid(y_arr, t_arr, indexing='ij')
        # yarr = np.ravel(yy)
        # tarr = np.ravel(tt)
        # xarr = np.zeros_like(yarr)
        # point2 = np.stack([xarr, yarr, tarr], axis=0).T     # (1x128x65, 3), boundary on x=0
        #
        # xarr = np.ones_like(yarr) * 2 * np.pi
        # point3 = np.stack([xarr, yarr, tarr], axis=0).T     # (1x128x65, 3), boundary on x=2pi
        #
        # xx, tt = np.meshgrid(x_arr, t_arr, indexing='ij')
        # xarr = np.ravel(xx)
        # tarr = np.ravel(tt)
        # yarr = np.zeros_like(xarr)
        # point4 = np.stack([xarr, yarr, tarr], axis=0).T     # (128x1x65, 3), boundary on y=0
        #
        # yarr = np.ones_like(xarr) * 2 * np.pi
        # point5 = np.stack([xarr, yarr, tarr], axis=0).T     # (128x1x65, 3), boundary on y=2pi
        #
        # points = np.concatenate([point0,
        #                          point2, point3,
        #                          point4, point5],
        #                         axis=0)
        return points

    def get_test_xyt(self):
        '''
        Build the full evaluation grid and matching ground-truth values.

        Returns:
            points: (x, y, t) array with shape (S * S * T, 3)
            values: (u, v, w) array with shape (S * S * T, 3)

        '''
        points = get_xytgrid(S=self.S, T=self.T,
                             bot=[0, 0, 0],
                             top=[2 * np.pi, 2 * np.pi, self.time_scale])
        u_val = np.ravel(self.vel_u)
        v_val = np.ravel(self.vel_v)
        w_val = np.ravel(self.vor)
        values = np.stack([u_val, v_val, w_val], axis=0).T
        return points, values


    @staticmethod
    def extract(data):
        '''
        Extract data with time range 0-0.5, 0.25-0.75, 0.5-1.0, 0.75-1.25,...
        Args:
            data: tensor with size N x 129 x 128 x 128

        Returns:
            output: (4*N-1) x 65 x 128 x 128
        '''
        T = data.shape[1] // 2          # window length in steps (inclusive of both ends: T+1 frames)
        interval = data.shape[1] // 4   # stride between consecutive window starts
        N = data.shape[0]
        new_data = torch.zeros(4 * N - 1, T + 1, data.shape[2], data.shape[3])
        for i in range(N):
            for j in range(4):
                if i == N - 1 and j == 3:
                    # reach boundary: the last window would need instance N, which doesn't exist
                    break
                if j != 3:
                    new_data[i * 4 + j] = data[i, interval * j:interval * j + T + 1]
                else:
                    # final window of instance i wraps into the start of instance i+1
                    new_data[i * 4 + j, 0: interval] = data[i, interval * j:interval * j + interval]
                    new_data[i * 4 + j, interval: T + 1] = data[i + 1, 0:interval + 1]
        return new_data


class DeepOnetNS(Dataset):
    '''
    Dataset class customized for DeepONet's input format: each item is one
    (initial condition, query point, target value) triple.
    '''
    def __init__(self, datapath,
                 nx, nt,
                 offset=0, num=1,
                 sub=1, sub_t=1,
                 t_interval=1.0):
        self.S = nx // sub
        self.T = int(nt * t_interval) // sub_t + 1
        self.time_scale = t_interval
        self.N = num
        raw = torch.tensor(np.load(datapath), dtype=torch.float)
        raw = raw[..., ::sub_t, ::sub, ::sub]
        if t_interval == 0.5:
            raw = NSdata.extract(raw)
        # reorder to (N, S, S, T)
        raw = raw.permute(0, 2, 3, 1)
        self.vor = raw[offset: offset + num, :, :, :]
        grid = get_xytgrid(S=self.S, T=self.T,
                           bot=[0, 0, 0],
                           top=[2 * np.pi, 2 * np.pi, self.time_scale])
        self.xyt = torch.tensor(grid, dtype=torch.float)  # (SxSxT, 3)

    def __len__(self):
        # one sample per (instance, space-time point) pair
        return self.N * self.S * self.S * self.T

    def __getitem__(self, idx):
        points_per_instance = self.S ** 2 * self.T
        instance_id, pos_id = divmod(idx, points_per_instance)
        u0 = self.vor[instance_id, :, :, 0].reshape(-1)
        target = self.vor[instance_id].reshape(-1)[pos_id]
        return u0, self.xyt[pos_id], target


class DeepONetCPNS(Dataset):
    '''
    Dataset class customized for DeepONet cartesian product's input format:
    each item is one instance's initial condition plus its full flattened
    space-time solution.
    '''

    def __init__(self, datapath,
                 nx, nt,
                 offset=0, num=1,
                 sub=1, sub_t=1,
                 t_interval=1.0):
        self.S = nx // sub
        self.T = int(nt * t_interval) // sub_t + 1
        self.time_scale = t_interval
        self.N = num
        raw = torch.tensor(np.load(datapath), dtype=torch.float)
        raw = raw[..., ::sub_t, ::sub, ::sub]
        if t_interval == 0.5:
            raw = NSdata.extract(raw)
        # reorder to (N, S, S, T)
        raw = raw.permute(0, 2, 3, 1)
        self.vor = raw[offset: offset + num, :, :, :]
        grid = get_xytgrid(S=self.S, T=self.T,
                           bot=[0, 0, 0],
                           top=[2 * np.pi, 2 * np.pi, self.time_scale])
        self.xyt = torch.tensor(grid, dtype=torch.float)  # (SxSxT, 3)

    def __len__(self):
        return self.N

    def __getitem__(self, idx):
        '''
        Args:
            idx: instance index

        Returns:
            u0: flattened initial vorticity, shape (S*S,)
            y: flattened space-time vorticity, shape (S*S*T,)
        '''
        u0 = self.vor[idx, :, :, 0].reshape(-1)
        y = self.vor[idx, :, :, :].reshape(-1)
        return u0, y


================================================
FILE: baselines/deepxde_deeponet.py
================================================
import random
import deepxde as dde
from baselines.data import NSdata

'''
Training deepONet using deepxde implementation. 
Note that deepxde requires passing the whole dataset to Triple, which is very memory consuming. 
'''


def train(config):
    '''
    Train a DeepONet (deepxde implementation) on Navier-Stokes data.

    Note that deepxde requires passing the whole dataset to Triple,
    which is very memory consuming.
    '''
    seed = random.randint(1, 10000)
    print(f'Random seed :{seed}')
    # construct dataloader
    cfg = config['data']

    def load(path, offset, nx, nt, sub, sub_t):
        # loader for a 10-instance slice, velocity computation disabled
        return NSdata(datapath1=path, offset=offset, num=10,
                      nx=nx, nt=nt, sub=sub, sub_t=sub_t,
                      vel=False, t_interval=cfg['time_interval'])

    train_set = load(cfg['datapath'], 0,
                     cfg['nx'], cfg['nt'], cfg['sub'], cfg['sub_t'])
    val_set = load(cfg['data_val'], 310,
                   cfg['val_nx'], cfg['val_nt'], cfg['val_sub'], cfg['val_subt'])
    # assert train_set.S == val_set.S
    branch_dim = train_set.S ** 2   # branch net consumes the flattened IC field
    trunk_dim = 3                   # trunk net consumes (x, y, t)
    X_train, y_train = train_set.get_operator_data()
    X_val, y_val = val_set.get_operator_data()
    triple = dde.data.Triple(X_train=X_train, y_train=y_train,
                             X_test=X_val, y_test=y_val)

    activation = config['model']['activation']
    initializer = 'Glorot normal'   # He normal or Glorot normal

    net = dde.maps.DeepONet([branch_dim] + config['model']['layers'],
                            [trunk_dim] + config['model']['layers'],
                            activation,
                            initializer,
                            use_bias=True,
                            stacked=False)
    model = dde.Model(triple, net)
    model.compile('adam', lr=config['train']['base_lr'])
    checker = dde.callbacks.ModelCheckpoint(
        'checkpoints/deeponet.ckpt', save_better_only=True, period=10,
    )
    model.train(epochs=config['train']['epochs'], callbacks=[checker])


================================================
FILE: baselines/loss.py
================================================
import torch
import torch.autograd as autograd
from train_utils.utils import set_grad
from .utils import get_sample, net_NS, sub_mse


def boundary_loss(model, npt=100):
    '''
    Mean-squared boundary mismatch of the velocity components on two
    sampled boundary sets, scaled by a 0.5 normalization factor.
    '''
    device = next(model.parameters()).device

    # six sample tensors: (x, y, t) for boundary set 1 then boundary set 2
    samples = [s.to(device) for s in get_sample(npt)]
    set_grad(samples)
    x1, y1, t1, x2, y2, t2 = samples

    u1, v1, _ = net_NS(x1, y1, t1, model)
    u2, v2, _ = net_NS(x2, y2, t2, model)
    total = sub_mse(u1) + sub_mse(v1) + sub_mse(u2) + sub_mse(v2)
    return 0.5 * total  # 0.5 is the normalization factor


def resf_NS(u, v, p, x, y, t, re=40):
    '''
    Residuals of the 2D Navier-Stokes momentum equations in
    velocity-pressure form, with a sin(4*y) forcing term on the
    x-momentum equation.

    Args:
        u: x-component of velocity, tensor with a graph back to (x, y, t)
        v: y-component of velocity, tensor
        p: pressure, tensor
        x: x-dimension, tensor (requires_grad)
        y: y-dimension, tensor (requires_grad)
        t: time dimension, tensor (requires_grad)
        re: Reynolds number, default 40
    Returns:
        (res_x, res_y, div): x/y momentum residuals and velocity divergence
    '''
    nu = 1 / re
    u_x, u_y, u_t = autograd.grad(outputs=[u.sum()], inputs=[x, y, t], create_graph=True)
    v_x, v_y, v_t = autograd.grad(outputs=[v.sum()], inputs=[x, y, t], create_graph=True)
    # second derivatives for the viscous terms
    u_xx, = autograd.grad(outputs=[u_x.sum()], inputs=[x], create_graph=True)
    u_yy, = autograd.grad(outputs=[u_y.sum()], inputs=[y], create_graph=True)
    v_xx, = autograd.grad(outputs=[v_x.sum()], inputs=[x], create_graph=True)
    v_yy, = autograd.grad(outputs=[v_y.sum()], inputs=[y], create_graph=True)
    # pressure gradient
    p_x, = autograd.grad(outputs=[p.sum()], inputs=[x], create_graph=True)
    p_y, = autograd.grad(outputs=[p.sum()], inputs=[y], create_graph=True)
    res_x = u_t + u * u_x + v * u_y + p_x - nu * (u_xx + u_yy) - torch.sin(4 * y)
    res_y = v_t + u * v_x + v * v_y + p_y - nu * (v_xx + v_yy)
    div = u_x + v_y   # incompressibility residual
    return res_x, res_y, div



================================================
FILE: baselines/model.py
================================================
import torch
import torch.nn as nn
from models.FCN import DenseNet
from typing import List
from .utils import weighted_mse


class DeepONet(nn.Module):
    '''
    Vanilla DeepONet: a branch net encodes the input function samples, a
    trunk net encodes the query coordinate, and the output is their
    per-sample inner product.
    '''
    def __init__(self, branch_layer, trunk_layer):
        super(DeepONet, self).__init__()
        self.branch = DenseNet(branch_layer, nn.ReLU)
        self.trunk = DenseNet(trunk_layer, nn.ReLU)

    def forward(self, u0, grid):
        branch_out = self.branch(u0)   # (batch, width)
        trunk_out = self.trunk(grid)   # (batch, width)
        batch = branch_out.shape[0]
        width = branch_out.shape[1]
        # batched dot product -> (batch, 1, 1)
        return torch.bmm(branch_out.view(batch, 1, width),
                         trunk_out.view(batch, width, 1))


class DeepONetCP(nn.Module):
    '''
    DeepONet in cartesian-product form: each sample's branch embedding is
    contracted against the trunk embeddings of every query point at once.
    '''
    def __init__(self, branch_layer, trunk_layer):
        super(DeepONetCP, self).__init__()
        self.branch = DenseNet(branch_layer, nn.ReLU)
        self.trunk = DenseNet(trunk_layer, nn.ReLU)

    def forward(self, u0, grid):
        branch_out = self.branch(u0)   # (batch, width)
        trunk_out = self.trunk(grid)   # (n_points, width)
        # contract shared width dimension -> (batch, n_points)
        return torch.einsum('bi,ni->bn', branch_out, trunk_out)


class SAWeight(nn.Module):
    '''
    Self-adaptive loss weights: one trainable weight tensor per group of
    initial-condition, boundary-condition and collocation residuals.
    '''
    def __init__(self, out_dim, num_init: List, num_bd: List, num_collo: List):
        super(SAWeight, self).__init__()
        # initial-condition weights start large (x100)
        self.init_param = nn.ParameterList(
            [nn.Parameter(100 * torch.rand(n, out_dim)) for n in num_init]
        )
        self.bd_param = nn.ParameterList(
            [nn.Parameter(torch.rand(n, out_dim)) for n in num_bd]
        )
        self.collo_param = nn.ParameterList(
            [nn.Parameter(torch.rand(n, out_dim)) for n in num_collo]
        )

    def forward(self, init_cond: List, bd_cond: List, residual: List):
        # accumulate weighted MSE of every residual group against zero,
        # in the order: initial, boundary, collocation
        total_loss = 0.0
        groups = [
            (self.init_param, init_cond),
            (self.bd_param, bd_cond),
            (self.collo_param, residual),
        ]
        for params, errors in groups:
            for weight, err in zip(params, errors):
                total_loss += weighted_mse(err, 0, weight)
        return total_loss

================================================
FILE: baselines/pinns_ns_05s.py
================================================
'''
training for Navier Stokes with Reynolds number 500, 0.5 second time period
'''
import csv
import random
from timeit import default_timer
import deepxde as dde
from deepxde.optimizers.config import set_LBFGS_options
import numpy as np
from baselines.data import NSdata

import tensorflow as tf

Re = 500


def forcing(x):
    # Forcing term -4*cos(4*y) for the vorticity equation; x columns are (x, y, t).
    y = x[:, 1:2]
    return -4 * tf.math.cos(4 * y)


def pde(x, u):
    '''
    Residuals of the vorticity-form Navier-Stokes equations (Reynolds
    number taken from the module-level `Re`).

    Args:
        x: (x, y, t)
        u: (u, v, w), where (u,v) is the velocity, w is the vorticity
    Returns: list of residuals [vorticity transport, continuity,
        u-compatibility, v-compatibility]
    '''
    u_vel, v_vel = u[:, 0:1], u[:, 1:2]

    def jac(i, j):
        return dde.grad.jacobian(u, x, i=i, j=j)

    def hess(component, i, j):
        return dde.grad.hessian(u, x, component=component, i=i, j=j)

    u_vel_x = jac(0, 0)
    u_vel_xx = hess(0, 0, 0)
    u_vel_yy = hess(0, 1, 1)

    v_vel_y = jac(1, 1)
    v_vel_xx = hess(1, 0, 0)
    v_vel_yy = hess(1, 1, 1)

    w_x, w_y, w_t = jac(2, 0), jac(2, 1), jac(2, 2)
    w_xx = hess(2, 0, 0)
    w_yy = hess(2, 1, 1)

    # vorticity transport with viscosity 1/Re and external forcing
    transport = w_t + u_vel * w_x + v_vel * w_y \
        - 1 / Re * (w_xx + w_yy) - forcing(x)
    continuity = u_vel_x + v_vel_y
    # velocity-vorticity compatibility relations
    compat_u = u_vel_xx + u_vel_yy + w_y
    compat_v = v_vel_xx + v_vel_yy - w_x
    return [transport, continuity, compat_u, compat_v]


def eval(model, dataset,
         step, time_cost,
         offset, config):
    '''
    Evaluate relative L2 test errors of the model over the dataset's full
    space-time grid, print them, and append a row to the CSV log file.
    '''
    test_points, test_vals = dataset.get_test_xyt()
    pred = model.predict(test_points)

    # columns are (u, v, vorticity)
    errors = [dde.metrics.l2_relative_error(test_vals[:, c], pred[:, c])
              for c in range(3)]
    u_err, v_err, vor_err = errors

    print(f'Instance index : {offset}')
    print(f'L2 relative error in u: {u_err}')
    print(f'L2 relative error in v: {v_err}')
    print(f'L2 relative error in vorticity: {vor_err}')
    with open(config['log']['logfile'], 'a') as f:
        csv.writer(f).writerow([offset, u_err, v_err, vor_err, step, time_cost])


def train(offset, config, args):
    '''
    Train a PINN (deepxde FNN) to fit one Navier-Stokes instance and log
    test errors periodically via this module's `eval`.

    Args:
        offset: index of the data instance to fit
        config: dict with 'data', 'model', 'train' and 'log' sections
        args: command-line arguments (unused in the active code path)
    '''
    seed = random.randint(1, 10000)
    print(f'Random seed :{seed}')
    np.random.seed(seed)
    # construct dataloader
    data_config = config['data']
    if 'datapath2' in data_config:
        # data split across two files
        dataset = NSdata(datapath1=data_config['datapath'],
                         datapath2=data_config['datapath2'],
                         offset=offset, num=1,
                         nx=data_config['nx'], nt=data_config['nt'],
                         sub=data_config['sub'], sub_t=data_config['sub_t'],
                         vel=True,
                         t_interval=data_config['time_interval'])
    else:
        dataset = NSdata(datapath1=data_config['datapath'],
                         offset=offset, num=1,
                         nx=data_config['nx'], nt=data_config['nt'],
                         sub=data_config['sub'], sub_t=data_config['sub_t'],
                         vel=True,
                         t_interval=data_config['time_interval'])
    # space-time domain: [0, 2*pi]^2 x [0, time_interval]
    spatial_domain = dde.geometry.Rectangle(xmin=[0, 0], xmax=[2 * np.pi, 2 * np.pi])
    temporal_domain = dde.geometry.TimeDomain(0, data_config['time_interval'])
    st_domain = dde.geometry.GeometryXTime(spatial_domain, temporal_domain)
    num_boundary_points = dataset.S
    points = dataset.get_boundary_points(num_x=num_boundary_points, num_y=num_boundary_points,
                                         num_t=dataset.T)
    u_value = dataset.get_boundary_value(component=0)
    v_value = dataset.get_boundary_value(component=1)
    w_value = dataset.get_boundary_value(component=2)
    # u, v are velocity, w is vorticity
    boundary_u = dde.PointSetBC(points=points, values=u_value, component=0)
    boundary_v = dde.PointSetBC(points=points, values=v_value, component=1)
    boundary_w = dde.PointSetBC(points=points, values=w_value, component=2)

    data = dde.data.TimePDE(
        st_domain,
        pde,
        [
            boundary_u,
            boundary_v,
            boundary_w
        ],
        num_domain=config['train']['num_domain'],
        num_boundary=config['train']['num_boundary'],
        num_test=config['train']['num_test'],
    )

    net = dde.maps.FNN(config['model']['layers'],
                       config['model']['activation'],
                       'Glorot normal')
    # net = dde.maps.STMsFFN([3] + 4 * [50] + [3], 'tanh', 'Glorot normal', [50], [50])
    model = dde.Model(data, net)

    # loss weights: the 4 PDE residuals weighted 1, the 3 boundary terms weighted 100
    model.compile('adam', lr=config['train']['base_lr'], loss_weights=[1, 1, 1, 1, 100, 100, 100])
    if 'log_step' in config['train']:
        step_size = config['train']['log_step']
    else:
        step_size = 100
    epochs = config['train']['epochs'] // step_size

    # train in chunks of step_size steps and evaluate after each chunk;
    # `eval` here is this module's logging helper, not the builtin
    for i in range(epochs):
        time_start = default_timer()
        model.train(epochs=step_size, display_every=step_size)
        time_end = default_timer()
        eval(model, dataset, i * step_size,
             time_cost=time_end - time_start,
             offset=offset,
             config=config)
    print('Done!')
    # set_LBFGS_options(maxiter=10000)
    # model.compile('L-BFGS', loss_weights=[1, 1, 1, 1, 100, 100, 100])
    # model.train()

    # test_points, test_vals = dataset.get_test_xyt()
    #
    # pred = model.predict(test_points)
    # vel_u_truth = test_vals[:, 0]
    # vel_v_truth = test_vals[:, 1]
    # vor_truth = test_vals[:, 2]
    #
    # vel_u_pred = pred[:, 0]
    # vel_v_pred = pred[:, 1]
    # vor_pred = pred[:, 2]
    #
    # u_err = dde.metrics.l2_relative_error(vel_u_truth, vel_u_pred)
    # v_err = dde.metrics.l2_relative_error(vel_v_truth, vel_v_pred)
    # vor_err = dde.metrics.l2_relative_error(vor_truth, vor_pred)
    # print(f'Instance index : {offset}')
    # print(f'L2 relative error in u: {u_err}')
    # print(f'L2 relative error in v: {v_err}')
    # print(f'L2 relative error in vorticity: {vor_err}')
    # with open(args.logfile, 'a') as f:
    #     writer = csv.writer(f)
    #     writer.writerow([offset, u_err, v_err, vor_err])


================================================
FILE: baselines/pinns_ns_50s.py
================================================
'''
training for Navier Stokes with viscosity 0.001
spatial domain: (0, 1) ** 2
temporal domain: [0, 49]
'''
import csv
import random
from timeit import default_timer
import deepxde as dde
from deepxde.optimizers.config import set_LBFGS_options
import numpy as np
from baselines.data import NSLong

import tensorflow as tf


def forcing(x):
    # Forcing 0.1*(sin(2*pi*(x+y)) + cos(2*pi*(x+y))); x columns are (x, y, t).
    phase = 2 * np.pi * (x[:, 0:1] + x[:, 1:2])
    return 0.1 * (tf.math.sin(phase) + tf.math.cos(phase))


def pde(x, u):
    '''
    Residuals of the vorticity-form Navier-Stokes equations with
    viscosity 0.001.

    Args:
        x: (x, y, t)
        u: (u, v, w), where (u,v) is the velocity, w is the vorticity
    Returns: list of residuals [vorticity transport, continuity,
        u-compatibility, v-compatibility]
    '''
    u_vel, v_vel = u[:, 0:1], u[:, 1:2]

    jacobian = dde.grad.jacobian
    hessian = dde.grad.hessian

    du_dx = jacobian(u, x, i=0, j=0)
    d2u_dx2 = hessian(u, x, component=0, i=0, j=0)
    d2u_dy2 = hessian(u, x, component=0, i=1, j=1)

    dv_dy = jacobian(u, x, i=1, j=1)
    d2v_dx2 = hessian(u, x, component=1, i=0, j=0)
    d2v_dy2 = hessian(u, x, component=1, i=1, j=1)

    dw_dx = jacobian(u, x, i=2, j=0)
    dw_dy = jacobian(u, x, i=2, j=1)
    dw_dt = jacobian(u, x, i=2, j=2)
    d2w_dx2 = hessian(u, x, component=2, i=0, j=0)
    d2w_dy2 = hessian(u, x, component=2, i=1, j=1)

    # vorticity transport with viscosity 0.001 and external forcing
    transport = dw_dt + u_vel * dw_dx + v_vel * dw_dy \
        - 0.001 * (d2w_dx2 + d2w_dy2) - forcing(x)
    continuity = du_dx + dv_dy
    # velocity-vorticity compatibility relations
    compat_u = d2u_dx2 + d2u_dy2 + dw_dy
    compat_v = d2v_dx2 + d2v_dy2 - dw_dx
    return [transport, continuity, compat_u, compat_v]


def eval(model, dataset,
         step, time_cost,
         offset, config):
    '''
    Evaluate relative L2 test errors over the full space-time grid and at
    the final time step, print them, and append a row to the CSV log file.
    '''
    test_points, test_vals = dataset.get_test_xyt()
    pred = model.predict(test_points)

    u_err = dde.metrics.l2_relative_error(test_vals[:, 0], pred[:, 0])
    v_err = dde.metrics.l2_relative_error(test_vals[:, 1], pred[:, 1])
    vor_err = dde.metrics.l2_relative_error(test_vals[:, 2], pred[:, 2])

    # every T-th row starting at T-1 picks out the last time step;
    # NOTE(review): assumes t varies fastest in the grid returned by
    # get_test_xyt — confirm against get_xytgrid's ordering.
    last = slice(dataset.T - 1, test_vals.shape[0], dataset.T)
    u50_err = dde.metrics.l2_relative_error(test_vals[last, 0], pred[last, 0])
    v50_err = dde.metrics.l2_relative_error(test_vals[last, 1], pred[last, 1])
    vor50_err = dde.metrics.l2_relative_error(test_vals[last, 2], pred[last, 2])

    print(f'Instance index : {offset}')
    print(f'L2 relative error in u: {u_err}')
    print(f'L2 relative error in v: {v_err}')
    print(f'L2 relative error in vorticity: {vor_err}')

    print(f'Time {dataset.T - 1} L2 relative error of u : {u50_err}')
    print(f'Time {dataset.T - 1} L2 relative error of v : {v50_err}')
    print(f'Time {dataset.T - 1} L2 relative error of vor : {vor50_err}')
    with open(config['log']['logfile'], 'a') as f:
        writer = csv.writer(f)
        writer.writerow([offset, u_err, v_err, vor_err, step, time_cost,
                         u50_err, v50_err, vor50_err])


def train_longtime(offset, config, args):
    '''
    Train a PINN (deepxde FNN) on a long-horizon Navier-Stokes instance
    (NSLong data) and log test errors periodically via this module's `eval`.

    Args:
        offset: index of the data instance to fit
        config: dict with 'data', 'model', 'train' and 'log' sections
        args: command-line arguments (unused in this function body)
    '''
    seed = random.randint(1, 10000)
    print(f'Random seed :{seed}')
    np.random.seed(seed)
    # construct dataloader
    data_config = config['data']
    # space-time domain: unit square over [0, time_scale]
    spatial_domain = dde.geometry.Rectangle(xmin=[0, 0], xmax=[1, 1])
    temporal_domain = dde.geometry.TimeDomain(0, data_config['time_scale'])
    st_domain = dde.geometry.GeometryXTime(spatial_domain, temporal_domain)

    dataset = NSLong(datapath=data_config['datapath'],
                     nx=data_config['nx'], nt=data_config['nt'],
                     time_scale=data_config['time_scale'],
                     offset=offset, num=data_config['n_sample'],
                     vel=True)

    points = dataset.get_boundary_points(dataset.S, dataset.S, dataset.T)
    u_value = dataset.get_boundary_value(component=0)
    v_value = dataset.get_boundary_value(component=1)
    w_value = dataset.get_boundary_value(component=2)
    # u, v are velocity, w is vorticity
    boundary_u = dde.PointSetBC(points=points, values=u_value, component=0)
    boundary_v = dde.PointSetBC(points=points, values=v_value, component=1)
    boundary_w = dde.PointSetBC(points=points, values=w_value, component=2)

    data = dde.data.TimePDE(
        st_domain,
        pde,
        [
            boundary_u,
            boundary_v,
            boundary_w
        ],
        num_domain=config['train']['num_domain'],
        num_boundary=config['train']['num_boundary'],
        num_test=config['train']['num_test'],
    )

    net = dde.maps.FNN(config['model']['layers'],
                       config['model']['activation'],
                       'Glorot normal')
    # net = dde.maps.STMsFFN([3] + 4 * [50] + [3], 'tanh', 'Glorot normal', [50], [50])
    model = dde.Model(data, net)

    # loss weights: the 4 PDE residuals weighted 1, the 3 boundary terms weighted 100
    model.compile('adam', lr=config['train']['base_lr'], loss_weights=[1, 1, 1, 1, 100, 100, 100])
    if 'log_step' in config['train']:
        step_size = config['train']['log_step']
    else:
        step_size = 100
    epochs = config['train']['epochs'] // step_size
    # train in chunks of step_size steps and evaluate after each chunk;
    # `eval` here is this module's logging helper, not the builtin
    for i in range(epochs):
        time_start = default_timer()
        model.train(epochs=step_size, display_every=step_size)
        time_end = default_timer()
        eval(model, dataset, i * step_size,
             time_cost=time_end - time_start,
             offset=offset,
             config=config)
    print('Done!')



================================================
FILE: baselines/sapinns-50s.py
================================================
import csv
import random
from timeit import default_timer
from tqdm import tqdm
import deepxde as dde
import numpy as np
from baselines.data import NSdata
import torch
from torch.optim import Adam

from tensordiffeq.boundaries import DomainND, periodicBC
from .tqd_utils import PointsIC
from .model import SAWeight

from models.FCN import DenseNet
from train_utils.negadam import NAdam



def forcing(x):
    # Forcing 0.1*(sin(2*pi*(x+y)) + cos(2*pi*(x+y))); x columns are (x, y, t).
    theta = x[:, 0:1] + x[:, 1:2]
    angle = 2 * np.pi * theta
    return 0.1 * (torch.sin(angle) + torch.cos(angle))


def pde(x, u):
    '''
    Residuals of the vorticity-form Navier-Stokes equations with
    viscosity 0.001 (torch backend).

    Args:
        x: (x, y, t)
        u: (u, v, w), where (u,v) is the velocity, w is the vorticity
    Returns: list of residuals [vorticity transport, continuity,
        u-compatibility, v-compatibility]
    '''
    u_vel, v_vel = u[:, 0:1], u[:, 1:2]

    def first(i, j):
        return dde.grad.jacobian(u, x, i=i, j=j)

    def second(component, i, j):
        return dde.grad.hessian(u, x, component=component, i=i, j=j)

    u_vel_x = first(0, 0)
    u_vel_xx = second(0, 0, 0)
    u_vel_yy = second(0, 1, 1)

    v_vel_y = first(1, 1)
    v_vel_xx = second(1, 0, 0)
    v_vel_yy = second(1, 1, 1)

    w_x, w_y, w_t = first(2, 0), first(2, 1), first(2, 2)
    w_xx = second(2, 0, 0)
    w_yy = second(2, 1, 1)

    # vorticity transport with viscosity 0.001 and external forcing
    transport = w_t + u_vel * w_x + v_vel * w_y \
        - 0.001 * (w_xx + w_yy) - forcing(x)
    continuity = u_vel_x + v_vel_y
    # velocity-vorticity compatibility relations
    compat_u = u_vel_xx + u_vel_yy + w_y
    compat_v = v_vel_xx + v_vel_yy - w_x
    return [transport, continuity, compat_u, compat_v]



def eval(model, dataset,
         step, time_cost,
         offset, config):
    '''
    Evaluate relative L2 test errors of the torch model over the dataset's
    full space-time grid, print them, and append a row to the CSV log file.
    '''
    test_points, test_vals = dataset.get_test_xyt()

    inputs = torch.tensor(test_points, dtype=torch.float32)
    with torch.no_grad():
        pred = model(inputs).cpu().numpy()

    # columns are (u, v, vorticity)
    u_err = dde.metrics.l2_relative_error(test_vals[:, 0], pred[:, 0])
    v_err = dde.metrics.l2_relative_error(test_vals[:, 1], pred[:, 1])
    vor_err = dde.metrics.l2_relative_error(test_vals[:, 2], pred[:, 2])

    print(f'Instance index : {offset}')
    print(f'L2 relative error in u: {u_err}')
    print(f'L2 relative error in v: {v_err}')
    print(f'L2 relative error in vorticity: {vor_err}')
    with open(config['log']['logfile'], 'a') as f:
        csv.writer(f).writerow([offset, u_err, v_err, vor_err, step, time_cost])


def train_sapinn(offset, config, args):
    '''
    Train a self-adaptive PINN (SA-PINN) on a single Navier-Stokes instance.

    The solution network `net` is trained with Adam while the per-point
    loss weights in `weight_net` are updated by NAdam (from
    train_utils.negadam; presumably a negated-gradient Adam so the weights
    ascend the loss, as in the SA-PINN scheme -- confirm).

    Fix over the previous version: `dde.gradients.clear()` is called each
    iteration (as the sibling sapinns implementation does), so deepxde's
    Jacobian/Hessian cache is not reused across iterations and memory does
    not grow without bound.

    Args:
        offset: index of the instance to load from the dataset file(s)
        config: dict with 'data', 'train', 'model' and 'log' sections
        args: command-line arguments (unused; kept for a uniform entry point)
    '''
    seed = random.randint(1, 10000)
    print(f'Random seed :{seed}')
    np.random.seed(seed)
    # construct dataloader
    data_config = config['data']
    if 'datapath2' in data_config:
        # velocity and vorticity data stored in two separate files
        dataset = NSdata(datapath1=data_config['datapath'],
                         datapath2=data_config['datapath2'],
                         offset=offset, num=1,
                         nx=data_config['nx'], nt=data_config['nt'],
                         sub=data_config['sub'], sub_t=data_config['sub_t'],
                         vel=True,
                         t_interval=data_config['time_interval'])
    else:
        dataset = NSdata(datapath1=data_config['datapath'],
                         offset=offset, num=1,
                         nx=data_config['nx'], nt=data_config['nt'],
                         sub=data_config['sub'], sub_t=data_config['sub_t'],
                         vel=True,
                         t_interval=data_config['time_interval'])
    # periodic spatial domain [0, 2*pi]^2 with time interval [0, T]
    domain = DomainND(['x', 'y', 't'], time_var='t')
    domain.add('x', [0.0, 2 * np.pi], dataset.S)
    domain.add('y', [0.0, 2 * np.pi], dataset.S)
    domain.add('t', [0.0, data_config['time_interval']], dataset.T)
    num_collo = config['train']['num_domain']
    domain.generate_collocation_points(num_collo)
    init_vals = dataset.get_init_cond()
    num_inits = config['train']['num_init']
    if num_inits > dataset.S ** 2:
        # cannot request more initial points than the grid provides
        num_inits = dataset.S ** 2
    init_cond = PointsIC(domain, init_vals, var=['x', 'y'], n_values=num_inits)
    bd_cond = periodicBC(domain, ['x', 'y'], n_values=config['train']['num_boundary'])

    # prepare initial condition inputs
    init_input = torch.tensor(init_cond.input, dtype=torch.float32)
    init_val = torch.tensor(init_cond.val, dtype=torch.float32)

    # prepare boundary condition inputs
    upper_input0 = torch.tensor(bd_cond.upper[0], dtype=torch.float32).squeeze().t()     # shape N x 3
    upper_input1 = torch.tensor(bd_cond.upper[1], dtype=torch.float32).squeeze().t()
    lower_input0 = torch.tensor(bd_cond.lower[0], dtype=torch.float32).squeeze().t()
    lower_input1 = torch.tensor(bd_cond.lower[1], dtype=torch.float32).squeeze().t()

    # prepare collocation points (requires_grad so the PDE residuals can be formed)
    collo_input = torch.tensor(domain.X_f, dtype=torch.float32, requires_grad=True)
    # one adaptive weight set per loss group: 1 IC, 2 boundaries, 4 residuals
    weight_net = SAWeight(out_dim=3,
                          num_init=[num_inits],
                          num_bd=[upper_input0.shape[0]] * 2,
                          num_collo=[num_collo] * 4)
    net = DenseNet(config['model']['layers'], config['model']['activation'])
    weight_optim = NAdam(weight_net.parameters(), lr=config['train']['base_lr'])
    net_optim = Adam(net.parameters(), lr=config['train']['base_lr'])

    pbar = tqdm(range(config['train']['epochs']), dynamic_ncols=True)

    start_time = default_timer()
    for e in pbar:
        net.zero_grad()
        weight_net.zero_grad()
        if collo_input.grad is not None:
            collo_input.grad.zero_()

        # initial-condition mismatch
        init_pred = net(init_input) - init_val

        # periodic boundary mismatch along each spatial direction
        bd_0 = net(upper_input0) - net(lower_input0)
        bd_1 = net(upper_input1) - net(lower_input1)

        predu = net(collo_input)
        pde_residual = pde(collo_input, predu)

        loss = weight_net(init_cond=[init_pred], bd_cond=[bd_0, bd_1], residual=pde_residual)
        loss.backward()
        weight_optim.step()
        net_optim.step()
        # deepxde caches Jacobians/Hessians computed inside `pde`; clear the
        # cache every iteration so stale graphs are not reused and memory does
        # not accumulate (consistent with the sibling sapinns implementation)
        dde.gradients.clear()
        pbar.set_description(
            (
                f'Epoch: {e}, loss: {loss.item()}'
            )
        )
        if e % config['train']['log_step'] == 0:
            end_time = default_timer()
            eval(net, dataset, e, time_cost=end_time - start_time, offset=offset, config=config)
            start_time = default_timer()
    print('Done!')





    


================================================
FILE: baselines/sapinns.py
================================================
import csv
import random
from timeit import default_timer
from tqdm import tqdm
import deepxde as dde
import numpy as np
from baselines.data import NSdata
import torch
from torch.optim import Adam

from tensordiffeq.boundaries import DomainND, periodicBC
from .tqd_utils import PointsIC
from .model import SAWeight

from models.FCN import DenseNet
from train_utils.negadam import NAdam


Re = 500


def forcing(x):
    '''Kolmogorov forcing -4 cos(4y); `x` holds columns (x, y, t).'''
    y_coord = x[:, 1:2]
    return -4 * torch.cos(4 * y_coord)


def pde(x, u):
    '''
    Residuals of the 2D incompressible Navier-Stokes equations in
    vorticity-velocity form at Reynolds number `Re`.

    Args:
        x: collocation points with columns (x, y, t)
        u: network output with columns (u, v, w) -- velocity and vorticity
    Returns:
        list of four residual tensors:
        [vorticity transport, continuity, u-Poisson, v-Poisson]
    '''
    vel_u, vel_v, vor = u[:, 0:1], u[:, 1:2], u[:, 2:3]

    # first derivatives of the vorticity (advection and time terms)
    dw_dx = dde.grad.jacobian(u, x, i=2, j=0)
    dw_dy = dde.grad.jacobian(u, x, i=2, j=1)
    dw_dt = dde.grad.jacobian(u, x, i=2, j=2)
    # second derivatives of the vorticity (diffusion term)
    dw_dxx = dde.grad.hessian(u, x, component=2, i=0, j=0)
    dw_dyy = dde.grad.hessian(u, x, component=2, i=1, j=1)

    # velocity divergence terms
    du_dx = dde.grad.jacobian(u, x, i=0, j=0)
    dv_dy = dde.grad.jacobian(u, x, i=1, j=1)
    # Laplacian entries of the velocity components
    du_dxx = dde.grad.hessian(u, x, component=0, i=0, j=0)
    du_dyy = dde.grad.hessian(u, x, component=0, i=1, j=1)
    dv_dxx = dde.grad.hessian(u, x, component=1, i=0, j=0)
    dv_dyy = dde.grad.hessian(u, x, component=1, i=1, j=1)

    transport = (dw_dt + vel_u * dw_dx + vel_v * dw_dy
                 - 1 / Re * (dw_dxx + dw_dyy) - forcing(x))
    continuity = du_dx + dv_dy
    poisson_u = du_dxx + du_dyy + dw_dy
    poisson_v = dv_dxx + dv_dyy - dw_dx
    return [transport, continuity, poisson_u, poisson_v]


def eval(model, dataset,
         step, time_cost,
         offset, config):
    '''
    Report relative L2 test errors of `model` over `dataset` and append
    them (with step index and elapsed time) to the configured CSV logfile.
    '''
    points, target = dataset.get_test_xyt()
    points = torch.tensor(points, dtype=torch.float32)
    with torch.no_grad():
        output = model(points).cpu().numpy()

    # per-channel relative L2 error: u-velocity, v-velocity, vorticity
    u_err = dde.metrics.l2_relative_error(target[:, 0], output[:, 0])
    v_err = dde.metrics.l2_relative_error(target[:, 1], output[:, 1])
    vor_err = dde.metrics.l2_relative_error(target[:, 2], output[:, 2])

    print(f'Instance index : {offset}')
    print(f'L2 relative error in u: {u_err}')
    print(f'L2 relative error in v: {v_err}')
    print(f'L2 relative error in vorticity: {vor_err}')
    row = [offset, u_err, v_err, vor_err, step, time_cost]
    with open(config['log']['logfile'], 'a') as f:
        csv.writer(f).writerow(row)


def train_sapinn(offset, config, args):
    '''
    Train a self-adaptive PINN (SA-PINN) on one Navier-Stokes instance.

    The solution network `net` is trained with Adam while the per-point
    loss weights in `weight_net` are updated by NAdam (from
    train_utils.negadam; presumably a negated-gradient Adam so the weights
    ascend the loss, as in the SA-PINN scheme -- confirm).

    Args:
        offset: index of the instance to load from the dataset file(s)
        config: dict with 'data', 'train', 'model' and 'log' sections
        args: command-line arguments (unused; kept for a uniform entry point)
    '''
    seed = random.randint(1, 10000)
    print(f'Random seed :{seed}')
    np.random.seed(seed)
    # construct dataloader
    data_config = config['data']
    if 'datapath2' in data_config:
        # velocity and vorticity data stored in two separate files
        dataset = NSdata(datapath1=data_config['datapath'],
                         datapath2=data_config['datapath2'],
                         offset=offset, num=1,
                         nx=data_config['nx'], nt=data_config['nt'],
                         sub=data_config['sub'], sub_t=data_config['sub_t'],
                         vel=True,
                         t_interval=data_config['time_interval'])
    else:
        dataset = NSdata(datapath1=data_config['datapath'],
                         offset=offset, num=1,
                         nx=data_config['nx'], nt=data_config['nt'],
                         sub=data_config['sub'], sub_t=data_config['sub_t'],
                         vel=True,
                         t_interval=data_config['time_interval'])
    # periodic spatial domain [0, 2*pi]^2 with time interval [0, T]
    domain = DomainND(['x', 'y', 't'], time_var='t')
    domain.add('x', [0.0, 2 * np.pi], dataset.S)
    domain.add('y', [0.0, 2 * np.pi], dataset.S)
    domain.add('t', [0.0, data_config['time_interval']], dataset.T)
    num_collo = config['train']['num_domain']
    domain.generate_collocation_points(num_collo)
    init_vals = dataset.get_init_cond()
    num_inits = config['train']['num_init']
    if num_inits > dataset.S ** 2:
        # cannot request more initial points than the grid provides
        num_inits = dataset.S ** 2
    init_cond = PointsIC(domain, init_vals, var=['x', 'y'], n_values=num_inits)
    bd_cond = periodicBC(domain, ['x', 'y'], n_values=config['train']['num_boundary'])

    # prepare initial condition inputs
    init_input = torch.tensor(init_cond.input, dtype=torch.float32)
    init_val = torch.tensor(init_cond.val, dtype=torch.float32)

    # prepare boundary condition inputs
    upper_input0 = torch.tensor(bd_cond.upper[0], dtype=torch.float32).squeeze().t()     # shape N x 3
    upper_input1 = torch.tensor(bd_cond.upper[1], dtype=torch.float32).squeeze().t()
    lower_input0 = torch.tensor(bd_cond.lower[0], dtype=torch.float32).squeeze().t()
    lower_input1 = torch.tensor(bd_cond.lower[1], dtype=torch.float32).squeeze().t()

    # prepare collocation points (requires_grad so dde can form PDE residuals)
    collo_input = torch.tensor(domain.X_f, dtype=torch.float32, requires_grad=True)

    # one adaptive weight set per loss group: 1 IC, 2 boundaries, 4 residuals
    weight_net = SAWeight(out_dim=3,
                          num_init=[num_inits],
                          num_bd=[upper_input0.shape[0]] * 2,
                          num_collo=[num_collo] * 4)
    net = DenseNet(config['model']['layers'], config['model']['activation'])
    weight_optim = NAdam(weight_net.parameters(), lr=config['train']['base_lr'])
    net_optim = Adam(net.parameters(), lr=config['train']['base_lr'])

    pbar = tqdm(range(config['train']['epochs']), dynamic_ncols=True)

    start_time = default_timer()
    for e in pbar:
        net.zero_grad()
        weight_net.zero_grad()
        if collo_input.grad is not None:
            collo_input.grad.zero_()

        # initial-condition mismatch
        init_pred = net(init_input) - init_val

        # periodic boundary mismatch along each spatial direction
        bd_0 = net(upper_input0) - net(lower_input0)
        bd_1 = net(upper_input1) - net(lower_input1)

        predu = net(collo_input)
        pde_residual = pde(collo_input, predu)

        loss = weight_net(init_cond=[init_pred], bd_cond=[bd_0, bd_1], residual=pde_residual)
        loss.backward()
        weight_optim.step()
        net_optim.step()
        # drop deepxde's cached Jacobians/Hessians so the next iteration
        # rebuilds them from fresh forward passes
        dde.gradients.clear()
        pbar.set_description(
            (
                f'Epoch: {e}, loss: {loss.item()}'
            )
        )

        if e % config['train']['log_step'] == 0:
            end_time = default_timer()
            eval(net, dataset, e, time_cost=end_time - start_time, offset=offset, config=config)
            start_time = default_timer()
    print('Done!')





    


================================================
FILE: baselines/test.py
================================================
from tqdm import tqdm
import numpy as np

import torch
from torch.utils.data import DataLoader
from baselines.model import DeepONetCP
from baselines.data import DeepONetCPNS, DarcyFlow
from train_utils.losses import LpLoss


def test(model,
         test_loader,
         grid,
         device):
    '''
    Evaluate `model` on `test_loader` and print the mean test error with
    its standard error.

    Args:
        model: network taking (input batch, grid) and returning predictions
        test_loader: iterable of (x, y) batches
        grid: coordinate grid shared by every batch
        device: torch device to run evaluation on
    '''
    pbar = tqdm(test_loader, dynamic_ncols=True, smoothing=0.1)
    myloss = LpLoss(size_average=True)
    model.eval()

    # the grid is identical for every batch; move it to the device once
    # instead of inside the loop
    grid = grid.to(device)

    test_error = []
    with torch.no_grad():
        for x, y in pbar:
            x = x.to(device)
            y = y.to(device)

            pred = model(x, grid)
            loss = myloss(pred, y)

            test_error.append(loss.item())
            pbar.set_description(
                (
                    f'test error: {loss.item():.5f}'
                )
            )

    mean = np.mean(test_error)
    # standard error of the mean over batches
    std = np.std(test_error, ddof=1) / np.sqrt(len(test_error))
    print(f'Averaged test error :{mean}, standard error: {std}')


def test_deeponet_ns(config):
    '''
    Evaluate a Cartesian-product DeepONet on the Navier-Stokes dataset
    described by `config`, optionally restoring weights from a checkpoint.

    Args:
        config: configurations

    Returns:

    '''
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    data_cfg = config['data']
    dataset = DeepONetCPNS(datapath=data_cfg['datapath'],
                           nx=data_cfg['nx'], nt=data_cfg['nt'],
                           sub=data_cfg['sub'], sub_t=data_cfg['sub_t'],
                           offset=data_cfg['offset'], num=data_cfg['n_sample'],
                           t_interval=data_cfg['time_interval'])
    loader = DataLoader(dataset, batch_size=config['test']['batchsize'], shuffle=True)

    # branch input: flattened initial condition on the S x S grid
    branch_in = dataset.S ** 2
    model = DeepONetCP(branch_layer=[branch_in] + config['model']['branch_layers'],
                       trunk_layer=[3] + config['model']['trunk_layers']).to(device)
    if 'ckpt' in config['test']:
        state = torch.load(config['test']['ckpt'])
        model.load_state_dict(state['model'])
    test(model, loader, loader.dataset.xyt, device=device)


def test_deeponet_darcy(config):
    '''
    Evaluate deeponet mode on Darcy Flow
    '''
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    data_cfg = config['data']
    dataset = DarcyFlow(data_cfg['datapath'],
                        nx=data_cfg['nx'], sub=data_cfg['sub'],
                        offset=data_cfg['offset'], num=data_cfg['n_sample'])
    loader = DataLoader(dataset, batch_size=config['test']['batchsize'], shuffle=True)

    # branch input: flattened coefficient field on the S x S grid
    branch_in = dataset.S ** 2
    model = DeepONetCP(branch_layer=[branch_in] + config['model']['branch_layers'],
                       trunk_layer=[2] + config['model']['trunk_layers']).to(device)
    if 'ckpt' in config['test']:
        state = torch.load(config['test']['ckpt'])
        model.load_state_dict(state['model'])
        print('Load model weights from %s' % config['test']['ckpt'])

    test(model, loader, dataset.mesh.reshape(-1, 2), device)

================================================
FILE: baselines/tqd_sapinns.py
================================================
import random
import numpy as np
import csv
from timeit import default_timer

import tensorflow as tf
import deepxde as dde
import tensordiffeq as tdq
from tensordiffeq.models import CollocationSolverND
from tensordiffeq.boundaries import DomainND, periodicBC

from .tqd_utils import PointsIC
from baselines.data import NSdata


Re = 500


def forcing(x):
    '''Kolmogorov-type forcing -4 cos(4x) evaluated on the given coordinate tensor.'''
    return tf.math.cos(4 * x) * (-4)


def bd_model(u_model, x, y, t):
    '''Evaluate the network at (x, y, t) and split its output into (u, v, w).'''
    out = u_model(tf.concat([x, y, t], 1))
    return out[:, 0:1], out[:, 1:2], out[:, 2:3]


def f_model(u_model, x, y, t):
    '''
    PDE residuals of the vorticity-form Navier-Stokes equations at Re=500.

    Args:
        u_model: network mapping (x, y, t) -> (u, v, w)
        x, y, t: coordinate tensors
    Returns:
        (eqn1, eqn2, eqn3, eqn4): vorticity transport, continuity, and the
        two velocity Poisson residuals
    '''
    inp = tf.concat([x, y, t], 1)
    u = u_model(inp)
    u_vel, v_vel, w = u[:, 0:1], u[:, 1:2], u[:, 2:3]

    u_vel_x = tf.gradients(u_vel, x)[0]
    u_vel_xx = tf.gradients(u_vel_x, x)[0]
    u_vel_y = tf.gradients(u_vel, y)[0]
    u_vel_yy = tf.gradients(u_vel_y, y)[0]

    v_vel_y = tf.gradients(v_vel, y)[0]
    v_vel_x = tf.gradients(v_vel, x)[0]
    v_vel_xx = tf.gradients(v_vel_x, x)[0]
    v_vel_yy = tf.gradients(v_vel_y, y)[0]

    w_vor_x = tf.gradients(w, x)[0]
    w_vor_y = tf.gradients(w, y)[0]
    w_vor_t = tf.gradients(w, t)[0]

    w_vor_xx = tf.gradients(w_vor_x, x)[0]
    w_vor_yy = tf.gradients(w_vor_y, y)[0]

    c1 = tdq.utils.constant(1 / Re)
    # FIX: the forcing acts on the second spatial coordinate, matching the
    # PyTorch implementation of the same problem in this repo, which
    # evaluates -4*cos(4*y). The previous version passed `x` here.
    eqn1 = w_vor_t + u_vel * w_vor_x + v_vel * w_vor_y - c1 * (w_vor_xx + w_vor_yy) - forcing(y)
    eqn2 = u_vel_x + v_vel_y
    eqn3 = u_vel_xx + u_vel_yy + w_vor_y
    eqn4 = v_vel_xx + v_vel_yy - w_vor_x
    return eqn1, eqn2, eqn3, eqn4


def eval(model, dataset,
         step, time_cost,
         offset, config):
    '''
    Print relative L2 test errors for the solver and append one CSV row
    (offset, errors, step, time cost) to the configured logfile.
    '''
    points, target = dataset.get_test_xyt()
    output = model.predict(points)

    # per-channel relative L2 error: u-velocity, v-velocity, vorticity
    u_err = dde.metrics.l2_relative_error(target[:, 0], output[:, 0])
    v_err = dde.metrics.l2_relative_error(target[:, 1], output[:, 1])
    vor_err = dde.metrics.l2_relative_error(target[:, 2], output[:, 2])

    print(f'Instance index : {offset}')
    print(f'L2 relative error in u: {u_err}')
    print(f'L2 relative error in v: {v_err}')
    print(f'L2 relative error in vorticity: {vor_err}')
    with open(config['log']['logfile'], 'a') as f:
        csv.writer(f).writerow([offset, u_err, v_err, vor_err, step, time_cost])


def train_sa(offset, config, args):
    '''
    Train a self-adaptive PINN with tensordiffeq's CollocationSolverND on one
    Navier-Stokes instance, evaluating test error every `log_step` iterations.

    Args:
        offset: index of the instance to load from the dataset file(s)
        config: dict with 'data', 'train', 'model' and 'log' sections
        args: command-line arguments (unused; kept for a uniform entry point)
    '''
    seed = random.randint(1, 10000)
    print(f'Random seed :{seed}')
    np.random.seed(seed)
    # construct dataloader
    data_config = config['data']
    if 'datapath2' in data_config:
        # velocity and vorticity data stored in two separate files
        dataset = NSdata(datapath1=data_config['datapath'],
                         datapath2=data_config['datapath2'],
                         offset=offset, num=1,
                         nx=data_config['nx'], nt=data_config['nt'],
                         sub=data_config['sub'], sub_t=data_config['sub_t'],
                         vel=True,
                         t_interval=data_config['time_interval'])
    else:
        dataset = NSdata(datapath1=data_config['datapath'],
                         offset=offset, num=1,
                         nx=data_config['nx'], nt=data_config['nt'],
                         sub=data_config['sub'], sub_t=data_config['sub_t'],
                         vel=True,
                         t_interval=data_config['time_interval'])
    # periodic spatial domain [0, 2*pi]^2 with time interval [0, T]
    domain = DomainND(['x', 'y', 't'], time_var='t')
    domain.add('x', [0.0, 2 * np.pi], dataset.S)
    domain.add('y', [0.0, 2 * np.pi], dataset.S)
    domain.add('t', [0.0, data_config['time_interval']], dataset.T)
    domain.generate_collocation_points(config['train']['num_domain'])
    model = CollocationSolverND()
    init_vals = dataset.get_init_cond()
    num_inits = config['train']['num_init']
    if num_inits > dataset.S ** 2:
        # cannot request more initial points than the grid provides
        num_inits = dataset.S ** 2
    init_cond = PointsIC(domain, init_vals, var=['x', 'y'], n_values=num_inits)
    bd_cond = periodicBC(domain, ['x', 'y'], [bd_model], n_values=config['train']['num_boundary'])
    BCs = [init_cond, bd_cond]

    # self-adaptive weighting: all four PDE residuals and the initial
    # condition are adaptive; the periodic boundary weights stay fixed
    dict_adaptive = {'residual': [True, True, True, True],
                     'BCs': [True, False]}
    init_weights = {
        'residual': [tf.random.uniform([config['train']['num_domain'], 1]),
                     tf.random.uniform([config['train']['num_domain'], 1]),
                     tf.random.uniform([config['train']['num_domain'], 1]),
                     tf.random.uniform([config['train']['num_domain'], 1])],
        'BCs': [100 * tf.random.uniform([num_inits, 1]),
                100 * tf.ones([config['train']['num_boundary'], 1])]
    }

    model.compile(config['model']['layers'], f_model, domain, BCs,
                  isAdaptive=True, dict_adaptive=dict_adaptive, init_weights=init_weights)

    if 'log_step' in config['train']:
        step_size = config['train']['log_step']
    else:
        step_size = 100
    # train in chunks of `step_size` iterations, evaluating after each chunk
    epochs = config['train']['epochs'] // step_size

    for i in range(epochs):
        time_start = default_timer()
        model.fit(tf_iter=step_size)
        time_end = default_timer()
        eval(model, dataset, i * step_size,
             time_cost=time_end - time_start,
             offset=offset,
             config=config)
    print('Done!')


================================================
FILE: baselines/tqd_utils.py
================================================
import numpy as np

from tensordiffeq.boundaries import BC
from tensordiffeq.utils import flatten_and_stack, multimesh, MSE, convertTensor


class PointsIC(BC):
    '''
    Initial condition built from an array of values sampled on the domain.

    Flattens the spatial mesh at t=0 into input points and pairs them with
    the given target values; optionally keeps only `n_values` randomly
    chosen points.
    '''
    def __init__(self, domain, values, var, n_values=None):
        '''
        Args:
            domain: tensordiffeq DomainND describing the space-time domain
            values: target values on the t=0 mesh; reshaped to (-1, 3)
            var: list of spatial variable names, e.g. ['x', 'y']
            n_values: if given, number of random mesh points to keep
        '''
        super(PointsIC, self).__init__()
        # fix: `isInit` was assigned twice in the original; once is enough
        self.isInit = True
        self.n_values = n_values
        self.domain = domain
        self.values = values
        self.vars = var
        # domain dicts for the spatial variables and for the time variable
        self.dicts_ = [item for item in self.domain.domaindict if item['identifier'] != self.domain.time_var]
        self.dict_ = next(item for item in self.domain.domaindict if item["identifier"] == self.domain.time_var)
        self.compile()
        self.create_target(self.values)

    def create_input(self):
        '''Build (x, y, ..., t=0) input points; subsample if n_values is set.'''
        dims = self.get_not_dims(self.domain.time_var)
        mesh = flatten_and_stack(multimesh(dims))
        t_repeat = np.repeat(0.0, len(mesh))

        mesh = np.concatenate((mesh, np.reshape(t_repeat, (-1, 1))), axis=1)
        if self.n_values is not None:
            # NOTE: sampling with replacement, so duplicate points are possible
            self.nums = np.random.randint(0, high=len(mesh), size=self.n_values)
            mesh = mesh[self.nums]
        return mesh

    def create_target(self, values):
        '''Reshape targets to (-1, 3) and apply the same subsample as the inputs.'''
        if self.n_values is not None:
            self.val = np.reshape(values, (-1, 3))[self.nums]
        else:
            self.val = np.reshape(values, (-1, 3))

    def loss(self):
        '''MSE between model predictions and the initial-condition targets.'''
        return MSE(self.preds, self.val)

================================================
FILE: baselines/train_darcy.py
================================================
from tqdm import tqdm

import torch
from torch.utils.data import DataLoader
from torch.optim import Adam
from torch.optim.lr_scheduler import MultiStepLR

from baselines.model import DeepONetCP
from train_utils.losses import LpLoss
from train_utils.utils import save_checkpoint
from baselines.data import DarcyFlow


def train_deeponet_darcy(config):
    '''
    Train a DeepONet on the Darcy flow dataset.

    Reads data/model/optimizer settings from `config`, trains for the
    configured number of epochs, and saves a checkpoint every 1000 epochs
    plus a final one.
    '''
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    data_config = config['data']
    batch_size = config['train']['batchsize']
    dataset = DarcyFlow(data_config['datapath'],
                        nx=data_config['nx'], sub=data_config['sub'],
                        offset=data_config['offset'], num=data_config['n_sample'])
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

    # branch net input size: flattened input field on the S x S grid
    u0_dim = dataset.S ** 2
    model = DeepONetCP(branch_layer=[u0_dim] + config['model']['branch_layers'],
                       trunk_layer=[2] + config['model']['trunk_layers']).to(device)
    optimizer = Adam(model.parameters(), lr=config['train']['base_lr'])
    scheduler = MultiStepLR(optimizer, milestones=config['train']['milestones'],
                            gamma=config['train']['scheduler_gamma'])
    pbar = range(config['train']['epochs'])
    pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.1)
    myloss = LpLoss(size_average=True)
    model.train()
    # the evaluation grid is the same for every batch: moved to device once
    grid = dataset.mesh
    grid = grid.reshape(-1, 2).to(device)  # grid value, (SxS, 2)
    for e in pbar:
        train_loss = 0.0
        for x, y in dataloader:
            x = x.to(device)  # initial condition, (batchsize, u0_dim)

            y = y.to(device)  # ground truth, (batchsize, SxS)

            pred = model(x, grid)
            loss = myloss(pred, y)

            model.zero_grad()
            loss.backward()
            optimizer.step()

            # accumulate un-averaged loss; divided by the dataset size below
            train_loss += loss.item() * y.shape[0]
        train_loss /= len(dataset)
        scheduler.step()

        pbar.set_description(
            (
                f'Epoch: {e}; Averaged train loss: {train_loss:.5f}; '
            )
        )
        if e % 1000 == 0:
            print(f'Epoch: {e}, averaged train loss: {train_loss:.5f}')
            save_checkpoint(config['train']['save_dir'],
                            config['train']['save_name'].replace('.pt', f'_{e}.pt'),
                            model, optimizer)
    save_checkpoint(config['train']['save_dir'],
                    config['train']['save_name'],
                    model, optimizer)

================================================
FILE: baselines/train_ns.py
================================================
from tqdm import tqdm

import torch
from torch.utils.data import DataLoader
from torch.optim import Adam
from torch.optim.lr_scheduler import MultiStepLR

from baselines.model import DeepONet, DeepONetCP
from baselines.data import DeepOnetNS, DeepONetCPNS
from train_utils.losses import LpLoss
from train_utils.utils import save_checkpoint
from train_utils.data_utils import sample_data


def train_deeponet_cp(config):
    '''
    Train Cartesian product DeepONet on the Navier-Stokes dataset.

    Args:
        config: dict with 'data', 'model' and 'train' sections

    Returns:
        None. A checkpoint is saved every 500 epochs.
    '''
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    data_config = config['data']
    batch_size = config['train']['batchsize']
    dataset = DeepONetCPNS(datapath=data_config['datapath'],
                           nx=data_config['nx'], nt=data_config['nt'],
                           sub=data_config['sub'], sub_t=data_config['sub_t'],
                           offset=data_config['offset'], num=data_config['n_sample'],
                           t_interval=data_config['time_interval'])
    train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    # branch net input size: flattened initial condition on the S x S grid
    u0_dim = dataset.S ** 2
    model = DeepONetCP(branch_layer=[u0_dim] + config['model']['branch_layers'],
                       trunk_layer=[3] + config['model']['trunk_layers']).to(device)
    optimizer = Adam(model.parameters(), lr=config['train']['base_lr'])
    scheduler = MultiStepLR(optimizer, milestones=config['train']['milestones'],
                            gamma=config['train']['scheduler_gamma'])
    pbar = range(config['train']['epochs'])
    pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.1)
    myloss = LpLoss(size_average=True)
    model.train()

    # the coordinate grid never changes; fetch it and move it to the device
    # once, instead of on every batch (matches train_deeponet_darcy)
    grid = dataset.xyt.to(device)  # grid value, (SxSxT, 3)

    for e in pbar:
        train_loss = 0.0
        for x, y in train_loader:
            x = x.to(device)  # initial condition, (batchsize, u0_dim)
            y = y.to(device)  # ground truth, (batchsize, SxSxT)

            pred = model(x, grid)
            loss = myloss(pred, y)

            model.zero_grad()
            loss.backward()
            optimizer.step()

            # accumulate un-averaged loss; divided by the dataset size below
            train_loss += loss.item() * y.shape[0]
        train_loss /= len(dataset)
        scheduler.step()

        pbar.set_description(
            (
                f'Epoch: {e}; Averaged train loss: {train_loss:.5f}; '
            )
        )
        if e % 500 == 0:
            print(f'Epoch: {e}, averaged train loss: {train_loss:.5f}')
            save_checkpoint(config['train']['save_dir'],
                            config['train']['save_name'].replace('.pt', f'_{e}.pt'),
                            model, optimizer)


def train_deeponet(config):
    '''
    Train a plain (non-Cartesian-product) DeepONet on the Navier-Stokes data.

    Each step draws one batch from an endless sampler over the dataloader,
    so 'epochs' counts optimization steps rather than full dataset passes.

    Args:
        config: dict with 'data', 'model' and 'train' sections

    Returns:
        None. The final model/optimizer state is saved as a checkpoint.
    '''
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    data_config = config['data']
    dataset = DeepOnetNS(datapath=data_config['datapath'],
                         nx=data_config['nx'], nt=data_config['nt'],
                         sub=data_config['sub'], sub_t=data_config['sub_t'],
                         offset=data_config['offset'], num=data_config['n_sample'],
                         t_interval=data_config['time_interval'])
    train_loader = DataLoader(dataset, batch_size=config['train']['batchsize'], shuffle=False)

    # branch net input size: flattened initial condition (S x S values)
    u0_dim = dataset.S ** 2
    model = DeepONet(branch_layer=[u0_dim] + config['model']['branch_layers'],
                     trunk_layer=[3] + config['model']['trunk_layers']).to(device)
    optimizer = Adam(model.parameters(), lr=config['train']['base_lr'])
    scheduler = MultiStepLR(optimizer, milestones=config['train']['milestones'],
                            gamma=config['train']['scheduler_gamma'])

    pbar = range(config['train']['epochs'])
    pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.1)
    myloss = LpLoss(size_average=True)
    model.train()
    # sample_data presumably wraps the loader into an infinite iterator -- confirm
    loader = sample_data(train_loader)
    for e in pbar:
        u0, x, y = next(loader)
        u0 = u0.to(device)
        x = x.to(device)
        y = y.to(device)
        pred = model(u0, x)
        loss = myloss(pred, y)
        model.zero_grad()
        loss.backward()
        optimizer.step()
        # NOTE(review): the scheduler steps once per batch here, so
        # `milestones` are in units of steps, not epochs -- confirm intended
        scheduler.step()

        pbar.set_description(
            (
                f'Epoch: {e}; Train loss: {loss.item():.5f}; '
            )
        )
    save_checkpoint(config['train']['save_dir'],
                    config['train']['save_name'],
                    model, optimizer)
    print('Done!')


================================================
FILE: baselines/unet3d.py
================================================
from functools import partial

import torch
from torch import nn as nn
from torch.nn import functional as F



# UNet3d from https://github.com/wolny/pytorch-3dunet


class BaseModel(nn.Module):
    """Common base for models: tracks the device and declares the training API."""

    def __init__(self):
        super().__init__()
        # empty parameter whose only job is to record which device the
        # module has been moved to
        self.device_indicator_param = nn.Parameter(torch.empty(0))

    @property
    def device(self):
        """Device the model currently lives on."""
        return self.device_indicator_param.device

    def data_dict_to_input(self, data_dict, **kwargs):
        """
        Convert data dictionary to appropriate input for the model.
        Subclasses must override.
        """
        raise NotImplementedError

    def loss_dict(self, data_dict, **kwargs):
        """
        Compute the loss dictionary for the model.
        Subclasses must override.
        """
        raise NotImplementedError

    @torch.no_grad()
    def eval_dict(self, data_dict, **kwargs):
        """
        Compute the evaluation dictionary for the model.
        Subclasses must override.
        """
        raise NotImplementedError

def create_conv(
    in_channels, out_channels, kernel_size, order, num_groups, padding, is3d
):
    """
    Create a list of modules which together constitute a single conv layer with
    non-linearity and optional batchnorm/groupnorm.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size(int or tuple): size of the convolving kernel
        order (string): order of things, e.g.
            'cr' -> conv + ReLU
            'gcr' -> groupnorm + conv + ReLU
            'cl' -> conv + LeakyReLU
            'ce' -> conv + ELU
            'bcr' -> batchnorm + conv + ReLU
        num_groups (int): number of groups for the GroupNorm
        padding (int or tuple): add zero-padding added to all three sides of the input
        is3d (bool): if True use Conv3d, otherwise use Conv2d
    Return:
        list of tuple (name, module)
    """
    assert "c" in order, "Conv layer MUST be present"
    assert (
        order[0] not in "rle"
    ), "Non-linearity cannot be the first operation in the layer"

    modules = []
    for i, char in enumerate(order):
        if char == "r":
            modules.append(("ReLU", nn.ReLU(inplace=True)))
        elif char == "l":
            modules.append(("LeakyReLU", nn.LeakyReLU(inplace=True)))
        elif char == "e":
            modules.append(("ELU", nn.ELU(inplace=True)))
        elif char == "c":
            # add learnable bias only in the absence of batchnorm/groupnorm
            bias = not ("g" in order or "b" in order)
            if is3d:
                conv = nn.Conv3d(
                    in_channels, out_channels, kernel_size, padding=padding, bias=bias
                )
            else:
                conv = nn.Conv2d(
                    in_channels, out_channels, kernel_size, padding=padding, bias=bias
                )

            modules.append(("conv", conv))
        elif char == "g":
            # norm placed before the conv sees in_channels, after it sees out_channels
            is_before_conv = i < order.index("c")
            if is_before_conv:
                num_channels = in_channels
            else:
                num_channels = out_channels

            # use only one group if the given number of groups is greater than the number of channels
            if num_channels < num_groups:
                num_groups = 1

            assert (
                num_channels % num_groups == 0
            ), f"Expected number of channels in input to be divisible by num_groups. num_channels={num_channels}, num_groups={num_groups}"
            modules.append(
                (
                    "groupnorm",
                    nn.GroupNorm(num_groups=num_groups, num_channels=num_channels),
                )
            )
        elif char == "b":
            is_before_conv = i < order.index("c")
            if is3d:
                bn = nn.BatchNorm3d
            else:
                bn = nn.BatchNorm2d

            if is_before_conv:
                modules.append(("batchnorm", bn(in_channels)))
            else:
                modules.append(("batchnorm", bn(out_channels)))
        else:
            raise ValueError(
                f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
            )

    return modules


class SingleConv(nn.Sequential):
    """
    Single convolution block: a Conv3d/Conv2d combined with a non-linearity
    and optional batchnorm/groupnorm, assembled in the sequence given by
    `order`.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        kernel_size (int or tuple): size of the convolving kernel
        order (string): layer ordering, e.g.
            'cr' -> conv + ReLU
            'crg' -> conv + ReLU + groupnorm
            'cl' -> conv + LeakyReLU
            'ce' -> conv + ELU
        num_groups (int): number of groups for the GroupNorm
        padding (int or tuple): amount of zero-padding
        is3d (bool): if True use Conv3d, otherwise use Conv2d
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        order="gcr",
        num_groups=8,
        padding=1,
        is3d=True,
    ):
        super(SingleConv, self).__init__()

        # create_conv returns an ordered list of (name, module) pairs
        layers = create_conv(
            in_channels, out_channels, kernel_size, order, num_groups, padding, is3d
        )
        for layer_name, layer in layers:
            self.add_module(layer_name, layer)


class DoubleConv(nn.Sequential):
    """
    Two consecutive `SingleConv` blocks (e.g. GroupNorm+Conv3d+ReLU twice).

    The layer ordering inside each conv is controlled by `order`; padded
    convolutions keep the spatial output size equal to the input size, so
    no cropping is needed in the decoder path.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        encoder (bool): True on the encoder path, False on the decoder path
        kernel_size (int or tuple): size of the convolving kernel
        order (string): layer ordering, e.g.
            'cr' -> conv + ReLU
            'crg' -> conv + ReLU + groupnorm
            'cl' -> conv + LeakyReLU
            'ce' -> conv + ELU
        num_groups (int): number of groups for the GroupNorm
        padding (int or tuple): zero-padding added to all sides of the input
        is3d (bool): if True use Conv3d instead of Conv2d layers
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        encoder,
        kernel_size=3,
        order="gcr",
        num_groups=8,
        padding=1,
        is3d=True,
    ):
        super(DoubleConv, self).__init__()
        if encoder:
            # encoder: grow channels halfway in the first conv, but never
            # shrink below the input channel count
            mid_channels = max(out_channels // 2, in_channels)
            channel_plan = [
                (in_channels, mid_channels),
                (mid_channels, out_channels),
            ]
        else:
            # decoder: reduce to out_channels immediately in the first conv
            channel_plan = [
                (in_channels, out_channels),
                (out_channels, out_channels),
            ]

        for idx, (c_in, c_out) in enumerate(channel_plan, start=1):
            self.add_module(
                f"SingleConv{idx}",
                SingleConv(
                    c_in,
                    c_out,
                    kernel_size,
                    order,
                    num_groups,
                    padding=padding,
                    is3d=is3d,
                ),
            )


class Encoder(nn.Module):
    """
    One step of the encoder path: an optional pooling layer followed by the
    basic conv module (DoubleConv or ResNetBlock).

    A non-standard `pool_kernel_size` may be used for anisotropic volumes;
    make sure the decoder uses a matching scale factor.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        conv_kernel_size (int or tuple): size of the convolving kernel
        apply_pooling (bool): if True pool before the conv module
        pool_kernel_size (int or tuple): the size of the pooling window
        pool_type (str): pooling layer: 'max' or 'avg'
        basic_module(nn.Module): either ResNetBlock or DoubleConv
        conv_layer_order (string): layer ordering inside `DoubleConv`;
            see `DoubleConv` for details
        num_groups (int): number of groups for the GroupNorm
        padding (int or tuple): zero-padding for the convolutions
        is3d (bool): use 3d or 2d convolutions/pooling operation
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        conv_kernel_size=3,
        apply_pooling=True,
        pool_kernel_size=2,
        pool_type="max",
        basic_module=DoubleConv,
        conv_layer_order="gcr",
        num_groups=8,
        padding=1,
        is3d=True,
    ):
        super(Encoder, self).__init__()
        assert pool_type in ["max", "avg"]

        if not apply_pooling:
            self.pooling = None
        else:
            # pick the pooling class from (type, dimensionality)
            pool_classes = {
                ("max", True): nn.MaxPool3d,
                ("max", False): nn.MaxPool2d,
                ("avg", True): nn.AvgPool3d,
                ("avg", False): nn.AvgPool2d,
            }
            pool_cls = pool_classes[(pool_type, is3d)]
            self.pooling = pool_cls(kernel_size=pool_kernel_size)

        self.basic_module = basic_module(
            in_channels,
            out_channels,
            encoder=True,
            kernel_size=conv_kernel_size,
            order=conv_layer_order,
            num_groups=num_groups,
            padding=padding,
            is3d=is3d,
        )

    def forward(self, x):
        if self.pooling is not None:
            x = self.pooling(x)
        return self.basic_module(x)


class Decoder(nn.Module):
    """
    One step of the decoder path: upsample the incoming feature map, join it
    with the matching encoder output, then apply the basic conv module.

    Joining is concatenation along the channel dim when upsampling by
    interpolation (DoubleConv), and summation when upsampling by transposed
    convolution (ResNetBlock-style modules).

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
        conv_kernel_size (int or tuple): size of the convolving kernel
        scale_factor (tuple): H/W/D multiplier under nn.Upsample, or the
            stride of the ConvTranspose3d; must undo the corresponding
            encoder's pooling
        basic_module(nn.Module): either ResNetBlock or DoubleConv
        conv_layer_order (string): layer ordering inside `DoubleConv`;
            see `DoubleConv` for details
        num_groups (int): number of groups for the GroupNorm
        mode (str): interpolation mode for InterpolateUpsampling
        padding (int or tuple): zero-padding for the convolutions
        upsample (bool): whether the input should be upsampled at all
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        conv_kernel_size=3,
        scale_factor=(2, 2, 2),
        basic_module=DoubleConv,
        conv_layer_order="gcr",
        num_groups=8,
        mode="nearest",
        padding=1,
        upsample=True,
        is3d=True,
    ):
        super(Decoder, self).__init__()

        if not upsample:
            self.upsampling = NoUpsampling()
            self.joining = partial(self._joining, concat=True)
        elif basic_module == DoubleConv:
            # interpolation upsampling pairs with concatenation joining
            self.upsampling = InterpolateUpsampling(mode=mode)
            self.joining = partial(self._joining, concat=True)
        else:
            # e.g. ResNetBlock: learned transposed-conv upsampling + summation
            self.upsampling = TransposeConvUpsampling(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=conv_kernel_size,
                scale_factor=scale_factor,
            )
            self.joining = partial(self._joining, concat=False)
            # the transposed conv already mapped the features to out_channels
            in_channels = out_channels

        self.basic_module = basic_module(
            in_channels,
            out_channels,
            encoder=False,
            kernel_size=conv_kernel_size,
            order=conv_layer_order,
            num_groups=num_groups,
            padding=padding,
            is3d=is3d,
        )

    def forward(self, encoder_features, x):
        x = self.upsampling(encoder_features=encoder_features, x=x)
        x = self.joining(encoder_features, x)
        return self.basic_module(x)

    @staticmethod
    def _joining(encoder_features, x, concat):
        if concat:
            return torch.cat((encoder_features, x), dim=1)
        return encoder_features + x


def create_encoders(
    in_channels,
    f_maps,
    basic_module,
    conv_kernel_size,
    conv_padding,
    layer_order,
    num_groups,
    pool_kernel_size,
    is3d,
):
    """Build the encoder path as a ModuleList; its depth equals len(f_maps)."""
    shared = dict(
        basic_module=basic_module,
        conv_layer_order=layer_order,
        conv_kernel_size=conv_kernel_size,
        num_groups=num_groups,
        padding=conv_padding,
        is3d=is3d,
    )

    encoders = []
    prev_channels = in_channels
    for level, feature_num in enumerate(f_maps):
        if level == 0:
            # the first encoder keeps full resolution: no pooling
            encoder = Encoder(
                prev_channels, feature_num, apply_pooling=False, **shared
            )
        else:
            encoder = Encoder(
                prev_channels,
                feature_num,
                pool_kernel_size=pool_kernel_size,
                **shared,
            )
        encoders.append(encoder)
        prev_channels = feature_num

    return nn.ModuleList(encoders)


def create_decoders(
    f_maps, basic_module, conv_kernel_size, conv_padding, layer_order, num_groups, is3d
):
    """Build the decoder path as a ModuleList of length len(f_maps) - 1."""
    rev = list(reversed(f_maps))
    decoders = []
    # walk the reversed feature maps pairwise: (current depth, next shallower)
    for current_num, skip_num in zip(rev[:-1], rev[1:]):
        if basic_module == DoubleConv:
            # DoubleConv decoders concatenate the skip connection, so the
            # input channel count is the sum of both feature maps
            in_feature_num = current_num + skip_num
        else:
            # summation joining keeps the channel count unchanged
            in_feature_num = current_num

        decoders.append(
            Decoder(
                in_feature_num,
                skip_num,
                basic_module=basic_module,
                conv_layer_order=layer_order,
                conv_kernel_size=conv_kernel_size,
                num_groups=num_groups,
                padding=conv_padding,
                is3d=is3d,
            )
        )
    return nn.ModuleList(decoders)


class AbstractUpsampling(nn.Module):
    """
    Base class for upsampling strategies.

    Subclasses supply `upsample`, a callable mapping
    (tensor, target_spatial_size) -> upsampled tensor, implemented via
    interpolation or a learned transposed convolution.
    """

    def __init__(self, upsample):
        super(AbstractUpsampling, self).__init__()
        self.upsample = upsample

    def forward(self, encoder_features, x):
        # the target spatial size comes from the matching encoder output
        target_size = encoder_features.size()[2:]
        return self.upsample(x, target_size)


class InterpolateUpsampling(AbstractUpsampling):
    """
    Upsampling via F.interpolate.

    Args:
        mode (str): interpolation algorithm:
            'nearest' | 'linear' | 'bilinear' | 'trilinear' | 'area'.
            Default: 'nearest'
    """

    def __init__(self, mode="nearest"):
        super().__init__(partial(self._interpolate, mode=mode))

    @staticmethod
    def _interpolate(x, size, mode):
        return F.interpolate(x, size=size, mode=mode)


class TransposeConvUpsampling(AbstractUpsampling):
    """
    Learned upsampling via nn.ConvTranspose3d.

    Args:
        in_channels (int): input channels of the transposed conv
        out_channels (int): output channels of the transposed conv
        kernel_size (int or tuple): size of the convolving kernel
        scale_factor (int or tuple): stride of the transposed convolution;
            should invert the MaxPool3d of the matching encoder
    """

    def __init__(
        self, in_channels=None, out_channels=None, kernel_size=3, scale_factor=(2, 2, 2)
    ):
        transposed_conv = nn.ConvTranspose3d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=scale_factor,
            padding=1,
        )
        super().__init__(transposed_conv)


class NoUpsampling(AbstractUpsampling):
    """Identity upsampling: passes the input through unchanged."""

    def __init__(self):
        super().__init__(self._no_upsampling)

    @staticmethod
    def _no_upsampling(x, size):
        # `size` is ignored; the decoder keeps the current resolution
        return x


def number_of_features_per_level(init_channel_number, num_levels):
    """Return feature-map counts doubling per level: [c, 2c, 4c, ...]."""
    counts = []
    channels = init_channel_number
    for _ in range(num_levels):
        counts.append(channels)
        channels *= 2
    return counts


class AbstractUNet(BaseModel):
    """
    Base class for standard and residual UNet.

    Args:
        in_channels (int): number of input channels
        out_channels (int): number of output segmentation masks;
            out_channels may correspond to semantic classes or to separate
            binary masks — pick the matching loss (CrossEntropyLoss for
            multi-class, BCEWithLogitsLoss for two-class) accordingly
        f_maps (int, tuple): feature maps per encoder level; an int expands
            to the geometric progression f_maps * 2^k, k = 0..num_levels-1
        final_sigmoid (bool): if True apply nn.Sigmoid after the final 1x1
            convolution, otherwise nn.Softmax; only active in eval mode
        basic_module: basic block for the encoder/decoder (DoubleConv,
            ResNetBlock, ...)
        layer_order (string): ordering of layers in `SingleConv`, e.g. 'crg'
            for Conv + ReLU + GroupNorm; see `SingleConv` for details
        num_groups (int): number of groups for the GroupNorm
        num_levels (int): encoder/decoder depth (used only when f_maps is an
            int), default: 4
        is_segmentation (bool): if True, Sigmoid/Softmax is applied after the
            final conv in eval mode; if False (regression) it is skipped
        conv_kernel_size (int or tuple): kernel size inside the basic_module
        pool_kernel_size (int or tuple): pooling window size
        conv_padding (int or tuple): zero-padding for the convolutions
        is3d (bool): if True the model is 3D, otherwise 2D, default: True
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        final_sigmoid,
        basic_module,
        f_maps=64,
        layer_order="gcr",
        num_groups=8,
        num_levels=4,
        is_segmentation=False,
        conv_kernel_size=3,
        pool_kernel_size=2,
        conv_padding=1,
        is3d=True,
    ):
        super(AbstractUNet, self).__init__()

        if isinstance(f_maps, int):
            # expand a single int into a doubling progression of feature maps
            f_maps = number_of_features_per_level(f_maps, num_levels=num_levels)

        assert isinstance(f_maps, (list, tuple))
        assert len(f_maps) > 1, "Required at least 2 levels in the U-Net"
        if "g" in layer_order:
            assert (
                num_groups is not None
            ), "num_groups must be specified if GroupNorm is used"

        # encoder path
        self.encoders = create_encoders(
            in_channels,
            f_maps,
            basic_module,
            conv_kernel_size,
            conv_padding,
            layer_order,
            num_groups,
            pool_kernel_size,
            is3d,
        )

        # decoder path
        self.decoders = create_decoders(
            f_maps,
            basic_module,
            conv_kernel_size,
            conv_padding,
            layer_order,
            num_groups,
            is3d,
        )

        # final 1x1 convolution maps features to the requested output channels
        conv_cls = nn.Conv3d if is3d else nn.Conv2d
        self.final_conv = conv_cls(f_maps[0], out_channels, 1)

        if not is_segmentation:
            # regression problem: no output normalization
            self.final_activation = None
        elif final_sigmoid:
            self.final_activation = nn.Sigmoid()
        else:
            self.final_activation = nn.Softmax(dim=1)

    def forward(self, x):
        # encoder part; collect outputs newest-first so they align with the
        # decoder order
        skip_connections = []
        for encoder in self.encoders:
            x = encoder(x)
            skip_connections.insert(0, x)

        # drop the deepest feature map — it is the decoder's initial input,
        # not a skip connection
        skip_connections = skip_connections[1:]

        # decoder part
        for decoder, skip in zip(self.decoders, skip_connections):
            x = decoder(skip, x)

        x = self.final_conv(x)

        # the network outputs logits during training; normalization
        # (Sigmoid/Softmax) is applied only at inference time
        if not self.training and self.final_activation is not None:
            x = self.final_activation(x)

        return x


class UNet3D(AbstractUNet):
    """
    3D U-Net from
    `"3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation"
        <https://arxiv.org/pdf/1606.06650.pdf>`.

    Uses `DoubleConv` as the basic module and nearest-neighbor interpolation
    for upsampling in the decoder. Extra keyword arguments are accepted and
    ignored.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        final_sigmoid=False,
        f_maps=64,
        layer_order="gcr",
        num_groups=8,
        num_levels=4,
        is_segmentation=False,
        conv_padding=1,
        **kwargs,
    ):
        super(UNet3D, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            final_sigmoid=final_sigmoid,
            basic_module=DoubleConv,
            f_maps=f_maps,
            layer_order=layer_order,
            num_groups=num_groups,
            num_levels=num_levels,
            is_segmentation=is_segmentation,
            conv_padding=conv_padding,
            is3d=True,
        )



================================================
FILE: baselines/utils.py
================================================
import numpy as np

import torch
import torch.autograd as autograd


def weighted_mse(pred, target, weight=None):
    """Mean squared error, optionally weighted elementwise by `weight`."""
    sq_err = (pred - target) ** 2
    if weight is not None:
        sq_err = weight * sq_err
    return torch.mean(sq_err)


def get_3dboundary_points(num_x,                # number of points on x axis
                          num_y,                # number of points on y axis
                          num_t,                # number of points on t axis
                          bot=(0, 0, 0),        # lower bound
                          top=(1.0, 1.0, 1.0)   # upper bound
                          ):
    """
    Sample boundary points of the space-time box: the initial slice t=t_bot
    plus the four spatial side walls. Spatial axes exclude their upper
    endpoints (periodic-style grids); the time axis includes t_top.

    Returns:
        (num_x*num_y + 2*num_y*num_t + 2*num_x*num_t, 3) array of (x, y, t)
    """
    x_bot, y_bot, t_bot = bot
    x_top, y_top, t_top = top

    x_arr = np.linspace(x_bot, x_top, num=num_x, endpoint=False)
    y_arr = np.linspace(y_bot, y_top, num=num_y, endpoint=False)
    t_arr = np.linspace(t_bot, t_top, num=num_t)

    def face(a_vals, b_vals, const, order):
        # mesh two axes, hold the third at `const`; `order` names the
        # (first, second, constant) axes so columns land in (x, y, t) order
        aa, bb = np.meshgrid(a_vals, b_vals, indexing='ij')
        cols = {
            order[0]: np.ravel(aa),
            order[1]: np.ravel(bb),
            order[2]: np.full(aa.size, const, dtype=aa.dtype),
        }
        return np.stack([cols['x'], cols['y'], cols['t']], axis=0).T

    faces = [
        face(x_arr, y_arr, t_bot, 'xyt'),   # initial-condition plane t = t_bot
        face(y_arr, t_arr, x_bot, 'ytx'),   # wall x = x_bot
        face(y_arr, t_arr, x_top, 'ytx'),   # wall x = x_top
        face(x_arr, t_arr, y_bot, 'xty'),   # wall y = y_bot
        face(x_arr, t_arr, y_top, 'xty'),   # wall y = y_top
    ]
    return np.concatenate(faces, axis=0)


def get_3dboundary(value):
    """
    Extract boundary values from a (1, X, Y, T) array: the t=0 slice plus the
    four spatial faces, flattened and stacked into a column vector.

    Returns:
        (X*Y + 2*Y*T + 2*X*T, 1) array
    """
    vol = value[0]
    faces = [
        vol[:, :, 0:1],   # t = 0 (initial condition); the final slice is excluded
        vol[0:1, :, :],   # low x wall
        vol[-1:, :, :],   # high x wall
        vol[:, 0:1, :],   # low y wall
        vol[:, -1:, :],   # high y wall
    ]
    flat = np.concatenate([np.ravel(f) for f in faces], axis=0)
    return flat[:, np.newaxis]


def get_xytgrid(S, T, bot=[0, 0, 0], top=[1, 1, 1]):
    '''
    Flattened (x, y, t) grid over a space-time box. Spatial axes exclude
    their upper endpoints; the time axis includes its endpoint.

    Args:
        S: number of points on each spatial domain
        T: number of points on temporal domain including endpoint
        bot: list or tuple, lower bound on each dimension
        top: list or tuple, upper bound on each dimension

    Returns:
        (S * S * T, 3) array
    '''
    axes = [
        np.linspace(bot[0], top[0], num=S, endpoint=False),
        np.linspace(bot[1], top[1], num=S, endpoint=False),
        np.linspace(bot[2], top[2], num=T),
    ]
    grids = np.meshgrid(*axes, indexing='ij')
    return np.stack([np.ravel(g) for g in grids], axis=0).T


def get_2dgird(num=31):
    """Flattened uniform 2-D grid on [-1, 1]^2; returns a (num*num, 2) array."""
    axis = np.linspace(-1, 1, num)
    gridx, gridy = np.meshgrid(axis, axis)
    return np.hstack((gridx.reshape(-1, 1), gridy.reshape(-1, 1)))


def get_3dgrid(num=11):
    """Flattened uniform 3-D grid on [-1, 1]^3; returns a (num**3, 3) array."""
    axis = np.linspace(-1, 1, num)
    grids = np.meshgrid(axis, axis, axis)
    return np.hstack([g.reshape(-1, 1) for g in grids])


def get_4dgrid(num=11):
    '''
    Flattened 4-D meshgrid over [-1, 1]^3 x [0, 1], columns (x, y, z, t).

    Args:
        num: number of collocation points of each dimension

    Returns:
        (num**4, 4) array
    '''
    t_axis = np.linspace(0, 1, num)
    space_axis = np.linspace(-1, 1, num)
    grids = np.meshgrid(space_axis, space_axis, space_axis, t_axis)
    return np.hstack([g.reshape(-1, 1) for g in grids])


def vel2vor(u, v, x, y):
    """Vorticity w = dv/dx - du/dy from velocity components via autograd."""
    grad_u_y, = autograd.grad(outputs=[u.sum()], inputs=[y], create_graph=True)
    grad_v_x, = autograd.grad(outputs=[v.sum()], inputs=[x], create_graph=True)
    return grad_v_x - grad_u_y


def sub_mse(vec):
    '''
    Mean squared error between the first and second halves of a vector.
    An odd trailing element, if any, is ignored.

    Args:
        vec: 1-D tensor

    Returns:
        scalar tensor
    '''
    half = vec.shape[0] // 2
    first, second = vec[:half], vec[half: 2 * half]
    return ((first - second) ** 2).mean()


def get_sample(npt=100):
    """
    Sample boundary collocation coordinates for the two pairs of opposite
    walls. Each returned tensor has shape (2*(npt//2), 1); random
    coordinates are repeated so both walls of a pair share (y, t) or (x, t).
    """
    half = npt // 2

    def paired_rand():
        # identical random values on both halves (both walls of the pair)
        return torch.rand(size=(half, 1)).repeat(2, 1)

    def wall_coords():
        # first half sits on the 0-wall, second half on the 1-wall
        return torch.cat([torch.zeros(half, 1), torch.ones(half, 1)], dim=0)

    bc1_y_sample = paired_rand()
    bc1_t_sample = paired_rand()
    bc1_x_sample = wall_coords()

    bc2_x_sample = paired_rand()
    bc2_t_sample = paired_rand()
    bc2_y_sample = wall_coords()

    return bc1_x_sample, bc1_y_sample, bc1_t_sample, \
           bc2_x_sample, bc2_y_sample, bc2_t_sample


def concat(xy, z, t=0.0, offset=0):
    '''
    Assemble a (N, 4) coordinate array from a 2-D slice and a fixed axis.

    Args:
        xy: (N, 2) coordinates of the two free spatial axes
        z: (N, 1) coordinate of the fixed spatial axis
        t: scalar or (N, 1), value for the time column
        offset: start index of xy within the spatial columns (0, 1, or 2)

    Returns:
        (N, 4) array
    '''
    # Fill the array with t so any column not overwritten below (the time
    # axis) keeps the value t. Bug fix: the original used
    # `np.zeros((N, 4)) * t`, which is always zero, so the `t` argument was
    # silently dropped (identical behavior only at the default t=0.0).
    output = np.ones((z.shape[0], 4)) * t
    if offset < 2:
        output[:, offset: offset + 2] = xy
        output[:, (offset + 2) % 3: (offset + 2) % 3 + 1] = z
    else:
        # xy wraps around the end of the spatial columns
        output[:, 2:] = xy[:, 0:1]
        output[:, 0:1] = xy[:, 1:]
        output[:, 1:2] = z
    return output


def cal_mixgrad(outputs, inputs):
    """
    First gradient of `outputs` w.r.t. `inputs`, plus the pure second
    derivatives along the first three input columns.

    Returns:
        (grad, out_xx, out_yy, out_zz)
    """
    grad, = autograd.grad(outputs=[outputs.sum()], inputs=[inputs], create_graph=True)
    second_derivs = []
    for dim in range(3):
        g2, = autograd.grad(
            outputs=[grad[:, dim].sum()], inputs=[inputs], create_graph=True
        )
        second_derivs.append(g2[:, dim])
    return grad, second_derivs[0], second_derivs[1], second_derivs[2]

================================================
FILE: cavity_flow.py
================================================
"""
@author: Zongyi Li
This file is the Fourier Neural Operator for 3D problem such as the Navier-Stokes equation discussed in Section 5.3 in the [paper](https://arxiv.org/pdf/2010.08895.pdf),
which takes the 2D spatial + 1D temporal equation directly as a 3D problem
"""

import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F

import matplotlib.pyplot as plt


from timeit import default_timer
from torch.optim import Adam
from train_utils.datasets import MatReader
from train_utils.losses import LpLoss
from train_utils.utils import count_params

torch.manual_seed(0)
np.random.seed(0)


################################################################
# 3d fourier layers
################################################################

class SpectralConv3d(nn.Module):
    """
    3D Fourier layer: real FFT -> per-mode linear transform -> inverse FFT.

    Only the lowest `modes1 x modes2 x modes3` frequencies are kept, in each
    of the four sign corners of the (x, y) spectrum (the t axis is one-sided
    because of the real FFT).
    """

    def __init__(self, in_channels, out_channels, modes1, modes2, modes3):
        super(SpectralConv3d, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        # number of Fourier modes kept per axis, at most floor(N/2) + 1
        self.modes1 = modes1
        self.modes2 = modes2
        self.modes3 = modes3

        self.scale = 1 / (in_channels * out_channels)
        weight_shape = (in_channels, out_channels, modes1, modes2, modes3)
        self.weights1 = nn.Parameter(
            self.scale * torch.rand(*weight_shape, dtype=torch.cfloat))
        self.weights2 = nn.Parameter(
            self.scale * torch.rand(*weight_shape, dtype=torch.cfloat))
        self.weights3 = nn.Parameter(
            self.scale * torch.rand(*weight_shape, dtype=torch.cfloat))
        self.weights4 = nn.Parameter(
            self.scale * torch.rand(*weight_shape, dtype=torch.cfloat))

    def compl_mul3d(self, input, weights):
        # (batch, in, x, y, t) x (in, out, x, y, t) -> (batch, out, x, y, t)
        return torch.einsum("bixyz,ioxyz->boxyz", input, weights)

    def forward(self, x):
        batchsize = x.shape[0]
        # Fourier coefficients (one-sided along the last axis)
        x_ft = torch.fft.rfftn(x, dim=[-3, -2, -1])

        out_ft = torch.zeros(
            batchsize, self.out_channels,
            x.size(-3), x.size(-2), x.size(-1) // 2 + 1,
            dtype=torch.cfloat, device=x.device,
        )

        m1, m2, m3 = self.modes1, self.modes2, self.modes3
        # transform each retained spectral corner with its own weight tensor
        corners = [
            (slice(None, m1), slice(None, m2), self.weights1),
            (slice(-m1, None), slice(None, m2), self.weights2),
            (slice(None, m1), slice(-m2, None), self.weights3),
            (slice(-m1, None), slice(-m2, None), self.weights4),
        ]
        for sx, sy, w in corners:
            out_ft[:, :, sx, sy, :m3] = self.compl_mul3d(x_ft[:, :, sx, sy, :m3], w)

        # back to physical space
        return torch.fft.irfftn(out_ft, s=(x.size(-3), x.size(-2), x.size(-1)))


class FNO3d(nn.Module):
    """3-D Fourier Neural Operator (4 spectral layers) for cavity flow.

    Pipeline:
      1. Lift the input to the channel dimension: fc0 -> tanh -> fc1.
      2. Four integral-operator layers u' = (W + K)(u), where W is a
         pointwise 1x1 Conv3d (self.w*) and K a spectral convolution
         (self.conv*), with tanh between layers.
      3. Project back to the output space: fc2 -> tanh -> fc3.

    Input : (batch, x, y, t, 2) velocity fields (u, v); the normalized
            (x, y, t) grid is concatenated internally, giving the 5
            channels consumed by fc0.
    Output: (batch, x+padding, y+padding, t+padding, 3) — channels
            (u, v, p). The zero-padded region is intentionally NOT
            cropped here; callers slice out[:, :S, :S, :T] themselves.
    """

    def __init__(self, modes1, modes2, modes3, width, padding):
        super(FNO3d, self).__init__()

        # Number of retained Fourier modes per dimension.
        self.modes1 = modes1
        self.modes2 = modes2
        self.modes3 = modes3
        self.width = width
        self.padding = padding  # pad the domain if input is non-periodic

        # Lifting: 5 input channels = 2 velocity components + 3 coordinates.
        self.fc0 = nn.Linear(5, 32)
        self.fc1 = nn.Linear(32, self.width)

        self.conv0 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
        self.conv1 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
        self.conv2 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
        self.conv3 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3)
        self.w0 = nn.Conv3d(self.width, self.width, 1)
        self.w1 = nn.Conv3d(self.width, self.width, 1)
        self.w2 = nn.Conv3d(self.width, self.width, 1)
        self.w3 = nn.Conv3d(self.width, self.width, 1)

        # Projection back to the 3 output channels (u, v, p).
        self.fc2 = nn.Linear(self.width, 128)
        self.fc3 = nn.Linear(128, 3)

    def forward(self, x):
        # Append the normalized (x, y, t) coordinates as extra channels.
        grid = self.get_grid(x.shape, x.device)
        x = torch.cat((x, grid), dim=-1)
        x = self.fc0(x)
        x = torch.tanh(x)  # torch.tanh: F.tanh is deprecated
        x = self.fc1(x)
        x = x.permute(0, 4, 1, 2, 3)  # channels-first for Conv3d / FFT
        # Zero-pad the far side of each axis (non-periodic domain).
        x = F.pad(x, [0, self.padding, 0, self.padding, 0, self.padding])

        # Four Fourier layers; tanh after the first three, none after the last.
        x = torch.tanh(self.conv0(x) + self.w0(x))
        x = torch.tanh(self.conv1(x) + self.w1(x))
        x = torch.tanh(self.conv2(x) + self.w2(x))
        x = self.conv3(x) + self.w3(x)

        # Padding is deliberately kept; cropping is done by the caller.
        x = x.permute(0, 2, 3, 4, 1)  # back to channels-last
        x = self.fc2(x)
        x = torch.tanh(x)
        x = self.fc3(x)
        return x

    def get_grid(self, shape, device):
        """Return a normalized [0, 1] coordinate grid, shape (batch, x, y, t, 3)."""
        batchsize, size_x, size_y, size_z = shape[0], shape[1], shape[2], shape[3]
        gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
        gridx = gridx.reshape(1, size_x, 1, 1, 1).repeat([batchsize, 1, size_y, size_z, 1])
        gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
        gridy = gridy.reshape(1, 1, size_y, 1, 1).repeat([batchsize, size_x, 1, size_z, 1])
        gridz = torch.tensor(np.linspace(0, 1, size_z), dtype=torch.float)
        gridz = gridz.reshape(1, 1, 1, size_z, 1).repeat([batchsize, size_x, size_y, 1, 1])
        return torch.cat((gridx, gridy, gridz), dim=-1).to(device)


################################################################
# configs
################################################################



# PATH = '../data/cavity.mat'
PATH = '../data/lid-cavity.mat'
ntest = 1

# Retained Fourier modes per dimension and channel width of the FNO.
modes = 8
width = 32

# Single-instance optimization: one trajectory, batch of 1.
batch_size = 1

# Output locations (model checkpoints, error logs, images) for this run.
path = 'cavity'
path_model = 'model/' + path
path_train_err = 'results/' + path + 'train.txt'
path_test_err = 'results/' + path + 'test.txt'
path_image = 'image/' + path



# Spatial / temporal subsampling of the raw data
# (assumes 256x256 grid at 0.005 s per frame — confirm against the .mat file).
sub_s = 4
sub_t = 20
S = 256 // sub_s
T_in = 1000 # 1000*0.005 = 5s
T = 50 # 1000 + 50*20*0.005 = 10s
padding = 14

################################################################
# load data
################################################################

# 15s, 3000 frames
reader = MatReader(PATH)
# Slice T frames starting at T_in (stride sub_t in time, sub_s in space),
# then move time to the last axis: (x, y, t).
data_u = reader.read_field('u')[T_in:T_in+T*sub_t:sub_t, ::sub_s, ::sub_s].permute(1,2,0)
data_v = reader.read_field('v')[T_in:T_in+T*sub_t:sub_t, ::sub_s, ::sub_s].permute(1,2,0)

# Target: (batch, x, y, t, 2) with channels (u, v). Input repeats the t=0
# frame across all T steps, so the model maps IC -> full trajectory.
data_output = torch.stack([data_u, data_v],dim=-1).reshape(batch_size,S,S,T,2)
data_input = data_output[:,:,:,:1,:].repeat(1,1,1,T,1).reshape(batch_size,S,S,T,2)

print(data_output.shape)


device = torch.device('cuda')

def PINO_loss_Fourier_f(out, Re=500):
    """Incompressible Navier-Stokes residuals via spectral derivatives.

    Args:
        out: network output of shape (batch, nx, ny, nt, 3), channels
             (u, v, p), where nx/ny/nt include the model's zero-padding.
             Derivatives are taken on the full padded tensor and then
             cropped back to the physical S x S x T window.
        Re:  Reynolds number used in the viscous term.

    Returns:
        (E1, E2, E3): scalar MSEs (against zero) of the x-momentum,
        y-momentum and divergence-free residuals.

    Relies on module-level globals S, T, padding and sub_t.
    """
    pi = np.pi
    # Physical lengths of the padded domain, so the spectral wavenumbers
    # match the padded grid.
    # NOTE(review): Lx/Ly use (S + padding - 1) while Lt uses (T + padding)
    # — confirm the off-by-one asymmetry is intentional.
    Lx = 1*(S + padding-1)/S
    Ly = 1*(S + padding-1)/S
    Lt = (0.005*sub_t*T) *(T + padding)/T

    nx = out.size(1)
    ny = out.size(2)
    nt = out.size(3)
    device = out.device

    # Integer wavenumbers along x, y and t in standard FFT ordering
    # (0 .. n/2-1, then -n/2 .. -1), broadcast to (1, nx, ny, nt, 1).
    k_x = torch.cat((torch.arange(start=0, end=nx//2, step=1, device=device),
                     torch.arange(start=-nx//2, end=0, step=1, device=device)), 0).reshape(nx, 1, 1).repeat(1, ny, nt).reshape(1,nx,ny,nt,1)
    k_y = torch.cat((torch.arange(start=0, end=ny//2, step=1, device=device),
                     torch.arange(start=-ny//2, end=0, step=1, device=device)), 0).reshape(1, ny, 1).repeat(nx, 1, nt).reshape(1,nx,ny,nt,1)
    k_t = torch.cat((torch.arange(start=0, end=nt//2, step=1, device=device),
                     torch.arange(start=-nt//2, end=0, step=1, device=device)), 0).reshape(1, 1, nt).repeat(nx, ny, 1).reshape(1,nx,ny,nt,1)

    # Spectral differentiation: multiply by i * k * (2*pi / L) per axis.
    out_h = torch.fft.fftn(out, dim=[1, 2, 3])
    outx_h = 1j * k_x * out_h * (2 * pi / Lx)
    outy_h = 1j * k_y * out_h * (2 * pi / Ly)
    outt_h = 1j * k_t * out_h * (2 * pi / Lt)
    outxx_h = 1j * k_x * outx_h * (2 * pi / Lx)
    outyy_h = 1j * k_y * outy_h * (2 * pi / Ly)

    # Back to physical space. irfftn consumes only the non-negative half of
    # the last transformed axis, so the t-spectrum is truncated to nt//2+1;
    # the result is then cropped to the unpadded S x S x T region.
    outx = torch.fft.irfftn(outx_h[:, :, :, :nt//2+1, :], dim=[1,2,3])[:,:S,:S,:T]
    outy = torch.fft.irfftn(outy_h[:, :, :, :nt//2+1, :], dim=[1,2,3])[:,:S,:S,:T]
    outt = torch.fft.irfftn(outt_h[:, :, :, :nt//2+1, :], dim=[1,2,3])[:,:S,:S,:T]
    outxx = torch.fft.irfftn(outxx_h[:, :, :, :nt//2+1, :], dim=[1,2,3])[:,:S,:S,:T]
    outyy = torch.fft.irfftn(outyy_h[:, :, :, :nt//2+1, :], dim=[1,2,3])[:,:S,:S,:T]
    out = out[:,:S,:S,:T]


    # Momentum residuals (channel 2 is pressure) and continuity residual.
    E1 = outt[..., 0] + out[..., 0]*outx[..., 0] + out[..., 1]*outy[..., 0] + outx[..., 2] - 1/Re*(outxx[..., 0] + outyy[..., 0])
    E2 = outt[..., 1] + out[..., 0]*outx[..., 1] + out[..., 1]*outy[..., 1] + outy[..., 2] - 1/Re*(outxx[..., 1] + outyy[..., 1])
    E3 = outx[..., 0] + outy[..., 1]

    target = torch.zeros(E1.shape, device=E1.device)
    E1 = F.mse_loss(E1,target)
    E2 = F.mse_loss(E2,target)
    E3 = F.mse_loss(E3,target)

    return E1, E2, E3

def PINO_loss_FDM_f(out, Re=500):
    """Navier-Stokes residuals via second-order central finite differences.

    Args:
        out: network output (batch, >=S, >=S, >=T, 3), channels (u, v, p);
             cropped to the physical S x S x T window before differencing.
        Re:  Reynolds number for the viscous term.

    Returns:
        (E1, E2, E3): scalar MSEs (against zero) of the x-momentum,
        y-momentum and divergence residuals.

    Relies on module-level globals S, T and sub_t.
    """
    # NOTE(review): spacing uses S+2 (the ghost-padded size below) —
    # confirm this matches the intended physical grid spacing.
    dx = 1 / (S+2)
    dy = 1 / (S+2)
    dt = 0.005*sub_t

    out = out[:,:S,:S,:T,:]
    # Pad one ghost cell on each spatial side and one before t, then impose
    # u = 1 on the y = -1 boundary (presumably the moving lid — verify).
    out = F.pad(out, [0,0, 1,0, 1,1, 1,1])
    out[:, :, -1, :, 0] = 1

    # Second-order central differences in x, y, t and the 5-point Laplacian
    # (dx == dy here, so dividing by dx*dy equals dx**2).
    outx = (out[:,2:,1:-1,1:-1] - out[:,:-2,1:-1,1:-1]) / (2*dx)
    outy = (out[:,1:-1,2:,1:-1] - out[:,1:-1,:-2,1:-1]) / (2*dy)
    outt = (out[:,1:-1,1:-1,2:] - out[:,1:-1,1:-1,:-2]) / (2*dt)
    outlap = (out[:,2:,1:-1,1:-1] + out[:,:-2,1:-1,1:-1] + out[:,1:-1,2:,1:-1] + out[:,1:-1,:-2,1:-1] - 4*out[:,1:-1,1:-1,1:-1]) / (dx*dy)

    # Crop the ghost cells so all tensors align with the derivative stencils.
    out = out[:,1:-1,1:-1,1:-1]

    # Momentum residuals (channel 2 is pressure) and continuity residual.
    E1 = outt[..., 0] + out[..., 0]*outx[..., 0] + out[..., 1]*outy[..., 0] + outx[..., 2] - 1/Re*(outlap[..., 0])
    E2 = outt[..., 1] + out[..., 0]*outx[..., 1] + out[..., 1]*outy[..., 1] + outy[..., 2] - 1/Re*(outlap[..., 1])
    E3 = outx[..., 0] + outy[..., 1]

    target = torch.zeros(E1.shape, device=E1.device)
    E1 = F.mse_loss(E1,target)
    E2 = F.mse_loss(E2,target)
    E3 = F.mse_loss(E3,target)

    return E1, E2, E3



def PINO_loss_ic(out, y):
    """Initial-condition loss.

    Args:
        out: predicted field at the first time step (per the call site,
             shape (batch, S, S, 2) — u and v channels).
        y:   reference field at the first time step, same shape.

    Returns:
        Scalar MSE tensor between prediction and reference.
    """
    # The previously constructed LpLoss instance was never used; MSE is
    # the actual criterion.
    return F.mse_loss(out, y)

def PINO_loss_bc(out, y):
    """Boundary-condition loss.

    Averages the MSEs between prediction and reference over the four
    spatial boundaries of the cavity (x = 0, x = -1, y = -1, y = 0).

    Args:
        out: predicted field, shape (batch, S, S, T, 2) per the call site.
        y:   reference field, same shape.

    Returns:
        Scalar tensor: mean of the four boundary MSEs.
    """
    # The previously constructed LpLoss instance was never used; MSE is
    # the actual criterion.
    bc_x0 = F.mse_loss(out[:, 0, :], y[:, 0, :])
    bc_x1 = F.mse_loss(out[:, -1, :], y[:, -1, :])
    bc_y1 = F.mse_loss(out[:, :, -1], y[:, :, -1])
    bc_y0 = F.mse_loss(out[:, :, 0], y[:, :, 0])
    return (bc_x0 + bc_x1 + bc_y1 + bc_y0) / 4

################################################################
# training and evaluation
################################################################



# Build the operator model on the GPU (fixes the duplicated
# `model = model = ...` assignment).
model = FNO3d(modes, modes, modes, width, padding).cuda()
print(count_params(model))

optimizer = Adam(model.parameters(), lr=0.0025, weight_decay=0)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=500, gamma=0.5)

myloss = LpLoss(size_average=False)
model.train()
# Single-trajectory optimization: one (input, target) pair on the GPU.
x = data_input.cuda().reshape(batch_size, S, S, T, 2)
y = data_output.cuda().reshape(batch_size, S, S, T, 2)

for ep in range(5000):
    t1 = default_timer()
    optimizer.zero_grad()

    out = model(x)

    # Relative L2 data misfit — monitored only, not part of the loss.
    loss_l2 = myloss(out[:, :S, :S, :T, :2], y)
    # Initial condition, boundary conditions and PDE residuals.
    IC = PINO_loss_ic(out[:, :S, :S, 0, :2], y[:, :, :, 0])
    BC = PINO_loss_bc(out[:, :S, :S, :T, :2], y)
    E1, E2, E3 = PINO_loss_Fourier_f(out)
    # E1, E2, E3 = PINO_loss_FDM_f(out)   # finite-difference alternative
    loss_pino = IC*1 + BC*1 + E1*1 + E2*1 + E3*1

    loss_pino.backward()

    optimizer.step()
    scheduler.step()
    t2 = default_timer()
    print(ep, t2-t1, IC.item(), BC.item(), E1.item(), E2.item(), E3.item(), loss_l2.item())

    # Visual sanity check every 1000 epochs (at ep = 500, 1500, ...):
    # final-frame u and v, reference (top row) vs prediction (bottom row).
    if ep % 1000 == 500:
        y_plot = y[0, :, :, :].cpu().numpy()
        out_plot = out[0, :S, :S, :T].detach().cpu().numpy()

        fig, ax = plt.subplots(2, 2)
        ax[0, 0].imshow(y_plot[..., -1, 0])
        ax[0, 1].imshow(y_plot[..., -1, 1])
        ax[1, 0].imshow(out_plot[..., -1, 0])
        ax[1, 1].imshow(out_plot[..., -1, 1])
        plt.show()

================================================
FILE: configs/baseline/NS-50s-LAAF.yaml
================================================
data:
  datapath: 'data/ns_V1e-3_N5000_T50.mat'
  vis: 0.001
  total_num: 5000
  offset: 4900
  n_sample: 1
  time_scale: 49
  nx: 64
  nt: 50
  sub: 1      # not used here
  sub_t: 1    # not used here
  shuffle: True

model:
  layers: [3, 50, 50, 50, 50, 50, 50, 3]
  activation: LAAF-10 tanh

train:
  batchsize: 1
  epochs: 5000
  milestones: [1000, 1500, 2000]
  base_lr: 0.001
  num_domain: 10000
  num_boundary: 18000
  num_test: 100
  log_step: 100

log:
  logfile: 'log/pinns-50s-laaf.csv'

================================================
FILE: configs/baseline/NS-50s.yaml
================================================
data:
  datapath: 'data/ns_V1e-3_N5000_T50.mat'
  vis: 0.001
  total_num: 5000
  offset: 4900
  n_sample: 1
  time_scale: 49
  nx: 64
  nt: 50
  sub: 1      # not used here
  sub_t: 1    # not used here
  shuffle: True

model:
  layers: [3, 50, 50, 50, 50, 50, 50, 3]

train:
  epochs: 15000
  base_lr: 0.001
  save_dir: 'Re500-FDM'
  save_name: 'PINO-scratch-05s.pt'

log:
  logfile: 'log/pinns-50s-best.csv'

================================================
FILE: configs/baseline/Re500-05s-deeponet.yaml
================================================
data:
  datapath: '/mnt/md1/zongyi/NS_fft_Re500_T4000.npy'
  Re: 500
  total_num: 4000
  offset: 0
  n_sample: 4000
  time_interval: 0.5
  nx: 64
  nt: 64
  sub: 1
  sub_t: 1
  shuffle: False
  data_val: 'data/NS_Re500_s256_T100_test.npy'
  val_nx: 256
  val_nt: 128
  val_sub: 4
  val_subt: 2

model:
  layers: [40, 40]
  activation: 'relu'

train:
  batchsize: 1
  epochs: 100
  milestones: [25000, 50000, 75000]
  base_lr: 0.001

  ckpt: 'checkpoints/Re500-FDM/pretrain-Re500-05s-4000.pt'

log:
  project: 'PINO-None'
  group: 'eval'




================================================
FILE: configs/baseline/Re500-pinns-05s-LAAF.yaml
================================================
data:
  datapath: 'data/NS_Re500_s256_T100_test.npy'
  Re: 500
  total_num: 100
  offset: 0
  n_sample: 1
  time_interval: 0.5
  nx: 256
  nt: 128
  sub: 2
  sub_t: 1
  shuffle: True

model:
  layers: [3, 50, 50, 50, 50, 50, 50, 3]
  activation: LAAF-10 tanh

train:
  batchsize: 1
  epochs: 3000
  milestones: [1000, 1500, 2000]
  base_lr: 0.01
  num_domain: 5000
  num_boundary: 10000
  num_test: 100
  log_step: 100

log:
  project: 'PINO-Re500-ICLR'
  group: 'Re500-PINNs'
  logfile: 'log/pinns-plot-laaf128.csv'





================================================
FILE: configs/baseline/Re500-pinns-05s-SA.yaml
================================================
data:
  datapath: 'data/NS_Re500_s256_T100_test.npy'
  Re: 500
  total_num: 100
  offset: 0
  n_sample: 1
  time_interval: 0.5
  nx: 256
  nt: 128
  sub: 2
  sub_t: 1
  shuffle: True

model:
  layers: [3, 100, 100, 100, 100, 100, 100, 3]
  activation: tanh

train:
  batchsize: 1
  epochs: 5000
  milestones: [1000, 1500, 2000]
  base_lr: 0.005
  num_domain: 10000
  num_boundary: 10000
  num_init: 5000
  num_test: 100
  log_step: 100

log:
  project: 'PINO-Re500-ICLR'
  group: 'SA-PINNs'
  logfile: 'log/sa-pinns128-plot.csv'





================================================
FILE: configs/baseline/Re500-pinns-05s.yaml
================================================
data:
  datapath: 'data/NS_Re500_s256_T100_test.npy'
  Re: 500
  total_num: 100
  offset: 0
  n_sample: 1
  time_interval: 0.5
  nx: 256
  nt: 128
  sub: 2
  sub_t: 1
  shuffle: True

model:
  layers: [3, 50, 50, 50, 50, 50, 50, 3]
  activation: tanh

train:
  batchsize: 1
  epochs: 3000
  milestones: [1000, 1500, 2000]
  base_lr: 0.01
  num_domain: 5000
  num_boundary: 10000
  num_test: 100
  log_step: 100

log:
  project: 'PINO-Re500-ICLR'
  group: 'Re500-PINNs'
  logfile: 'log/pinns128-plot.csv'





================================================
FILE: configs/baseline/Re500-pinns.yaml
================================================
data:
  datapath: 'data/NS_Re500_s256_T100_test.npy'
  Re: 500
  total_num: 100
  offset: 0
  n_sample: 1
  time_interval: 0.5
  nx: 256
  nt: 128
  sub: 4
  sub_t: 1
  shuffle: True

train:
  batchsize: 1
  epochs: 5000
  base_lr: 0.001
  num_domain: 5000
  num_boundary: 10000
  num_test: 100
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0
  log_step: 100

log:
  project: 'PINO-Re500-ICLR'
  group: 'Re500-PINNs-long'




================================================
FILE: configs/finetune/Darcy-finetune.yaml
================================================
data:
  name: 'Darcy'
  datapath: '/mnt/md1/zongyi/piececonst_r421_N1024_smooth2.mat'
  total_num: 1024
  offset: 500
  n_sample: 1
  nx: 421
  sub: 7

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [20, 20, 20, 20]
  modes2: [20, 20, 20, 20]
  fc_dim: 128
  act: gelu

train:
  batchsize: 1
  epochs: 500
  milestones: [100, 200, 300, 400]
  base_lr: 0.0025
  scheduler_gamma: 0.5
  f_loss: 1.0
  xy_loss: 0.0
  save_dir: 'darcy-FDM'
  save_name: 'darcy-finetune-pino.pt'
  ckpt: 'checkpoints/darcy-FDM/darcy-pretrain-pino.pt'

log:
  project: 'ICLR-Darcy-finetune'
  group: 'gelu-pino-pino'




================================================
FILE: configs/finetune/Re100-finetune-1s.yaml
================================================
data:
  datapath: 'data/NS_fine_Re100_T128_part0.npy'
  Re: 100
  total_num: 100
  offset: 190
  n_sample: 1
  time_interval: 1.0
  nx: 128
  nt: 128
  sub: 1
  sub_t: 2
  shuffle: True

model:
  layers: [32, 32, 32, 32, 32]
  modes1: [16, 16, 16, 16]
  modes2: [16, 16, 16, 16]
  modes3: [16, 16, 16, 16]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 7500
  milestones: [500, 1500, 3000, 4000, 5000]
  base_lr: 0.0025
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0.0
  save_dir: 'Re100-FDM'
  save_name: 'PINO-finetune-Re100-1s.pt'
  ckpt: 'checkpoints/Re100-FDM/PINO-pretrain-Re100-1s.pt'

log:
  project: 'PINO-finetune'
  group: 'Re100-finetune-1s'




================================================
FILE: configs/finetune/Re200-finetune-1s.yaml
================================================
data:
  datapath: 'data/NS_fine_Re200_T128_part0.npy'
  Re: 200
  total_num: 100
  offset: 194
  n_sample: 1
  time_interval: 1.0
  nx: 128
  nt: 128
  sub: 1
  sub_t: 2
  shuffle: True

model:
  layers: [32, 32, 32, 32, 32]
  modes1: [16, 16, 16, 16]
  modes2: [16, 16, 16, 16]
  modes3: [16, 16, 16, 16]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 10000
  milestones: [500, 1500, 3000, 4000, 6000]
  base_lr: 0.0025
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0.0
  save_dir: 'Re200-FDM'
  save_name: 'PINO-finetune-Re200-1s.pt'
  ckpt: 'checkpoints/Re200-FDM/PINO-pretrain-Re200-1s.pt'

log:
  project: 'PINO-finetune'
  group: 'Re200-finetune-1s'




================================================
FILE: configs/finetune/Re250-finetune-1s.yaml
================================================
data:
  datapath: 'data/NS_fine_Re250_T128_part0.npy'
  Re: 250
  total_num: 100
  offset: 198
  n_sample: 1
  time_interval: 1.0
  nx: 128
  nt: 128
  sub: 1
  sub_t: 2
  shuffle: True

model:
  layers: [32, 32, 32, 32, 32]
  modes1: [16, 16, 16, 16]
  modes2: [16, 16, 16, 16]
  modes3: [16, 16, 16, 16]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 10000
  milestones: [500, 1500, 3000, 4000, 6000]
  base_lr: 0.0025
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0.0
  save_dir: 'Re250-FDM'
  save_name: 'PINO-finetune-Re250-1s.pt'
  ckpt: 'checkpoints/Re250-FDM/PINO-pretrain-Re250-1s.pt'

log:
  project: 'PINO-finetune'
  group: 'Re250-finetune-1s'




================================================
FILE: configs/finetune/Re300-finetune-1s.yaml
================================================
data:
  datapath: 'data/NS_fine_Re300_T128_part0.npy'
  Re: 300
  total_num: 100
  offset: 190
  n_sample: 1
  time_interval: 1.0
  nx: 128
  nt: 128
  sub: 1
  sub_t: 2
  shuffle: True

model:
  layers: [32, 32, 32, 32, 32]
  modes1: [16, 16, 16, 16]
  modes2: [16, 16, 16, 16]
  modes3: [16, 16, 16, 16]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 10000
  milestones: [500, 1500, 3000, 4000, 6000]
  base_lr: 0.0025
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0.0
  save_dir: 'Re300-FDM'
  save_name: 'PINO-finetune-Re300-1s.pt'
  ckpt: 'checkpoints/Re300-FDM/PINO-pretrain-Re300-1s.pt'

log:
  project: 'PINO-finetune'
  group: 'Re300-finetune-1s'




================================================
FILE: configs/finetune/Re350-finetune-1s.yaml
================================================
data:
  datapath: 'data/NS_fine_Re350_T128_part0.npy'
  Re: 350
  total_num: 100
  offset: 198
  n_sample: 1
  time_interval: 1.0
  nx: 128
  nt: 128
  sub: 1
  sub_t: 2
  shuffle: True

model:
  layers: [32, 32, 32, 32, 32]
  modes1: [16, 16, 16, 16]
  modes2: [16, 16, 16, 16]
  modes3: [16, 16, 16, 16]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 10000
  milestones: [500, 1500, 3000, 4000, 6000]
  base_lr: 0.0025
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0.0
  save_dir: 'Re350-FDM'
  save_name: 'PINO-finetune-Re350-1s.pt'
  ckpt: 'checkpoints/Re350-FDM/PINO-pretrain-Re350-1s.pt'

log:
  project: 'PINO-finetune'
  group: 'Re350-finetune-1s'




================================================
FILE: configs/finetune/Re400-finetune-1s.yaml
================================================
data:
  datapath: 'data/NS_fine_Re400_T128_part0.npy'
  Re: 400
  total_num: 100
  offset: 199
  n_sample: 1
  time_interval: 1.0
  nx: 128
  nt: 128
  sub: 1
  sub_t: 2
  shuffle: True

model:
  layers: [32, 32, 32, 32, 32]
  modes1: [16, 16, 16, 16]
  modes2: [16, 16, 16, 16]
  modes3: [16, 16, 16, 16]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 10000
  milestones: [500, 1500, 3000, 4000, 6000]
  base_lr: 0.0025
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0.0
  save_dir: 'Re400-FDM'
  save_name: 'PINO-finetune-Re400-1s.pt'
  ckpt: 'checkpoints/Re400-FDM/PINO-pretrain-Re400-1s.pt'

log:
  project: 'PINO-finetune'
  group: 'Re400-finetune-1s'




================================================
FILE: configs/finetune/Re500-finetune-05s-2layer.yaml
================================================
data:
  datapath: 'data/NS_Re500_s256_T100_test.npy'
  Re: 500
  total_num: 100
  offset: 0
  n_sample: 1
  time_interval: 0.5
  nx: 256
  nt: 128
  sub: 2
  sub_t: 1
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 6000
  milestones: [1000, 3000, 5000]
  base_lr: 0.0025
  beta1: 0.9
  beta2: 0.999
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0
  save_dir: 'Re500'
  save_name: 'PINO-fintune-05s.pt'
  ckpt: 'checkpoints/Re500-FDM/PINO-pretrain-Re500-05s-4k1k.pt'
  twolayer: True

log:
  project: 'PINO-Re500-ICLR'
  group: 'Re500-finetune-128-4k1-2layer'




================================================
FILE: configs/finetune/Re500-finetune-05s-eqn.yaml
================================================
data:
  datapath: 'data/NS_Re500_s256_T100_test.npy'
  Re: 500
  total_num: 100
  offset: 0
  n_sample: 1
  time_interval: 0.5
  nx: 256
  nt: 128
  sub: 2
  sub_t: 1
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 2500
  milestones: [1000, 1500, 2000]
  base_lr: 0.0025
  beta1: 0.9
  beta2: 0.999
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0
  save_dir: 'Re500'
  save_name: 'PINO-fintune-05s.pt'
  ckpt: 'checkpoints/Re500-FDM/PINO-pretrain-Re500-05s-eqn.pt'

log:
  project: 'PINO-Re500-ICLR'
  group: 'Re500-finetune-128-eqn'




================================================
FILE: configs/finetune/Re500-finetune-05s4C0.yaml
================================================
data:
  datapath: 'data/NS_Re500_s256_T100_test.npy'
  Re: 500
  total_num: 100
  offset: 0
  n_sample: 1
  time_interval: 0.5
  nx: 256
  nt: 128
  sub: 2
  sub_t: 1
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 2500
  milestones: [1000, 1500, 2000]
  base_lr: 0.0025
  beta1: 0.9
  beta2: 0.999
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0
  save_dir: 'Re500'
  save_name: 'PINO-fintune-05s.pt'
  ckpt: 'checkpoints/Re500-FDM/PINO-pretrain-Re500-05s-4C0.pt'

log:
  project: 'PINO-Re500-ICLR'
  group: 'Re500-finetune-128-4C0'




================================================
FILE: configs/finetune/Re500-finetune-05s4C1.yaml
================================================
data:
  datapath: 'data/NS_Re500_s256_T100_test.npy'
  Re: 500
  total_num: 100
  offset: 0
  n_sample: 1
  time_interval: 0.5
  nx: 256
  nt: 128
  sub: 4
  sub_t: 1
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 2500
  milestones: [1000, 1500, 2000]
  base_lr: 0.0025
  beta1: 0.9
  beta2: 0.999
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0
  save_dir: 'Re500'
  save_name: 'PINO-fintune-05s.pt'
  ckpt: 'checkpoints/Re500-FDM/PINO-pretrain-Re500-05s-4C1.pt'
  profile: True

log:
  project: 'PINO-Re500-ICLR'
  group: 'Re500-finetune-128-4C1-profile-long'




================================================
FILE: configs/finetune/Re500-finetune-05s4C4.yaml
================================================
data:
  datapath: 'data/NS_Re500_s256_T100_test.npy'
  Re: 500
  total_num: 100
  offset: 0
  n_sample: 1
  time_interval: 0.5
  nx: 256
  nt: 128
  sub: 2
  sub_t: 1
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 2500
  milestones: [1000, 1500, 2000]
  base_lr: 0.0025
  beta1: 0.9
  beta2: 0.999
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0
  save_dir: 'Re500'
  save_name: 'PINO-fintune-05s.pt'
  ckpt: 'checkpoints/Re500-FDM/PINO-pretrain-Re500-05s-4C4.pt'

log:
  project: 'PINO-Re500-ICLR'
  group: 'Re500-finetune-128-4C4'




================================================
FILE: configs/finetune/Re500-finetune-05s4k-2layer.yaml
================================================
data:
  datapath: 'data/NS_Re500_s256_T100_test.npy'
  Re: 500
  total_num: 100
  offset: 0
  n_sample: 1
  time_interval: 0.5
  nx: 256
  nt: 128
  sub: 2
  sub_t: 1
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 6000
  milestones: [1000, 3000, 5000]
  base_lr: 0.0025
  beta1: 0.9
  beta2: 0.999
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0
  save_dir: 'Re500'
  save_name: 'PINO-fintune-05s.pt'
  ckpt: 'checkpoints/Re500-FDM/pretrain-Re500-05s-4000.pt'
  twolayer: True

log:
  project: 'PINO-Re500-ICLR'
  group: 'Re500-finetune-128-4k-2layer'




================================================
FILE: configs/finetune/Re500-finetune-05s4k1k.yaml
================================================
data:
  datapath: 'data/NS_Re500_s256_T100_test.npy'
  Re: 500
  total_num: 100
  offset: 0
  n_sample: 1
  time_interval: 0.5
  nx: 256
  nt: 128
  sub: 2
  sub_t: 1
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 2500
  milestones: [1000, 1500, 2000]
  base_lr: 0.0025
  beta1: 0.9
  beta2: 0.999
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0
  save_dir: 'Re500'
  save_name: 'PINO-fintune-05s.pt'
  ckpt: 'checkpoints/Re500-FDM/PINO-pretrain-Re500-05s-4k1k.pt'
  profile: True

log:
  project: 'PINO-Re500-ICLR-rebuttal'
  group: 'Re500-finetune-128-4k1'




================================================
FILE: configs/finetune/Re500-finetune-05s4k4-2layer.yaml
================================================
data:
  datapath: 'data/NS_Re500_s256_T100_test.npy'
  Re: 500
  total_num: 100
  offset: 0
  n_sample: 1
  time_interval: 0.5
  nx: 256
  nt: 128
  sub: 2
  sub_t: 1
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 6000
  milestones: [1000, 3000, 5000]
  base_lr: 0.0025
  beta1: 0.9
  beta2: 0.999
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0
  save_dir: 'Re500'
  save_name: 'PINO-fintune-05s.pt'
  ckpt: 'checkpoints/Re500-FDM/PINO-pretrain-Re500-05s-4k.pt'
  twolayer: True

log:
  project: 'PINO-Re500-ICLR'
  group: 'Re500-finetune-128-4k4-2layer'




================================================
FILE: configs/finetune/Re500-finetune-05s4k4k.yaml
================================================
data:
  datapath: 'data/NS_Re500_s256_T100_test.npy'
  Re: 500
  total_num: 100
  offset: 0
  n_sample: 1
  time_interval: 0.5
  nx: 256
  nt: 128
  sub: 4
  sub_t: 1
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 2500
  milestones: [1000, 1500, 2000]
  base_lr: 0.0025
  beta1: 0.9
  beta2: 0.999
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0
  save_dir: 'Re500'
  save_name: 'PINO-fintune-05s.pt'
  ckpt: 'checkpoints/Re500-FDM/PINO-pretrain-Re500-05s-4k4.pt'
  profile: True

log:
  project: 'PINO-Re500-ICLR'
  group: 'Re500-finetune-128-4k4-profile'




================================================
FILE: configs/finetune/Re500-finetune-1s.yaml
================================================
data:
  datapath: 'data/NS_fine_Re500_T128_part2.npy'
  Re: 500
  total_num: 100
  offset: 0
  n_sample: 1
  time_interval: 1
  nx: 256
  nt: 128
  sub: 2
  sub_t: 2
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 10000
  milestones: [500, 1500, 3000, 4000, 5000]
  base_lr: 0.0025
  scheduler_gamma: 0.5
  ckpt: 'checkpoints/Re500-FDM/PINO-pretrain-Re500-1s.pt'
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 0
  save_dir: 'Re500-FDM'
  save_name: 'PINO-Re500-fintune-1s.pt'

log:
  project: 'PINO-sweep'
  group: 'Re500-finetune'






================================================
FILE: configs/instance/Re500-1_8-FNO.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 100
  data_res: [256, 256, 513]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 275
  n_test_samples: 10
  total_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [12, 12, 12, 12]
  modes2: [12, 12, 12, 12]
  modes3: [12, 12, 12, 12]
  fc_dim: 128
  act: gelu
  pad_ratio: [0, 0.125]

train:
  batchsize: 1
  epochs: 201
  num_iter: 1_001
  milestones: [400, 800]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 10.0
  f_loss: 1.0
  save_step: 500

test:
  batchsize: 1
  data_res: [256, 256, 513]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-800-FNO
  entity: hzzheng-pino
  project: PINO-NS-test-time-opt
  group: Re500-1_8s-800-FNO


================================================
FILE: configs/instance/Re500-1_8-PINO-s.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 100
  data_res: [256, 256, 513]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 250
  n_test_samples: 1
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  pad_ratio: 0.125

train:
  batchsize: 1
  epochs: 201
  num_iter: 1_001
  milestones: [400, 800]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 10.0
  f_loss: 1.0
  save_step: 500

test:
  batchsize: 1
  data_res: [256, 256, 513]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-800-PINO-tto
  entity: hzzheng-pino
  project: PINO-NS-test-time-opt
  group: Re500-1_8s-800-PINO-s


================================================
FILE: configs/instance/Re500-1_8-PINO.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 100
  data_res: [256, 256, 513]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 275
  n_test_samples: 10
  total_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [12, 12, 12, 12]
  modes2: [12, 12, 12, 12]
  modes3: [12, 12, 12, 12]
  fc_dim: 128
  act: gelu
  pad_ratio: [0.0, 0.125]

train:
  batchsize: 1
  epochs: 201
  num_iter: 1_001
  milestones: [400, 800]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 10.0
  f_loss: 1.0
  save_step: 500

test:
  batchsize: 1
  data_res: [256, 256, 513]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-800-PINO-tto
  entity: hzzheng-pino
  project: PINO-NS-test-time-opt
  group: Re500-1_8s-800-PINO-s


================================================
FILE: configs/ngc/Re500-1_8-dat0-PINO.yaml
================================================
data:
  name: KF
  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 5
  data_res: [64, 64, 257]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 200
  n_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.0625

train:
  batchsize: 1
  epochs: 201
  num_iter: 150_001
  milestones: [30_000, 60_000, 90_000, 120_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 0.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-dat0-PINO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1_8s-dat0-PINO


================================================
FILE: configs/ngc/Re500-1_8-dat200-PINO.yaml
================================================
data:
  name: KF
  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 25
  data_res: [64, 64, 257]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 200
  n_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.0625

train:
  batchsize: 1
  epochs: 201
  num_iter: 150_001
  milestones: [20_000, 50_000, 80_000, 110_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-dat200-PINO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1_8s-dat200-PINO


================================================
FILE: configs/ngc/Re500-1_8-dat40-PINO.yaml
================================================
data:
  name: KF
  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 5
  data_res: [64, 64, 257]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 200
  n_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.0625

train:
  batchsize: 1
  epochs: 201
  num_iter: 150_001
  milestones: [30_000, 60_000, 90_000, 120_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-dat40-PINO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1_8s-dat40-PINO


================================================
FILE: configs/ngc/Re500-1_8-dat400-PINO.yaml
================================================
data:
  name: KF
  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 50
  data_res: [64, 64, 257]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 200
  n_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.0625

train:
  batchsize: 1
  epochs: 201
  num_iter: 150_001
  milestones: [20_000, 50_000, 80_000, 110_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-dat400-PINO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1_8s-dat400-PINO


================================================
FILE: configs/ngc/Re500-1_8-dat80-PINO.yaml
================================================
data:
  name: KF
  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 10
  data_res: [64, 64, 257]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 200
  n_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.0625

train:
  batchsize: 1
  epochs: 201
  num_iter: 150_001
  milestones: [30_000, 60_000, 90_000, 120_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-dat80-PINO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1_8s-dat80-PINO


================================================
FILE: configs/ngc/Re500-1_8-dat800-PINO.yaml
================================================
data:
  name: KF
  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 100
  data_res: [64, 64, 257]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 200
  n_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.0625

train:
  batchsize: 1
  epochs: 201
  num_iter: 150_001
  milestones: [30_000, 60_000, 90_000, 120_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-dat800-PINO
  entity: hzzheng-pino
  project: PINO-NS-ngc
  group: Re500-1_8s-dat800-PINO


================================================
FILE: configs/ngc/Re500-1_8-res16-PINO.yaml
================================================
data:
  name: KF
  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 150
  data_res: [16, 16, 129]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 200
  n_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.0625

train:
  batchsize: 1
  epochs: 201
  num_iter: 150_001
  milestones: [30_000, 60_000, 90_000, 120_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-res16-PINO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1_8s-res16-PINO


================================================
FILE: configs/ngc/Re500-1_8-res32-PINO.yaml
================================================
data:
  name: KF
  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 150
  data_res: [32, 32, 129]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 200
  n_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.0625

train:
  batchsize: 1
  epochs: 201
  num_iter: 150_001
  milestones: [30_000, 60_000, 90_000, 120_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-res32-PINO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1_8s-res32-PINO


================================================
FILE: configs/operator/Darcy-pretrain.yaml
================================================
data:
  name: 'Darcy'
  path: '/raid/hongkai/darcy-train.mat'
  total_num: 1024
  offset: 0
  n_sample: 1000
  nx: 421
  sub: 7
  pde_sub: 2

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [20, 20, 20, 20]
  modes2: [20, 20, 20, 20]
  fc_dim: 128
  act: gelu
  pad_ratio: [0., 0.]

train:
  batchsize: 20
  num_iter: 15_001
  milestones: [5_000, 7_500, 10_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 2_500
  eval_step: 2_500

test:
  path: '/raid/hongkai/darcy-test.mat'
  total_num: 1024
  offset: 0
  n_sample: 500
  nx: 421
  sub: 2
  batchsize: 1
  
  
log:
  logdir: Darcy-PINO-new
  entity: hzzheng-pino
  project: DarcyFlow
  group: PINO-1000-new




================================================
FILE: configs/operator/Re500-05s-1000-FNO.yaml
================================================
data:
  paths: ['../data/NS-Re500Part0.npy', '../data/NS-Re500Part1.npy', '../data/NS-Re500Part2.npy']
  Re: 500
  total_num: 200
  offset: 0
  n_samples: 1000
  t_duration: 0.5
  data_res: [64, 64, 33]
  pde_res: [128, 128, 65]
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4

train:
  batchsize: 2
  epochs: 501
  milestones: [300]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 100

test:
  batchsize: 1
  data_res: [128, 128, 65]
  ckpt: model-500.pt

log:
  logdir: Re500-05s-1000-FNO
  entity: hzzheng-pino
  project: 'PINO-NS'
  group: 'Re500-05s-1000-FNO'


================================================
FILE: configs/operator/Re500-05s-1000-PINO.yaml
================================================
data:
  paths: ['../data/NS-Re500Part0.npy', '../data/NS-Re500Part1.npy', '../data/NS-Re500Part2.npy']
  Re: 500
  total_num: 300
  offset: 0
  n_samples: 1000
  t_duration: 0.5
  data_res: [64, 64, 33]
  pde_res: [128, 128, 65]
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4

train:
  batchsize: 2
  epochs: 501
  milestones: [300]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 100

test:
  batchsize: 1
  data_res: [128, 128, 65]
  ckpt: model-500.pt

log:
  logdir: Re500-05s-1000-PINO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-05s-1000-PINO


================================================
FILE: configs/operator/Re500-05s-3000-FNO.yaml
================================================
data:
  name: KF
  paths: ['../data/NS-Re500_T3000_id0.npy']
  Re: 500
  total_num: 3000
  offset: 0
  n_samples: 300
  testoffset: 2500
  n_test_samples: 300
  t_duration: 0.5
  raw_res: [256, 256, 257]
  data_res: [64, 64, 33]
  pde_res: [64, 64, 33]
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4

train:
  batchsize: 4
  epochs: 401
  milestones: [200]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 50

test:
  batchsize: 1
  data_res: [128, 128, 65]
  ckpt: model-400.pt

log:
  logdir: Re500-05s-3000-FNO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-05s-3000-FNO


================================================
FILE: configs/operator/Re500-05s-600-FNO.yaml
================================================
data:
  name: KF
  paths: ['../data/NS-Re500_T3000_id0.npy']
  Re: 500
  total_num: 3000
  offset: 0
  n_samples: 300
  testoffset: 2500
  n_test_samples: 200
  t_duration: 0.5
  raw_res: [256, 256, 257]
  data_res: [64, 64, 65]  # resolution in 1 second
  pde_res: [64, 64, 65]   # resolution in 1 second
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4

train:
  batchsize: 2
  epochs: 401
  milestones: [200]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 50

test:
  batchsize: 1
  data_res: [64, 64, 65]
  ckpt: model-400.pt

log:
  logdir: Re500-05s-600-FNO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-05s-600-FNO


================================================
FILE: configs/operator/Re500-05s-600-PINO-xl.yaml
================================================
data:
  name: KF
  paths: ['../data/NS-Re500_T3000_id0.npy']
  Re: 500
  total_num: 3000
  offset: 0
  n_samples: 300
  testoffset: 2500
  n_test_samples: 200
  t_duration: 0.5
  raw_res: [256, 256, 257]
  data_res: [64, 64, 65]  # resolution in 1 second
  pde_res: [256, 256, 257]   # resolution in 1 second
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [12, 12, 12, 12]
  modes2: [12, 12, 12, 12]
  modes3: [12, 12, 12, 12]
  fc_dim: 128
  act: gelu
  num_pad: 4

train:
  batchsize: 1
  epochs: 301
  milestones: [200]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 10

test:
  batchsize: 1
  data_res: [64, 64, 65]
  ckpt: model-400.pt

log:
  logdir: Re500-05s-600-PINO-xl
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-05s-600-PINO-xl


================================================
FILE: configs/operator/Re500-05s-600-PINO.yaml
================================================
data:
  name: KF
  paths: ['../data/NS-Re500_T3000_id0.npy']
  Re: 500
  total_num: 3000
  offset: 0
  n_samples: 300
  testoffset: 2500
  n_test_samples: 200
  t_duration: 0.5
  raw_res: [256, 256, 257]
  data_res: [64, 64, 65]  # resolution in 1 second
  pde_res: [256, 256, 257]   # resolution in 1 second
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4

train:
  batchsize: 1
  epochs: 301
  milestones: [200]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 10

test:
  batchsize: 1
  data_res: [64, 64, 65]
  ckpt: model-400.pt

log:
  logdir: Re500-05s-600-PINO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-05s-600-PINO


================================================
FILE: configs/operator/Re500-05s-FNO.yaml
================================================
data:
  paths: ['../data/NS-Re500Part0.npy', '../data/NS-Re500Part1.npy']
  Re: 500
  total_num: 200
  offset: 0
  n_samples: 700
  t_duration: 0.5
  data_res: [64, 64, 33]
  pde_res: [128, 128, 65]
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4

train:
  batchsize: 2
  epochs: 501
  milestones: [300]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 100

test:
  batchsize: 1
  data_res: [128, 128, 65]
  ckpt: model-500.pt

log:
  logdir: Re500-05s-FNO
  entity: hzzheng-pino
  project: 'PINO-NS'
  group: 'Re500-05s-FNO'


================================================
FILE: configs/operator/Re500-1_16-800-FNO-s.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 50
  data_res: [64, 64, 257]  # resolution in 1 second
  pde_res: [64, 64, 257]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 50
  testoffset: 275
  n_test_samples: 25
  t_duration: 0.0625
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  pad_ratio: 0.125

train:
  batchsize: 2
  start_iter: 0
  num_iter: 50_001
  milestones: [20_000, 40_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [64, 64, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_16s-800-FNO-s
  entity: hzzheng-pino
  project: PINO-KF-Re500
  group: Re500-1_16s-800-FNO-s


================================================
FILE: configs/operator/Re500-1_16-800-PINO-s.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 50
  data_res: [64, 64, 257]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 200
  testoffset: 275
  n_test_samples: 25
  t_duration: 0.0625
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.125

train:
  batchsize: 2
  start_iter: 0
  num_iter: 200_001
  milestones: [20_000, 60_000, 120_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 5.0
  f_loss: 1.0
  xy_loss: 10.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [128, 128, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_16s-800-PINO-s
  entity: hzzheng-pino
  project: PINO-KF-Re500
  group: Re500-1_16s-800-PINO-s


================================================
FILE: configs/operator/Re500-1_4-2000-FNO.yaml
================================================
data:
  name: KF
  paths: ['../data/NS-Re500_T3000_id0.npy']
  Re: 500
  total_num: 3000
  offset: 0
  n_samples: 600
  testoffset: 2500
  n_test_samples: 400
  t_duration: 0.25
  raw_res: [256, 256, 257]
  data_res: [256, 256, 257]  # resolution in 1 second
  pde_res: [256, 256, 257]   # resolution in 1 second
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4

train:
  batchsize: 2
  epochs: 401
  milestones: [100, 300]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 50

test:
  batchsize: 1
  data_res: [256, 256, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_4s-2000-FNO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1_4s-2000-FNO


================================================
FILE: configs/operator/Re500-1_8-0-PINO-s.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 10
  data_res: [64, 64, 129]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 275
  n_test_samples: 25
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [12, 12, 12, 12]
  modes2: [12, 12, 12, 12]
  modes3: [12, 12, 12, 12]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.125

train:
  batchsize: 2
  start_iter: 35_001
  num_iter: 200_001
  milestones: [30_000, 70_000, 110_000, 150_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 10.0
  f_loss: 1.0
  xy_loss: 0.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 513]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-0-PINO-s
  entity: hzzheng-pino
  project: PINO-KF-Re500
  group: Re500-1_8s-0-PINO-s


================================================
FILE: configs/operator/Re500-1_8-1200-FNO.yaml
================================================
data:
  name: KF
  paths: ['../data/NS-Re500_T300_id0.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 150
  data_res: [64, 64, 129]  # resolution in 1 second
  pde_res: [64, 64, 129]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 250
  n_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.125

train:
  batchsize: 2
  epochs: 201
  num_iter: 50_001
  milestones: [20_000, 40_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [64, 64, 129]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-1200-FNO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1_8s-1200-FNO


================================================
FILE: configs/operator/Re500-1_8-1200-PINO.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 150
  data_res: [64, 64, 129]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 250
  n_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.125

train:
  batchsize: 2
  epochs: 201
  num_iter: 150_001
  milestones: [30_000, 60_000, 90_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 513]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-1200-PINO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1_8s-1200-PINO


================================================
FILE: configs/operator/Re500-1_8-200-FNO-s.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 25
  data_res: [128, 128, 257]  # resolution in 1 second
  pde_res: [128, 128, 257]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 250
  n_test_samples: 25
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: [0, 0.125]

train:
  batchsize: 1
  epochs: 201
  num_iter: 50_001
  milestones: [20_000, 40_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 513]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-dat200-FNO
  entity: hzzheng-pino
  project: PINO-KF-Re500
  group: Re500-1_8s-dat200-FNO


================================================
FILE: configs/operator/Re500-1_8-2000-FNO-s.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 250
  data_res: [64, 64, 129]  # resolution in 1 second
  pde_res: [64, 64, 129]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 275
  n_test_samples: 25
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  pad_ratio: 0.125

train:
  batchsize: 1
  epochs: 201
  num_iter: 60_001
  milestones: [20_000, 40_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 513]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-dat2000-FNO
  entity: hzzheng-pino
  project: PINO-KF-Re500
  group: Re500-1_8s-dat2000-FNO


================================================
FILE: configs/operator/Re500-1_8-2000-FNO-xl.yaml
================================================
data:
  name: KF
  paths: ['../data/NS-Re500_T3000_id0.npy']
  Re: 500
  total_num: 3000
  offset: 0
  n_samples: 350
  testoffset: 2500
  n_test_samples: 400
  t_duration: 0.125
  raw_res: [256, 256, 257]
  data_res: [256, 256, 257]  # resolution in 1 second
  pde_res: [256, 256, 257]   # resolution in 1 second
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [12, 12, 12, 12]
  modes2: [12, 12, 12, 12]
  modes3: [12, 12, 12, 12]
  fc_dim: 128
  act: gelu
  num_pad: 4

train:
  batchsize: 2
  epochs: 201
  milestones: [50, 100, 150]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 20

test:
  batchsize: 1
  data_res: [256, 256, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-2400-FNO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1_8s-2400-FNO


================================================
FILE: configs/operator/Re500-1_8-2000-PINO.yaml
================================================
data:
  name: KF
  paths: ['../data/NS-Re500_T300_id0.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 150
  data_res: [64, 64, 257]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 200
  n_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.0625

train:
  batchsize: 2
  epochs: 201
  num_iter: 100_001
  milestones: [10_000, 30_000, 50_000, 70_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [64, 64, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-2k-PINO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1_8s-2k-PINO


================================================
FILE: configs/operator/Re500-1_8-2200-FNO-s.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 275
  data_res: [64, 64, 129]  # resolution in 1 second
  pde_res: [64, 64, 257]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 275
  n_test_samples: 25
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [12, 12, 12, 12]
  modes2: [12, 12, 12, 12]
  modes3: [12, 12, 12, 12]
  fc_dim: 128
  act: gelu
  pad_ratio: 0.125

train:
  batchsize: 1
  start_iter: 30_001
  num_iter: 60_001
  milestones: [20_000, 40_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 513]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-dat2200-FNO
  entity: hzzheng-pino
  project: PINO-KF-Re500
  group: Re500-1_8s-dat2200-FNO


================================================
FILE: configs/operator/Re500-1_8-2200-PINO-s.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 275
  data_res: [64, 64, 257]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 275
  testoffset: 275
  n_test_samples: 25
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [12, 12, 12, 12]
  modes2: [12, 12, 12, 12]
  modes3: [12, 12, 12, 12]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.125

train:
  batchsize: 2
  start_iter: 30_001
  num_iter: 400_001
  milestones: [30_000, 90_000, 150_000, 250_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 10.0
  f_loss: 1.0
  xy_loss: 50.0
  save_step: 10000
  eval_step: 10000

test:
  batchsize: 1
  data_res: [64, 64, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-2200-PINO-s
  entity: hzzheng-pino
  project: PINO-KF-Re500
  group: Re500-1_8s-2200-PINO-s


================================================
FILE: configs/operator/Re500-1_8-800-FNO-s.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 100
  data_res: [64, 64, 257]  # resolution in 1 second
  pde_res: [64, 64, 129]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 275
  n_test_samples: 25
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [12, 12, 12, 12]
  modes2: [12, 12, 12, 12]
  modes3: [12, 12, 12, 12]
  fc_dim: 128
  act: gelu
  pad_ratio: [0, 0.125]

train:
  batchsize: 2
  start_iter: 0
  num_iter: 50_001
  milestones: [20_000, 40_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [64, 64, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-800-FNO-s
  entity: hzzheng-pino
  project: PINO-KF-Re500
  group: Re500-1_8s-800-FNO-s


================================================
FILE: configs/operator/Re500-1_8-800-FNO-s32.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 100
  data_res: [32, 32, 129]  # resolution in 1 second
  pde_res: [32, 32, 129]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 275
  n_test_samples: 25
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [12, 12, 12, 12]
  modes2: [12, 12, 12, 12]
  modes3: [12, 12, 12, 12]
  fc_dim: 128
  act: gelu
  pad_ratio: [0.0, 0.125]

train:
  batchsize: 2
  start_iter: 0
  num_iter: 50_001
  milestones: [20_000, 40_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 513]

log:
  logdir: Re500-1_8s-800-FNO-s32
  entity: hzzheng-pino
  project: PINO-KF-Re500
  group: Re500-1_8s-800-FNO-s32


================================================
FILE: configs/operator/Re500-1_8-800-PINO-s.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 100
  data_res: [64, 64, 257]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 275
  testoffset: 275
  n_test_samples: 25
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [12, 12, 12, 12]
  modes2: [12, 12, 12, 12]
  modes3: [12, 12, 12, 12]
  fc_dim: 128
  act: gelu
  pad_ratio: [0.0, 0.125]

train:
  batchsize: 2
  start_iter: 0
  num_iter: 200_001
  milestones: [20_000, 60_000, 120_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 10.0
  f_loss: 1.0
  xy_loss: 10.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 513]

log:
  logdir: Re500-1_8s-800-PINO-s
  entity: hzzheng-pino
  project: PINO-KF-Re500
  group: Re500-1_8s-800-PINO-s


================================================
FILE: configs/operator/Re500-1_8-800-PINO-s16.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 100
  data_res: [16, 16, 65]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 275
  testoffset: 275
  n_test_samples: 25
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [12, 12, 12, 12]
  modes2: [12, 12, 12, 12]
  modes3: [12, 12, 12, 12]
  fc_dim: 128
  act: gelu
  pad_ratio: [0.0, 0.125]

train:
  batchsize: 1
  start_iter: 0
  num_iter: 200_001
  milestones: [20_000, 60_000, 120_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 10.0
  f_loss: 1.0
  xy_loss: 10.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 513]

log:
  logdir: Re500-1_8s-800-PINO-s-16
  entity: hzzheng-pino
  project: PINO-KF-Re500
  group: Re500-1_8s-800-PINO-s-16


================================================
FILE: configs/operator/Re500-1_8-800-PINO-s32.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 100
  data_res: [32, 32, 129]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 275
  testoffset: 275
  n_test_samples: 25
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [12, 12, 12, 12]
  modes2: [12, 12, 12, 12]
  modes3: [12, 12, 12, 12]
  fc_dim: 128
  act: gelu
  pad_ratio: [0.0, 0.125]

train:
  batchsize: 2
  start_iter: 0
  num_iter: 200_001
  milestones: [20_000, 60_000, 120_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 10.0
  f_loss: 1.0
  xy_loss: 10.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [64, 64, 257]

log:
  logdir: Re500-1_8s-800-PINO-s-32
  entity: hzzheng-pino
  project: PINO-KF-Re500
  group: Re500-1_8s-800-PINO-s-32


================================================
FILE: configs/operator/Re500-1_8-800-UNet.yaml
================================================
data:
  name: KF
  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 100
  data_res: [64, 64, 257]  # resolution in 1 second
  pde_res: [64, 64, 129]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 275
  n_test_samples: 25
  t_duration: 0.125
  shuffle: True

model:
  f_maps: 128

train:
  batchsize: 2
  start_iter: 0
  num_iter: 50_001
  milestones: [20_000, 40_000]
  base_lr: 0.0002
  scheduler_gamma: 0.5
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [64, 64, 257]
  ckpt: model-5000.pt

log:
  logdir: Re500-1_8s-800-UNet
  entity: hzzheng-pino
  project: PINO-KF-Re500
  group: Re500-1_8s-800-UNet


================================================
FILE: configs/operator/Re500-1_8-dat1.6k-PINO.yaml
================================================
data:
  name: KF
  paths: ['../data/NS-Re500_T300_id0.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 200
  data_res: [64, 64, 257]  # resolution in 1 second
  pde_res: [256, 256, 513]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 250
  testoffset: 200
  n_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.0625

train:
  batchsize: 2
  epochs: 201
  num_iter: 200_001
  milestones: [20_000, 70_000, 120_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [64, 64, 257]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-pde2k-dat16-PINO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1_8s-pde2k-dat16-PINO


================================================
FILE: configs/operator/Re500-1_8-dat400-FNO.yaml
================================================
data:
  name: KF
  paths: ['../data/NS-Re500_T300_id0.npy']
  Re: 500
  offset: 0
  total_num: 300
  raw_res: [256, 256, 513]
  n_data_samples: 50
  data_res: [64, 64, 129]  # resolution in 1 second
  pde_res: [64, 64, 129]   # resolution in 1 second
  a_offset: 0
  n_a_samples: 50
  testoffset: 250
  n_test_samples: 50
  t_duration: 0.125
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.0625

train:
  batchsize: 2
  epochs: 201
  num_iter: 100_001
  milestones: [10_000, 30_000, 50_000, 70_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 5000
  eval_step: 5000

test:
  batchsize: 1
  data_res: [256, 256, 513]
  ckpt: model-400.pt

log:
  logdir: Re500-1_8s-dat400-FNO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1_8s-dat400-FNO


================================================
FILE: configs/operator/Re500-1s-FNO.yaml
================================================
data:
  paths: ['../data/NS-Re500Part0.npy', '../data/NS-Re500Part1.npy', '../data/NS-Re500Part2.npy']
  Re: 500
  total_num: 300
  offset: 0
  n_samples: 200
  t_duration: 1.0
  data_res: [64, 64, 65]
  pde_res: [128, 128, 129]
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4

train:
  batchsize: 2
  epochs: 501
  milestones: [200, 400]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 100

log:
  logdir: Re500-1s-200-FNO
  entity: hzzheng-pino
  project: 'PINO-NS'
  group: 'Re500-1s-200-FNO'


================================================
FILE: configs/operator/Re500-3000-FNO.yaml
================================================
data:
  name: KF
  paths: ['../data/NS-Re500_T3000_id0.npy']
  Re: 500
  total_num: 3000
  offset: 0
  n_samples: 300
  testoffset: 2500
  n_test_samples: 500
  sub_x: 4
  sub_t: 4
  t_duration: 1.0
  data_res: [64, 64, 65]
  pde_res: [256, 256, 65]
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4

train:
  batchsize: 4
  epochs: 401
  milestones: [200]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 50

log:
  logdir: Re500-1s-3000-FNO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1s-3000-FNO


================================================
FILE: configs/operator/Re500-3000-PINO.yaml
================================================
data:
  name: KF
  paths: ['../data/NS-Re500_T3000_id0.npy']
  Re: 500
  total_num: 3000
  offset: 0
  n_samples: 2400
  sub_x: 4
  sub_t: 4
  pde_subx: 1
  pde_subt: 2
  t_duration: 1.0
  data_res: [64, 64, 65]
  pde_res: [256, 256, 129]
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4

train:
  batchsize: 1
  epochs: 401
  milestones: [200]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 50

log:
  logdir: Re500-1s-3000-PINO
  entity: hzzheng-pino
  project: PINO-NS
  group: Re500-1s-3000-PINO


================================================
FILE: configs/operator/Re500-4000-FNO.yaml
================================================
data:
  paths: ['../data/NS-T4000.npy']
  Re: 500
  total_num: 4000
  offset: 0
  n_samples: 3200
  t_duration: 1.0
  data_res: [64, 64, 65]
  pde_res: [128, 128, 65]
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4

train:
  batchsize: 2
  epochs: 501
  milestones: [300]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 100

log:
  logdir: Re500-1s-FNO
  entity: hzzheng-pino
  project: 'PINO-NS'
  group: 'Re500-1s-FNO'


================================================
FILE: configs/operator/Re500-FNO.yaml
================================================
data:
  paths: ['../data/NS-Re500Part0.npy', '../data/NS-Re500Part1.npy']
  Re: 500
  total_num: 200
  offset: 0
  n_samples: 700
  t_duration: 0.5
  data_res: [64, 64, 33]
  pde_res: [128, 128, 65]
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4
  pad_ratio: 0.03125

train:
  batchsize: 2
  epochs: 501
  milestones: [300]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_step: 100

test:
  batchsize: 1
  data_res: [128, 128, 65]
  ckpt: model-500.pt

log:
  logdir: Re500-05s-FNO
  entity: hzzheng-pino
  project: 'PINO-NS'
  group: 'Re500-05s-FNO'


================================================
FILE: configs/operator/Re500-PINO.yaml
================================================
data:
  paths: ['../data/NS-Re500Part0.npy', '../data/NS-Re500Part1.npy']
  Re: 500
  total_num: 200
  offset: 0
  n_samples: 700
  t_duration: 0.5
  data_res: [64, 64, 33]
  pde_res: [128, 128, 65]
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu
  num_pad: 4

train:
  batchsize: 2
  epochs: 501
  milestones: [300]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_step: 100

test:
  batchsize: 1
  data_res: [64, 64, 33]
  ckpt: model-500.pt

log:
  logdir: Re500-05s-PINO
  entity: hzzheng-pino
  project: 'PINO-NS'
  group: 'Re500-05s-PINO'




================================================
FILE: configs/pretrain/Darcy-pretrain-deeponet.yaml
================================================
data:
  name: 'Darcy'
  datapath: '/mnt/md1/zongyi/piececonst_r421_N1024_smooth1.mat'
  total_num: 1024
  offset: 0
  n_sample: 1000
  nx: 421
  sub: 7

model:
  branch_layers: [50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50]
  trunk_layers: [50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50]
  activation: tanh

train:
  batchsize: 20
  epochs: 2000
  milestones: [400, 800, 1200]
  base_lr: 0.001
  scheduler_gamma: 0.5
  save_dir: 'darcy-deeponet'
  save_name: 'darcy-pretrain-deeponet.pt'

log:
  project: 'PINO-Darcy-pretrain'
  group: 'deeponet'




================================================
FILE: configs/pretrain/Darcy-pretrain.yaml
================================================
data:
  name: 'Darcy'
  datapath: '/mnt/md1/zongyi/piececonst_r421_N1024_smooth1.mat'
  total_num: 1024
  offset: 0
  n_sample: 1000
  nx: 421
  sub: 7
  pde_sub: 2

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [20, 20, 20, 20]
  modes2: [20, 20, 20, 20]
  fc_dim: 128
  act: gelu

train:
  batchsize: 20
  epochs: 300
  milestones: [100, 150, 200]
  base_lr: 0.001
  scheduler_gamma: 0.5
  f_loss: 1.0
  xy_loss: 5.0
  save_dir: 'darcy-FDM'
  save_name: 'darcy-pretrain-pino.pt'

log:
  project: 'PINO-Darcy-pretrain'
  group: 'gelu-pino'
  entity: hzzheng-pino




================================================
FILE: configs/pretrain/Re100-pretrain-1s.yaml
================================================
data:
  datapath: 'data/NS_fine_Re100_T128_part0.npy'
  datapath2: 'data/NS_fine_Re100_T128_part1.npy'
  Re: 100
  total_num: 100
  offset: 0
  n_sample: 200
  time_interval: 1.0
  nx: 128
  nt: 128
  sub: 1
  sub_t: 2
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 150
  milestones: [25, 50, 75, 100]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_dir: 'Re100-FDM'
  save_name: 'PINO-pretrain-Re100-1s.pt'

log:
  project: 'PINO-pretrain'
  group: 'Re100-1s-tanh'




================================================
FILE: configs/pretrain/Re200-pretrain-1s.yaml
================================================
data:
  datapath: 'data/NS_fine_Re200_T128_part0.npy'
  datapath2: 'data/NS_fine_Re200_T128_part1.npy'
  Re: 200
  total_num: 100
  offset: 0
  n_sample: 200
  time_interval: 1.0
  nx: 128
  nt: 128
  sub: 1
  sub_t: 2
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 150
  milestones: [25, 50, 75, 100]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_dir: 'Re200-FDM'
  save_name: 'PINO-pretrain-Re200-1s.pt'

log:
  project: 'PINO-pretrain'
  group: 'Re200-1s-tanh'




================================================
FILE: configs/pretrain/Re250-pretrain-1s.yaml
================================================
data:
  datapath: 'data/NS_fine_Re250_T128_part0.npy'
  datapath2: 'data/NS_fine_Re250_T128_part1.npy'
  Re: 250
  total_num: 100
  offset: 0
  n_sample: 200
  time_interval: 1.0
  nx: 128
  nt: 128
  sub: 1
  sub_t: 2
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 150
  milestones: [25, 50, 75, 100]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_dir: 'Re250-FDM'
  save_name: 'PINO-pretrain-Re250-1s.pt'

log:
  project: 'PINO-pretrain'
  group: 'Re250-1s-tanh'




================================================
FILE: configs/pretrain/Re300-pretrain-1s.yaml
================================================
data:
  datapath: 'data/NS_fine_Re300_T128_part0.npy'
  datapath2: 'data/NS_fine_Re300_T128_part1.npy'
  Re: 300
  total_num: 100
  offset: 0
  n_sample: 200
  time_interval: 1.0
  nx: 128
  nt: 128
  sub: 1
  sub_t: 2
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 150
  milestones: [25, 50, 75, 100]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_dir: 'Re300-FDM'
  save_name: 'PINO-pretrain-Re300-1s.pt'

log:
  project: 'PINO-pretrain'
  group: 'Re300-1s-tanh'




================================================
FILE: configs/pretrain/Re350-pretrain-1s.yaml
================================================
data:
  datapath: 'data/NS_fine_Re350_T128_part0.npy'
  datapath2: 'data/NS_fine_Re350_T128_part1.npy'
  Re: 350
  total_num: 100
  offset: 0
  n_sample: 200
  time_interval: 1.0
  nx: 128
  nt: 128
  sub: 1
  sub_t: 2
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 150
  milestones: [25, 50, 75, 100]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_dir: 'Re350-FDM'
  save_name: 'PINO-pretrain-Re350-1s.pt'

log:
  project: 'PINO-pretrain'
  group: 'Re350-1s-tanh'




================================================
FILE: configs/pretrain/Re400-pretrain-1s.yaml
================================================
data:
  datapath: 'data/NS_fine_Re400_T128_part0.npy'
  datapath2: 'data/NS_fine_Re400_T128_part1.npy'
  Re: 400
  total_num: 100
  offset: 0
  n_sample: 200
  time_interval: 1.0
  nx: 128
  nt: 128
  sub: 1
  sub_t: 2
  shuffle: True

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128

train:
  batchsize: 1
  epochs: 150
  milestones: [25, 50, 75, 100]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 1.0
  f_loss: 1.0
  xy_loss: 5.0
  save_dir: 'Re400-FDM'
  save_name: 'PINO-pretrain-Re400-1s.pt'

log:
  project: 'PINO-pretrain'
  group: 'Re400-1s-tanh'




================================================
FILE: configs/pretrain/Re500-05s-deeponet.yaml
================================================
data:
  datapath: '/mnt/md1/zongyi/NS_fft_Re500_T4000.npy'
  Re: 500
  total_num: 4000
  offset: 0
  n_sample: 400
  time_interval: 0.5
  nx: 64
  nt: 64
  sub: 1
  sub_t: 1
  shuffle: True

model:
  branch_layers: [100, 100, 100]
  trunk_layers: [100, 100, 100]

train:
  batchsize: 20
  epochs: 10001
  milestones: [2500, 5000, 7500]
  base_lr: 0.001
  scheduler_gamma: 0.5
  save_dir: 'Re500-deepOnet'
  save_name: 'DeepONet-pretrain-Re500.pt'

log:
  project: 'PINO-pretrain-ICLR'
  group: 'Re500-05s-deepONet'




================================================
FILE: configs/pretrain/Re500-FNO-1s-100.yaml
================================================
data:
  datapath: '../data/NS-T4000.npy'
  Re: 500
  total_num: 4000
  offset: 0
  n_sample: 100
  time_interval: 1
  nx: 64
  nt: 64
  sub: 1
  sub_t: 1
  shuffle: True
  S2: 64
  T2: 65

model:
  layers: [64, 64, 64, 64, 64]
  modes1: [8, 8, 8, 8]
  modes2: [8, 8, 8, 8]
  modes3: [8, 8, 8, 8]
  fc_dim: 128
  act: gelu

train:
  batchsize: 2
  epochs: 100_001
  milestones: [20_000, 40_000, 60_000, 80_000]
  base_lr: 0.001
  scheduler_gamma: 0.5
  ic_loss: 0.0
  f_loss: 0.0
  xy_loss: 1.0
  save_dir: Re500-FNO-100
  save_name: FNO-Re500-1s-100.pt
  data_iter: 1
  eqn_iter: 0

log:
  entity: hzzheng-pino
  project: PINO-Operator-Learning
  group: FNO-Re500-1s-100




================================================
FILE: configs/pretrain/Re500-FNO-1s-200.yaml
================================================
Download .txt
gitextract_nppznbmv/

├── .dockerignore
├── .gitignore
├── Dockerfile
├── LICENSE
├── README.md
├── baselines/
│   ├── __init__.py
│   ├── data.py
│   ├── deepxde_deeponet.py
│   ├── loss.py
│   ├── model.py
│   ├── pinns_ns_05s.py
│   ├── pinns_ns_50s.py
│   ├── sapinns-50s.py
│   ├── sapinns.py
│   ├── test.py
│   ├── tqd_sapinns.py
│   ├── tqd_utils.py
│   ├── train_darcy.py
│   ├── train_ns.py
│   ├── unet3d.py
│   └── utils.py
├── cavity_flow.py
├── configs/
│   ├── baseline/
│   │   ├── NS-50s-LAAF.yaml
│   │   ├── NS-50s.yaml
│   │   ├── Re500-05s-deeponet.yaml
│   │   ├── Re500-pinns-05s-LAAF.yaml
│   │   ├── Re500-pinns-05s-SA.yaml
│   │   ├── Re500-pinns-05s.yaml
│   │   └── Re500-pinns.yaml
│   ├── finetune/
│   │   ├── Darcy-finetune.yaml
│   │   ├── Re100-finetune-1s.yaml
│   │   ├── Re200-finetune-1s.yaml
│   │   ├── Re250-finetune-1s.yaml
│   │   ├── Re300-finetune-1s.yaml
│   │   ├── Re350-finetune-1s.yaml
│   │   ├── Re400-finetune-1s.yaml
│   │   ├── Re500-finetune-05s-2layer.yaml
│   │   ├── Re500-finetune-05s-eqn.yaml
│   │   ├── Re500-finetune-05s4C0.yaml
│   │   ├── Re500-finetune-05s4C1.yaml
│   │   ├── Re500-finetune-05s4C4.yaml
│   │   ├── Re500-finetune-05s4k-2layer.yaml
│   │   ├── Re500-finetune-05s4k1k.yaml
│   │   ├── Re500-finetune-05s4k4-2layer.yaml
│   │   ├── Re500-finetune-05s4k4k.yaml
│   │   └── Re500-finetune-1s.yaml
│   ├── instance/
│   │   ├── Re500-1_8-FNO.yaml
│   │   ├── Re500-1_8-PINO-s.yaml
│   │   └── Re500-1_8-PINO.yaml
│   ├── ngc/
│   │   ├── Re500-1_8-dat0-PINO.yaml
│   │   ├── Re500-1_8-dat200-PINO.yaml
│   │   ├── Re500-1_8-dat40-PINO.yaml
│   │   ├── Re500-1_8-dat400-PINO.yaml
│   │   ├── Re500-1_8-dat80-PINO.yaml
│   │   ├── Re500-1_8-dat800-PINO.yaml
│   │   ├── Re500-1_8-res16-PINO.yaml
│   │   └── Re500-1_8-res32-PINO.yaml
│   ├── operator/
│   │   ├── Darcy-pretrain.yaml
│   │   ├── Re500-05s-1000-FNO.yaml
│   │   ├── Re500-05s-1000-PINO.yaml
│   │   ├── Re500-05s-3000-FNO.yaml
│   │   ├── Re500-05s-600-FNO.yaml
│   │   ├── Re500-05s-600-PINO-xl.yaml
│   │   ├── Re500-05s-600-PINO.yaml
│   │   ├── Re500-05s-FNO.yaml
│   │   ├── Re500-1_16-800-FNO-s.yaml
│   │   ├── Re500-1_16-800-PINO-s.yaml
│   │   ├── Re500-1_4-2000-FNO.yaml
│   │   ├── Re500-1_8-0-PINO-s.yaml
│   │   ├── Re500-1_8-1200-FNO.yaml
│   │   ├── Re500-1_8-1200-PINO.yaml
│   │   ├── Re500-1_8-200-FNO-s.yaml
│   │   ├── Re500-1_8-2000-FNO-s.yaml
│   │   ├── Re500-1_8-2000-FNO-xl.yaml
│   │   ├── Re500-1_8-2000-PINO.yaml
│   │   ├── Re500-1_8-2200-FNO-s.yaml
│   │   ├── Re500-1_8-2200-PINO-s.yaml
│   │   ├── Re500-1_8-800-FNO-s.yaml
│   │   ├── Re500-1_8-800-FNO-s32.yaml
│   │   ├── Re500-1_8-800-PINO-s.yaml
│   │   ├── Re500-1_8-800-PINO-s16.yaml
│   │   ├── Re500-1_8-800-PINO-s32.yaml
│   │   ├── Re500-1_8-800-UNet.yaml
│   │   ├── Re500-1_8-dat1.6k-PINO.yaml
│   │   ├── Re500-1_8-dat400-FNO.yaml
│   │   ├── Re500-1s-FNO.yaml
│   │   ├── Re500-3000-FNO.yaml
│   │   ├── Re500-3000-PINO.yaml
│   │   ├── Re500-4000-FNO.yaml
│   │   ├── Re500-FNO.yaml
│   │   └── Re500-PINO.yaml
│   ├── pretrain/
│   │   ├── Darcy-pretrain-deeponet.yaml
│   │   ├── Darcy-pretrain.yaml
│   │   ├── Re100-pretrain-1s.yaml
│   │   ├── Re200-pretrain-1s.yaml
│   │   ├── Re250-pretrain-1s.yaml
│   │   ├── Re300-pretrain-1s.yaml
│   │   ├── Re350-pretrain-1s.yaml
│   │   ├── Re400-pretrain-1s.yaml
│   │   ├── Re500-05s-deeponet.yaml
│   │   ├── Re500-FNO-1s-100.yaml
│   │   ├── Re500-FNO-1s-200.yaml
│   │   ├── Re500-FNO-1s-400.yaml
│   │   ├── Re500-PINO-1s-100-4v4.yaml
│   │   ├── Re500-PINO-1s-200-4v4.yaml
│   │   ├── Re500-PINO-1s-400-1v1.yaml
│   │   ├── Re500-pretrain-05s-4C1.yaml
│   │   ├── Re500-pretrain-05s-4C4.yaml
│   │   ├── Re500-pretrain-05s-eqn.yaml
│   │   ├── Re500-pretrain-1s.yaml
│   │   └── burgers-pretrain.yaml
│   ├── scratch/
│   │   ├── Re100-scratch-1s.yaml
│   │   ├── Re200-scratch-1s.yaml
│   │   ├── Re250-scratch-1s.yaml
│   │   ├── Re300-scratch-1s.yaml
│   │   ├── Re350-scratch-1s.yaml
│   │   ├── Re400-scratch-1s.yaml
│   │   ├── Re500-scratch-05s-new.yaml
│   │   ├── Re500-scratch-05s.yaml
│   │   ├── Re500-scratch-1s-progressive.yaml
│   │   └── Re500-scratch-1s.yaml
│   ├── test/
│   │   ├── Re500-05s-deeponet.yaml
│   │   ├── Re500-05s-test.yaml
│   │   ├── Re500-05s.yaml
│   │   ├── Re500-1s-100.yaml
│   │   ├── burgers.yaml
│   │   ├── darcy-deeponet.yaml
│   │   └── darcy.yaml
│   └── transfer/
│       ├── Re100to100-1s.yaml
│       ├── Re100to200-1s.yaml
│       ├── Re100to250-1s.yaml
│       ├── Re100to300-1s.yaml
│       ├── Re100to350-1s.yaml
│       ├── Re100to400-1s.yaml
│       ├── Re100to500-1s.yaml
│       ├── Re200to100-1s.yaml
│       ├── Re200to200-1s.yaml
│       ├── Re200to250-1s.yaml
│       ├── Re200to300-1s.yaml
│       ├── Re200to350-1s.yaml
│       ├── Re200to400-1s.yaml
│       ├── Re200to500-1s.yaml
│       ├── Re250to100-1s.yaml
│       ├── Re250to200-1s.yaml
│       ├── Re250to250-1s.yaml
│       ├── Re250to300-1s.yaml
│       ├── Re250to350-1s.yaml
│       ├── Re250to400-1s.yaml
│       ├── Re250to500-1s.yaml
│       ├── Re300to100-1s.yaml
│       ├── Re300to200-1s.yaml
│       ├── Re300to250-1s.yaml
│       ├── Re300to300-1s.yaml
│       ├── Re300to350-1s.yaml
│       ├── Re300to400-1s.yaml
│       ├── Re300to500-1s.yaml
│       ├── Re350to100-1s.yaml
│       ├── Re350to200-1s.yaml
│       ├── Re350to250-1s.yaml
│       ├── Re350to300-1s.yaml
│       ├── Re350to350-1s.yaml
│       ├── Re350to400-1s.yaml
│       ├── Re350to500-1s.yaml
│       ├── Re400to100-1s.yaml
│       ├── Re400to200-1s.yaml
│       ├── Re400to250-1s.yaml
│       ├── Re400to300-1s.yaml
│       ├── Re400to350-1s.yaml
│       ├── Re400to400-1s.yaml
│       ├── Re400to500-1s.yaml
│       ├── Re500to100-1s.yaml
│       ├── Re500to200-1s.yaml
│       ├── Re500to250-1s.yaml
│       ├── Re500to300-1s.yaml
│       ├── Re500to350-1s.yaml
│       ├── Re500to400-1s.yaml
│       ├── Re500to500-05s-new.yaml
│       ├── Re500to500-05s.yaml
│       └── Re500to500-1s.yaml
├── deeponet.py
├── download_data.py
├── eval_operator.py
├── generate_data.py
├── inference.py
├── instance_opt.py
├── inverse-darcy-foward.py
├── inverse-darcy.py
├── models/
│   ├── FCN.py
│   ├── __init__.py
│   ├── basics.py
│   ├── core.py
│   ├── fourier1d.py
│   ├── fourier2d.py
│   ├── fourier3d.py
│   ├── lowrank2d.py
│   ├── tfno.py
│   └── utils.py
├── pinns.py
├── prepare_data.py
├── profile-solver-legacy.py
├── profiler/
│   └── calmacs.py
├── run_pino2d.py
├── run_pino3d.py
├── run_solver.py
├── scripts/
│   ├── device1-finetune.sh
│   ├── device2-finetune.sh
│   ├── device3.sh
│   ├── finetune-4k-2layer.sh
│   ├── finetune-4k0.sh
│   ├── finetune-4k1-2layer.sh
│   ├── finetune-4k1.sh
│   ├── finetune-4k4-2layer.sh
│   ├── fnoRe500.sh
│   ├── ngc_submit_pino.sh
│   ├── ngc_test_submit_pino.sh
│   ├── pretrain.sh
│   ├── scratchRe500.sh
│   ├── test-opt/
│   │   └── Re500-1_8.sh
│   ├── train_dat0.sh
│   ├── train_dat200.sh
│   ├── train_dat40.sh
│   ├── train_dat400.sh
│   ├── train_dat80.sh
│   ├── train_dat800.sh
│   ├── train_res16.sh
│   └── train_res32.sh
├── solver/
│   ├── __init__.py
│   ├── kolmogorov_flow.py
│   ├── legacy_solver.py
│   ├── periodic.py
│   ├── random_fields.py
│   ├── rfsampler.py
│   └── spectrum.py
├── train_PINO3d.py
├── train_burgers.py
├── train_darcy.py
├── train_no.py
├── train_operator.py
├── train_pino.py
├── train_unet.py
└── train_utils/
    ├── __init__.py
    ├── adam.py
    ├── data_utils.py
    ├── datasets.py
    ├── distributed.py
    ├── eval_2d.py
    ├── eval_3d.py
    ├── losses.py
    ├── negadam.py
    ├── train_2d.py
    ├── train_3d.py
    └── utils.py
Download .txt
SYMBOL INDEX (412 symbols across 60 files)

FILE: baselines/data.py
  class DarcyFlow (line 10) | class DarcyFlow(Dataset):
    method __init__ (line 11) | def __init__(self,
    method __len__ (line 24) | def __len__(self):
    method __getitem__ (line 27) | def __getitem__(self, item):
  class NSLong (line 32) | class NSLong(object):
    method __init__ (line 33) | def __init__(self,
    method get_boundary_value (line 62) | def get_boundary_value(self, component=0):
    method get_boundary_points (line 82) | def get_boundary_points(self, num_x, num_y, num_t):
    method get_test_xyt (line 88) | def get_test_xyt(self):
  class NSdata (line 106) | class NSdata(object):
    method __init__ (line 107) | def __init__(self, datapath1,
    method get_init_cond (line 151) | def get_init_cond(self):
    method get_boundary_value (line 157) | def get_boundary_value(self, component=0):
    method get_boundary_points (line 177) | def get_boundary_points(self, num_x, num_y, num_t):
    method get_test_xyt (line 225) | def get_test_xyt(self):
    method extract (line 244) | def extract(data):
  class DeepOnetNS (line 270) | class DeepOnetNS(Dataset):
    method __init__ (line 274) | def __init__(self, datapath,
    method __len__ (line 296) | def __len__(self):
    method __getitem__ (line 299) | def __getitem__(self, idx):
  class DeepONetCPNS (line 309) | class DeepONetCPNS(Dataset):
    method __init__ (line 314) | def __init__(self, datapath,
    method __len__ (line 336) | def __len__(self):
    method __getitem__ (line 339) | def __getitem__(self, idx):

FILE: baselines/deepxde_deeponet.py
  function train (line 11) | def train(config):

FILE: baselines/loss.py
  function boundary_loss (line 7) | def boundary_loss(model, npt=100):
  function resf_NS (line 24) | def resf_NS(u, v, p, x, y, t, re=40):

FILE: baselines/model.py
  class DeepONet (line 8) | class DeepONet(nn.Module):
    method __init__ (line 9) | def __init__(self, branch_layer, trunk_layer):
    method forward (line 14) | def forward(self, u0, grid):
  class DeepONetCP (line 22) | class DeepONetCP(nn.Module):
    method __init__ (line 23) | def __init__(self, branch_layer, trunk_layer):
    method forward (line 28) | def forward(self, u0, grid):
  class SAWeight (line 36) | class SAWeight(nn.Module):
    method __init__ (line 37) | def __init__(self, out_dim, num_init: List, num_bd: List, num_collo: L...
    method forward (line 51) | def forward(self, init_cond: List, bd_cond: List, residual: List):

FILE: baselines/pinns_ns_05s.py
  function forcing (line 17) | def forcing(x):
  function pde (line 21) | def pde(x, u):
  function eval (line 54) | def eval(model, dataset,
  function train (line 83) | def train(offset, config, args):

FILE: baselines/pinns_ns_50s.py
  function forcing (line 17) | def forcing(x):
  function pde (line 22) | def pde(x, u):
  function eval (line 55) | def eval(model, dataset,
  function train_longtime (line 102) | def train_longtime(offset, config, args):

FILE: baselines/sapinns-50s.py
  function forcing (line 20) | def forcing(x):
  function pde (line 25) | def pde(x, u):
  function eval (line 59) | def eval(model, dataset,
  function train_sapinn (line 90) | def train_sapinn(offset, config, args):

FILE: baselines/sapinns.py
  function forcing (line 22) | def forcing(x):
  function pde (line 26) | def pde(x, u):
  function eval (line 59) | def eval(model, dataset,
  function train_sapinn (line 90) | def train_sapinn(offset, config, args):

FILE: baselines/test.py
  function test (line 11) | def test(model,
  function test_deeponet_ns (line 42) | def test_deeponet_ns(config):
  function test_deeponet_darcy (line 70) | def test_deeponet_darcy(config):

FILE: baselines/tqd_sapinns.py
  function forcing (line 19) | def forcing(x):
  function bd_model (line 23) | def bd_model(u_model, x, y, t):
  function f_model (line 29) | def f_model(u_model, x, y, t):
  function eval (line 59) | def eval(model, dataset,
  function train_sa (line 88) | def train_sa(offset, config, args):

FILE: baselines/tqd_utils.py
  class PointsIC (line 7) | class PointsIC(BC):
    method __init__ (line 11) | def __init__(self, domain, values, var, n_values=None):
    method create_input (line 29) | def create_input(self):
    method create_target (line 40) | def create_target(self, values):
    method loss (line 53) | def loss(self):

FILE: baselines/train_darcy.py
  function train_deeponet_darcy (line 14) | def train_deeponet_darcy(config):

FILE: baselines/train_ns.py
  function train_deeponet_cp (line 15) | def train_deeponet_cp(config):
  function train_deeponet (line 74) | def train_deeponet(config):

FILE: baselines/unet3d.py
  class BaseModel (line 12) | class BaseModel(nn.Module):
    method __init__ (line 13) | def __init__(self):
    method device (line 18) | def device(self):
    method data_dict_to_input (line 22) | def data_dict_to_input(self, data_dict, **kwargs):
    method loss_dict (line 28) | def loss_dict(self, data_dict, **kwargs):
    method eval_dict (line 35) | def eval_dict(self, data_dict, **kwargs):
  function create_conv (line 41) | def create_conv(
  class SingleConv (line 129) | class SingleConv(nn.Sequential):
    method __init__ (line 148) | def __init__(
  class DoubleConv (line 166) | class DoubleConv(nn.Sequential):
    method __init__ (line 190) | def __init__(
  class Encoder (line 242) | class Encoder(nn.Module):
    method __init__ (line 265) | def __init__(
    method forward (line 306) | def forward(self, x):
  class Decoder (line 313) | class Decoder(nn.Module):
    method __init__ (line 334) | def __init__(
    method forward (line 385) | def forward(self, encoder_features, x):
    method _joining (line 392) | def _joining(encoder_features, x, concat):
  function create_encoders (line 399) | def create_encoders(
  function create_decoders (line 444) | def create_decoders(
  class AbstractUpsampling (line 472) | class AbstractUpsampling(nn.Module):
    method __init__ (line 478) | def __init__(self, upsample):
    method forward (line 482) | def forward(self, encoder_features, x):
  class InterpolateUpsampling (line 489) | class InterpolateUpsampling(AbstractUpsampling):
    method __init__ (line 497) | def __init__(self, mode="nearest"):
    method _interpolate (line 502) | def _interpolate(x, size, mode):
  class TransposeConvUpsampling (line 506) | class TransposeConvUpsampling(AbstractUpsampling):
    method __init__ (line 520) | def __init__(
  class NoUpsampling (line 534) | class NoUpsampling(AbstractUpsampling):
    method __init__ (line 535) | def __init__(self):
    method _no_upsampling (line 539) | def _no_upsampling(x, size):
  function number_of_features_per_level (line 543) | def number_of_features_per_level(init_channel_number, num_levels):
  class AbstractUNet (line 547) | class AbstractUNet(BaseModel):
    method __init__ (line 577) | def __init__(
    method forward (line 645) | def forward(self, x):
  class UNet3D (line 673) | class UNet3D(AbstractUNet):
    method __init__ (line 682) | def __init__(

FILE: baselines/utils.py
  function weighted_mse (line 7) | def weighted_mse(pred, target, weight=None):
  function get_3dboundary_points (line 14) | def get_3dboundary_points(num_x,                # number of points on x ...
  function get_3dboundary (line 57) | def get_3dboundary(value):
  function get_xytgrid (line 78) | def get_xytgrid(S, T, bot=[0, 0, 0], top=[1, 1, 1]):
  function get_2dgird (line 100) | def get_2dgird(num=31):
  function get_3dgrid (line 110) | def get_3dgrid(num=11):
  function get_4dgrid (line 121) | def get_4dgrid(num=11):
  function vel2vor (line 143) | def vel2vor(u, v, x, y):
  function sub_mse (line 150) | def sub_mse(vec):
  function get_sample (line 164) | def get_sample(npt=100):
  function concat (line 179) | def concat(xy, z, t=0.0, offset=0):
  function cal_mixgrad (line 200) | def cal_mixgrad(outputs, inputs):

FILE: cavity_flow.py
  class SpectralConv3d (line 29) | class SpectralConv3d(nn.Module):
    method __init__ (line 30) | def __init__(self, in_channels, out_channels, modes1, modes2, modes3):
    method compl_mul3d (line 58) | def compl_mul3d(self, input, weights):
    method forward (line 62) | def forward(self, x):
  class FNO3d (line 84) | class FNO3d(nn.Module):
    method __init__ (line 85) | def __init__(self, modes1, modes2, modes3, width, padding):
    method forward (line 122) | def forward(self, x):
    method get_grid (line 157) | def get_grid(self, shape, device):
  function PINO_loss_Fourier_f (line 215) | def PINO_loss_Fourier_f(out, Re=500):
  function PINO_loss_FDM_f (line 260) | def PINO_loss_FDM_f(out, Re=500):
  function PINO_loss_ic (line 289) | def PINO_loss_ic(out, y):
  function PINO_loss_bc (line 299) | def PINO_loss_bc(out, y):

FILE: download_data.py
  function download_file (line 23) | def download_file(url, file_path):
  function main (line 33) | def main(args):

FILE: eval_operator.py
  function test_3d (line 14) | def test_3d(config):
  function test_2d (line 48) | def test_2d(config):

FILE: generate_data.py
  function legacy_solver (line 15) | def legacy_solver(args):
  function gen_data (line 58) | def gen_data(args):

FILE: inference.py
  function get_pred (line 20) | def get_pred(args):

FILE: instance_opt.py
  function train_ns (line 25) | def train_ns(model,
  function subprocess (line 122) | def subprocess(args):

FILE: inverse-darcy-foward.py
  class SpectralConv2d (line 24) | class SpectralConv2d(nn.Module):
    method __init__ (line 25) | def __init__(self, in_channels, out_channels, modes1, modes2):
    method compl_mul2d (line 44) | def compl_mul2d(self, input, weights):
    method forward (line 48) | def forward(self, x):
  class FNO2d (line 66) | class FNO2d(nn.Module):
    method __init__ (line 67) | def __init__(self, modes1, modes2, width):
    method forward (line 102) | def forward(self, x):
    method get_grid (line 139) | def get_grid(self, shape, device):
  function FDM_Darcy (line 214) | def FDM_Darcy(u, a, D=1, f=1):
  function PINO_loss (line 248) | def PINO_loss(u, a):
  function darcy_mask1 (line 349) | def darcy_mask1(x):
  function darcy_mask2 (line 352) | def darcy_mask2(x):
  function total_variance (line 359) | def total_variance(x):

FILE: inverse-darcy.py
  class SpectralConv2d (line 22) | class SpectralConv2d(nn.Module):
    method __init__ (line 23) | def __init__(self, in_channels, out_channels, modes1, modes2):
    method compl_mul2d (line 42) | def compl_mul2d(self, input, weights):
    method forward (line 46) | def forward(self, x):
  class FNO2d (line 64) | class FNO2d(nn.Module):
    method __init__ (line 65) | def __init__(self, modes1, modes2, width):
    method forward (line 100) | def forward(self, x):
    method get_grid (line 136) | def get_grid(self, shape, device):
  function FDM_Darcy (line 211) | def FDM_Darcy(u, a, D=1, f=1):
  function PINO_loss (line 245) | def PINO_loss(u, a):
  function darcy_mask1 (line 279) | def darcy_mask1(x):
  function darcy_mask2 (line 282) | def darcy_mask2(x):
  function total_variance (line 289) | def total_variance(x):

FILE: models/FCN.py
  function linear_block (line 4) | def linear_block(in_channel, out_channel):
  class FCNet (line 12) | class FCNet(nn.Module):
    method __init__ (line 18) | def __init__(self, layers=[2, 10, 1]):
    method forward (line 26) | def forward(self, x):
  class DenseNet (line 30) | class DenseNet(nn.Module):
    method __init__ (line 31) | def __init__(self, layers, nonlinearity, out_nonlinearity=None, normal...
    method forward (line 57) | def forward(self, x):

FILE: models/basics.py
  function compl_mul1d (line 8) | def compl_mul1d(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
  function compl_mul2d (line 15) | def compl_mul2d(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
  function compl_mul3d (line 22) | def compl_mul3d(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
  class SpectralConv1d (line 31) | class SpectralConv1d(nn.Module):
    method __init__ (line 32) | def __init__(self, in_channels, out_channels, modes1):
    method forward (line 48) | def forward(self, x):
  class SpectralConv2d (line 66) | class SpectralConv2d(nn.Module):
    method __init__ (line 67) | def __init__(self, in_channels, out_channels, modes1, modes2):
    method forward (line 81) | def forward(self, x):
  class SpectralConv3d (line 101) | class SpectralConv3d(nn.Module):
    method __init__ (line 102) | def __init__(self, in_channels, out_channels, modes1, modes2, modes3):
    method forward (line 116) | def forward(self, x):
  class FourierBlock (line 148) | class FourierBlock(nn.Module):
    method __init__ (line 149) | def __init__(self, in_channels, out_channels, modes1, modes2, modes3, ...
    method forward (line 164) | def forward(self, x):

FILE: models/core.py
  function contract_1D (line 7) | def contract_1D(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
  function contract_2D (line 13) | def contract_2D(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
  function contract_3D (line 19) | def contract_3D(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
  class FactorizedSpectralConv3d (line 24) | class FactorizedSpectralConv3d(nn.Module):
    method __init__ (line 25) | def __init__(self, in_channels, out_channels, modes_height, modes_widt...
    method _get_weight_factorized (line 70) | def _get_weight_factorized(self, layer_index, corner_index):
    method _get_weight_dense (line 77) | def _get_weight_dense(self, layer_index, corner_index):
    method forward (line 84) | def forward(self, x, indices=0):
    method get_conv (line 116) | def get_conv(self, indices):
    method __getitem__ (line 125) | def __getitem__(self, indices):
  class FactorizedSpectralConv2d (line 129) | class FactorizedSpectralConv2d(nn.Module):
    method __init__ (line 130) | def __init__(self, in_channels, out_channels, modes_height, modes_widt...
    method _get_weight_factorized (line 170) | def _get_weight_factorized(self, layer_index, corner_index):
    method _get_weight_dense (line 177) | def _get_weight_dense(self, layer_index, corner_index):
    method forward (line 184) | def forward(self, x, indices=0, super_res=1):
    method get_conv (line 208) | def get_conv(self, indices):
    method __getitem__ (line 217) | def __getitem__(self, indices):
  class SubConv (line 221) | class SubConv(nn.Module):
    method __init__ (line 229) | def __init__(self, main_conv, indices):
    method forward (line 234) | def forward(self, x, **kwargs):
  class FactorizedSpectralConv1d (line 238) | class FactorizedSpectralConv1d(nn.Module):
    method __init__ (line 239) | def __init__(self, in_channels, out_channels, modes, n_layers=1,
    method _get_weight_factorized (line 282) | def _get_weight_factorized(self, layer_index):
    method _get_weight_dense (line 286) | def _get_weight_dense(self, layer_index):
    method forward (line 290) | def forward(self, x, indices=0, s=None):
    method get_conv (line 309) | def get_conv(self, indices):
    method __getitem__ (line 318) | def __getitem__(self, indices):
  class JointFactorizedSpectralConv1d (line 323) | class JointFactorizedSpectralConv1d(nn.Module):
    method __init__ (line 324) | def __init__(self, modes, width, n_layers=1, joint_factorization=True,...
    method forward (line 384) | def forward(self, x, s=None):

FILE: models/fourier1d.py
  class FNO1d (line 6) | class FNO1d(nn.Module):
    method __init__ (line 7) | def __init__(self,
    method forward (line 45) | def forward(self, x):

FILE: models/fourier2d.py
  class FNO2d (line 6) | class FNO2d(nn.Module):
    method __init__ (line 7) | def __init__(self, modes1, modes2,
    method forward (line 53) | def forward(self, x):

FILE: models/fourier3d.py
  class FNO3d (line 6) | class FNO3d(nn.Module):
    method __init__ (line 7) | def __init__(self,
    method forward (line 58) | def forward(self, x):

FILE: models/lowrank2d.py
  class LowRank2d (line 8) | class LowRank2d(nn.Module):
    method __init__ (line 9) | def __init__(self, in_channels, out_channels):
    method get_grid (line 17) | def get_grid(self, S1, S2, batchsize, device):
    method forward (line 24) | def forward(self, x, gridy=None):

FILE: models/tfno.py
  class FactorizedFNO3d (line 6) | class FactorizedFNO3d(nn.Module):
    method __init__ (line 7) | def __init__(self, modes_height, modes_width,  modes_depth, width, fc_...
    method forward (line 65) | def forward(self, x, super_res=1):
  class FactorizedFNO2d (line 98) | class FactorizedFNO2d(nn.Module):
    method __init__ (line 99) | def __init__(self, modes_height, modes_width,  width, fc_channels=256,...
    method forward (line 160) | def forward(self, x, super_res=1):
  class FactorizedFNO1d (line 202) | class FactorizedFNO1d(nn.Module):
    method __init__ (line 203) | def __init__(self, modes, width, in_channels=2, out_channels=1, n_laye...
    method forward (line 231) | def forward(self, x, s=None):

FILE: models/utils.py
  function add_padding (line 4) | def add_padding(x, num_pad):
  function add_padding2 (line 12) | def add_padding2(x, num_pad1, num_pad2):
  function remove_padding (line 20) | def remove_padding(x, num_pad):
  function remove_padding2 (line 28) | def remove_padding2(x, num_pad1, num_pad2):
  function _get_act (line 36) | def _get_act(act):

FILE: prepare_data.py
  function shuffle_data (line 5) | def shuffle_data(datapath):
  function test_data (line 13) | def test_data(datapath):
  function get_slice (line 21) | def get_slice(datapath):
  function plot_test (line 30) | def plot_test(datapath):

FILE: run_pino2d.py
  function train (line 14) | def train(args, config):

FILE: run_pino3d.py
  function run_instance (line 15) | def run_instance(loader, config, data_config):

FILE: run_solver.py
  function solve (line 15) | def solve(a,

FILE: solver/kolmogorov_flow.py
  class KolmogorovFlow2d (line 5) | class KolmogorovFlow2d(object):
    method __init__ (line 7) | def __init__(self, w0, Re, n):
    method vorticity (line 59) | def vorticity(self, stream_f=None, real_space=True):
    method stream_function (line 71) | def stream_function(self, w_h=None, real_space=False):
    method velocity_field (line 86) | def velocity_field(self, stream_f=None, real_space=True):
    method nonlinear_term (line 104) | def nonlinear_term(self, w_h):
    method advance (line 127) | def advance(self, t, delta_t=1e-3):

FILE: solver/legacy_solver.py
  class GaussianRF (line 8) | class GaussianRF(object):
    method __init__ (line 10) | def __init__(self, dim, size, alpha=2, tau=3, sigma=None, boundary="pe...
    method sample (line 54) | def sample(self, N):
  function navier_stokes_2d (line 72) | def navier_stokes_2d(w0, f, visc, T, delta_t=1e-4, record_steps=1):

FILE: solver/periodic.py
  class Poisson2d (line 9) | class Poisson2d(object):
    method __init__ (line 11) | def __init__(self, s1, s2, L1=2*math.pi, L2=2*math.pi, device=None, dt...
    method solve (line 28) | def solve(self, f):
    method __call__ (line 31) | def __call__(self, f):
  class NavierStokes2d (line 38) | class NavierStokes2d(object):
    method __init__ (line 40) | def __init__(self, s1, s2, L1=2*math.pi, L2=2*math.pi, device=None, dt...
    method stream_function (line 81) | def stream_function(self, w_h, real_space=False):
    method velocity_field (line 91) | def velocity_field(self, stream_f, real_space=True):
    method nonlinear_term (line 104) | def nonlinear_term(self, w_h, f_h=None):
    method time_step (line 120) | def time_step(self, q, v, f, Re):
    method advance (line 139) | def advance(self, w, f=None, T=1.0, Re=100, adaptive=True, delta_t=1e-3):
    method __call__ (line 185) | def __call__(self, w, f=None, T=1.0, Re=100, adaptive=True, delta_t=1e...

FILE: solver/random_fields.py
  class GaussianRF (line 8) | class GaussianRF(object):
    method __init__ (line 9) | def __init__(self, dim, size, length=1.0, alpha=2.0, tau=3.0, sigma=No...
    method sample (line 67) | def sample(self, N):
  class GaussianRF2d (line 76) | class GaussianRF2d(object):
    method __init__ (line 78) | def __init__(self, s1, s2, L1=2*math.pi, L2=2*math.pi, alpha=2.0, tau=...
    method sample (line 107) | def sample(self, N, xi=None):

FILE: solver/rfsampler.py
  class GaussianRF (line 5) | class GaussianRF(object):
    method __init__ (line 6) | def __init__(self, dim, size, alpha=2, tau=3, sigma=None, boundary="pe...
    method sample (line 51) | def sample(self, N):

FILE: solver/spectrum.py
  function navier_stokes_2d (line 17) | def navier_stokes_2d(w0, f, visc, T, delta_t=1e-4, record_steps=1):

FILE: train_PINO3d.py
  function subprocess_fn (line 20) | def subprocess_fn(rank, args):

FILE: train_burgers.py
  function run (line 12) | def run(args, config):
  function test (line 50) | def test(config):

FILE: train_darcy.py
  function get_molifier (line 26) | def get_molifier(mesh, device):
  function eval_darcy (line 32) | def eval_darcy(model, val_loader, criterion,
  function train (line 50) | def train(model,
  function subprocess (line 139) | def subprocess(args):

FILE: train_no.py
  function pad_input (line 26) | def pad_input(x, num_pad):
  function train_ns (line 34) | def train_ns(model,
  function eval_ns (line 158) | def eval_ns(model, val_loader, device, config, args):
  function subprocess (line 186) | def subprocess(args):

FILE: train_operator.py
  function train_3d (line 15) | def train_3d(args, config):
  function train_2d (line 78) | def train_2d(args, config):

FILE: train_pino.py
  function eval_ns (line 28) | def eval_ns(model, val_loader, criterion, device):
  function train_ns (line 44) | def train_ns(model,
  function subprocess (line 141) | def subprocess(args):

FILE: train_unet.py
  function eval_ns (line 28) | def eval_ns(model, val_loader, criterion, device):
  function train_ns (line 46) | def train_ns(model,
  function subprocess (line 121) | def subprocess(args):

FILE: train_utils/adam.py
  function adam (line 8) | def adam(params: List[Tensor],
  class Adam (line 54) | class Adam(Optimizer):
    method __init__ (line 79) | def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
    method __setstate__ (line 95) | def __setstate__(self, state):
    method step (line 101) | def step(self, closure=None):

FILE: train_utils/data_utils.py
  function sample_data (line 4) | def sample_data(loader):
  function data_sampler (line 10) | def data_sampler(dataset, shuffle, distributed):

FILE: train_utils/datasets.py
  function online_loader (line 15) | def online_loader(sampler, S, T, time_scale, batchsize=1):
  function sample_data (line 24) | def sample_data(loader):
  class MatReader (line 30) | class MatReader(object):
    method __init__ (line 31) | def __init__(self, file_path, to_torch=True, to_cuda=False, to_float=T...
    method _load_file (line 44) | def _load_file(self):
    method load_file (line 48) | def load_file(self, file_path):
    method read_field (line 52) | def read_field(self, field):
    method set_cuda (line 70) | def set_cuda(self, to_cuda):
    method set_torch (line 73) | def set_torch(self, to_torch):
    method set_float (line 76) | def set_float(self, to_float):
  class BurgersLoader (line 80) | class BurgersLoader(object):
    method __init__ (line 81) | def __init__(self, datapath, nx=2 ** 10, nt=100, sub=8, sub_t=1, new=F...
    method make_loader (line 94) | def make_loader(self, n_sample, batch_size, start=0, train=True):
  class NSLoader (line 117) | class NSLoader(object):
    method __init__ (line 118) | def __init__(self, datapath1,
    method make_loader (line 154) | def make_loader(self, n_sample, batch_size, start=0, train=True):
    method make_dataset (line 169) | def make_dataset(self, n_sample, start=0, train=True):
    method extract (line 187) | def extract(data):
  class NS3DDataset (line 213) | class NS3DDataset(Dataset):
    method __init__ (line 214) | def __init__(self, paths,
    method load (line 231) | def load(self, train=True, sub_x=1, sub_t=1):
    method __getitem__ (line 263) | def __getitem__(self, idx):
    method __len__ (line 266) | def __len__(self, ):
    method extract (line 270) | def extract(data):
  class KFDataset (line 296) | class KFDataset(Dataset):
    method __init__ (line 297) | def __init__(self, paths,
    method load (line 327) | def load(self):
    method partition (line 360) | def partition(self, data):
    method __getitem__ (line 378) | def __getitem__(self, idx):
    method __len__ (line 385) | def __len__(self, ):
  class BurgerData (line 389) | class BurgerData(Dataset):
    method __init__ (line 399) | def __init__(self, datapath):
    method __len__ (line 411) | def __len__(self):
    method __getitem__ (line 414) | def __getitem__(self, idx):
    method get_flatten_data (line 417) | def get_flatten_data(self):
    method get_boundary_data (line 428) | def get_boundary_data(self):
    method sample_xt (line 438) | def sample_xt(self, N=10000):
    method sample_xu (line 448) | def sample_xu(self, N=100):
  class DarcyFlow (line 461) | class DarcyFlow(Dataset):
    method __init__ (line 462) | def __init__(self,
    method __len__ (line 475) | def __len__(self):
    method __getitem__ (line 478) | def __getitem__(self, item):
  class DarcyIC (line 483) | class DarcyIC(Dataset):
    method __init__ (line 484) | def __init__(self,
    method __len__ (line 501) | def __len__(self):
    method __getitem__ (line 504) | def __getitem__(self, item):
  class DarcyCombo (line 509) | class DarcyCombo(Dataset):
    method __init__ (line 510) | def __init__(self,
    method __len__ (line 527) | def __len__(self):
    method __getitem__ (line 530) | def __getitem__(self, item):
  class KFaDataset (line 540) | class KFaDataset(Dataset):
    method __init__ (line 541) | def __init__(self, paths,
    method load (line 561) | def load(self):
    method __getitem__ (line 584) | def __getitem__(self, idx):
    method __len__ (line 591) | def __len__(self, ):

FILE: train_utils/distributed.py
  function setup (line 6) | def setup(rank, world_size):
  function cleanup (line 12) | def cleanup():
  function get_world_size (line 16) | def get_world_size():
  function all_reduce_mean (line 23) | def all_reduce_mean(tensor):
  function reduce_sum (line 37) | def reduce_sum(tensor):
  function reduce_loss_dict (line 52) | def reduce_loss_dict(loss_dict):

FILE: train_utils/eval_2d.py
  function eval_darcy (line 14) | def eval_darcy(model,
  function eval_burgers (line 61) | def eval_burgers(model,

FILE: train_utils/eval_3d.py
  function eval_ns (line 15) | def eval_ns(model,  # model

FILE: train_utils/losses.py
  function FDM_Darcy (line 6) | def FDM_Darcy(u, a, D=1):
  function darcy_loss (line 39) | def darcy_loss(u, a):
  function FDM_NS_vorticity (line 68) | def FDM_NS_vorticity(w, v=1/40, t_interval=1.0):
  function Autograd_Burgers (line 108) | def Autograd_Burgers(u, grid, v=1/100):
  function AD_loss (line 119) | def AD_loss(u, u0, grid, index_ic=None, p=None, q=None):
  class LpLoss (line 152) | class LpLoss(object):
    method __init__ (line 156) | def __init__(self, d=2, p=2, size_average=True, reduction=True):
    method abs (line 167) | def abs(self, x, y):
    method rel (line 183) | def rel(self, x, y):
    method __call__ (line 197) | def __call__(self, x, y):
  function FDM_Burgers (line 201) | def FDM_Burgers(u, v, D=1):
  function PINO_loss (line 224) | def PINO_loss(u, u0, v):
  function PINO_loss3d (line 247) | def PINO_loss3d(u, u0, forcing, v=1/40, t_interval=1.0):
  function PDELoss (line 266) | def PDELoss(model, x, t, nu):
  function get_forcing (line 289) | def get_forcing(S):

FILE: train_utils/negadam.py
  function adam (line 8) | def adam(params: List[Tensor],
  class NAdam (line 54) | class NAdam(Optimizer):
    method __init__ (line 79) | def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
    method __setstate__ (line 95) | def __setstate__(self, state):
    method step (line 101) | def step(self, closure=None):

FILE: train_utils/train_2d.py
  function train_2d_operator (line 13) | def train_2d_operator(model,
  function train_2d_burger (line 119) | def train_2d_burger(model,

FILE: train_utils/train_3d.py
  function train (line 16) | def train(model,
  function mixed_train (line 125) | def mixed_train(model,              # model of neural operator
  function progressive_train (line 258) | def progressive_train(model,

FILE: train_utils/utils.py
  function vor2vel (line 6) | def vor2vel(w, L=2 * np.pi):
  function get_sample (line 45) | def get_sample(N, T, s, p, q):
  function get_grid (line 72) | def get_grid(N, T, s):
  function get_2dgrid (line 81) | def get_2dgrid(S):
  function torch2dgrid (line 97) | def torch2dgrid(num_x, num_y, bot=(0,0), top=(1,1)):
  function get_grid3d (line 107) | def get_grid3d(S, T, time_scale=1.0, device='cpu'):
  function convert_ic (line 117) | def convert_ic(u0, N, S, T, time_scale=1.0):
  function requires_grad (line 126) | def requires_grad(model, flag=True):
  function set_grad (line 131) | def set_grad(tensors, flag=True):
  function zero_grad (line 136) | def zero_grad(params):
  function count_params (line 149) | def count_params(net):
  function save_checkpoint (line 156) | def save_checkpoint(path, name, model, optimizer=None):
  function save_ckpt (line 178) | def save_ckpt(path, model, optimizer=None, scheduler=None):
  function dict2str (line 197) | def dict2str(log_dict):
Condensed preview — 252 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (551K chars).
[
  {
    "path": ".dockerignore",
    "chars": 77,
    "preview": ".vscode\n*.py\nwandb\nconfig\ndocs\nmodels\n*/*.py\nexp\ncheckpoints\n*/__pycache__/**"
  },
  {
    "path": ".gitignore",
    "chars": 118,
    "preview": "data\nlog\n.vscode\nwandb\n**/__pycache__/**\n.idea\nfigs\ncheckpoints\n.ipynb_checkpoints\n*.ipynb\n*.pt\n*.pth\ntensordiffeq\nexp"
  },
  {
    "path": "Dockerfile",
    "chars": 162,
    "preview": "FROM nvcr.io/nvidia/pytorch:22.09-py3\nRUN useradd -ms /bin/bash pino\nUSER pino\nENV PATH=/home/pino/.local/bin:$PATH\nRUN "
  },
  {
    "path": "LICENSE",
    "chars": 11357,
    "preview": "                                 Apache License\n                           Version 2.0, January 2004\n                   "
  },
  {
    "path": "README.md",
    "chars": 8272,
    "preview": "# Physics-Informed Neural Operator for Learning Partial Differential Equations\n\n# 📢 DEPRECATION NOTICE 📢  \n-------------"
  },
  {
    "path": "baselines/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "baselines/data.py",
    "chars": 12157,
    "preview": "import numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom .utils import get_xytgrid, get_3dboundary, get"
  },
  {
    "path": "baselines/deepxde_deeponet.py",
    "chars": 2023,
    "preview": "import random\nimport deepxde as dde\nfrom baselines.data import NSdata\n\n'''\nTraining deepONet using deepxde implementatio"
  },
  {
    "path": "baselines/loss.py",
    "chars": 2073,
    "preview": "import torch\nimport torch.autograd as autograd\nfrom train_utils.utils import set_grad\nfrom .utils import get_sample, net"
  },
  {
    "path": "baselines/model.py",
    "chars": 1999,
    "preview": "import torch\nimport torch.nn as nn\nfrom models.FCN import DenseNet\nfrom typing import List\nfrom .utils import weighted_m"
  },
  {
    "path": "baselines/pinns_ns_05s.py",
    "chars": 6438,
    "preview": "'''\ntraining for Navier Stokes with Reynolds number 500, 0.5 second time period\n'''\nimport csv\nimport random\nfrom timeit"
  },
  {
    "path": "baselines/pinns_ns_50s.py",
    "chars": 5721,
    "preview": "'''\ntraining for Navier Stokes with viscosity 0.001\nspatial domain: (0, 1) ** 2\ntemporal domain: [0, 49]\n'''\nimport csv\n"
  },
  {
    "path": "baselines/sapinns-50s.py",
    "chars": 6477,
    "preview": "import csv\nimport random\nfrom timeit import default_timer\nfrom tqdm import tqdm\nimport deepxde as dde\nimport numpy as np"
  },
  {
    "path": "baselines/sapinns.py",
    "chars": 6448,
    "preview": "import csv\nimport random\nfrom timeit import default_timer\nfrom tqdm import tqdm\nimport deepxde as dde\nimport numpy as np"
  },
  {
    "path": "baselines/test.py",
    "chars": 3150,
    "preview": "from tqdm import tqdm\nimport numpy as np\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom baselines.model impo"
  },
  {
    "path": "baselines/tqd_sapinns.py",
    "chars": 5353,
    "preview": "import random\nimport numpy as np\nimport csv\nfrom timeit import default_timer\n\nimport tensorflow as tf\nimport deepxde as "
  },
  {
    "path": "baselines/tqd_utils.py",
    "chars": 1902,
    "preview": "import numpy as np\n\nfrom tensordiffeq.boundaries import BC\nfrom tensordiffeq.utils import flatten_and_stack, multimesh, "
  },
  {
    "path": "baselines/train_darcy.py",
    "chars": 2541,
    "preview": "from tqdm import tqdm\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.optim import Adam\nfrom torch.opti"
  },
  {
    "path": "baselines/train_ns.py",
    "chars": 4544,
    "preview": "from tqdm import tqdm\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.optim import Adam\nfrom torch.opti"
  },
  {
    "path": "baselines/unet3d.py",
    "chars": 24603,
    "preview": "from functools import partial\n\nimport torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\n\n\n\n# UNet3d "
  },
  {
    "path": "baselines/utils.py",
    "chars": 6644,
    "preview": "import numpy as np\n\nimport torch\nimport torch.autograd as autograd\n\n\ndef weighted_mse(pred, target, weight=None):\n    if"
  },
  {
    "path": "cavity_flow.py",
    "chars": 13714,
    "preview": "\"\"\"\n@author: Zongyi Li\nThis file is the Fourier Neural Operator for 3D problem such as the Navier-Stokes equation discus"
  },
  {
    "path": "configs/baseline/NS-50s-LAAF.yaml",
    "chars": 498,
    "preview": "data:\n  datapath: 'data/ns_V1e-3_N5000_T50.mat'\n  vis: 0.001\n  total_num: 5000\n  offset: 4900\n  n_sample: 1\n  time_scale"
  },
  {
    "path": "configs/baseline/NS-50s.yaml",
    "chars": 409,
    "preview": "data:\n  datapath: 'data/ns_V1e-3_N5000_T50.mat'\n  vis: 0.001\n  total_num: 5000\n  offset: 4900\n  n_sample: 1\n  time_scale"
  },
  {
    "path": "configs/baseline/Re500-05s-deeponet.yaml",
    "chars": 539,
    "preview": "data:\n  datapath: '/mnt/md1/zongyi/NS_fft_Re500_T4000.npy'\n  Re: 500\n  total_num: 4000\n  offset: 0\n  n_sample: 4000\n  ti"
  },
  {
    "path": "configs/baseline/Re500-pinns-05s-LAAF.yaml",
    "chars": 520,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interva"
  },
  {
    "path": "configs/baseline/Re500-pinns-05s-SA.yaml",
    "chars": 532,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interva"
  },
  {
    "path": "configs/baseline/Re500-pinns-05s.yaml",
    "chars": 507,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interva"
  },
  {
    "path": "configs/baseline/Re500-pinns.yaml",
    "chars": 418,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interva"
  },
  {
    "path": "configs/finetune/Darcy-finetune.yaml",
    "chars": 596,
    "preview": "data:\n  name: 'Darcy'\n  datapath: '/mnt/md1/zongyi/piececonst_r421_N1024_smooth2.mat'\n  total_num: 1024\n  offset: 500\n  "
  },
  {
    "path": "configs/finetune/Re100-finetune-1s.yaml",
    "chars": 674,
    "preview": "data:\n  datapath: 'data/NS_fine_Re100_T128_part0.npy'\n  Re: 100\n  total_num: 100\n  offset: 190\n  n_sample: 1\n  time_inte"
  },
  {
    "path": "configs/finetune/Re200-finetune-1s.yaml",
    "chars": 675,
    "preview": "data:\n  datapath: 'data/NS_fine_Re200_T128_part0.npy'\n  Re: 200\n  total_num: 100\n  offset: 194\n  n_sample: 1\n  time_inte"
  },
  {
    "path": "configs/finetune/Re250-finetune-1s.yaml",
    "chars": 675,
    "preview": "data:\n  datapath: 'data/NS_fine_Re250_T128_part0.npy'\n  Re: 250\n  total_num: 100\n  offset: 198\n  n_sample: 1\n  time_inte"
  },
  {
    "path": "configs/finetune/Re300-finetune-1s.yaml",
    "chars": 675,
    "preview": "data:\n  datapath: 'data/NS_fine_Re300_T128_part0.npy'\n  Re: 300\n  total_num: 100\n  offset: 190\n  n_sample: 1\n  time_inte"
  },
  {
    "path": "configs/finetune/Re350-finetune-1s.yaml",
    "chars": 675,
    "preview": "data:\n  datapath: 'data/NS_fine_Re350_T128_part0.npy'\n  Re: 350\n  total_num: 100\n  offset: 198\n  n_sample: 1\n  time_inte"
  },
  {
    "path": "configs/finetune/Re400-finetune-1s.yaml",
    "chars": 675,
    "preview": "data:\n  datapath: 'data/NS_fine_Re400_T128_part0.npy'\n  Re: 400\n  total_num: 100\n  offset: 199\n  n_sample: 1\n  time_inte"
  },
  {
    "path": "configs/finetune/Re500-finetune-05s-2layer.yaml",
    "chars": 701,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interva"
  },
  {
    "path": "configs/finetune/Re500-finetune-05s-eqn.yaml",
    "chars": 676,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interva"
  },
  {
    "path": "configs/finetune/Re500-finetune-05s4C0.yaml",
    "chars": 676,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interva"
  },
  {
    "path": "configs/finetune/Re500-finetune-05s4C1.yaml",
    "chars": 705,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interva"
  },
  {
    "path": "configs/finetune/Re500-finetune-05s4C4.yaml",
    "chars": 676,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interva"
  },
  {
    "path": "configs/finetune/Re500-finetune-05s4k-2layer.yaml",
    "chars": 695,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interva"
  },
  {
    "path": "configs/finetune/Re500-finetune-05s4k1k.yaml",
    "chars": 702,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interva"
  },
  {
    "path": "configs/finetune/Re500-finetune-05s4k4-2layer.yaml",
    "chars": 699,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interva"
  },
  {
    "path": "configs/finetune/Re500-finetune-05s4k4k.yaml",
    "chars": 700,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interva"
  },
  {
    "path": "configs/finetune/Re500-finetune-1s.yaml",
    "chars": 652,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_T128_part2.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/instance/Re500-1_8-FNO.yaml",
    "chars": 930,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/instance/Re500-1_8-PINO-s.yaml",
    "chars": 895,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/instance/Re500-1_8-PINO.yaml",
    "chars": 940,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/ngc/Re500-1_8-dat0-PINO.yaml",
    "chars": 944,
    "preview": "data:\n  name: KF\n  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_res"
  },
  {
    "path": "configs/ngc/Re500-1_8-dat200-PINO.yaml",
    "chars": 949,
    "preview": "data:\n  name: KF\n  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_res"
  },
  {
    "path": "configs/ngc/Re500-1_8-dat40-PINO.yaml",
    "chars": 946,
    "preview": "data:\n  name: KF\n  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_res"
  },
  {
    "path": "configs/ngc/Re500-1_8-dat400-PINO.yaml",
    "chars": 949,
    "preview": "data:\n  name: KF\n  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_res"
  },
  {
    "path": "configs/ngc/Re500-1_8-dat80-PINO.yaml",
    "chars": 947,
    "preview": "data:\n  name: KF\n  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_res"
  },
  {
    "path": "configs/ngc/Re500-1_8-dat800-PINO.yaml",
    "chars": 954,
    "preview": "data:\n  name: KF\n  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_res"
  },
  {
    "path": "configs/ngc/Re500-1_8-res16-PINO.yaml",
    "chars": 948,
    "preview": "data:\n  name: KF\n  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_res"
  },
  {
    "path": "configs/ngc/Re500-1_8-res32-PINO.yaml",
    "chars": 948,
    "preview": "data:\n  name: KF\n  paths: ['/mount/data/NS-Re500_T300_256x256x500.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_res"
  },
  {
    "path": "configs/operator/Darcy-pretrain.yaml",
    "chars": 700,
    "preview": "data:\n  name: 'Darcy'\n  path: '/raid/hongkai/darcy-train.mat'\n  total_num: 1024\n  offset: 0\n  n_sample: 1000\n  nx: 421\n "
  },
  {
    "path": "configs/operator/Re500-05s-1000-FNO.yaml",
    "chars": 729,
    "preview": "data:\n  paths: ['../data/NS-Re500Part0.npy', '../data/NS-Re500Part1.npy', '../data/NS-Re500Part2.npy']\n  Re: 500\n  total"
  },
  {
    "path": "configs/operator/Re500-05s-1000-PINO.yaml",
    "chars": 727,
    "preview": "data:\n  paths: ['../data/NS-Re500Part0.npy', '../data/NS-Re500Part1.npy', '../data/NS-Re500Part2.npy']\n  Re: 500\n  total"
  },
  {
    "path": "configs/operator/Re500-05s-3000-FNO.yaml",
    "chars": 746,
    "preview": "data:\n  name: KF\n  paths: ['../data/NS-Re500_T3000_id0.npy']\n  Re: 500\n  total_num: 3000\n  offset: 0\n  n_samples: 300\n  "
  },
  {
    "path": "configs/operator/Re500-05s-600-FNO.yaml",
    "chars": 797,
    "preview": "data:\n  name: KF\n  paths: ['../data/NS-Re500_T3000_id0.npy']\n  Re: 500\n  total_num: 3000\n  offset: 0\n  n_samples: 300\n  "
  },
  {
    "path": "configs/operator/Re500-05s-600-PINO-xl.yaml",
    "chars": 820,
    "preview": "data:\n  name: KF\n  paths: ['../data/NS-Re500_T3000_id0.npy']\n  Re: 500\n  total_num: 3000\n  offset: 0\n  n_samples: 300\n  "
  },
  {
    "path": "configs/operator/Re500-05s-600-PINO.yaml",
    "chars": 802,
    "preview": "data:\n  name: KF\n  paths: ['../data/NS-Re500_T3000_id0.npy']\n  Re: 500\n  total_num: 3000\n  offset: 0\n  n_samples: 300\n  "
  },
  {
    "path": "configs/operator/Re500-05s-FNO.yaml",
    "chars": 689,
    "preview": "data:\n  paths: ['../data/NS-Re500Part0.npy', '../data/NS-Re500Part1.npy']\n  Re: 500\n  total_num: 200\n  offset: 0\n  n_sam"
  },
  {
    "path": "configs/operator/Re500-1_16-800-FNO-s.yaml",
    "chars": 921,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/operator/Re500-1_16-800-PINO-s.yaml",
    "chars": 952,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/operator/Re500-1_4-2000-FNO.yaml",
    "chars": 816,
    "preview": "data:\n  name: KF\n  paths: ['../data/NS-Re500_T3000_id0.npy']\n  Re: 500\n  total_num: 3000\n  offset: 0\n  n_samples: 600\n  "
  },
  {
    "path": "configs/operator/Re500-1_8-0-PINO-s.yaml",
    "chars": 971,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/operator/Re500-1_8-1200-FNO.yaml",
    "chars": 909,
    "preview": "data:\n  name: KF\n  paths: ['../data/NS-Re500_T300_id0.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_res: [256, 256,"
  },
  {
    "path": "configs/operator/Re500-1_8-1200-PINO.yaml",
    "chars": 938,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/operator/Re500-1_8-200-FNO-s.yaml",
    "chars": 943,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/operator/Re500-1_8-2000-FNO-s.yaml",
    "chars": 924,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/operator/Re500-1_8-2000-FNO-xl.yaml",
    "chars": 833,
    "preview": "data:\n  name: KF\n  paths: ['../data/NS-Re500_T3000_id0.npy']\n  Re: 500\n  total_num: 3000\n  offset: 0\n  n_samples: 350\n  "
  },
  {
    "path": "configs/operator/Re500-1_8-2000-PINO.yaml",
    "chars": 927,
    "preview": "data:\n  name: KF\n  paths: ['../data/NS-Re500_T300_id0.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_res: [256, 256,"
  },
  {
    "path": "configs/operator/Re500-1_8-2200-FNO-s.yaml",
    "chars": 943,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/operator/Re500-1_8-2200-PINO-s.yaml",
    "chars": 979,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/operator/Re500-1_8-800-FNO-s.yaml",
    "chars": 937,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/operator/Re500-1_8-800-FNO-s32.yaml",
    "chars": 924,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/operator/Re500-1_8-800-PINO-s.yaml",
    "chars": 936,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/operator/Re500-1_8-800-PINO-s16.yaml",
    "chars": 941,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/operator/Re500-1_8-800-PINO-s32.yaml",
    "chars": 940,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/operator/Re500-1_8-800-UNet.yaml",
    "chars": 745,
    "preview": "data:\n  name: KF\n  paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_r"
  },
  {
    "path": "configs/operator/Re500-1_8-dat1.6k-PINO.yaml",
    "chars": 938,
    "preview": "data:\n  name: KF\n  paths: ['../data/NS-Re500_T300_id0.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_res: [256, 256,"
  },
  {
    "path": "configs/operator/Re500-1_8-dat400-FNO.yaml",
    "chars": 931,
    "preview": "data:\n  name: KF\n  paths: ['../data/NS-Re500_T300_id0.npy']\n  Re: 500\n  offset: 0\n  total_num: 300\n  raw_res: [256, 256,"
  },
  {
    "path": "configs/operator/Re500-1s-FNO.yaml",
    "chars": 660,
    "preview": "data:\n  paths: ['../data/NS-Re500Part0.npy', '../data/NS-Re500Part1.npy', '../data/NS-Re500Part2.npy']\n  Re: 500\n  total"
  },
  {
    "path": "configs/operator/Re500-3000-FNO.yaml",
    "chars": 673,
    "preview": "data:\n  name: KF\n  paths: ['../data/NS-Re500_T3000_id0.npy']\n  Re: 500\n  total_num: 3000\n  offset: 0\n  n_samples: 300\n  "
  },
  {
    "path": "configs/operator/Re500-3000-PINO.yaml",
    "chars": 664,
    "preview": "data:\n  name: KF\n  paths: ['../data/NS-Re500_T3000_id0.npy']\n  Re: 500\n  total_num: 3000\n  offset: 0\n  n_samples: 2400\n "
  },
  {
    "path": "configs/operator/Re500-4000-FNO.yaml",
    "chars": 585,
    "preview": "data:\n  paths: ['../data/NS-T4000.npy']\n  Re: 500\n  total_num: 4000\n  offset: 0\n  n_samples: 3200\n  t_duration: 1.0\n  da"
  },
  {
    "path": "configs/operator/Re500-FNO.yaml",
    "chars": 710,
    "preview": "data:\n  paths: ['../data/NS-Re500Part0.npy', '../data/NS-Re500Part1.npy']\n  Re: 500\n  total_num: 200\n  offset: 0\n  n_sam"
  },
  {
    "path": "configs/operator/Re500-PINO.yaml",
    "chars": 691,
    "preview": "data:\n  paths: ['../data/NS-Re500Part0.npy', '../data/NS-Re500Part1.npy']\n  Re: 500\n  total_num: 200\n  offset: 0\n  n_sam"
  },
  {
    "path": "configs/pretrain/Darcy-pretrain-deeponet.yaml",
    "chars": 552,
    "preview": "data:\n  name: 'Darcy'\n  datapath: '/mnt/md1/zongyi/piececonst_r421_N1024_smooth1.mat'\n  total_num: 1024\n  offset: 0\n  n_"
  },
  {
    "path": "configs/pretrain/Darcy-pretrain.yaml",
    "chars": 568,
    "preview": "data:\n  name: 'Darcy'\n  datapath: '/mnt/md1/zongyi/piececonst_r421_N1024_smooth1.mat'\n  total_num: 1024\n  offset: 0\n  n_"
  },
  {
    "path": "configs/pretrain/Re100-pretrain-1s.yaml",
    "chars": 635,
    "preview": "data:\n  datapath: 'data/NS_fine_Re100_T128_part0.npy'\n  datapath2: 'data/NS_fine_Re100_T128_part1.npy'\n  Re: 100\n  total"
  },
  {
    "path": "configs/pretrain/Re200-pretrain-1s.yaml",
    "chars": 635,
    "preview": "data:\n  datapath: 'data/NS_fine_Re200_T128_part0.npy'\n  datapath2: 'data/NS_fine_Re200_T128_part1.npy'\n  Re: 200\n  total"
  },
  {
    "path": "configs/pretrain/Re250-pretrain-1s.yaml",
    "chars": 635,
    "preview": "data:\n  datapath: 'data/NS_fine_Re250_T128_part0.npy'\n  datapath2: 'data/NS_fine_Re250_T128_part1.npy'\n  Re: 250\n  total"
  },
  {
    "path": "configs/pretrain/Re300-pretrain-1s.yaml",
    "chars": 635,
    "preview": "data:\n  datapath: 'data/NS_fine_Re300_T128_part0.npy'\n  datapath2: 'data/NS_fine_Re300_T128_part1.npy'\n  Re: 300\n  total"
  },
  {
    "path": "configs/pretrain/Re350-pretrain-1s.yaml",
    "chars": 635,
    "preview": "data:\n  datapath: 'data/NS_fine_Re350_T128_part0.npy'\n  datapath2: 'data/NS_fine_Re350_T128_part1.npy'\n  Re: 350\n  total"
  },
  {
    "path": "configs/pretrain/Re400-pretrain-1s.yaml",
    "chars": 635,
    "preview": "data:\n  datapath: 'data/NS_fine_Re400_T128_part0.npy'\n  datapath2: 'data/NS_fine_Re400_T128_part1.npy'\n  Re: 400\n  total"
  },
  {
    "path": "configs/pretrain/Re500-05s-deeponet.yaml",
    "chars": 517,
    "preview": "data:\n  datapath: '/mnt/md1/zongyi/NS_fft_Re500_T4000.npy'\n  Re: 500\n  total_num: 4000\n  offset: 0\n  n_sample: 400\n  tim"
  },
  {
    "path": "configs/pretrain/Re500-FNO-1s-100.yaml",
    "chars": 673,
    "preview": "data:\n  datapath: '../data/NS-T4000.npy'\n  Re: 500\n  total_num: 4000\n  offset: 0\n  n_sample: 100\n  time_interval: 1\n  nx"
  },
  {
    "path": "configs/pretrain/Re500-FNO-1s-200.yaml",
    "chars": 673,
    "preview": "data:\n  datapath: '../data/NS-T4000.npy'\n  Re: 500\n  total_num: 4000\n  offset: 0\n  n_sample: 200\n  time_interval: 1\n  nx"
  },
  {
    "path": "configs/pretrain/Re500-FNO-1s-400.yaml",
    "chars": 673,
    "preview": "data:\n  datapath: '../data/NS-T4000.npy'\n  Re: 500\n  total_num: 4000\n  offset: 0\n  n_sample: 400\n  time_interval: 1\n  nx"
  },
  {
    "path": "configs/pretrain/Re500-PINO-1s-100-4v4.yaml",
    "chars": 684,
    "preview": "data:\n  datapath: '../data/NS-T4000.npy'\n  Re: 500\n  total_num: 4000\n  offset: 0\n  n_sample: 100\n  time_interval: 1\n  nx"
  },
  {
    "path": "configs/pretrain/Re500-PINO-1s-200-4v4.yaml",
    "chars": 693,
    "preview": "data:\n  datapath: '../data/NS-T4000.npy'\n  Re: 500\n  total_num: 4000\n  offset: 0\n  n_sample: 200\n  time_interval: 1\n  nx"
  },
  {
    "path": "configs/pretrain/Re500-PINO-1s-400-1v1.yaml",
    "chars": 684,
    "preview": "data:\n  datapath: '../data/NS-T4000.npy'\n  Re: 500\n  total_num: 4000\n  offset: 0\n  n_sample: 400\n  time_interval: 1\n  nx"
  },
  {
    "path": "configs/pretrain/Re500-pretrain-05s-4C1.yaml",
    "chars": 768,
    "preview": "data:\n  datapath: '/mnt/md1/zongyi/NS_fft_Re500_T4000.npy'\n  Re: 500\n  total_num: 4000\n  offset: 0\n  n_sample: 400\n  tim"
  },
  {
    "path": "configs/pretrain/Re500-pretrain-05s-4C4.yaml",
    "chars": 669,
    "preview": "data:\n  datapath: '/mnt/md1/zongyi/NS_fft_Re500_T4000.npy'\n  Re: 500\n  total_num: 4000\n  offset: 0\n  n_sample: 400\n  tim"
  },
  {
    "path": "configs/pretrain/Re500-pretrain-05s-eqn.yaml",
    "chars": 670,
    "preview": "data:\n  datapath: '/mnt/md1/zongyi/NS_fft_Re500_T4000.npy'\n  Re: 500\n  total_num: 4000\n  offset: 0\n  n_sample: 4000\n  ti"
  },
  {
    "path": "configs/pretrain/Re500-pretrain-1s.yaml",
    "chars": 674,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_T128_part0.npy'\n  datapath2: 'data/NS_fine_Re500_T128_part1.npy'\n  Re: 500\n  total"
  },
  {
    "path": "configs/pretrain/burgers-pretrain.yaml",
    "chars": 569,
    "preview": "data:\n  name: Burgers\n  datapath: '../data/burgers.mat'\n  total_num: 1000\n  offset: 0\n  n_sample: 800\n  nx: 128\n  nt: 10"
  },
  {
    "path": "configs/scratch/Re100-scratch-1s.yaml",
    "chars": 617,
    "preview": "data:\n  datapath: 'data/NS_fine_Re100_T128_part2.npy'\n  Re: 100\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/scratch/Re200-scratch-1s.yaml",
    "chars": 617,
    "preview": "data:\n  datapath: 'data/NS_fine_Re200_T128_part2.npy'\n  Re: 200\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/scratch/Re250-scratch-1s.yaml",
    "chars": 617,
    "preview": "data:\n  datapath: 'data/NS_fine_Re250_T128_part2.npy'\n  Re: 250\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/scratch/Re300-scratch-1s.yaml",
    "chars": 617,
    "preview": "data:\n  datapath: 'data/NS_fine_Re300_T128_part2.npy'\n  Re: 300\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/scratch/Re350-scratch-1s.yaml",
    "chars": 617,
    "preview": "data:\n  datapath: 'data/NS_fine_Re350_T128_part0.npy'\n  Re: 350\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/scratch/Re400-scratch-1s.yaml",
    "chars": 617,
    "preview": "data:\n  datapath: 'data/NS_fine_Re400_T128_part0.npy'\n  Re: 400\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/scratch/Re500-scratch-05s-new.yaml",
    "chars": 624,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_T128_part2.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/scratch/Re500-scratch-05s.yaml",
    "chars": 649,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 20\n  time_interv"
  },
  {
    "path": "configs/scratch/Re500-scratch-1s-progressive.yaml",
    "chars": 606,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_T128_part2.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/scratch/Re500-scratch-1s.yaml",
    "chars": 627,
    "preview": "data:\n  datapath: '../data/NS-Re500Part1.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interval: 1\n  "
  },
  {
    "path": "configs/test/Re500-05s-deeponet.yaml",
    "chars": 400,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_T128_part2.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 300\n  time_inte"
  },
  {
    "path": "configs/test/Re500-05s-test.yaml",
    "chars": 440,
    "preview": "data:\n  datapath: 'data/NS_Re500_s256_T100_test.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 20\n  time_interv"
  },
  {
    "path": "configs/test/Re500-05s.yaml",
    "chars": 441,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_T128_part2.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 300\n  time_inte"
  },
  {
    "path": "configs/test/Re500-1s-100.yaml",
    "chars": 444,
    "preview": "data:\n  datapath: '../data/NS-T4000.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 100\n  time_interval: 1\n  nx:"
  },
  {
    "path": "configs/test/burgers.yaml",
    "chars": 399,
    "preview": "data:\n  name: 'Darcy'\n  datapath: '../data/burgers.mat'\n  total_num: 1000\n  offset: 800\n  n_sample: 200\n  nx: 128\n  nt: "
  },
  {
    "path": "configs/test/darcy-deeponet.yaml",
    "chars": 463,
    "preview": "data:\n  name: 'Darcy'\n  datapath: '/mnt/md1/zongyi/piececonst_r421_N1024_smooth2.mat'\n  total_num: 1000\n  offset: 0\n  n_"
  },
  {
    "path": "configs/test/darcy.yaml",
    "chars": 414,
    "preview": "data:\n  name: 'Darcy'\n  datapath: '/mnt/md1/zongyi/piececonst_r421_N1024_smooth2.mat'\n  total_num: 1000\n  offset: 0\n  n_"
  },
  {
    "path": "configs/transfer/Re100to100-1s.yaml",
    "chars": 674,
    "preview": "data:\n  datapath: 'data/NS_fine_Re100_T128_part2.npy'\n  Re: 100\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re100to200-1s.yaml",
    "chars": 674,
    "preview": "data:\n  datapath: 'data/NS_fine_Re200_T128_part2.npy'\n  Re: 200\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re100to250-1s.yaml",
    "chars": 674,
    "preview": "data:\n  datapath: 'data/NS_fine_Re250_T128_part2.npy'\n  Re: 250\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re100to300-1s.yaml",
    "chars": 674,
    "preview": "data:\n  datapath: 'data/NS_fine_Re300_T128_part2.npy'\n  Re: 300\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re100to350-1s.yaml",
    "chars": 674,
    "preview": "data:\n  datapath: 'data/NS_fine_Re350_T128_part2.npy'\n  Re: 350\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re100to400-1s.yaml",
    "chars": 674,
    "preview": "data:\n  datapath: 'data/NS_fine_Re400_T128_part2.npy'\n  Re: 400\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re100to500-1s.yaml",
    "chars": 663,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_T128_part2.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re200to100-1s.yaml",
    "chars": 674,
    "preview": "data:\n  datapath: 'data/NS_fine_Re100_T128_part2.npy'\n  Re: 100\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re200to200-1s.yaml",
    "chars": 674,
    "preview": "data:\n  datapath: 'data/NS_fine_Re200_T128_part2.npy'\n  Re: 200\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re200to250-1s.yaml",
    "chars": 674,
    "preview": "data:\n  datapath: 'data/NS_fine_Re250_T128_part2.npy'\n  Re: 250\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re200to300-1s.yaml",
    "chars": 674,
    "preview": "data:\n  datapath: 'data/NS_fine_Re300_T128_part2.npy'\n  Re: 300\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re200to350-1s.yaml",
    "chars": 674,
    "preview": "data:\n  datapath: 'data/NS_fine_Re350_T128_part2.npy'\n  Re: 350\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re200to400-1s.yaml",
    "chars": 674,
    "preview": "data:\n  datapath: 'data/NS_fine_Re400_T128_part2.npy'\n  Re: 400\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re200to500-1s.yaml",
    "chars": 663,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_T128_part2.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re250to100-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re100_T128_part2.npy'\n  Re: 100\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re250to200-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re200_T128_part2.npy'\n  Re: 200\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re250to250-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re250_T128_part2.npy'\n  Re: 250\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re250to300-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re300_T128_part2.npy'\n  Re: 300\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re250to350-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re350_T128_part2.npy'\n  Re: 350\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re250to400-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re400_T128_part2.npy'\n  Re: 400\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re250to500-1s.yaml",
    "chars": 652,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_T128_part2.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re300to100-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re100_T128_part2.npy'\n  Re: 100\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re300to200-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re200_T128_part2.npy'\n  Re: 200\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re300to250-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re250_T128_part2.npy'\n  Re: 250\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re300to300-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re300_T128_part2.npy'\n  Re: 300\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re300to350-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re350_T128_part2.npy'\n  Re: 350\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re300to400-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re400_T128_part2.npy'\n  Re: 400\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re300to500-1s.yaml",
    "chars": 663,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_T128_part2.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re350to100-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re100_T128_part2.npy'\n  Re: 100\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re350to200-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re200_T128_part2.npy'\n  Re: 200\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re350to250-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re250_T128_part2.npy'\n  Re: 250\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re350to300-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re300_T128_part2.npy'\n  Re: 300\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re350to350-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re350_T128_part2.npy'\n  Re: 350\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re350to400-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re400_T128_part2.npy'\n  Re: 400\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re350to500-1s.yaml",
    "chars": 663,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_T128_part2.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re400to100-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re100_T128_part2.npy'\n  Re: 100\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re400to200-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re200_T128_part2.npy'\n  Re: 200\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re400to250-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re250_T128_part2.npy'\n  Re: 250\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re400to300-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re300_T128_part2.npy'\n  Re: 300\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re400to350-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re350_T128_part2.npy'\n  Re: 350\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re400to400-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re400_T128_part2.npy'\n  Re: 400\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re400to500-1s.yaml",
    "chars": 663,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_T128_part2.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re500to100-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re100_T128_part2.npy'\n  Re: 100\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re500to200-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re200_T128_part2.npy'\n  Re: 200\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re500to250-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re250_T128_part2.npy'\n  Re: 250\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re500to300-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re300_T128_part2.npy'\n  Re: 300\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re500to350-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re350_T128_part2.npy'\n  Re: 350\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re500to400-1s.yaml",
    "chars": 665,
    "preview": "data:\n  datapath: 'data/NS_fine_Re400_T128_part2.npy'\n  Re: 400\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re500to500-05s-new.yaml",
    "chars": 667,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_s2048_T100.npy'\n  Re: 500\n  total_num: 100\n  offset: 300\n  n_sample: 1\n  time_inte"
  },
  {
    "path": "configs/transfer/Re500to500-05s.yaml",
    "chars": 651,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_T128_part2.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "configs/transfer/Re500to500-1s.yaml",
    "chars": 658,
    "preview": "data:\n  datapath: 'data/NS_fine_Re500_T128_part2.npy'\n  Re: 500\n  total_num: 100\n  offset: 0\n  n_sample: 1\n  time_interv"
  },
  {
    "path": "deeponet.py",
    "chars": 1145,
    "preview": "import yaml\nfrom argparse import ArgumentParser\nfrom baselines.train_ns import train_deeponet_cp\nfrom baselines.test imp"
  },
  {
    "path": "download_data.py",
    "chars": 2165,
    "preview": "import os\nfrom argparse import ArgumentParser\nimport requests\nfrom tqdm import tqdm\n\n\n_url_dict = {\n    'NS-T4000': 'htt"
  },
  {
    "path": "eval_operator.py",
    "chars": 3228,
    "preview": "import yaml\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom models import FNO3d, FNO2d\nfrom train_utils impor"
  },
  {
    "path": "generate_data.py",
    "chars": 3654,
    "preview": "import math\nimport numpy as np\nimport os\nfrom tqdm import tqdm\n\nimport torch\nfrom solver.random_fields import GaussianRF"
  },
  {
    "path": "inference.py",
    "chars": 2875,
    "preview": "'''\nThis code generates the prediction on one instance. \nBoth the ground truth and the prediction are saved in a .pt fil"
  },
  {
    "path": "instance_opt.py",
    "chars": 6366,
    "preview": "import os\nimport yaml\nimport random\nfrom argparse import ArgumentParser\nimport math\nfrom tqdm import tqdm\n\nimport torch\n"
  },
  {
    "path": "inverse-darcy-foward.py",
    "chars": 15560,
    "preview": "\n\nfrom timeit import default_timer\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as n"
  },
  {
    "path": "inverse-darcy.py",
    "chars": 14472,
    "preview": "\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport scipy.io\nimport matplotlib"
  },
  {
    "path": "models/FCN.py",
    "chars": 1732,
    "preview": "import torch.nn as nn\n\n\ndef linear_block(in_channel, out_channel):\n    block = nn.Sequential(\n        nn.Linear(in_chann"
  },
  {
    "path": "models/__init__.py",
    "chars": 110,
    "preview": "from .FCN import FCNet\nfrom .fourier1d import FNO1d\nfrom .fourier2d import FNO2d\nfrom .fourier3d import FNO3d\n"
  },
  {
    "path": "models/basics.py",
    "chars": 7701,
    "preview": "import numpy as np\n\nimport torch\nimport torch.nn as nn\n\n\n@torch.jit.script\ndef compl_mul1d(a: torch.Tensor, b: torch.Ten"
  },
  {
    "path": "models/core.py",
    "chars": 17929,
    "preview": "import torch\nimport torch.nn as nn\nimport tltorch\n\n\n@torch.jit.script\ndef contract_1D(a: torch.Tensor, b: torch.Tensor) "
  },
  {
    "path": "models/fourier1d.py",
    "chars": 2026,
    "preview": "import torch.nn as nn\nfrom .basics import SpectralConv1d\nfrom .utils import _get_act\n\n\nclass FNO1d(nn.Module):\n    def _"
  },
  {
    "path": "models/fourier2d.py",
    "chars": 3422,
    "preview": "import torch.nn as nn\nfrom .basics import SpectralConv2d\nfrom .utils import _get_act, add_padding2, remove_padding2\n\n\ncl"
  },
  {
    "path": "models/fourier3d.py",
    "chars": 3197,
    "preview": "import torch.nn as nn\nfrom .basics import SpectralConv3d\nfrom .utils import add_padding, remove_padding, _get_act\n\n\nclas"
  },
  {
    "path": "models/lowrank2d.py",
    "chars": 1668,
    "preview": "from .FCN import DenseNet\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass LowRank2d(nn.Module):\n    def _"
  },
  {
    "path": "models/tfno.py",
    "chars": 10422,
    "preview": "import torch.nn as nn\nimport torch.nn.functional as F\nfrom .core import FactorizedSpectralConv2d, JointFactorizedSpectra"
  },
  {
    "path": "models/utils.py",
    "chars": 1110,
    "preview": "import torch.nn.functional as F\n\n\ndef add_padding(x, num_pad):\n    if max(num_pad) > 0:\n        res = F.pad(x, (num_pad["
  },
  {
    "path": "pinns.py",
    "chars": 1264,
    "preview": "from argparse import ArgumentParser\nimport yaml\n\nfrom baselines.pinns_ns_05s import train\nfrom baselines.pinns_ns_50s im"
  },
  {
    "path": "prepare_data.py",
    "chars": 991,
    "preview": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef shuffle_data(datapath):\n    data = np.load(datapath)\n    rng = "
  },
  {
    "path": "profile-solver-legacy.py",
    "chars": 1607,
    "preview": "import math\n\nimport torch\nfrom solver.legacy_solver import navier_stokes_2d, GaussianRF\n\nimport scipy.io\nfrom timeit imp"
  }
]

// ... and 52 more files (download for full content)

About this extraction

This page contains the source code of the neuraloperator/physics_informed GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs); this page shows a truncated preview, and the full content is available via the download link. The extraction includes 252 files (505.8 KB), approximately 169.5k tokens, and a symbol index with 412 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!