Repository: RenYurui/PIRender Branch: main Commit: 9e59f194f1a0 Files: 56 Total size: 241.6 KB Directory structure: gitextract_uz8rj1lb/ ├── .gitmodules ├── DatasetHelper.md ├── LICENSE.md ├── README.md ├── config/ │ ├── face.yaml │ └── face_demo.yaml ├── config.py ├── data/ │ ├── __init__.py │ ├── image_dataset.py │ ├── vox_dataset.py │ └── vox_video_dataset.py ├── demo_images/ │ └── expression.mat ├── generators/ │ ├── base_function.py │ └── face_model.py ├── inference.py ├── intuitive_control.py ├── loss/ │ └── perceptual.py ├── requirements.txt ├── scripts/ │ ├── coeff_detector.py │ ├── download_demo_dataset.sh │ ├── download_weights.sh │ ├── extract_kp_videos.py │ ├── face_recon_images.py │ ├── face_recon_videos.py │ ├── inference_options.py │ └── prepare_vox_lmdb.py ├── third_part/ │ └── PerceptualSimilarity/ │ ├── models/ │ │ ├── __init__.py │ │ ├── base_model.py │ │ ├── dist_model.py │ │ ├── models.py │ │ ├── networks_basic.py │ │ └── pretrained_networks.py │ ├── util/ │ │ ├── __init__.py │ │ ├── html.py │ │ ├── util.py │ │ └── visualizer.py │ └── weights/ │ ├── v0.0/ │ │ ├── alex.pth │ │ ├── squeeze.pth │ │ └── vgg.pth │ └── v0.1/ │ ├── alex.pth │ ├── squeeze.pth │ └── vgg.pth ├── train.py ├── trainers/ │ ├── __init__.py │ ├── base.py │ └── face_trainer.py └── util/ ├── cudnn.py ├── distributed.py ├── flow_util.py ├── init_weight.py ├── io.py ├── logging.py ├── lpips.py ├── meters.py ├── misc.py └── trainer.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitmodules ================================================ [submodule "Deep3DFaceRecon_pytorch"] path = Deep3DFaceRecon_pytorch url = https://github.com/sicxu/Deep3DFaceRecon_pytorch ================================================ FILE: DatasetHelper.md ================================================ ### Extract 3DMM Coefficients for Videos We provide scripts for extracting 3DMM coefficients from videos using [DeepFaceRecon_pytorch](https://github.com/sicxu/Deep3DFaceRecon_pytorch/tree/73d491102af6731bded9ae6b3cc7466c3b2e9e48). 1. Follow the instructions in their repo to build the DeepFaceRecon environment. 2. Copy the provided scripts to the folder `Deep3DFaceRecon_pytorch`. ```bash cp scripts/face_recon_videos.py ./Deep3DFaceRecon_pytorch cp scripts/extract_kp_videos.py ./Deep3DFaceRecon_pytorch cp scripts/coeff_detector.py ./Deep3DFaceRecon_pytorch cp scripts/inference_options.py ./Deep3DFaceRecon_pytorch/options cd Deep3DFaceRecon_pytorch ``` 3. Extract facial landmarks from videos. ```bash python extract_kp_videos.py \ --input_dir path_to_videos \ --output_dir path_to_keypoint \ --device_ids 0,1,2,3 \ --workers 12 ``` 4. Extract coefficients for videos. ```bash python face_recon_videos.py \ --input_dir path_to_videos \ --keypoint_dir path_to_keypoint \ --output_dir output_dir \ --inference_batch_size 100 \ --name=model_name \ --epoch=20 \ --model facerecon ``` ================================================ FILE: LICENSE.md ================================================ ## creative commons # Attribution-NonCommercial 4.0 International Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis.
Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. ### Using Creative Commons Public Licenses Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. * __Considerations for licensors:__ Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. [More considerations for licensors](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensors). * __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees). ## Creative Commons Attribution-NonCommercial 4.0 International Public License By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. ### Section 1 – Definitions. a. __Adapted Material__ means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. 
For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. b. __Adapter's License__ means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. c. __Copyright and Similar Rights__ means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. d. __Effective Technological Measures__ means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. e. __Exceptions and Limitations__ means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. f. __Licensed Material__ means the artistic or literary work, database, or other material to which the Licensor applied this Public License. g. __Licensed Rights__ means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. h. __Licensor__ means the individual(s) or entity(ies) granting rights under this Public License. i. __NonCommercial__ means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange. j. __Share__ means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. k. __Sui Generis Database Rights__ means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. l. __You__ means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. ### Section 2 – Scope. a. ___License grant.___ 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: A. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and B. produce, reproduce, and Share Adapted Material for NonCommercial purposes only. 2. 
__Exceptions and Limitations.__ For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 3. __Term.__ The term of this Public License is specified in Section 6(a). 4. __Media and formats; technical modifications allowed.__ The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material. 5. __Downstream recipients.__ A. __Offer from the Licensor – Licensed Material.__ Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. B. __No downstream restrictions.__ You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 6. __No endorsement.__ Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). b. ___Other rights.___ 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 2. Patent and trademark rights are not licensed under this Public License. 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes. ### Section 3 – License Conditions. Your exercise of the Licensed Rights is expressly made subject to the following conditions. a. ___Attribution.___ 1. If You Share the Licensed Material (including in modified form), You must: A. retain the following if it is supplied by the Licensor with the Licensed Material: i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); ii. a copyright notice; iii. a notice that refers to this Public License; iv. a notice that refers to the disclaimer of warranties; v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and C. 
indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. 4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License. ### Section 4 – Sui Generis Database Rights. Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only; b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. ### Section 5 – Disclaimer of Warranties and Limitation of Liability. a. __Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.__ b. __To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.__ c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. ### Section 6 – Term and Termination. a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 1. 
automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 2. upon express reinstatement by the Licensor. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. ### Section 7 – Other Terms and Conditions. a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. ### Section 8 – Interpretation. a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. > Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. > > Creative Commons may be contacted at creativecommons.org ================================================ FILE: README.md ================================================
Website | ArXiv | Get Started | Video
# PIRenderer The source code of the ICCV 2021 paper "[PIRenderer: Controllable Portrait Image Generation via Semantic Neural Rendering](https://arxiv.org/abs/2109.08379)". The proposed **PIRenderer** can synthesize portrait images by intuitively controlling face motions with fully disentangled 3DMM parameters; a minimal sketch of these control coefficients is given after the task list below. This model can be applied to tasks such as: * **Intuitive Portrait Image Editing**
Intuitive Portrait Image Control
Pose & Expression Alignment
* **Motion Imitation**
Same & Cross-identity Reenactment
* **Audio-Driven Facial Reenactment**
Audio-Driven Reenactment
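The control signal referenced above is a compact 3DMM coefficient vector. As a minimal, illustrative sketch (not part of the original repository), the snippet below assembles the 73-dimensional driving vector (64 expression + 3 Euler angles + 3 translation + 3 crop parameters, matching `coeff_nc: 73` in `config/face.yaml`) from Deep3DFaceRecon outputs, following the slicing used in `data/vox_dataset.py`; the `coeffs` array, the function name, and the default radius are assumptions made for illustration.

```python
# Hedged sketch: build the 73-channel driving input expected by PIRenderer's
# MappingNet. Slicing follows VoxDataset.transform_semantic; `coeffs` is an
# assumed (num_frames, >=260) array of per-frame Deep3DFaceRecon coefficients
# concatenated with crop parameters.
import numpy as np
import torch

def make_driving_vector(coeffs: np.ndarray, frame_index: int, radius: int = 13) -> torch.Tensor:
    # Clamp a temporal window of 2*radius+1 frames around frame_index,
    # as VoxDataset.obtain_seq_index does.
    idx = [min(max(i, 0), coeffs.shape[0] - 1)
           for i in range(frame_index - radius, frame_index + radius + 1)]
    window = coeffs[idx]
    expression = window[:, 80:144]    # 64 expression coefficients
    angles = window[:, 224:227]       # 3 Euler angles (head pose)
    translation = window[:, 254:257]  # 3 translation parameters
    crop = window[:, 257:260]         # 3 crop parameters
    semantics = np.concatenate([expression, angles, translation, crop], axis=1)
    # Return shape (73, 2*radius+1): channels x temporal window, the layout
    # consumed by the 1-D convolutions in MappingNet.
    return torch.from_numpy(semantics).float().permute(1, 0)
```

With `semantic_radius: 13` from the config this yields a 27-frame temporal window, which is the input that `VoxDataset.transform_semantic` produces during training and inference.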
## News * 2021.9.20 Code for PyTorch is available! ## Colab Demo Coming soon. ## Get Started ### 1). Installation #### Requirements * Python 3 * PyTorch 1.7.1 * CUDA 10.2 #### Conda Installation ```bash # 1. Create a conda virtual environment. conda create -n PIRenderer python=3.6 conda activate PIRenderer conda install -c pytorch pytorch=1.7.1 torchvision cudatoolkit=10.2 # 2. Install other dependencies pip install -r requirements.txt ``` ### 2). Dataset We train our model on the [VoxCeleb](https://arxiv.org/abs/1706.08612) dataset. You can download the demo dataset for inference or prepare the dataset for training and testing. #### Download the demo dataset The demo dataset contains all 514 test videos. You can download the dataset with the following code: ```bash ./scripts/download_demo_dataset.sh ``` Alternatively, you can download the resources from these links: [Google Drive](https://drive.google.com/drive/folders/16Yn2r46b4cV6ZozOH6a8SdFz_iG7BQk1?usp=sharing) & [Baidu Drive](https://pan.baidu.com/s/1e615bBHvM4Wz-2snk-86Xw) with the extraction password "p9ab". Then unzip and save the files to `./dataset`. #### Prepare the dataset 1. The dataset is preprocessed following the method used in [First-Order](https://github.com/AliaksandrSiarohin/video-preprocessing). You can follow the instructions in their repo to download and crop videos for training and testing. 2. After obtaining the VoxCeleb videos, we extract 3DMM parameters using [Deep3DFaceReconstruction](https://github.com/microsoft/Deep3DFaceReconstruction). The folders are organized as follows: ``` ${DATASET_ROOT_FOLDER} └───path_to_videos └───train └───xxx.mp4 └───xxx.mp4 ... └───test └───xxx.mp4 └───xxx.mp4 ... └───path_to_3dmm_coeff └───train └───xxx.mat └───xxx.mat ... └───test └───xxx.mat └───xxx.mat ... ``` **News**: We provide scripts for extracting 3DMM coefficients from videos. Please check the [DatasetHelper](./DatasetHelper.md) for more details. 3. We save the videos and 3DMM parameters in an lmdb file. Please run the following code to do this: ```bash python scripts/prepare_vox_lmdb.py \ --path path_to_videos \ --coeff_3dmm_path path_to_3dmm_coeff \ --out path_to_output_dir ``` ### 3). Training and Inference #### Inference The trained weights can be downloaded by running the following code: ```bash ./scripts/download_weights.sh ``` Alternatively, you can download the resources from these links: [Google Drive](https://drive.google.com/file/d/1-0xOf6g58OmtKtEWJlU3VlnfRqPN9Uq7/view?usp=sharing) & [Baidu Drive](https://pan.baidu.com/s/18B3xfKMXnm4tOqlFSB8ntg) with the extraction password "4sy1". Then unzip and save the files to `./result/face`. **Reenactment** Run the demo for face reenactment: ```bash # same identity python -m torch.distributed.launch --nproc_per_node=1 --master_port 12345 inference.py \ --config ./config/face_demo.yaml \ --name face \ --no_resume \ --output_dir ./vox_result/face_reenactment # cross identity python -m torch.distributed.launch --nproc_per_node=1 --master_port 12345 inference.py \ --config ./config/face_demo.yaml \ --name face \ --no_resume \ --output_dir ./vox_result/face_reenactment_cross \ --cross_id ``` The output results are saved at `./vox_result/face_reenactment` and `./vox_result/face_reenactment_cross`. **Intuitive Control** Our model can generate results from intuitive control coefficients. We provide the following code for this task.
Please note that you need to build the environment of [DeepFaceRecon](https://github.com/sicxu/Deep3DFaceRecon_pytorch/tree/73d491102af6731bded9ae6b3cc7466c3b2e9e48) first. ```bash # 1. Copy the provided scripts to the folder `Deep3DFaceRecon_pytorch`. cp scripts/face_recon_videos.py ./Deep3DFaceRecon_pytorch cp scripts/extract_kp_videos.py ./Deep3DFaceRecon_pytorch cp scripts/coeff_detector.py ./Deep3DFaceRecon_pytorch cp scripts/inference_options.py ./Deep3DFaceRecon_pytorch/options cd Deep3DFaceRecon_pytorch # 2. Extract the 3DMM coefficients of the demo images. python coeff_detector.py \ --input_dir ../demo_images \ --keypoint_dir ../demo_images \ --output_dir ../demo_images \ --name=model_name \ --epoch=20 \ --model facerecon # 3. Control the source image with our model. cd .. python -m torch.distributed.launch --nproc_per_node=1 --master_port 12345 intuitive_control.py \ --config ./config/face_demo.yaml \ --name face \ --no_resume \ --output_dir ./vox_result/face_intuitive \ --input_name ./demo_images ``` #### Train Our model can be trained with the following code: ```bash python -m torch.distributed.launch --nproc_per_node=4 --master_port 12345 train.py \ --config ./config/face.yaml \ --name face ``` ## Citation If you find this code helpful, please cite our paper: ```tex @misc{ren2021pirenderer, title={PIRenderer: Controllable Portrait Image Generation via Semantic Neural Rendering}, author={Yurui Ren and Ge Li and Yuanqi Chen and Thomas H. Li and Shan Liu}, year={2021}, eprint={2109.08379}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` ## Acknowledgement We build our project based on [imaginaire](https://github.com/NVlabs/imaginaire). Some dataset preprocessing methods are derived from [video-preprocessing](https://github.com/AliaksandrSiarohin/video-preprocessing). ================================================ FILE: config/face.yaml ================================================ # How often do you want to log the training stats. # network_list: # gen: gen_optimizer # dis: dis_optimizer distributed: True image_to_tensorboard: True snapshot_save_iter: 40000 snapshot_save_epoch: 20 snapshot_save_start_iter: 20000 snapshot_save_start_epoch: 10 image_save_iter: 1000 max_epoch: 200 logging_iter: 100 results_dir: ./eval_results gen_optimizer: type: adam lr: 0.0001 adam_beta1: 0.5 adam_beta2: 0.999 lr_policy: iteration_mode: True type: step step_size: 300000 gamma: 0.2 trainer: type: trainers.face_trainer::FaceTrainer pretrain_warp_iteration: 200000 loss_weight: weight_perceptual_warp: 2.5 weight_perceptual_final: 4 vgg_param_warp: network: vgg19 layers: ['relu_1_1', 'relu_2_1', 'relu_3_1', 'relu_4_1', 'relu_5_1'] use_style_loss: False num_scales: 4 vgg_param_final: network: vgg19 layers: ['relu_1_1', 'relu_2_1', 'relu_3_1', 'relu_4_1', 'relu_5_1'] use_style_loss: True num_scales: 4 style_to_perceptual: 250 init: type: 'normal' gain: 0.02 gen: type: generators.face_model::FaceGenerator param: mapping_net: coeff_nc: 73 descriptor_nc: 256 layer: 3 warpping_net: encoder_layer: 5 decoder_layer: 3 base_nc: 32 editing_net: layer: 3 num_res_blocks: 2 base_nc: 64 common: image_nc: 3 descriptor_nc: 256 max_nc: 256 use_spect: False # Data options.
data: type: data.vox_dataset::VoxDataset path: ./dataset/vox_lmdb resolution: 256 semantic_radius: 13 train: batch_size: 5 distributed: True val: batch_size: 8 distributed: True ================================================ FILE: config/face_demo.yaml ================================================ # How often do you want to log the training stats. # network_list: # gen: gen_optimizer # dis: dis_optimizer distributed: True image_to_tensorboard: True snapshot_save_iter: 40000 snapshot_save_epoch: 20 snapshot_save_start_iter: 20000 snapshot_save_start_epoch: 10 image_save_iter: 1000 max_epoch: 200 logging_iter: 100 results_dir: ./eval_results gen_optimizer: type: adam lr: 0.0001 adam_beta1: 0.5 adam_beta2: 0.999 lr_policy: iteration_mode: True type: step step_size: 300000 gamma: 0.2 trainer: type: trainers.face_trainer::FaceTrainer pretrain_warp_iteration: 200000 loss_weight: weight_perceptual_warp: 2.5 weight_perceptual_final: 4 vgg_param_warp: network: vgg19 layers: ['relu_1_1', 'relu_2_1', 'relu_3_1', 'relu_4_1', 'relu_5_1'] use_style_loss: False num_scales: 4 vgg_param_final: network: vgg19 layers: ['relu_1_1', 'relu_2_1', 'relu_3_1', 'relu_4_1', 'relu_5_1'] use_style_loss: True num_scales: 4 style_to_perceptual: 250 init: type: 'normal' gain: 0.02 gen: type: generators.face_model::FaceGenerator param: mapping_net: coeff_nc: 73 descriptor_nc: 256 layer: 3 warpping_net: encoder_layer: 5 decoder_layer: 3 base_nc: 32 editing_net: layer: 3 num_res_blocks: 2 base_nc: 64 common: image_nc: 3 descriptor_nc: 256 max_nc: 256 use_spect: False # Data options. data: type: data.vox_dataset::VoxDataset path: ./dataset/vox_lmdb_demo resolution: 256 semantic_radius: 13 train: batch_size: 5 distributed: True val: batch_size: 8 distributed: True ================================================ FILE: config.py ================================================ import collections import functools import os import re import yaml from util.distributed import master_only_print as print class AttrDict(dict): """Dict as attribute trick.""" def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self for key, value in self.__dict__.items(): if isinstance(value, dict): self.__dict__[key] = AttrDict(value) elif isinstance(value, (list, tuple)): if isinstance(value[0], dict): self.__dict__[key] = [AttrDict(item) for item in value] else: self.__dict__[key] = value def yaml(self): """Convert object to yaml dict and return.""" yaml_dict = {} for key, value in self.__dict__.items(): if isinstance(value, AttrDict): yaml_dict[key] = value.yaml() elif isinstance(value, list): if isinstance(value[0], AttrDict): new_l = [] for item in value: new_l.append(item.yaml()) yaml_dict[key] = new_l else: yaml_dict[key] = value else: yaml_dict[key] = value return yaml_dict def __repr__(self): """Print all variables.""" ret_str = [] for key, value in self.__dict__.items(): if isinstance(value, AttrDict): ret_str.append('{}:'.format(key)) child_ret_str = value.__repr__().split('\n') for item in child_ret_str: ret_str.append(' ' + item) elif isinstance(value, list): if isinstance(value[0], AttrDict): ret_str.append('{}:'.format(key)) for item in value: # Treat as AttrDict above. child_ret_str = item.__repr__().split('\n') for item in child_ret_str: ret_str.append(' ' + item) else: ret_str.append('{}: {}'.format(key, value)) else: ret_str.append('{}: {}'.format(key, value)) return '\n'.join(ret_str) class Config(AttrDict): r"""Configuration class. 
This should include every human specifiable hyperparameter values for your training.""" def __init__(self, filename=None, args=None, verbose=False, is_train=True): super(Config, self).__init__() # Set default parameters. # Logging. large_number = 1000000000 self.snapshot_save_iter = large_number self.snapshot_save_epoch = large_number self.snapshot_save_start_iter = 0 self.snapshot_save_start_epoch = 0 self.image_save_iter = large_number self.eval_epoch = large_number self.start_eval_epoch = large_number self.eval_epoch = large_number self.max_epoch = large_number self.max_iter = large_number self.logging_iter = 100 self.image_to_tensorboard=False self.which_iter = args.which_iter self.resume = not args.no_resume self.checkpoints_dir = args.checkpoints_dir self.name = args.name self.phase = 'train' if is_train else 'test' # Networks. self.gen = AttrDict(type='generators.dummy') self.dis = AttrDict(type='discriminators.dummy') # Optimizers. self.gen_optimizer = AttrDict(type='adam', lr=0.0001, adam_beta1=0.0, adam_beta2=0.999, eps=1e-8, lr_policy=AttrDict(iteration_mode=False, type='step', step_size=large_number, gamma=1)) self.dis_optimizer = AttrDict(type='adam', lr=0.0001, adam_beta1=0.0, adam_beta2=0.999, eps=1e-8, lr_policy=AttrDict(iteration_mode=False, type='step', step_size=large_number, gamma=1)) # Data. self.data = AttrDict(name='dummy', type='datasets.images', num_workers=0) self.test_data = AttrDict(name='dummy', type='datasets.images', num_workers=0, test=AttrDict(is_lmdb=False, roots='', batch_size=1)) self.trainer = AttrDict( model_average=False, model_average_beta=0.9999, model_average_start_iteration=1000, model_average_batch_norm_estimation_iteration=30, model_average_remove_sn=True, image_to_tensorboard=False, hparam_to_tensorboard=False, distributed_data_parallel='pytorch', delay_allreduce=True, gan_relativistic=False, gen_step=1, dis_step=1) # # Cudnn. self.cudnn = AttrDict(deterministic=False, benchmark=True) # Others. self.pretrained_weight = '' self.inference_args = AttrDict() # Update with given configurations. assert os.path.exists(filename), 'File {} not exist.'.format(filename) loader = yaml.SafeLoader loader.add_implicit_resolver( u'tag:yaml.org,2002:float', re.compile(u'''^(?: [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)? |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) |\\.[0-9_]+(?:[eE][-+][0-9]+)? |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]* |[-+]?\\.(?:inf|Inf|INF) |\\.(?:nan|NaN|NAN))$''', re.X), list(u'-+0123456789.')) try: with open(filename, 'r') as f: cfg_dict = yaml.load(f, Loader=loader) except EnvironmentError: print('Please check the file with name of "%s"', filename) recursive_update(self, cfg_dict) # Put common opts in both gen and dis. 
if 'common' in cfg_dict: self.common = AttrDict(**cfg_dict['common']) self.gen.common = self.common self.dis.common = self.common if verbose: print(' config '.center(80, '-')) print(self.__repr__()) print(''.center(80, '-')) def rsetattr(obj, attr, val): """Recursively find object and set value""" pre, _, post = attr.rpartition('.') return setattr(rgetattr(obj, pre) if pre else obj, post, val) def rgetattr(obj, attr, *args): """Recursively find object and return value""" def _getattr(obj, attr): r"""Get attribute.""" return getattr(obj, attr, *args) return functools.reduce(_getattr, [obj] + attr.split('.')) def recursive_update(d, u): """Recursively update AttrDict d with AttrDict u""" for key, value in u.items(): if isinstance(value, collections.abc.Mapping): d.__dict__[key] = recursive_update(d.get(key, AttrDict({})), value) elif isinstance(value, (list, tuple)): if isinstance(value[0], dict): d.__dict__[key] = [AttrDict(item) for item in value] else: d.__dict__[key] = value else: d.__dict__[key] = value return d ================================================ FILE: data/__init__.py ================================================ import importlib import torch.utils.data from util.distributed import master_only_print as print def find_dataset_using_name(dataset_name): dataset_filename = dataset_name module, target = dataset_name.split('::') datasetlib = importlib.import_module(module) dataset = None for name, cls in datasetlib.__dict__.items(): if name == target: dataset = cls if dataset is None: raise ValueError("In %s.py, there should be a class " "with class name that matches %s in lowercase." % (dataset_filename, target)) return dataset def get_option_setter(dataset_name): dataset_class = find_dataset_using_name(dataset_name) return dataset_class.modify_commandline_options def create_dataloader(opt, is_inference): dataset = find_dataset_using_name(opt.type) instance = dataset(opt, is_inference) phase = 'val' if is_inference else 'training' batch_size = opt.val.batch_size if is_inference else opt.train.batch_size print("%s dataset [%s] of size %d was created" % (phase, opt.type, len(instance))) dataloader = torch.utils.data.DataLoader( instance, batch_size=batch_size, sampler=data_sampler(instance, shuffle=not is_inference, distributed=opt.train.distributed), drop_last=not is_inference, num_workers=getattr(opt, 'num_workers', 0), ) return dataloader def data_sampler(dataset, shuffle, distributed): if distributed: return torch.utils.data.distributed.DistributedSampler(dataset, shuffle=shuffle) if shuffle: return torch.utils.data.RandomSampler(dataset) else: return torch.utils.data.SequentialSampler(dataset) def get_dataloader(opt, is_inference=False): dataset = create_dataloader(opt, is_inference=is_inference) return dataset def get_train_val_dataloader(opt): val_dataset = create_dataloader(opt, is_inference=True) train_dataset = create_dataloader(opt, is_inference=False) return val_dataset, train_dataset ================================================ FILE: data/image_dataset.py ================================================ import os import glob import time import numpy as np from PIL import Image import torch import torchvision.transforms.functional as F class ImageDataset(): def __init__(self, opt, input_name): self.opt = opt self.IMAGEEXT = ['png', 'jpg'] self.input_image_list, self.coeff_list = self.obtain_inputs(input_name) self.index = -1 # load image dataset opt self.resolution = opt.resolution self.semantic_radius = opt.semantic_radius def next_image(self): self.index += 
1 image_name = self.input_image_list[self.index] coeff_name = self.coeff_list[self.index] img = Image.open(image_name) input_image = self.trans_image(img) coeff_3dmm = np.loadtxt(coeff_name).astype(np.float32) coeff_3dmm = self.transform_semantic(coeff_3dmm) return { 'source_image': input_image[None], 'target_semantics': coeff_3dmm[None], 'name': os.path.splitext(os.path.basename(image_name))[0] } def obtain_inputs(self, root): filenames = list() IMAGE_EXTENSIONS_LOWERCASE = {'jpg', 'png', 'jpeg', 'webp'} IMAGE_EXTENSIONS = IMAGE_EXTENSIONS_LOWERCASE.union({f.upper() for f in IMAGE_EXTENSIONS_LOWERCASE}) extensions = IMAGE_EXTENSIONS for ext in extensions: filenames += glob.glob(f'{root}/*.{ext}', recursive=True) filenames = sorted(filenames) coeffnames = sorted(glob.glob(f'{root}/*_3dmm_coeff.txt')) return filenames, coeffnames def transform_semantic(self, semantic): semantic = semantic[None].repeat(self.semantic_radius*2+1, 0) ex_coeff = semantic[:,80:144] #expression angles = semantic[:,224:227] #euler angles for pose translation = semantic[:,254:257] #translation crop = semantic[:,259:262] #crop param coeff_3dmm = np.concatenate([ex_coeff, angles, translation, crop], 1) return torch.Tensor(coeff_3dmm).permute(1,0) def trans_image(self, image): image = F.resize( image, size=self.resolution, interpolation=Image.BICUBIC) image = F.to_tensor(image) image = F.normalize(image, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) return image def __len__(self): return len(self.input_image_list) ================================================ FILE: data/vox_dataset.py ================================================ import os import lmdb import random import collections import numpy as np from PIL import Image from io import BytesIO import torch from torch.utils.data import Dataset from torchvision import transforms def format_for_lmdb(*args): key_parts = [] for arg in args: if isinstance(arg, int): arg = str(arg).zfill(7) key_parts.append(arg) return '-'.join(key_parts).encode('utf-8') class VoxDataset(Dataset): def __init__(self, opt, is_inference): path = opt.path self.env = lmdb.open( os.path.join(path, str(opt.resolution)), max_readers=32, readonly=True, lock=False, readahead=False, meminit=False, ) if not self.env: raise IOError('Cannot open lmdb dataset', path) list_file = "test_list.txt" if is_inference else "train_list.txt" list_file = os.path.join(path, list_file) with open(list_file, 'r') as f: lines = f.readlines() videos = [line.replace('\n', '') for line in lines] self.resolution = opt.resolution self.semantic_radius = opt.semantic_radius self.video_items, self.person_ids = self.get_video_index(videos) self.idx_by_person_id = self.group_by_key(self.video_items, key='person_id') self.person_ids = self.person_ids * 100 self.transform = transforms.Compose( [ transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True), ]) def get_video_index(self, videos): video_items = [] for video in videos: video_items.append(self.Video_Item(video)) person_ids = sorted(list({video.split('#')[0] for video in videos})) return video_items, person_ids def group_by_key(self, video_list, key): return_dict = collections.defaultdict(list) for index, video_item in enumerate(video_list): return_dict[video_item[key]].append(index) return return_dict def Video_Item(self, video_name): video_item = {} video_item['video_name'] = video_name video_item['person_id'] = video_name.split('#')[0] with self.env.begin(write=False) as txn: key = format_for_lmdb(video_item['video_name'], 'length') 
length = int(txn.get(key).decode('utf-8')) video_item['num_frame'] = length return video_item def __len__(self): return len(self.person_ids) def __getitem__(self, index): data={} person_id = self.person_ids[index] video_item = self.video_items[random.choices(self.idx_by_person_id[person_id], k=1)[0]] frame_source, frame_target = self.random_select_frames(video_item) with self.env.begin(write=False) as txn: key = format_for_lmdb(video_item['video_name'], frame_source) img_bytes_1 = txn.get(key) key = format_for_lmdb(video_item['video_name'], frame_target) img_bytes_2 = txn.get(key) semantics_key = format_for_lmdb(video_item['video_name'], 'coeff_3dmm') semantics_numpy = np.frombuffer(txn.get(semantics_key), dtype=np.float32) semantics_numpy = semantics_numpy.reshape((video_item['num_frame'],-1)) img1 = Image.open(BytesIO(img_bytes_1)) data['source_image'] = self.transform(img1) img2 = Image.open(BytesIO(img_bytes_2)) data['target_image'] = self.transform(img2) data['target_semantics'] = self.transform_semantic(semantics_numpy, frame_target) data['source_semantics'] = self.transform_semantic(semantics_numpy, frame_source) return data def random_select_frames(self, video_item): num_frame = video_item['num_frame'] frame_idx = random.choices(list(range(num_frame)), k=2) return frame_idx[0], frame_idx[1] def transform_semantic(self, semantic, frame_index): index = self.obtain_seq_index(frame_index, semantic.shape[0]) coeff_3dmm = semantic[index,...] # id_coeff = coeff_3dmm[:,:80] #identity ex_coeff = coeff_3dmm[:,80:144] #expression # tex_coeff = coeff_3dmm[:,144:224] #texture angles = coeff_3dmm[:,224:227] #euler angles for pose # gamma = coeff_3dmm[:,227:254] #lighting translation = coeff_3dmm[:,254:257] #translation crop = coeff_3dmm[:,257:260] #crop param coeff_3dmm = np.concatenate([ex_coeff, angles, translation, crop], 1) return torch.Tensor(coeff_3dmm).permute(1,0) def obtain_seq_index(self, index, num_frames): seq = list(range(index-self.semantic_radius, index+self.semantic_radius+1)) seq = [ min(max(item, 0), num_frames-1) for item in seq ] return seq ================================================ FILE: data/vox_video_dataset.py ================================================ import os import lmdb import random import collections import numpy as np from PIL import Image from io import BytesIO import torch from data.vox_dataset import VoxDataset from data.vox_dataset import format_for_lmdb class VoxVideoDataset(VoxDataset): def __init__(self, opt, is_inference): super(VoxVideoDataset, self).__init__(opt, is_inference) self.video_index = -1 self.cross_id = opt.cross_id # whether normalize the crop parameters when performing cross_id reenactments # set it as "True" always brings better performance self.norm_crop_param = True def __len__(self): return len(self.video_items) def load_next_video(self): data={} self.video_index += 1 video_item = self.video_items[self.video_index] source_video_item = self.random_video(video_item) if self.cross_id else video_item with self.env.begin(write=False) as txn: key = format_for_lmdb(source_video_item['video_name'], 0) img_bytes_1 = txn.get(key) img1 = Image.open(BytesIO(img_bytes_1)) data['source_image'] = self.transform(img1) semantics_key = format_for_lmdb(video_item['video_name'], 'coeff_3dmm') semantics_numpy = np.frombuffer(txn.get(semantics_key), dtype=np.float32) semantics_numpy = semantics_numpy.reshape((video_item['num_frame'],-1)) if self.cross_id and self.norm_crop_param: semantics_source_key = 
format_for_lmdb(source_video_item['video_name'], 'coeff_3dmm') semantics_source_numpy = np.frombuffer(txn.get(semantics_source_key), dtype=np.float32) semantic_source_numpy = semantics_source_numpy.reshape((source_video_item['num_frame'],-1))[0:1] crop_norm_ratio = self.find_crop_norm_ratio(semantic_source_numpy, semantics_numpy) else: crop_norm_ratio = None data['target_image'], data['target_semantics'] = [], [] for frame_index in range(video_item['num_frame']): key = format_for_lmdb(video_item['video_name'], frame_index) img_bytes_1 = txn.get(key) img1 = Image.open(BytesIO(img_bytes_1)) data['target_image'].append(self.transform(img1)) data['target_semantics'].append( self.transform_semantic(semantics_numpy, frame_index, crop_norm_ratio) ) data['video_name'] = self.obtain_name(video_item['video_name'], source_video_item['video_name']) return data def random_video(self, target_video_item): target_person_id = target_video_item['person_id'] assert len(self.person_ids) > 1 source_person_id = np.random.choice(self.person_ids) if source_person_id == target_person_id: source_person_id = np.random.choice(self.person_ids) source_video_index = np.random.choice(self.idx_by_person_id[source_person_id]) source_video_item = self.video_items[source_video_index] return source_video_item def find_crop_norm_ratio(self, source_coeff, target_coeffs): alpha = 0.3 exp_diff = np.mean(np.abs(target_coeffs[:,80:144] - source_coeff[:,80:144]), 1) angle_diff = np.mean(np.abs(target_coeffs[:,224:227] - source_coeff[:,224:227]), 1) index = np.argmin(alpha*exp_diff + (1-alpha)*angle_diff) crop_norm_ratio = source_coeff[:,-3] / target_coeffs[index:index+1, -3] return crop_norm_ratio def transform_semantic(self, semantic, frame_index, crop_norm_ratio): index = self.obtain_seq_index(frame_index, semantic.shape[0]) coeff_3dmm = semantic[index,...] 
# id_coeff = coeff_3dmm[:,:80] #identity ex_coeff = coeff_3dmm[:,80:144] #expression # tex_coeff = coeff_3dmm[:,144:224] #texture angles = coeff_3dmm[:,224:227] #euler angles for pose # gamma = coeff_3dmm[:,227:254] #lighting translation = coeff_3dmm[:,254:257] #translation crop = coeff_3dmm[:,257:300] #crop param if self.cross_id and self.norm_crop_param: crop[:, -3] = crop[:, -3] * crop_norm_ratio coeff_3dmm = np.concatenate([ex_coeff, angles, translation, crop], 1) return torch.Tensor(coeff_3dmm).permute(1,0) def obtain_name(self, target_name, source_name): if not self.cross_id: return target_name else: source_name = os.path.splitext(os.path.basename(source_name))[0] return source_name+'_to_'+target_name ================================================ FILE: generators/base_function.py ================================================ import sys import math import torch from torch import nn from torch.nn import functional as F from torch.autograd import Function from torch.nn.utils.spectral_norm import spectral_norm as SpectralNorm class LayerNorm2d(nn.Module): def __init__(self, n_out, affine=True): super(LayerNorm2d, self).__init__() self.n_out = n_out self.affine = affine if self.affine: self.weight = nn.Parameter(torch.ones(n_out, 1, 1)) self.bias = nn.Parameter(torch.zeros(n_out, 1, 1)) def forward(self, x): normalized_shape = x.size()[1:] if self.affine: return F.layer_norm(x, normalized_shape, \ self.weight.expand(normalized_shape), self.bias.expand(normalized_shape)) else: return F.layer_norm(x, normalized_shape) class ADAINHourglass(nn.Module): def __init__(self, image_nc, pose_nc, ngf, img_f, encoder_layers, decoder_layers, nonlinearity, use_spect): super(ADAINHourglass, self).__init__() self.encoder = ADAINEncoder(image_nc, pose_nc, ngf, img_f, encoder_layers, nonlinearity, use_spect) self.decoder = ADAINDecoder(pose_nc, ngf, img_f, encoder_layers, decoder_layers, True, nonlinearity, use_spect) self.output_nc = self.decoder.output_nc def forward(self, x, z): return self.decoder(self.encoder(x, z), z) class ADAINEncoder(nn.Module): def __init__(self, image_nc, pose_nc, ngf, img_f, layers, nonlinearity=nn.LeakyReLU(), use_spect=False): super(ADAINEncoder, self).__init__() self.layers = layers self.input_layer = nn.Conv2d(image_nc, ngf, kernel_size=7, stride=1, padding=3) for i in range(layers): in_channels = min(ngf * (2**i), img_f) out_channels = min(ngf *(2**(i+1)), img_f) model = ADAINEncoderBlock(in_channels, out_channels, pose_nc, nonlinearity, use_spect) setattr(self, 'encoder' + str(i), model) self.output_nc = out_channels def forward(self, x, z): out = self.input_layer(x) out_list = [out] for i in range(self.layers): model = getattr(self, 'encoder' + str(i)) out = model(out, z) out_list.append(out) return out_list class ADAINDecoder(nn.Module): """docstring for ADAINDecoder""" def __init__(self, pose_nc, ngf, img_f, encoder_layers, decoder_layers, skip_connect=True, nonlinearity=nn.LeakyReLU(), use_spect=False): super(ADAINDecoder, self).__init__() self.encoder_layers = encoder_layers self.decoder_layers = decoder_layers self.skip_connect = skip_connect use_transpose = True for i in range(encoder_layers-decoder_layers, encoder_layers)[::-1]: in_channels = min(ngf * (2**(i+1)), img_f) in_channels = in_channels*2 if i != (encoder_layers-1) and self.skip_connect else in_channels out_channels = min(ngf * (2**i), img_f) model = ADAINDecoderBlock(in_channels, out_channels, out_channels, pose_nc, use_transpose, nonlinearity, use_spect) setattr(self, 'decoder' + str(i), model) 
self.output_nc = out_channels*2 if self.skip_connect else out_channels def forward(self, x, z): out = x.pop() if self.skip_connect else x for i in range(self.encoder_layers-self.decoder_layers, self.encoder_layers)[::-1]: model = getattr(self, 'decoder' + str(i)) out = model(out, z) out = torch.cat([out, x.pop()], 1) if self.skip_connect else out return out class ADAINEncoderBlock(nn.Module): def __init__(self, input_nc, output_nc, feature_nc, nonlinearity=nn.LeakyReLU(), use_spect=False): super(ADAINEncoderBlock, self).__init__() kwargs_down = {'kernel_size': 4, 'stride': 2, 'padding': 1} kwargs_fine = {'kernel_size': 3, 'stride': 1, 'padding': 1} self.conv_0 = spectral_norm(nn.Conv2d(input_nc, output_nc, **kwargs_down), use_spect) self.conv_1 = spectral_norm(nn.Conv2d(output_nc, output_nc, **kwargs_fine), use_spect) self.norm_0 = ADAIN(input_nc, feature_nc) self.norm_1 = ADAIN(output_nc, feature_nc) self.actvn = nonlinearity def forward(self, x, z): x = self.conv_0(self.actvn(self.norm_0(x, z))) x = self.conv_1(self.actvn(self.norm_1(x, z))) return x class ADAINDecoderBlock(nn.Module): def __init__(self, input_nc, output_nc, hidden_nc, feature_nc, use_transpose=True, nonlinearity=nn.LeakyReLU(), use_spect=False): super(ADAINDecoderBlock, self).__init__() # Attributes self.actvn = nonlinearity hidden_nc = min(input_nc, output_nc) if hidden_nc is None else hidden_nc kwargs_fine = {'kernel_size':3, 'stride':1, 'padding':1} if use_transpose: kwargs_up = {'kernel_size':3, 'stride':2, 'padding':1, 'output_padding':1} else: kwargs_up = {'kernel_size':3, 'stride':1, 'padding':1} # create conv layers self.conv_0 = spectral_norm(nn.Conv2d(input_nc, hidden_nc, **kwargs_fine), use_spect) if use_transpose: self.conv_1 = spectral_norm(nn.ConvTranspose2d(hidden_nc, output_nc, **kwargs_up), use_spect) self.conv_s = spectral_norm(nn.ConvTranspose2d(input_nc, output_nc, **kwargs_up), use_spect) else: self.conv_1 = nn.Sequential(spectral_norm(nn.Conv2d(hidden_nc, output_nc, **kwargs_up), use_spect), nn.Upsample(scale_factor=2)) self.conv_s = nn.Sequential(spectral_norm(nn.Conv2d(input_nc, output_nc, **kwargs_up), use_spect), nn.Upsample(scale_factor=2)) # define normalization layers self.norm_0 = ADAIN(input_nc, feature_nc) self.norm_1 = ADAIN(hidden_nc, feature_nc) self.norm_s = ADAIN(input_nc, feature_nc) def forward(self, x, z): x_s = self.shortcut(x, z) dx = self.conv_0(self.actvn(self.norm_0(x, z))) dx = self.conv_1(self.actvn(self.norm_1(dx, z))) out = x_s + dx return out def shortcut(self, x, z): x_s = self.conv_s(self.actvn(self.norm_s(x, z))) return x_s def spectral_norm(module, use_spect=True): """use spectral normal layer to stable the training process""" if use_spect: return SpectralNorm(module) else: return module class ADAIN(nn.Module): def __init__(self, norm_nc, feature_nc): super().__init__() self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False) nhidden = 128 use_bias=True self.mlp_shared = nn.Sequential( nn.Linear(feature_nc, nhidden, bias=use_bias), nn.ReLU() ) self.mlp_gamma = nn.Linear(nhidden, norm_nc, bias=use_bias) self.mlp_beta = nn.Linear(nhidden, norm_nc, bias=use_bias) def forward(self, x, feature): # Part 1. generate parameter-free normalized activations normalized = self.param_free_norm(x) # Part 2. 
produce scaling and bias conditioned on feature feature = feature.view(feature.size(0), -1) actv = self.mlp_shared(feature) gamma = self.mlp_gamma(actv) beta = self.mlp_beta(actv) # apply scale and bias gamma = gamma.view(*gamma.size()[:2], 1,1) beta = beta.view(*beta.size()[:2], 1,1) out = normalized * (1 + gamma) + beta return out class FineEncoder(nn.Module): """docstring for Encoder""" def __init__(self, image_nc, ngf, img_f, layers, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False): super(FineEncoder, self).__init__() self.layers = layers self.first = FirstBlock2d(image_nc, ngf, norm_layer, nonlinearity, use_spect) for i in range(layers): in_channels = min(ngf*(2**i), img_f) out_channels = min(ngf*(2**(i+1)), img_f) model = DownBlock2d(in_channels, out_channels, norm_layer, nonlinearity, use_spect) setattr(self, 'down' + str(i), model) self.output_nc = out_channels def forward(self, x): x = self.first(x) out=[x] for i in range(self.layers): model = getattr(self, 'down'+str(i)) x = model(x) out.append(x) return out class FineDecoder(nn.Module): """docstring for FineDecoder""" def __init__(self, image_nc, feature_nc, ngf, img_f, layers, num_block, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False): super(FineDecoder, self).__init__() self.layers = layers for i in range(layers)[::-1]: in_channels = min(ngf*(2**(i+1)), img_f) out_channels = min(ngf*(2**i), img_f) up = UpBlock2d(in_channels, out_channels, norm_layer, nonlinearity, use_spect) res = FineADAINResBlocks(num_block, in_channels, feature_nc, norm_layer, nonlinearity, use_spect) jump = Jump(out_channels, norm_layer, nonlinearity, use_spect) setattr(self, 'up' + str(i), up) setattr(self, 'res' + str(i), res) setattr(self, 'jump' + str(i), jump) self.final = FinalBlock2d(out_channels, image_nc, use_spect, 'tanh') self.output_nc = out_channels def forward(self, x, z): out = x.pop() for i in range(self.layers)[::-1]: res_model = getattr(self, 'res' + str(i)) up_model = getattr(self, 'up' + str(i)) jump_model = getattr(self, 'jump' + str(i)) out = res_model(out, z) out = up_model(out) out = jump_model(x.pop()) + out out_image = self.final(out) return out_image class FirstBlock2d(nn.Module): """ Downsampling block for use in encoder. 
""" def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False): super(FirstBlock2d, self).__init__() kwargs = {'kernel_size': 7, 'stride': 1, 'padding': 3} conv = spectral_norm(nn.Conv2d(input_nc, output_nc, **kwargs), use_spect) if type(norm_layer) == type(None): self.model = nn.Sequential(conv, nonlinearity) else: self.model = nn.Sequential(conv, norm_layer(output_nc), nonlinearity) def forward(self, x): out = self.model(x) return out class DownBlock2d(nn.Module): def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False): super(DownBlock2d, self).__init__() kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1} conv = spectral_norm(nn.Conv2d(input_nc, output_nc, **kwargs), use_spect) pool = nn.AvgPool2d(kernel_size=(2, 2)) if type(norm_layer) == type(None): self.model = nn.Sequential(conv, nonlinearity, pool) else: self.model = nn.Sequential(conv, norm_layer(output_nc), nonlinearity, pool) def forward(self, x): out = self.model(x) return out class UpBlock2d(nn.Module): def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False): super(UpBlock2d, self).__init__() kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1} conv = spectral_norm(nn.Conv2d(input_nc, output_nc, **kwargs), use_spect) if type(norm_layer) == type(None): self.model = nn.Sequential(conv, nonlinearity) else: self.model = nn.Sequential(conv, norm_layer(output_nc), nonlinearity) def forward(self, x): out = self.model(F.interpolate(x, scale_factor=2)) return out class FineADAINResBlocks(nn.Module): def __init__(self, num_block, input_nc, feature_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False): super(FineADAINResBlocks, self).__init__() self.num_block = num_block for i in range(num_block): model = FineADAINResBlock2d(input_nc, feature_nc, norm_layer, nonlinearity, use_spect) setattr(self, 'res'+str(i), model) def forward(self, x, z): for i in range(self.num_block): model = getattr(self, 'res'+str(i)) x = model(x, z) return x class Jump(nn.Module): def __init__(self, input_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False): super(Jump, self).__init__() kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1} conv = spectral_norm(nn.Conv2d(input_nc, input_nc, **kwargs), use_spect) if type(norm_layer) == type(None): self.model = nn.Sequential(conv, nonlinearity) else: self.model = nn.Sequential(conv, norm_layer(input_nc), nonlinearity) def forward(self, x): out = self.model(x) return out class FineADAINResBlock2d(nn.Module): """ Define an Residual block for different types """ def __init__(self, input_nc, feature_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False): super(FineADAINResBlock2d, self).__init__() kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1} self.conv1 = spectral_norm(nn.Conv2d(input_nc, input_nc, **kwargs), use_spect) self.conv2 = spectral_norm(nn.Conv2d(input_nc, input_nc, **kwargs), use_spect) self.norm1 = ADAIN(input_nc, feature_nc) self.norm2 = ADAIN(input_nc, feature_nc) self.actvn = nonlinearity def forward(self, x, z): dx = self.actvn(self.norm1(self.conv1(x), z)) dx = self.norm2(self.conv2(x), z) out = dx + x return out class FinalBlock2d(nn.Module): """ Define the output layer """ def __init__(self, input_nc, output_nc, use_spect=False, tanh_or_sigmoid='tanh'): super(FinalBlock2d, self).__init__() kwargs = {'kernel_size': 7, 'stride': 1, 'padding':3} conv = 
spectral_norm(nn.Conv2d(input_nc, output_nc, **kwargs), use_spect) if tanh_or_sigmoid == 'sigmoid': out_nonlinearity = nn.Sigmoid() else: out_nonlinearity = nn.Tanh() self.model = nn.Sequential(conv, out_nonlinearity) def forward(self, x): out = self.model(x) return out ================================================ FILE: generators/face_model.py ================================================ import functools import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from util import flow_util from generators.base_function import LayerNorm2d, ADAINHourglass, FineEncoder, FineDecoder class FaceGenerator(nn.Module): def __init__( self, mapping_net, warpping_net, editing_net, common ): super(FaceGenerator, self).__init__() self.mapping_net = MappingNet(**mapping_net) self.warpping_net = WarpingNet(**warpping_net, **common) self.editing_net = EditingNet(**editing_net, **common) def forward( self, input_image, driving_source, stage=None ): if stage == 'warp': descriptor = self.mapping_net(driving_source) output = self.warpping_net(input_image, descriptor) else: descriptor = self.mapping_net(driving_source) output = self.warpping_net(input_image, descriptor) output['fake_image'] = self.editing_net(input_image, output['warp_image'], descriptor) return output class MappingNet(nn.Module): def __init__(self, coeff_nc, descriptor_nc, layer): super( MappingNet, self).__init__() self.layer = layer nonlinearity = nn.LeakyReLU(0.1) self.first = nn.Sequential( torch.nn.Conv1d(coeff_nc, descriptor_nc, kernel_size=7, padding=0, bias=True)) for i in range(layer): net = nn.Sequential(nonlinearity, torch.nn.Conv1d(descriptor_nc, descriptor_nc, kernel_size=3, padding=0, dilation=3)) setattr(self, 'encoder' + str(i), net) self.pooling = nn.AdaptiveAvgPool1d(1) self.output_nc = descriptor_nc def forward(self, input_3dmm): out = self.first(input_3dmm) for i in range(self.layer): model = getattr(self, 'encoder' + str(i)) out = model(out) + out[:,:,3:-3] out = self.pooling(out) return out class WarpingNet(nn.Module): def __init__( self, image_nc, descriptor_nc, base_nc, max_nc, encoder_layer, decoder_layer, use_spect ): super( WarpingNet, self).__init__() nonlinearity = nn.LeakyReLU(0.1) norm_layer = functools.partial(LayerNorm2d, affine=True) kwargs = {'nonlinearity':nonlinearity, 'use_spect':use_spect} self.descriptor_nc = descriptor_nc self.hourglass = ADAINHourglass(image_nc, self.descriptor_nc, base_nc, max_nc, encoder_layer, decoder_layer, **kwargs) self.flow_out = nn.Sequential(norm_layer(self.hourglass.output_nc), nonlinearity, nn.Conv2d(self.hourglass.output_nc, 2, kernel_size=7, stride=1, padding=3)) self.pool = nn.AdaptiveAvgPool2d(1) def forward(self, input_image, descriptor): final_output={} output = self.hourglass(input_image, descriptor) final_output['flow_field'] = self.flow_out(output) deformation = flow_util.convert_flow_to_deformation(final_output['flow_field']) final_output['warp_image'] = flow_util.warp_image(input_image, deformation) return final_output class EditingNet(nn.Module): def __init__( self, image_nc, descriptor_nc, layer, base_nc, max_nc, num_res_blocks, use_spect): super(EditingNet, self).__init__() nonlinearity = nn.LeakyReLU(0.1) norm_layer = functools.partial(LayerNorm2d, affine=True) kwargs = {'norm_layer':norm_layer, 'nonlinearity':nonlinearity, 'use_spect':use_spect} self.descriptor_nc = descriptor_nc # encoder part self.encoder = FineEncoder(image_nc*2, base_nc, max_nc, layer, **kwargs) self.decoder = FineDecoder(image_nc, self.descriptor_nc, 
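The MappingNet above uses un-padded 1D convolutions (a kernel-7 stem followed by dilated kernel-3 layers), so every encoder stage shortens the coefficient sequence by 6 frames and the residual slice out[:, :, 3:-3] keeps the lengths aligned. A rough sketch with assumed sizes (coeff_nc=73, descriptor_nc=256, layer=3; the real values live in config/face.yaml):

```python
import torch
from generators.face_model import MappingNet

net = MappingNet(coeff_nc=73, descriptor_nc=256, layer=3)   # assumed hyper-parameters

# A 27-frame window of 3DMM coefficients shrinks as 27 -> 21 -> 15 -> 9 -> 3,
# and adaptive average pooling collapses the remaining frames into one descriptor.
coeffs = torch.randn(2, 73, 27)
descriptor = net(coeffs)
print(tuple(descriptor.shape))   # (2, 256, 1)
```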
base_nc, max_nc, layer, num_res_blocks, **kwargs) def forward(self, input_image, warp_image, descriptor): x = torch.cat([input_image, warp_image], 1) x = self.encoder(x) gen_image = self.decoder(x, descriptor) return gen_image ================================================ FILE: inference.py ================================================ import os import cv2 import lmdb import math import argparse import numpy as np from io import BytesIO from PIL import Image import torch import torchvision.transforms.functional as F import torchvision.transforms as transforms from util.logging import init_logging, make_logging_dir from util.distributed import init_dist from util.trainer import get_model_optimizer_and_scheduler, set_random_seed, get_trainer from util.distributed import master_only_print as print from data.vox_video_dataset import VoxVideoDataset from config import Config def parse_args(): parser = argparse.ArgumentParser(description='Training') parser.add_argument('--config', default='./config/face.yaml') parser.add_argument('--name', default=None) parser.add_argument('--checkpoints_dir', default='result', help='Dir for saving logs and models.') parser.add_argument('--seed', type=int, default=0, help='Random seed.') parser.add_argument('--cross_id', action='store_true') parser.add_argument('--which_iter', type=int, default=None) parser.add_argument('--no_resume', action='store_true') parser.add_argument('--local_rank', type=int, default=0) parser.add_argument('--single_gpu', action='store_true') parser.add_argument('--output_dir', type=str) args = parser.parse_args() return args def write2video(results_dir, *video_list): cat_video=None for video in video_list: video_numpy = video[:,:3,:,:].cpu().float().detach().numpy() video_numpy = (np.transpose(video_numpy, (0, 2, 3, 1)) + 1) / 2.0 * 255.0 video_numpy = video_numpy.astype(np.uint8) cat_video = np.concatenate([cat_video, video_numpy], 2) if cat_video is not None else video_numpy image_array=[] for i in range(cat_video.shape[0]): image_array.append(cat_video[i]) out_name = results_dir+'.mp4' _, height, width, layers = cat_video.shape size = (width,height) out = cv2.VideoWriter(out_name, cv2.VideoWriter_fourcc(*'mp4v'), 15, size) for i in range(len(image_array)): out.write(image_array[i][:,:,::-1]) out.release() if __name__ == '__main__': args = parse_args() set_random_seed(args.seed) opt = Config(args.config, args, is_train=False) if not args.single_gpu: opt.local_rank = args.local_rank init_dist(opt.local_rank) opt.device = torch.cuda.current_device() # create a visualizer date_uid, logdir = init_logging(opt) opt.logdir = logdir make_logging_dir(logdir, date_uid) # create a model net_G, net_G_ema, opt_G, sch_G \ = get_model_optimizer_and_scheduler(opt) trainer = get_trainer(opt, net_G, net_G_ema, \ opt_G, sch_G, None) current_epoch, current_iteration = trainer.load_checkpoint( opt, args.which_iter) net_G = trainer.net_G_ema.eval() output_dir = os.path.join( args.output_dir, 'epoch_{:05}_iteration_{:09}'.format(current_epoch, current_iteration) ) os.makedirs(output_dir, exist_ok=True) opt.data.cross_id = args.cross_id dataset = VoxVideoDataset(opt.data, is_inference=True) with torch.no_grad(): for video_index in range(dataset.__len__()): data = dataset.load_next_video() input_source = data['source_image'][None].cuda() name = data['video_name'] output_images, gt_images, warp_images = [],[],[] for frame_index in range(len(data['target_semantics'])): target_semantic = data['target_semantics'][frame_index][None].cuda() output_dict = 
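A hedged end-to-end sketch of the generator defined above. The sub-network dictionaries below are illustrative stand-ins, not the released configuration; the trained checkpoints expect the values from config/face.yaml.

```python
import torch
from generators.face_model import FaceGenerator

net_G = FaceGenerator(
    mapping_net=dict(coeff_nc=73, descriptor_nc=256, layer=3),
    warpping_net=dict(encoder_layer=5, decoder_layer=3, base_nc=32),
    editing_net=dict(layer=3, num_res_blocks=2, base_nc=64),
    common=dict(image_nc=3, descriptor_nc=256, max_nc=256, use_spect=False),
)

source = torch.randn(1, 3, 256, 256)               # source portrait in [-1, 1]
coeffs = torch.randn(1, 73, 27)                    # 3DMM window describing the driving frame
warp_only = net_G(source, coeffs, stage='warp')    # {'flow_field', 'warp_image'}
full = net_G(source, coeffs)                       # additionally contains 'fake_image'
print(sorted(full.keys()))
```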
net_G(input_source, target_semantic) output_images.append( output_dict['fake_image'].cpu().clamp_(-1, 1) ) warp_images.append( output_dict['warp_image'].cpu().clamp_(-1, 1) ) gt_images.append( data['target_image'][frame_index][None] ) gen_images = torch.cat(output_images, 0) gt_images = torch.cat(gt_images, 0) warp_images = torch.cat(warp_images, 0) write2video("{}/{}".format(output_dir, name), gt_images, warp_images, gen_images) print("write results to video {}/{}".format(output_dir, name)) ================================================ FILE: intuitive_control.py ================================================ import os import math import argparse import numpy as np from scipy.io import savemat,loadmat import torch import torchvision.transforms.functional as F import torchvision.transforms as transforms from config import Config from util.logging import init_logging, make_logging_dir from util.distributed import init_dist from util.trainer import get_model_optimizer_and_scheduler, set_random_seed, get_trainer from util.distributed import master_only_print as print from data.image_dataset import ImageDataset from inference import write2video def parse_args(): parser = argparse.ArgumentParser(description='Training') parser.add_argument('--config', default='./config/face.yaml') parser.add_argument('--name', default=None) parser.add_argument('--checkpoints_dir', default='result', help='Dir for saving logs and models.') parser.add_argument('--seed', type=int, default=0, help='Random seed.') parser.add_argument('--which_iter', type=int, default=None) parser.add_argument('--no_resume', action='store_true') parser.add_argument('--input_name', type=str) parser.add_argument('--local_rank', type=int, default=0) parser.add_argument('--single_gpu', action='store_true') parser.add_argument('--output_dir', type=str) args = parser.parse_args() return args def get_control(input_name): control_dict = {} control_dict['rotation_center'] = torch.tensor([0,0,0,0,0,0.45]) control_dict['rotation_left_x'] = torch.tensor([0,0,math.pi/10,0,0,0.45]) control_dict['rotation_right_x'] = torch.tensor([0,0,-math.pi/10,0,0,0.45]) control_dict['rotation_left_y'] = torch.tensor([math.pi/10,0,0,0,0,0.45]) control_dict['rotation_right_y'] = torch.tensor([-math.pi/10,0,0,0,0,0.45]) control_dict['rotation_left_z'] = torch.tensor([0,math.pi/8,0,0,0,0.45]) control_dict['rotation_right_z'] = torch.tensor([0,-math.pi/8,0,0,0,0.45]) expession = loadmat('{}/expression.mat'.format(input_name)) for item in ['expression_center', 'expression_mouth', 'expression_eyebrow', 'expression_eyes']: control_dict[item] = torch.tensor(expession[item])[0] sort_rot_control = [ 'rotation_left_x', 'rotation_center', 'rotation_right_x', 'rotation_center', 'rotation_left_y', 'rotation_center', 'rotation_right_y', 'rotation_center', 'rotation_left_z', 'rotation_center', 'rotation_right_z', 'rotation_center' ] sort_exp_control = [ 'expression_center', 'expression_mouth', 'expression_center', 'expression_eyebrow', 'expression_center', 'expression_eyes', ] return control_dict, sort_rot_control, sort_exp_control if __name__ == '__main__': args = parse_args() set_random_seed(args.seed) opt = Config(args.config, args, is_train=False) if not args.single_gpu: opt.local_rank = args.local_rank init_dist(opt.local_rank) opt.device = torch.cuda.current_device() # create a visualizer date_uid, logdir = init_logging(opt) opt.logdir = logdir make_logging_dir(logdir, date_uid) # create a model net_G, net_G_ema, opt_G, sch_G \ = get_model_optimizer_and_scheduler(opt) 
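For reference, a toy invocation of the write2video helper above (tensor sizes and the output path are made up). Each clip is expected as (T, C, H, W) in [-1, 1]; the clips are tiled side by side and encoded with OpenCV's mp4v writer at 15 fps.

```python
import torch
from inference import write2video   # assumes the repo's dependencies are installed

gt = torch.rand(10, 3, 64, 64) * 2 - 1      # three dummy clips of 10 frames each
warp = torch.rand(10, 3, 64, 64) * 2 - 1
fake = torch.rand(10, 3, 64, 64) * 2 - 1
write2video('./demo_result', gt, warp, fake)   # writes ./demo_result.mp4, three panels wide
```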
trainer = get_trainer(opt, net_G, net_G_ema, \ opt_G, sch_G, None) current_epoch, current_iteration = trainer.load_checkpoint( opt, args.which_iter) net_G = trainer.net_G_ema.eval() output_dir = os.path.join( args.output_dir, 'epoch_{:05}_iteration_{:09}'.format(current_epoch, current_iteration) ) os.makedirs(output_dir, exist_ok=True) image_dataset = ImageDataset(opt.data, args.input_name) control_dict, sort_rot_control, sort_exp_control = get_control(args.input_name) for _ in range(image_dataset.__len__()): with torch.no_grad(): data = image_dataset.next_image() num = 10 output_images = [] # rotation control current = control_dict['rotation_center'] for control in sort_rot_control: for i in range(num): rotation = (control_dict[control]-current)*i/(num-1)+current data['target_semantics'][:, 64:70, :] = rotation[None, :, None] output_dict = net_G(data['source_image'].cuda(), data['target_semantics'].cuda()) output_images.append( output_dict['fake_image'].cpu().clamp_(-1, 1) ) current = rotation # expression control current = data['target_semantics'][0, :64, 0] for control in sort_exp_control: for i in range(num): expression = (control_dict[control]-current)*i/(num-1)+current data['target_semantics'][:, :64, :] = expression[None, :, None] output_dict = net_G(data['source_image'].cuda(), data['target_semantics'].cuda()) output_images.append( output_dict['fake_image'].cpu().clamp_(-1, 1) ) current = expression output_images = torch.cat(output_images, 0) print('write results to file {}/{}'.format(output_dir, data['name'])) write2video('{}/{}'.format(output_dir, data['name']), output_images) ================================================ FILE: loss/perceptual.py ================================================ import torch import torch.nn.functional as F import torchvision from torch import nn from util.distributed import master_only_print as print def apply_imagenet_normalization(input): r"""Normalize using ImageNet mean and std. Args: input (4D tensor NxCxHxW): The input images, assuming to be [-1, 1]. Returns: Normalized inputs using the ImageNet normalization. """ # normalize the input back to [0, 1] normalized_input = (input + 1) / 2 # normalize the input using the ImageNet mean and std mean = normalized_input.new_tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1) std = normalized_input.new_tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1) output = (normalized_input - mean) / std return output class PerceptualLoss(nn.Module): r"""Perceptual loss initialization. Args: network (str) : The name of the loss network: 'vgg16' | 'vgg19'. layers (str or list of str) : The layers used to compute the loss. weights (float or list of float : The loss weights of each layer. criterion (str): The type of distance function: 'l1' | 'l2'. resize (bool) : If ``True``, resize the input images to 224x224. resize_mode (str): Algorithm used for resizing. instance_normalized (bool): If ``True``, applies instance normalization to the feature maps before computing the distance. num_scales (int): The loss will be evaluated at original size and this many times downsampled sizes. """ def __init__(self, network='vgg19', layers='relu_4_1', weights=None, criterion='l1', resize=False, resize_mode='bilinear', instance_normalized=False, num_scales=1, use_style_loss=False, weight_style_to_perceptual=0): super().__init__() if isinstance(layers, str): layers = [layers] if weights is None: weights = [1.] 
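The control loop above walks linearly from the current pose to each preset over num steps before moving on; rotation coefficients occupy channels 64:70 of the semantics window and expression coefficients channels :64. A small standalone sketch of that interpolation (the two presets are copied from get_control, the rest is illustrative):

```python
import math
import torch

num = 10
current = torch.tensor([0, 0, 0, 0, 0, 0.45])              # 'rotation_center'
target = torch.tensor([0, 0, math.pi / 10, 0, 0, 0.45])    # 'rotation_left_x'
for i in range(num):
    rotation = (target - current) * i / (num - 1) + current   # i=0 gives current, i=num-1 gives target
    # intuitive_control.py then broadcasts rotation[None, :, None] into
    # data['target_semantics'][:, 64:70, :] before calling the generator.
```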
* len(layers) elif isinstance(layers, float) or isinstance(layers, int): weights = [weights] assert len(layers) == len(weights), \ 'The number of layers (%s) must be equal to ' \ 'the number of weights (%s).' % (len(layers), len(weights)) if network == 'vgg19': self.model = _vgg19(layers) elif network == 'vgg16': self.model = _vgg16(layers) elif network == 'alexnet': self.model = _alexnet(layers) elif network == 'inception_v3': self.model = _inception_v3(layers) elif network == 'resnet50': self.model = _resnet50(layers) elif network == 'robust_resnet50': self.model = _robust_resnet50(layers) elif network == 'vgg_face_dag': self.model = _vgg_face_dag(layers) else: raise ValueError('Network %s is not recognized' % network) self.num_scales = num_scales self.layers = layers self.weights = weights if criterion == 'l1': self.criterion = nn.L1Loss() elif criterion == 'l2' or criterion == 'mse': self.criterion = nn.MSELoss() else: raise ValueError('Criterion %s is not recognized' % criterion) self.resize = resize self.resize_mode = resize_mode self.instance_normalized = instance_normalized self.use_style_loss = use_style_loss self.weight_style = weight_style_to_perceptual print('Perceptual loss:') print('\tMode: {}'.format(network)) def forward(self, inp, target, mask=None): r"""Perceptual loss forward. Args: inp (4D tensor) : Input tensor. target (4D tensor) : Ground truth tensor, same shape as the input. Returns: (scalar tensor) : The perceptual loss. """ # Perceptual loss should operate in eval mode by default. self.model.eval() inp, target = \ apply_imagenet_normalization(inp), \ apply_imagenet_normalization(target) if self.resize: inp = F.interpolate( inp, mode=self.resize_mode, size=(224, 224), align_corners=False) target = F.interpolate( target, mode=self.resize_mode, size=(224, 224), align_corners=False) # Evaluate perceptual loss at each scale. loss = 0 style_loss=0 for scale in range(self.num_scales): input_features, target_features = \ self.model(inp), self.model(target) for layer, weight in zip(self.layers, self.weights): # Example per-layer VGG19 loss values after applying # [0.03125, 0.0625, 0.125, 0.25, 1.0] weighting. # relu_1_1, 0.014698 # relu_2_1, 0.085817 # relu_3_1, 0.349977 # relu_4_1, 0.544188 # relu_5_1, 0.906261 input_feature = input_features[layer] target_feature = target_features[layer].detach() if self.instance_normalized: input_feature = F.instance_norm(input_feature) target_feature = F.instance_norm(target_feature) if mask is not None: mask_ = F.interpolate(mask, input_feature.shape[2:], mode='bilinear', align_corners=False) input_feature = input_feature * mask_ target_feature = target_feature * mask_ # print('mask',mask_.shape) loss += weight * self.criterion(input_feature, target_feature) if self.use_style_loss and scale==0: style_loss += self.criterion(self.compute_gram(input_feature), self.compute_gram(target_feature)) # Downsample the input and target. if scale != self.num_scales - 1: inp = F.interpolate( inp, mode=self.resize_mode, scale_factor=0.5, align_corners=False, recompute_scale_factor=True) target = F.interpolate( target, mode=self.resize_mode, scale_factor=0.5, align_corners=False, recompute_scale_factor=True) if self.use_style_loss: return loss + style_loss*self.weight_style else: return loss def compute_gram(self, x): b, ch, h, w = x.size() f = x.view(b, ch, w * h) f_T = f.transpose(1, 2) G = f.bmm(f_T) / (h * w * ch) return G class _PerceptualNetwork(nn.Module): r"""The network that extracts features to compute the perceptual loss. 
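A usage sketch for the loss above. The layer and weight choice mirrors the per-layer weighting quoted in the inline comment; whether it matches the training configuration in config/face.yaml is an assumption. torchvision downloads the VGG19 weights on first use.

```python
import torch
from loss.perceptual import PerceptualLoss

criterion = PerceptualLoss(
    network='vgg19',
    layers=['relu_1_1', 'relu_2_1', 'relu_3_1', 'relu_4_1', 'relu_5_1'],
    weights=[0.03125, 0.0625, 0.125, 0.25, 1.0],
    num_scales=2,                       # evaluate at full and half resolution
    use_style_loss=True,                # add a Gram-matrix style term at the finest scale
    weight_style_to_perceptual=250.0,   # assumed weighting, not taken from the config
)

fake = (torch.rand(2, 3, 256, 256) * 2 - 1).requires_grad_()   # inputs are expected in [-1, 1]
real = torch.rand(2, 3, 256, 256) * 2 - 1
loss = criterion(fake, real)
loss.backward()                         # gradients reach `fake`; the VGG weights stay frozen
```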
Args: network (nn.Sequential) : The network that extracts features. layer_name_mapping (dict) : The dictionary that maps a layer's index to its name. layers (list of str): The list of layer names that we are using. """ def __init__(self, network, layer_name_mapping, layers): super().__init__() assert isinstance(network, nn.Sequential), \ 'The network needs to be of type "nn.Sequential".' self.network = network self.layer_name_mapping = layer_name_mapping self.layers = layers for param in self.parameters(): param.requires_grad = False def forward(self, x): r"""Extract perceptual features.""" output = {} for i, layer in enumerate(self.network): x = layer(x) layer_name = self.layer_name_mapping.get(i, None) if layer_name in self.layers: # If the current layer is used by the perceptual loss. output[layer_name] = x return output def _vgg19(layers): r"""Get vgg19 layers""" network = torchvision.models.vgg19(pretrained=True).features layer_name_mapping = {1: 'relu_1_1', 3: 'relu_1_2', 6: 'relu_2_1', 8: 'relu_2_2', 11: 'relu_3_1', 13: 'relu_3_2', 15: 'relu_3_3', 17: 'relu_3_4', 20: 'relu_4_1', 22: 'relu_4_2', 24: 'relu_4_3', 26: 'relu_4_4', 29: 'relu_5_1'} return _PerceptualNetwork(network, layer_name_mapping, layers) def _vgg16(layers): r"""Get vgg16 layers""" network = torchvision.models.vgg16(pretrained=True).features layer_name_mapping = {1: 'relu_1_1', 3: 'relu_1_2', 6: 'relu_2_1', 8: 'relu_2_2', 11: 'relu_3_1', 13: 'relu_3_2', 15: 'relu_3_3', 18: 'relu_4_1', 20: 'relu_4_2', 22: 'relu_4_3', 25: 'relu_5_1'} return _PerceptualNetwork(network, layer_name_mapping, layers) def _alexnet(layers): r"""Get alexnet layers""" network = torchvision.models.alexnet(pretrained=True).features layer_name_mapping = {0: 'conv_1', 1: 'relu_1', 3: 'conv_2', 4: 'relu_2', 6: 'conv_3', 7: 'relu_3', 8: 'conv_4', 9: 'relu_4', 10: 'conv_5', 11: 'relu_5'} return _PerceptualNetwork(network, layer_name_mapping, layers) def _inception_v3(layers): r"""Get inception v3 layers""" inception = torchvision.models.inception_v3(pretrained=True) network = nn.Sequential(inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3, nn.MaxPool2d(kernel_size=3, stride=2), inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2), inception.Mixed_5b, inception.Mixed_5c, inception.Mixed_5d, inception.Mixed_6a, inception.Mixed_6b, inception.Mixed_6c, inception.Mixed_6d, inception.Mixed_6e, inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c, nn.AdaptiveAvgPool2d(output_size=(1, 1))) layer_name_mapping = {3: 'pool_1', 6: 'pool_2', 14: 'mixed_6e', 18: 'pool_3'} return _PerceptualNetwork(network, layer_name_mapping, layers) def _resnet50(layers): r"""Get resnet50 layers""" resnet50 = torchvision.models.resnet50(pretrained=True) network = nn.Sequential(resnet50.conv1, resnet50.bn1, resnet50.relu, resnet50.maxpool, resnet50.layer1, resnet50.layer2, resnet50.layer3, resnet50.layer4, resnet50.avgpool) layer_name_mapping = {4: 'layer_1', 5: 'layer_2', 6: 'layer_3', 7: 'layer_4'} return _PerceptualNetwork(network, layer_name_mapping, layers) def _robust_resnet50(layers): r"""Get robust resnet50 layers""" resnet50 = torchvision.models.resnet50(pretrained=False) state_dict = torch.utils.model_zoo.load_url( 'http://andrewilyas.com/ImageNet.pt') new_state_dict = {} for k, v in state_dict['model'].items(): if k.startswith('module.model.'): new_state_dict[k[13:]] = v resnet50.load_state_dict(new_state_dict) network = nn.Sequential(resnet50.conv1, resnet50.bn1, resnet50.relu, resnet50.maxpool, 
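The builder functions above wrap torchvision backbones so that a forward pass returns a dictionary of named activations instead of logits. A quick sketch with _vgg19 (shapes assume a 224x224 input):

```python
import torch
from loss.perceptual import _vgg19, apply_imagenet_normalization

extractor = _vgg19(['relu_2_1', 'relu_4_1']).eval()
img = torch.rand(1, 3, 224, 224) * 2 - 1            # [-1, 1], like the generator output
with torch.no_grad():
    feats = extractor(apply_imagenet_normalization(img))
print({k: tuple(v.shape) for k, v in feats.items()})
# expected: relu_2_1 -> (1, 128, 112, 112), relu_4_1 -> (1, 512, 28, 28)
```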
resnet50.layer1, resnet50.layer2, resnet50.layer3, resnet50.layer4, resnet50.avgpool) layer_name_mapping = {4: 'layer_1', 5: 'layer_2', 6: 'layer_3', 7: 'layer_4'} return _PerceptualNetwork(network, layer_name_mapping, layers) def _vgg_face_dag(layers): r"""Get vgg face layers""" network = torchvision.models.vgg16(num_classes=2622) state_dict = torch.utils.model_zoo.load_url( 'http://www.robots.ox.ac.uk/~albanie/models/pytorch-mcn/' 'vgg_face_dag.pth') feature_layer_name_mapping = { 0: 'conv1_1', 2: 'conv1_2', 5: 'conv2_1', 7: 'conv2_2', 10: 'conv3_1', 12: 'conv3_2', 14: 'conv3_3', 17: 'conv4_1', 19: 'conv4_2', 21: 'conv4_3', 24: 'conv5_1', 26: 'conv5_2', 28: 'conv5_3'} new_state_dict = {} for k, v in feature_layer_name_mapping.items(): new_state_dict['features.' + str(k) + '.weight'] =\ state_dict[v + '.weight'] new_state_dict['features.' + str(k) + '.bias'] = \ state_dict[v + '.bias'] classifier_layer_name_mapping = { 0: 'fc6', 3: 'fc7', 6: 'fc8'} for k, v in classifier_layer_name_mapping.items(): new_state_dict['classifier.' + str(k) + '.weight'] = \ state_dict[v + '.weight'] new_state_dict['classifier.' + str(k) + '.bias'] = \ state_dict[v + '.bias'] network.load_state_dict(new_state_dict) class Flatten(nn.Module): r"""Flatten the tensor""" def forward(self, x): r"""Flatten it""" return x.view(x.shape[0], -1) layer_name_mapping = { 1: 'avgpool', 3: 'fc6', 4: 'relu_6', 6: 'fc7', 7: 'relu_7', 9: 'fc8'} seq_layers = [network.features, network.avgpool, Flatten()] for i in range(7): seq_layers += [network.classifier[i]] network = nn.Sequential(*seq_layers) return _PerceptualNetwork(network, layer_name_mapping, layers) ================================================ FILE: requirements.txt ================================================ absl-py==0.13.0 backcall==0.2.0 cachetools==4.2.2 certifi==2021.5.30 charset-normalizer==2.0.6 cycler==0.10.0 dataclasses==0.8 decorator==4.4.2 filelock==3.0.12 gdown==3.13.1 google-auth==1.35.0 google-auth-oauthlib==0.4.6 grpcio==1.40.0 idna==3.2 imageio==2.9.0 importlib-metadata==4.8.1 ipython==7.16.1 ipython-genutils==0.2.0 jedi==0.18.0 kiwisolver==1.3.1 lmdb==1.2.1 Markdown==3.3.4 matplotlib==3.3.4 mkl-fft==1.3.0 mkl-random==1.1.1 mkl-service==2.3.0 networkx==2.5.1 numpy==1.19.2 oauthlib==3.1.1 olefile==0.46 opencv-python==4.5.3.56 parso==0.8.2 pexpect==4.8.0 pickleshare==0.7.5 Pillow==8.3.1 pip==21.2.2 prompt-toolkit==3.0.20 protobuf==3.18.0 ptyprocess==0.7.0 pyasn1==0.4.8 pyasn1-modules==0.2.8 Pygments==2.10.0 pyparsing==2.4.7 PySocks==1.7.1 python-dateutil==2.8.2 PyWavelets==1.1.1 PyYAML==5.4.1 requests==2.26.0 requests-oauthlib==1.3.0 rsa==4.7.2 scikit-image==0.17.2 scipy==1.5.4 setuptools==58.0.4 six==1.16.0 tensorboard==2.6.0 tensorboard-data-server==0.6.1 tensorboard-plugin-wit==1.8.0 tifffile==2020.9.3 torch==1.7.1 torchvision==0.8.2 tqdm==4.62.2 traitlets==4.3.3 typing-extensions==3.10.0.2 urllib3==1.26.6 wcwidth==0.2.5 Werkzeug==2.0.1 wheel==0.37.0 zipp==3.5.0 ================================================ FILE: scripts/coeff_detector.py ================================================ import os import glob import numpy as np from os import makedirs, name from PIL import Image from tqdm import tqdm import torch import torch.nn as nn from options.inference_options import InferenceOptions from models import create_model from util.preprocess import align_img from util.load_mats import load_lm3d from extract_kp_videos import KeypointExtractor class CoeffDetector(nn.Module): def __init__(self, opt): super().__init__() self.model = 
create_model(opt) self.model.setup(opt) self.model.device = 'cuda' self.model.parallelize() self.model.eval() self.lm3d_std = load_lm3d(opt.bfm_folder) def forward(self, img, lm): img, trans_params = self.image_transform(img, lm) data_input = { 'imgs': img[None], } self.model.set_input(data_input) self.model.test() pred_coeff = {key:self.model.pred_coeffs_dict[key].cpu().numpy() for key in self.model.pred_coeffs_dict} pred_coeff = np.concatenate([ pred_coeff['id'], pred_coeff['exp'], pred_coeff['tex'], pred_coeff['angle'], pred_coeff['gamma'], pred_coeff['trans'], trans_params[None], ], 1) return {'coeff_3dmm':pred_coeff, 'crop_img': Image.fromarray((img.cpu().permute(1, 2, 0).numpy()*255).astype(np.uint8))} def image_transform(self, images, lm): """ param: images: -- PIL image lm: -- numpy array """ W,H = images.size if np.mean(lm) == -1: lm = (self.lm3d_std[:, :2]+1)/2. lm = np.concatenate( [lm[:, :1]*W, lm[:, 1:2]*H], 1 ) else: lm[:, -1] = H - 1 - lm[:, -1] trans_params, img, lm, _ = align_img(images, lm, self.lm3d_std) img = torch.tensor(np.array(img)/255., dtype=torch.float32).permute(2, 0, 1) trans_params = np.array([float(item) for item in np.hsplit(trans_params, 5)]) trans_params = torch.tensor(trans_params.astype(np.float32)) return img, trans_params def get_data_path(root, keypoint_root): filenames = list() keypoint_filenames = list() IMAGE_EXTENSIONS_LOWERCASE = {'jpg', 'png', 'jpeg', 'webp'} IMAGE_EXTENSIONS = IMAGE_EXTENSIONS_LOWERCASE.union({f.upper() for f in IMAGE_EXTENSIONS_LOWERCASE}) extensions = IMAGE_EXTENSIONS for ext in extensions: filenames += glob.glob(f'{root}/*.{ext}', recursive=True) filenames = sorted(filenames) for filename in filenames: name = os.path.splitext(os.path.basename(filename))[0] keypoint_filenames.append( os.path.join(keypoint_root, name + '.txt') ) return filenames, keypoint_filenames if __name__ == "__main__": opt = InferenceOptions().parse() coeff_detector = CoeffDetector(opt) kp_extractor = KeypointExtractor() image_names, keypoint_names = get_data_path(opt.input_dir, opt.keypoint_dir) makedirs(opt.keypoint_dir, exist_ok=True) makedirs(opt.output_dir, exist_ok=True) for image_name, keypoint_name in tqdm(zip(image_names, keypoint_names)): image = Image.open(image_name) if not os.path.isfile(keypoint_name): lm = kp_extractor.extract_keypoint(image, keypoint_name) else: lm = np.loadtxt(keypoint_name).astype(np.float32) lm = lm.reshape([-1, 2]) predicted = coeff_detector(image, lm) name = os.path.splitext(os.path.basename(image_name))[0] np.savetxt( "{}/{}_3dmm_coeff.txt".format(opt.output_dir, name), predicted['coeff_3dmm'].reshape(-1)) ================================================ FILE: scripts/download_demo_dataset.sh ================================================ gdown https://drive.google.com/uc?id=1ruuLw5-0fpm6EREexPn3I_UQPmkrBoq9 unzip -x ./vox_lmdb_demo.zip mkdir ./dataset mv vox_lmdb_demo ./dataset ================================================ FILE: scripts/download_weights.sh ================================================ gdown https://drive.google.com/uc?id=1-0xOf6g58OmtKtEWJlU3VlnfRqPN9Uq7 unzip -x ./face.zip mkdir ./result mv face ./result rm face.zip ================================================ FILE: scripts/extract_kp_videos.py ================================================ import os import cv2 import time import glob import argparse import face_alignment import numpy as np from PIL import Image from tqdm import tqdm from itertools import cycle from torch.multiprocessing import Pool, Process, set_start_method class 
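Reading one of the *_3dmm_coeff.txt files written by the loop above is a one-liner; the path below is illustrative. Each row is the concatenation, in order, of the Deep3DFaceRecon coefficient groups (id, exp, tex, angle, gamma, trans) followed by the five cropping parameters.

```python
import numpy as np

coeff = np.loadtxt('output_dir/portrait_3dmm_coeff.txt', dtype=np.float32)
coeff = coeff.reshape(1, -1)    # restore the (1, D) row flattened by np.savetxt
print(coeff.shape)
```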
KeypointExtractor(): def __init__(self): self.detector = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D) def extract_keypoint(self, images, name=None): if isinstance(images, list): keypoints = [] for image in images: current_kp = self.extract_keypoint(image) if np.mean(current_kp) == -1 and keypoints: keypoints.append(keypoints[-1]) else: keypoints.append(current_kp[None]) keypoints = np.concatenate(keypoints, 0) np.savetxt(os.path.splitext(name)[0]+'.txt', keypoints.reshape(-1)) return keypoints else: while True: try: keypoints = self.detector.get_landmarks_from_image(np.array(images))[0] break except RuntimeError as e: if str(e).startswith('CUDA'): print("Warning: out of memory, sleep for 1s") time.sleep(1) else: print(e) break except TypeError: print('No face detected in this image') shape = [68, 2] keypoints = -1. * np.ones(shape) break if name is not None: np.savetxt(os.path.splitext(name)[0]+'.txt', keypoints.reshape(-1)) return keypoints def read_video(filename): frames = [] cap = cv2.VideoCapture(filename) while cap.isOpened(): ret, frame = cap.read() if ret: frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame = Image.fromarray(frame) frames.append(frame) else: break cap.release() return frames def run(data): filename, opt, device = data os.environ['CUDA_VISIBLE_DEVICES'] = device kp_extractor = KeypointExtractor() images = read_video(filename) name = filename.split('/')[-2:] os.makedirs(os.path.join(opt.output_dir, name[-2]), exist_ok=True) kp_extractor.extract_keypoint( images, name=os.path.join(opt.output_dir, name[-2], name[-1]) ) if __name__ == '__main__': set_start_method('spawn') parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--input_dir', type=str, help='the folder of the input files') parser.add_argument('--output_dir', type=str, help='the folder of the output files') parser.add_argument('--device_ids', type=str, default='0,1') parser.add_argument('--workers', type=int, default=4) opt = parser.parse_args() filenames = list() VIDEO_EXTENSIONS_LOWERCASE = {'mp4'} VIDEO_EXTENSIONS = VIDEO_EXTENSIONS_LOWERCASE.union({f.upper() for f in VIDEO_EXTENSIONS_LOWERCASE}) extensions = VIDEO_EXTENSIONS for ext in extensions: filenames = sorted(glob.glob(f'{opt.input_dir}/**/*.{ext}')) print('Total number of videos:', len(filenames)) pool = Pool(opt.workers) args_list = cycle([opt]) device_ids = opt.device_ids.split(",") device_ids = cycle(device_ids) for data in tqdm(pool.imap_unordered(run, zip(filenames, args_list, device_ids))): None ================================================ FILE: scripts/face_recon_images.py ================================================ import os import glob import numpy as np from PIL import Image from tqdm import tqdm from scipy.io import savemat import torch from models import create_model from options.inference_options import InferenceOptions from util.preprocess import align_img from util.load_mats import load_lm3d from util.util import tensor2im, save_image def get_data_path(root, keypoint_root): filenames = list() keypoint_filenames = list() IMAGE_EXTENSIONS_LOWERCASE = {'jpg', 'png', 'jpeg', 'webp'} IMAGE_EXTENSIONS = IMAGE_EXTENSIONS_LOWERCASE.union({f.upper() for f in IMAGE_EXTENSIONS_LOWERCASE}) extensions = IMAGE_EXTENSIONS for ext in extensions: filenames += glob.glob(f'{root}/*.{ext}', recursive=True) filenames = sorted(filenames) for filename in filenames: name = os.path.splitext(os.path.basename(filename))[0] keypoint_filenames.append( 
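A minimal sketch of running the KeypointExtractor above on a single image (the path is made up; face_alignment and its landmark model must be available, and the detector runs on the GPU by default).

```python
from PIL import Image
from extract_kp_videos import KeypointExtractor

extractor = KeypointExtractor()
image = Image.open('portrait.png')                                   # illustrative input
landmarks = extractor.extract_keypoint(image, name='portrait.txt')   # also writes portrait.txt
print(landmarks.shape)                                               # (68, 2); all -1 if no face was found
```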
os.path.join(keypoint_root, name + '.txt') ) return filenames, keypoint_filenames class ImagePathDataset(torch.utils.data.Dataset): def __init__(self, filenames, txt_filenames, bfm_folder): self.filenames = filenames self.txt_filenames = txt_filenames self.lm3d_std = load_lm3d(bfm_folder) def __len__(self): return len(self.filenames) def __getitem__(self, i): filename = self.filenames[i] txt_filename = self.txt_filenames[i] imgs, _, trans_params = self.read_data(filename, txt_filename) return { 'imgs':imgs, 'trans_param':trans_params, 'filename': filename } def image_transform(self, images, lm): W,H = images.size if np.mean(lm) == -1: lm = (self.lm3d_std[:, :2]+1)/2. lm = np.concatenate( [lm[:, :1]*W, lm[:, 1:2]*H], 1 ) else: lm[:, -1] = H - 1 - lm[:, -1] trans_params, img, lm, _ = align_img(images, lm, self.lm3d_std) img = torch.tensor(np.array(img)/255., dtype=torch.float32).permute(2, 0, 1) lm = torch.tensor(lm) trans_params = np.array([float(item) for item in np.hsplit(trans_params, 5)]) trans_params = torch.tensor(trans_params.astype(np.float32)) return img, lm, trans_params def read_data(self, filename, txt_filename): images = Image.open(filename).convert('RGB') lm = np.loadtxt(txt_filename).astype(np.float32) lm = lm.reshape([-1, 2]) imgs, lms, trans_params = self.image_transform(images, lm) return imgs, lms, trans_params def main(opt, model): import torch.multiprocessing torch.multiprocessing.set_sharing_strategy('file_system') filenames, keypoint_filenames = get_data_path(opt.input_dir, opt.keypoint_dir) dataset = ImagePathDataset(filenames, keypoint_filenames, opt.bfm_folder) dataloader = torch.utils.data.DataLoader( dataset, batch_size=opt.inference_batch_size, shuffle=False, drop_last=False, num_workers=8, ) pred_coeffs, pred_trans_params = [], [] print('nums of images:', dataset.__len__()) for iteration, data in tqdm(enumerate(dataloader)): data_input = { 'imgs': data['imgs'], } model.set_input(data_input) model.test() pred_coeff = {key:model.pred_coeffs_dict[key].cpu().numpy() for key in model.pred_coeffs_dict} pred_coeff = np.concatenate([ pred_coeff['id'], pred_coeff['exp'], pred_coeff['tex'], pred_coeff['angle'], pred_coeff['gamma'], pred_coeff['trans']], 1) pred_coeffs.append(pred_coeff) trans_param = data['trans_param'].cpu().numpy() pred_trans_params.append(trans_param) if opt.save_split_files: for index, filename in enumerate(data['filename']): basename = os.path.splitext(os.path.basename(filename))[0] output_path = os.path.join(opt.output_dir, basename+'.mat') savemat( output_path, {'coeff':pred_coeff[index], 'transform_params':trans_param[index]} ) # visuals = model.get_current_visuals() # get image results # for name in visuals: # images = visuals[name] # for i in range(images.shape[0]): # image_numpy = tensor2im(images[i]) # save_image(image_numpy, os.path.basename(data['filename'][i])+'.png') pred_coeffs = np.concatenate(pred_coeffs, 0) pred_trans_params = np.concatenate(pred_trans_params, 0) savemat(os.path.join(opt.output_dir, 'ffhq.mat'), {'coeff':pred_coeffs, 'transform_params':pred_trans_params}) if __name__ == '__main__': opt = InferenceOptions().parse() # get test options model = create_model(opt) model.setup(opt) model.device = 'cuda:0' model.parallelize() model.eval() lm3d_std = load_lm3d(opt.bfm_folder) main(opt, model) ================================================ FILE: scripts/face_recon_videos.py ================================================ import os import cv2 import glob import numpy as np from PIL import Image from tqdm import tqdm from 
scipy.io import savemat import torch from models import create_model from options.inference_options import InferenceOptions from util.preprocess import align_img from util.load_mats import load_lm3d from util.util import mkdirs, tensor2im, save_image def get_data_path(root, keypoint_root): filenames = list() keypoint_filenames = list() VIDEO_EXTENSIONS_LOWERCASE = {'mp4'} VIDEO_EXTENSIONS = VIDEO_EXTENSIONS_LOWERCASE.union({f.upper() for f in VIDEO_EXTENSIONS_LOWERCASE}) extensions = VIDEO_EXTENSIONS for ext in extensions: filenames += glob.glob(f'{root}/**/*.{ext}', recursive=True) filenames = sorted(filenames) keypoint_filenames = sorted(glob.glob(f'{keypoint_root}/**/*.txt', recursive=True)) assert len(filenames) == len(keypoint_filenames) return filenames, keypoint_filenames class VideoPathDataset(torch.utils.data.Dataset): def __init__(self, filenames, txt_filenames, bfm_folder): self.filenames = filenames self.txt_filenames = txt_filenames self.lm3d_std = load_lm3d(bfm_folder) def __len__(self): return len(self.filenames) def __getitem__(self, index): filename = self.filenames[index] txt_filename = self.txt_filenames[index] frames = self.read_video(filename) lm = np.loadtxt(txt_filename).astype(np.float32) lm = lm.reshape([len(frames), -1, 2]) out_images, out_trans_params = list(), list() for i in range(len(frames)): out_img, _, out_trans_param \ = self.image_transform(frames[i], lm[i]) out_images.append(out_img[None]) out_trans_params.append(out_trans_param[None]) return { 'imgs': torch.cat(out_images, 0), 'trans_param':torch.cat(out_trans_params, 0), 'filename': filename } def read_video(self, filename): frames = list() cap = cv2.VideoCapture(filename) while cap.isOpened(): ret, frame = cap.read() if ret: frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame = Image.fromarray(frame) frames.append(frame) else: break cap.release() return frames def image_transform(self, images, lm): W,H = images.size if np.mean(lm) == -1: lm = (self.lm3d_std[:, :2]+1)/2. lm = np.concatenate( [lm[:, :1]*W, lm[:, 1:2]*H], 1 ) else: lm[:, -1] = H - 1 - lm[:, -1] trans_params, img, lm, _ = align_img(images, lm, self.lm3d_std) img = torch.tensor(np.array(img)/255., dtype=torch.float32).permute(2, 0, 1) lm = torch.tensor(lm) trans_params = np.array([float(item) for item in np.hsplit(trans_params, 5)]) trans_params = torch.tensor(trans_params.astype(np.float32)) return img, lm, trans_params def main(opt, model): import torch.multiprocessing torch.multiprocessing.set_sharing_strategy('file_system') filenames, keypoint_filenames = get_data_path(opt.input_dir, opt.keypoint_dir) dataset = VideoPathDataset(filenames, keypoint_filenames, opt.bfm_folder) dataloader = torch.utils.data.DataLoader( dataset, batch_size=1, # can only be set to one here!
shuffle=False, drop_last=False, num_workers=8, ) batch_size = opt.inference_batch_size for data in tqdm(dataloader): num_batch = data['imgs'][0].shape[0] // batch_size + 1 pred_coeffs = list() for index in range(num_batch): data_input = { 'imgs': data['imgs'][0,index*batch_size:(index+1)*batch_size], } model.set_input(data_input) model.test() pred_coeff = {key:model.pred_coeffs_dict[key].cpu().numpy() for key in model.pred_coeffs_dict} pred_coeff = np.concatenate([ pred_coeff['id'], pred_coeff['exp'], pred_coeff['tex'], pred_coeff['angle'], pred_coeff['gamma'], pred_coeff['trans']], 1) pred_coeffs.append(pred_coeff) visuals = model.get_current_visuals() # get image results if False: # debug for name in visuals: images = visuals[name] for i in range(images.shape[0]): image_numpy = tensor2im(images[i]) save_image( image_numpy, os.path.join( opt.output_dir, os.path.basename(data['filename'][0])+str(i).zfill(5)+'.jpg') ) exit() pred_coeffs = np.concatenate(pred_coeffs, 0) pred_trans_params = data['trans_param'][0].cpu().numpy() name = data['filename'][0].split('/')[-2:] name[-1] = os.path.splitext(name[-1])[0] + '.mat' os.makedirs(os.path.join(opt.output_dir, name[-2]), exist_ok=True) savemat( os.path.join(opt.output_dir, name[-2], name[-1]), {'coeff':pred_coeffs, 'transform_params':pred_trans_params} ) if __name__ == '__main__': opt = InferenceOptions().parse() # get test options model = create_model(opt) model.setup(opt) model.device = 'cuda:0' model.parallelize() model.eval() main(opt, model) ================================================ FILE: scripts/inference_options.py ================================================ from .base_options import BaseOptions class InferenceOptions(BaseOptions): """This class includes test options. It also includes shared options defined in BaseOptions. """ def initialize(self, parser): parser = BaseOptions.initialize(self, parser) # define shared options parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') parser.add_argument('--dataset_mode', type=str, default=None, help='chooses how datasets are loaded. [None | flist]') parser.add_argument('--input_dir', type=str, help='the folder of the input files') parser.add_argument('--keypoint_dir', type=str, help='the folder of the keypoint files') parser.add_argument('--output_dir', type=str, default='mp4', help='the output dir to save the extracted coefficients') parser.add_argument('--save_split_files', action='store_true', help='save split files or not') parser.add_argument('--inference_batch_size', type=int, default=8) # Dropout and Batchnorm has different behavior during training and test. 
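For orientation, a sketch of inspecting one of the per-video .mat files written by main() above. The path is a placeholder, and the coefficient width depends on the Deep3DFaceRecon model (257 for the default coefficient split).

```python
from scipy.io import loadmat

mat = loadmat('output_dir/id00001/video0001.mat')   # placeholder: <output_dir>/<speaker>/<video>.mat
coeff = mat['coeff']                                 # (num_frames, D) 3DMM coefficients
trans = mat['transform_params']                      # (num_frames, 5) cropping parameters from align_img
print(coeff.shape, trans.shape)
```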
self.isTrain = False return parser ================================================ FILE: scripts/prepare_vox_lmdb.py ================================================ import os import cv2 import lmdb import argparse import multiprocessing import numpy as np from glob import glob from io import BytesIO from tqdm import tqdm from PIL import Image from scipy.io import loadmat from torchvision.transforms import functional as trans_fn def format_for_lmdb(*args): key_parts = [] for arg in args: if isinstance(arg, int): arg = str(arg).zfill(7) key_parts.append(arg) return '-'.join(key_parts).encode('utf-8') class Resizer: def __init__(self, size, kp_root, coeff_3dmm_root, img_format): self.size = size self.kp_root = kp_root self.coeff_3dmm_root = coeff_3dmm_root self.img_format = img_format def get_resized_bytes(self, img, img_format='jpeg'): img = trans_fn.resize(img, (self.size, self.size), interpolation=Image.BICUBIC) buf = BytesIO() img.save(buf, format=img_format) img_bytes = buf.getvalue() return img_bytes def prepare(self, filename): frames = {'img':[], 'kp':None, 'coeff_3dmm':None} cap = cv2.VideoCapture(filename) while cap.isOpened(): ret, frame = cap.read() if ret: frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) img_pil = Image.fromarray(frame) img_bytes = self.get_resized_bytes(img_pil, self.img_format) frames['img'].append(img_bytes) else: break cap.release() video_name = os.path.splitext(os.path.basename(filename))[0] keypoint_byte = get_others(self.kp_root, video_name, 'keypoint') coeff_3dmm_byte = get_others(self.coeff_3dmm_root, video_name, 'coeff_3dmm') frames['kp'] = keypoint_byte frames['coeff_3dmm'] = coeff_3dmm_byte return frames def __call__(self, index_filename): index, filename = index_filename result = self.prepare(filename) return index, result, filename def get_others(root, video_name, data_type): if root is None: return else: assert data_type in ('keypoint', 'coeff_3dmm') if os.path.isfile(os.path.join(root, 'train', video_name+'.mat')): file_path = os.path.join(root, 'train', video_name+'.mat') else: file_path = os.path.join(root, 'test', video_name+'.mat') if data_type == 'keypoint': return_byte = convert_kp(file_path) else: return_byte = convert_3dmm(file_path) return return_byte def convert_kp(file_path): file_mat = loadmat(file_path) kp_byte = file_mat['landmark'].tobytes() return kp_byte def convert_3dmm(file_path): file_mat = loadmat(file_path) coeff_3dmm = file_mat['coeff'] crop_param = file_mat['transform_params'] _, _, ratio, t0, t1 = np.hsplit(crop_param.astype(np.float32), 5) crop_param = np.concatenate([ratio, t0, t1], 1) coeff_3dmm_cat = np.concatenate([coeff_3dmm, crop_param], 1) coeff_3dmm_byte = coeff_3dmm_cat.tobytes() return coeff_3dmm_byte def prepare_data(path, keypoint_path, coeff_3dmm_path, out, n_worker, sizes, chunksize, img_format): filenames = list() VIDEO_EXTENSIONS_LOWERCASE = {'mp4'} VIDEO_EXTENSIONS = VIDEO_EXTENSIONS_LOWERCASE.union({f.upper() for f in VIDEO_EXTENSIONS_LOWERCASE}) extensions = VIDEO_EXTENSIONS for ext in extensions: filenames += glob(f'{path}/**/*.{ext}', recursive=True) train_video, test_video = [], [] for item in filenames: if "/train/" in item: train_video.append(item) else: test_video.append(item) print(len(train_video), len(test_video)) with open(os.path.join(out, 'train_list.txt'),'w') as f: for item in train_video: item = os.path.splitext(os.path.basename(item))[0] f.write(item + '\n') with open(os.path.join(out, 'test_list.txt'),'w') as f: for item in test_video: item = os.path.splitext(os.path.basename(item))[0] 
f.write(item + '\n') filenames = sorted(filenames) total = len(filenames) os.makedirs(out, exist_ok=True) for size in sizes: lmdb_path = os.path.join(out, str(size)) with lmdb.open(lmdb_path, map_size=1024 ** 4, readahead=False) as env: with env.begin(write=True) as txn: txn.put(format_for_lmdb('length'), format_for_lmdb(total)) resizer = Resizer(size, keypoint_path, coeff_3dmm_path, img_format) with multiprocessing.Pool(n_worker) as pool: for idx, result, filename in tqdm( pool.imap_unordered(resizer, enumerate(filenames), chunksize=chunksize), total=total): filename = os.path.basename(filename) video_name = os.path.splitext(filename)[0] txn.put(format_for_lmdb(video_name, 'length'), format_for_lmdb(len(result['img']))) for frame_idx, frame in enumerate(result['img']): txn.put(format_for_lmdb(video_name, frame_idx), frame) if result['kp']: txn.put(format_for_lmdb(video_name, 'keypoint'), result['kp']) if result['coeff_3dmm']: txn.put(format_for_lmdb(video_name, 'coeff_3dmm'), result['coeff_3dmm']) if __name__ == '__main__': parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--path', type=str, help='a path to input directiory') parser.add_argument('--keypoint_path', type=str, help='a path to output directory', default=None) parser.add_argument('--coeff_3dmm_path', type=str, help='a path to output directory', default=None) parser.add_argument('--out', type=str, help='a path to output directory') parser.add_argument('--sizes', type=int, nargs='+', default=(256,)) parser.add_argument('--n_worker', type=int, help='number of worker processes', default=8) parser.add_argument('--chunksize', type=int, help='approximate chunksize for each worker', default=10) parser.add_argument('--img_format', type=str, default='jpeg') args = parser.parse_args() prepare_data(**vars(args)) ================================================ FILE: third_part/PerceptualSimilarity/models/__init__.py ================================================ ================================================ FILE: third_part/PerceptualSimilarity/models/base_model.py ================================================ import os import torch from torch.autograd import Variable from pdb import set_trace as st from IPython import embed class BaseModel(): def __init__(self): pass; def name(self): return 'BaseModel' def initialize(self, use_gpu=True): self.use_gpu = use_gpu self.Tensor = torch.cuda.FloatTensor if self.use_gpu else torch.Tensor # self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) def forward(self): pass def get_image_paths(self): pass def optimize_parameters(self): pass def get_current_visuals(self): return self.input def get_current_errors(self): return {} def save(self, label): pass # helper saving function that can be used by subclasses def save_network(self, network, path, network_label, epoch_label): save_filename = '%s_net_%s.pth' % (epoch_label, network_label) save_path = os.path.join(path, save_filename) torch.save(network.state_dict(), save_path) # helper loading function that can be used by subclasses def load_network(self, network, network_label, epoch_label): # embed() save_filename = '%s_net_%s.pth' % (epoch_label, network_label) save_path = os.path.join(self.save_dir, save_filename) print('Loading network from %s'%save_path) network.load_state_dict(torch.load(save_path)) def update_learning_rate(): pass def get_image_paths(self): return self.image_paths def save_done(self, flag=False): np.save(os.path.join(self.save_dir, 'done_flag'),flag) 
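A read-back sketch for the lmdb layout created by prepare_data above; the database path and video name are placeholders. Keys come from format_for_lmdb, which zero-pads integers to seven digits and joins the parts with '-'.

```python
from io import BytesIO

import lmdb
from PIL import Image

db_path = './dataset/vox_lmdb/256'      # placeholder: <out>/<size>
video = 'some_video_name'               # placeholder: video basename without extension
with lmdb.open(db_path, readonly=True, lock=False) as env:
    with env.begin(write=False) as txn:
        total = int(txn.get(b'length'))                               # number of videos in the db
        n_frames = int(txn.get(f'{video}-length'.encode()))
        frame0 = Image.open(BytesIO(txn.get(f'{video}-{str(0).zfill(7)}'.encode())))
print(total, n_frames, frame0.size)
```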
np.savetxt(os.path.join(self.save_dir, 'done_flag'),[flag,],fmt='%i') ================================================ FILE: third_part/PerceptualSimilarity/models/dist_model.py ================================================ from __future__ import absolute_import import sys sys.path.append('..') sys.path.append('.') import numpy as np import torch from torch import nn import os from collections import OrderedDict from torch.autograd import Variable import itertools from .base_model import BaseModel from scipy.ndimage import zoom import fractions import functools import skimage.transform from IPython import embed from . import networks_basic as networks from third_part.PerceptualSimilarity.util import util # from util import util class DistModel(BaseModel): def name(self): return self.model_name def initialize(self, model='net-lin', net='alex', pnet_rand=False, pnet_tune=False, model_path=None, colorspace='Lab', use_gpu=True, printNet=False, spatial=False, spatial_shape=None, spatial_order=1, spatial_factor=None, is_train=False, lr=.0001, beta1=0.5, version='0.1'): ''' INPUTS model - ['net-lin'] for linearly calibrated network ['net'] for off-the-shelf network ['L2'] for L2 distance in Lab colorspace ['SSIM'] for ssim in RGB colorspace net - ['squeeze','alex','vgg'] model_path - if None, will look in weights/[NET_NAME].pth colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM use_gpu - bool - whether or not to use a GPU printNet - bool - whether or not to print network architecture out spatial - bool - whether to output an array containing varying distances across spatial dimensions spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below). spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images. spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear). 
is_train - bool - [True] for training mode lr - float - initial learning rate beta1 - float - initial momentum term for adam version - 0.1 for latest, 0.0 was original ''' BaseModel.initialize(self, use_gpu=use_gpu) self.model = model self.net = net self.use_gpu = use_gpu self.is_train = is_train self.spatial = spatial self.spatial_shape = spatial_shape self.spatial_order = spatial_order self.spatial_factor = spatial_factor self.model_name = '%s [%s]'%(model,net) if(self.model == 'net-lin'): # pretrained net + linear layer self.net = networks.PNetLin(use_gpu=use_gpu,pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,use_dropout=True,spatial=spatial,version=version) kw = {} if not use_gpu: kw['map_location'] = 'cpu' if(model_path is None): import inspect # model_path = './PerceptualSimilarity/weights/v%s/%s.pth'%(version,net) model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', '..', 'weights/v%s/%s.pth'%(version,net))) if(not is_train): print('Loading model from: %s'%model_path) self.net.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage)) elif(self.model=='net'): # pretrained network assert not self.spatial, 'spatial argument not supported yet for uncalibrated networks' self.net = networks.PNet(use_gpu=use_gpu,pnet_type=net) self.is_fake_net = True elif(self.model in ['L2','l2']): self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing self.model_name = 'L2' elif(self.model in ['DSSIM','dssim','SSIM','ssim']): self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace) self.model_name = 'SSIM' else: raise ValueError("Model [%s] not recognized." % self.model) self.parameters = list(self.net.parameters()) if self.is_train: # training mode # extra network on top to go from distances (d0,d1) => predicted human judgment (h*) self.rankLoss = networks.BCERankingLoss(use_gpu=use_gpu) self.parameters+=self.rankLoss.parameters self.lr = lr self.old_lr = lr self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999)) else: # test mode self.net.eval() if(printNet): print('---------- Networks initialized -------------') networks.print_network(self.net) print('-----------------------------------------------') def forward_pair(self,in1,in2,retPerLayer=False): if(retPerLayer): return self.net.forward(in1,in2, retPerLayer=True) else: return self.net.forward(in1,in2) def forward(self, in0, in1, retNumpy=True): ''' Function computes the distance between image patches in0 and in1 INPUTS in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1] retNumpy - [False] to return as torch.Tensor, [True] to return as numpy array OUTPUT computed distances between in0 and in1 ''' self.input_ref = in0 self.input_p0 = in1 if(self.use_gpu): self.input_ref = self.input_ref.cuda() self.input_p0 = self.input_p0.cuda() self.var_ref = Variable(self.input_ref,requires_grad=True) self.var_p0 = Variable(self.input_p0,requires_grad=True) self.d0 = self.forward_pair(self.var_ref, self.var_p0) self.loss_total = self.d0 def convert_output(d0): if(retNumpy): ans = d0.cpu().data.numpy() if not self.spatial: ans = ans.flatten() else: assert(ans.shape[0] == 1 and len(ans.shape) == 4) return ans[0,...].transpose([1, 2, 0]) # Reshape to usual numpy image format: (height, width, channels) return ans else: return d0 if self.spatial: L = [convert_output(x) for x in self.d0] spatial_shape = self.spatial_shape if spatial_shape is None: if(self.spatial_factor is None): spatial_shape = 
(in0.size()[2],in0.size()[3]) else: spatial_shape = (max([x.shape[0] for x in L])*self.spatial_factor, max([x.shape[1] for x in L])*self.spatial_factor) L = [skimage.transform.resize(x, spatial_shape, order=self.spatial_order, mode='edge') for x in L] L = np.mean(np.concatenate(L, 2) * len(L), 2) return L else: return convert_output(self.d0) # ***** TRAINING FUNCTIONS ***** def optimize_parameters(self): self.forward_train() self.optimizer_net.zero_grad() self.backward_train() self.optimizer_net.step() self.clamp_weights() def clamp_weights(self): for module in self.net.modules(): if(hasattr(module, 'weight') and module.kernel_size==(1,1)): module.weight.data = torch.clamp(module.weight.data,min=0) def set_input(self, data): self.input_ref = data['ref'] self.input_p0 = data['p0'] self.input_p1 = data['p1'] self.input_judge = data['judge'] if(self.use_gpu): self.input_ref = self.input_ref.cuda() self.input_p0 = self.input_p0.cuda() self.input_p1 = self.input_p1.cuda() self.input_judge = self.input_judge.cuda() self.var_ref = Variable(self.input_ref,requires_grad=True) self.var_p0 = Variable(self.input_p0,requires_grad=True) self.var_p1 = Variable(self.input_p1,requires_grad=True) def forward_train(self): # run forward pass self.d0 = self.forward_pair(self.var_ref, self.var_p0) self.d1 = self.forward_pair(self.var_ref, self.var_p1) self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge) # var_judge self.var_judge = Variable(1.*self.input_judge).view(self.d0.size()) self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.) return self.loss_total def backward_train(self): torch.mean(self.loss_total).backward() def compute_accuracy(self,d0,d1,judge): ''' d0, d1 are Variables, judge is a Tensor ''' d1_lt_d0 = (d1
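Finally, a hedged sketch of using the DistModel above as an off-the-shelf LPIPS metric with the bundled v0.1 AlexNet weights (CPU here for simplicity; pass use_gpu=True on a CUDA machine).

```python
import torch
from third_part.PerceptualSimilarity.models import dist_model

model = dist_model.DistModel()
model.initialize(model='net-lin', net='alex', use_gpu=False)   # loads weights/v0.1/alex.pth
img0 = torch.rand(1, 3, 64, 64) * 2 - 1                        # patches scaled to [-1, 1]
img1 = torch.rand(1, 3, 64, 64) * 2 - 1
dist = model.forward(img0, img1)                               # numpy array, one distance per pair
print(dist)
```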