[
  {
    "path": ".gitignore",
    "content": ".idea/\nnote1/fit_a_line.inference.model\nnote3/image/\nnote8/models/\nnote9/models/\nnote10/log/\nnote11/images/\nnote11/infer_model/\nnote12/datasets/\nnote12/infer_model/\nnote13/train_image/\nnote13/infer_image/\nnote13/infer_model/\nnote13/datasets/\nnote14/infer_model/\nnote14/images/\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "# LearnPaddle2\nPaddlePaddle新版本Fluid教程，使用的PaddlePaddle版本为1.2.0，Python版本为3.5。\n\n# 文章博客地址\n\n\n* [第一章 新版本PaddlePaddle的安装](https://blog.doiduoyi.com/articles/1584974303857.html)\n* [第二章 计算1+1](https://blog.doiduoyi.com/articles/1584974387872.html)\n* [第三章 线性回归](https://blog.doiduoyi.com/articles/1584974471592.html)\n* [第四章 卷积神经网络](https://blog.doiduoyi.com/articles/1584974540988.html)\n* [第五章 循环神经网络](https://blog.doiduoyi.com/articles/1584974601202.html)\n* [第六章 生成对抗网络](https://blog.doiduoyi.com/articles/1584974661687.html)\n* [第七章 强化学习](https://blog.doiduoyi.com/articles/1584974728689.html)\n* [第八章 模型的保存与使用](https://blog.doiduoyi.com/articles/1584974792165.html)\n* [第九章 迁移学习](https://blog.doiduoyi.com/articles/1584974849177.html)\n* [第十章 VisualDL 训练可视化](https://blog.doiduoyi.com/articles/1584974915236.html)\n* [第十一章 自定义图像数据集识别](https://blog.doiduoyi.com/articles/1584974968544.html)\n* [第十二章 自定义文本数据集分类](https://blog.doiduoyi.com/articles/1584975038292.html)\n* [第十三章 自定义图像数生成](https://blog.doiduoyi.com/articles/1584975142214.html)\n* [第十四章 把预测模型部署在服务器](https://blog.doiduoyi.com/articles/1584975208040.html)\n* [第十五章 把预测模型部署到Android手机上](https://blog.doiduoyi.com/articles/1584975263604.html)\n\n \n# 补充\n来都来了，觉得不错的话，给个star吧。\n"
  },
  {
    "path": "note1/README.md",
    "content": "﻿@[TOC]\n\n# 前言\n这一章我们介绍如何安装新版本的PaddlePaddle，这里说的新版本主要是说Fluid版本。Fluid 是设计用来让用户像Pytorch和Tensorflow Eager Execution一样执行程序。在这些系统中，不再有模型这个概念，应用也不再包含一个用于描述Operator图或者一系列层的符号描述，而是像通用程序那样描述训练或者预测的过程。也就是说PaddlePaddle从Fluid版本开始使用动态图机制，所以我们这个系列也是使用Fluid版本编写的教程。\n\n# 环境\n - 系统：64位Windows 10专业版，64位Ubuntu 16.04 \n - Python环境：Python 3.5\n - 内存：8G\n\n# Windows下安装\nPaddlePaddle在1.2版本之后开始支持Windows，也就是说使用Windows的用户不需要再安装Docker容器，或者使用Windows的Liunx子系统，直接可以在Windows系统本身安装PaddlePaddle。下面我们就介绍如何在Windows安装PaddlePaddle，分为两个部分介绍，首先安装Python 3.5环境，然后再使用命令安装PaddlePaddle。\n\n## 安装Python\n1、本系列使用的是Python 3.5，官方在Windows上支持Python2.7.15，Python3.5.x，Python3.6.x，Python3.7.x。读者根据自己的实际情况安装自己喜欢的版本。官网下载页面：https://www.python.org/downloads/windows/ ，官网下载地址：https://www.python.org/ftp/python/3.5.4/python-3.5.4-amd64.exe\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190124144459791.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n2、双击运行Python 3.5安装包开始安装，记住要选上添加环境变量，这很重要，之后使用命令都要依赖这个环境变量，要不每次都要进入到`pip`的目录比较麻烦。然后点击`Install Now`开始安装。\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190124145152305.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n3、安装完成之后，测试安装是否成功，打开`Windows PowerShell`或者`cmd`，笔者的系统是Windows 10，可以使用`Windows PowerShell`，如果读者是其他系统，可以使用`cmd`。用命令`python -V`查看是否安装成功。正常安装之后可以显示安装Python的版本。\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190124145557657.png)\n\n## 安装PaddlePaddle\nPaddlePaddle支持Windows之后，安装起来非常简单，只需要一条命令就可以完成安装。\n\n - 安装CPU版本，打开`Windows PowerShell`，输入以下命令。可以使用`==`指定安装PaddlePaddle的版本，如没有指定版本，默认安装是最新版本。`-i`后面是镜像源地址，使用国内镜像源可以大大提高下载速度：\n```\npip3 install paddlepaddle==1.2.0 -i https://mirrors.aliyun.com/pypi/simple/\n```\n\n - 安装GPU版本，目前不支持Windows的GPU版本，支持后会更新。\n\n - 测试安装是否成功，在`Windows PowerShell`中输入命令`python`，进入到Python 编辑环境，并输入以下代码，导没有保存证明安装成功：\n```\nimport 
paddle.fluid\n```\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190124151103405.png)\n\n# Ubuntu下安装\n下面介绍在Ubuntu系统下安装PaddlePaddle，PaddlePaddle支持64位的Ubuntu 14.04 /16.04 /18.04系统，Python支持Python2.7.15，Python3.5.x，Python3.6.x，Python3.7.x。\n\n - 安装Python 3.5（通常不需要执行）。通常情况下Ubuntu 16.04自带的就是Python 3.5，其他Ubuntu的版本自带的可能是其他版本，不过没有关系，PaddlePaddle基本都支持，所以不必专门安装Python3.5。\n```\nsudo apt install python3.5\nsudo apt install python3.5-dev\n```\n\n - 安装CPU版本，打开Ubuntu的终端，快捷键是`Ctrl+Alt+T`，输入以下命令。可以使用`==`指定安装PaddlePaddle的版本，如没有指定版本，默认安装是最新版本。`-i`后面是镜像源地址，使用国内镜像源可以大大提高下载速度：\n```\npip3 install paddlepaddle==1.2.0 -i https://mirrors.aliyun.com/pypi/simple/\n```\n\n - 安装GPU版本，安装GPU版本之前，要先安装CUDA，可以查看笔者之前的文章[《Ubuntu安装和卸载CUDA和CUDNN》](https://blog.csdn.net/qq_33200967/article/details/80689543)，安装完成 CUDA 9 和 CUDNN 7 之后，再安装PaddlePaddle的GPU版本，安装命令如下。可以使用`==`指定安装PaddlePaddle的版本和CUDA、CUDNN的版本，这必须要跟读者系统本身安装的CUDA版本对应，比如以下命令就是安装支持CUDA 9.0和CUDNN 7的PaddlePaddle版本。`-i`后面是镜像源地址，使用国内镜像源可以大大提高下载速度：\n```\npip3 install paddlepaddle-gpu==1.2.0.post97 -i https://mirrors.aliyun.com/pypi/simple/\n```\n\n - 测试安装是否成功，在终端中输入命令`python3`，进入到Python 编辑环境，并输入以下代码，正确情况下如图所示：\n```\nimport paddle.fluid\n```\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190125093720742.png)\n\n# 源码编译\n这部分我们将介绍使用源码编译PaddlePaddle，可以通过这种方式安装符合读者需求的PaddlePaddle，比如笔者的电脑安装的是CUDA 10 和 CUDNN 7，而目前官方提供的没有支持CUDA 10 和 CUDNN 7的PaddlePaddle版本，所以笔者就可以通过源码编译的方式编译PaddlePaddle安装包，当然也要PaddlePaddle支持才行。\n\n## Windows下源码编译\n下面我们将介绍在Windows系统下进行源码编译PaddlePaddle。目前支持使用的系统是64位的Windows 10 家庭版/专业版/企业版。\n\n1. 安装`Visual Studio 2015 Update3`。下载地址：https://visualstudio.microsoft.com/zh-hans/vs/older-downloads/ ，因为是旧版本，还有`加入免费的 Dev Essentials 计划`才能正常下载。\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190125155716929.png)\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190125155913275.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n\n2. 
安装`cmake 3.13`，下载cmake的安装包，下载地址：https://cmake.org/download/ ，一路默认，只需要在添加环境变量的时候注意添加环境变量就可以了。如果存在环境变量问题，可以重启系统。\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190125152728392.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n3. 安装Python的依赖库，只要执行以下命令。关于Windows安装Python，在“Windows下安装”部分已经介绍过，这里就不介绍了。\n```\npip3 install numpy\npip3 install protobuf\npip3 install wheel\n```\n\n4. 安装 git 工具。git的下载地址：https://git-scm.com/downloads ，下载git的安装包，安装的时候一路默认就可以了。\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190125153826299.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n5. 右键打开`Git Bash Here`，执行以下两条命令。将PaddlePaddle的源码clone在当下目录下的Paddle的文件夹中，并进入Paddle目录下，操作如下图所示，之后的命令也是在这个终端操作：\n```\ngit clone https://github.com/PaddlePaddle/Paddle.git\ncd Paddle\n```\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190125164055182.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190125164157348.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n6. 切换到较稳定release分支下进行编译，如笔者选择1.2版本的代码：\n```\ngit checkout release/1.2\n```\n\n7. 创建名为build的目录并进入：\n```\nmkdir build\ncd build\n```\n\n8. 执行编译\n\t- 编译**CPU版本**命令如下：\n\t```\n\tcmake .. 
-G \"Visual Studio 14 2015 Win64\" -DPY_VERSION=3.5 -DPYTHON_INCLUDE_DIR=${PYTHON_INCLUDE_DIRS} -DPYTHON_LIBRARY=${PYTHON_LIBRARY} -DPYTHON_EXECUTABLE=${PYTHON_EXECUTABLE} -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release\n\t```\n\t\n\t-  编译**GPU版本**，目前Windows还不支持GPU，支持后会更新。\n\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190125164353250.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n9. 下载第三方依赖包（openblas，snappystream），下载地址：https://github.com/wopeizl/Paddle_deps ，将整个`third_party`文件夹放到上面第7步创建的`build`目录下。\n10. 使用`Blend for Visual Studio 2015` 打开`paddle.sln`文件，选择平台为`x64`，配置为`Release`，开始编译 \n11. 编译成功后进入`\\paddle\\build\\python\\dist`目录下找到生成的`.whl`包\n12. 执行以下命令安装编译好的PaddlePaddle包：\n```\npip3 install （whl包的名字）\n```\n\n## Ubuntu本地下源码编译\n下面介绍的是使用Ubuntu编译PaddlePaddle源码，笔者的系统是64位的Ubuntu 16.04，Python环境是Python 3.5。\n\n### 安装openCV\n1.  更新apt的源，命令如下：\n```\nsudo apt update\n```\n\n2. 下载openCV源码，官方地址：https://opencv.org/releases.html ， 笔者下载的是3.4.5版本，选择的是`Sources`点击下载。\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190125095611936.png)\n\n3. 解压openCV源码，命令如下：\n```\nunzip opencv-3.4.5.zip\n```\n\n4. 安装可能需要的依赖库，命令如下：\n```\nsudo apt-get install cmake\nsudo apt-get install build-essential libgtk2.0-dev libavcodec-dev libavformat-dev libjpeg.dev libtiff4.dev libswscale-dev libjasper-dev\n```\n\n5. 开始执行cmake。\n```\ncd opencv-3.4.5/\nmkdir my_build_dir\ncd my_build_dir\ncmake -D CMAKE_BUILD_TYPE=Release -D CMAKE_INSTALL_PREFIX=/usr/local ..\n```\n\n6. 开始执行编译\n```\nmake -j$(nproc)\n```\n\n7. 
执行安装命令\n```\nsudo make install\n```\n\n### 安装依赖环境\n编译PaddlePaddle源码之前，还需要安装以下的一些依赖环境。\n```\nsudo apt install python3.5-dev\nsudo apt-get update\nsudo apt-get install -y software-properties-common\nsudo add-apt-repository ppa:deadsnakes/ppa\nsudo apt install curl\nsudo curl https://bootstrap.pypa.io/get-pip.py -o - | python3.5\nsudo easy_install pip\nsudo apt install swig\nsudo apt install wget\nsudo pip install numpy==1.14.0\nsudo pip install protobuf==3.1.0\nsudo pip install wheel\nsudo apt install patchelf\n```\n\n### 编译PaddlePaddle\n\n1. 将PaddlePaddle的源码clone在当下目录下的Paddle的文件夹中，并进入Paddle目录下，命令如下：\n```\ngit clone https://github.com/PaddlePaddle/Paddle.git\ncd Paddle\n```\n\n2. 切换到较稳定release分支下进行编译，比如笔者使用的是1.2版本，读者可以根据自己的情况选择其他版本：\n```\ngit checkout release/1.2\n```\n\n3. 创建并进入一个叫build的目录下：\n```\nmkdir build && cd build\n```\n\n4. 执行cmake，这里分为CPU版本和GPU版本。\n\t- 编译**CPU版本**，命令如下。使用参数`-DPY_VERSION`指定编译的PaddlePaddle支持的Python版本，笔者这里选择的是Python 3.5。并且使用参数`-DWITH_FLUID_ONLY`指定不编译V2版本的PaddlePaddle代码。使用参数`-DWITH_GPU`指定不使用GPU，也就是只编译CPU版本：\n\t```\n\tcmake .. -DPY_VERSION=3.5 -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release\n\t```\n\t- 编译**GPU版本**，还要安装一下依赖环境，如下：\n\t\t1. 安装 CUDA 和 CUDNN，可以查看笔者之前的文章[《Ubuntu安装和卸载CUDA和CUDNN》](https://blog.csdn.net/qq_33200967/article/details/80689543)\n\t\t2. 安装nccl2，命令如下\n\t\t```\n\t\twget http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb\n\t\tdpkg -i nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb\n\t\tsudo apt-get install -y libnccl2=2.2.13-1+cuda9.0 libnccl-dev=2.2.13-1+cuda9.0\n\t\t ```\n\t \t3. 执行cmake。使用参数`-DPY_VERSION`指定编译的PaddlePaddle支持的Python版本，笔者这里选择的是Python 3.5。并且使用参数`-DWITH_FLUID_ONLY`指定不编译V2版本的PaddlePaddle代码。使用参数`-DWITH_GPU`指定使用GPU，同时编译支持CPU和GPU版本的PaddlePaddle。\n\t \t```\n\t \tcmake .. -DPY_VERSION=3.5 -DWITH_FLUID_ONLY=ON -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release\n\t\t```\n\n5. 
使用以下命令正式编译，编译时间比较长：\n```\nmake -j$(nproc)\n```\n\n6. 编译成功后进入`/paddle/build/python/dist`目录下找到生成的PaddlePaddle`.whl`包，可以使用这个命令进入到指定目录。\n```\ncd /paddle/build/python/dist\n```\n\n7. 在当前机器或目标机器安装编译好的`.whl`包：\n```\npip3 install （whl包的名字）\n```\n\n## Ubuntu使用Docker源码编译\n使用docker编译的安装包只能支持Ubuntu的PaddlePaddle，因为下载docker镜像也是Ubuntu系统的。通过使用docker编译PaddlePaddle得到的安装包，可以在docker本身使用，之后可以使用docker执行PaddlePaddle。也可以本地的Ubuntu上安装使用，不过要注意的是docker中的系统是Ubuntu 16.04。\n\n### 安装Docker\n1. 安装前准备\n```python\n# 卸载系统原有docker\nsudo apt-get remove docker docker-engine docker.io\n# 更新apt-get源 \nsudo apt-get update\n# 安装docker的依赖 \nsudo apt-get install apt-transport-https ca-certificates curl software-properties-common\ncurl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - \nsudo apt-key fingerprint 0EBFCD88\nsudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu && $(lsb_release -cs) && stable\"\n```\n\n2. 安装Docker，编译**CPU版本**使用。\n```python\n# 再次更新apt-get源 \nsudo apt-get update\n# 开始安装docker \nsudo apt-get install docker-ce\n# 加载docker \nsudo apt-cache madison docker-ce\n# 验证docker是否安装成功\nsudo docker run hello-world\n```\n正常情况下输出如下图所示。\n![在这里插入图片描述](https://img-blog.csdnimg.cn/201901251129238.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n3. 安装nvidia-docker，编译**GPU版本**使用（根据情况安装）。安装之前要确认本地有独立显卡并安装的显卡驱动。\n```\nwget -P /tmp https://github.com/NVIDIA/nvidia-docker/releases/download/v1.0.1/nvidia-docker_1.0.1-1_amd64.deb\nsudo dpkg -i /tmp/nvidia-docker*.deb && rm /tmp/nvidia-docker*.deb\n```\n\n### 编译PaddlePaddle\n1. 克隆PaddlePaddle源码：\n```\ngit clone https://github.com/PaddlePaddle/Paddle.git\n```\n\n2. 进入Paddle目录下：\n```\ncd Paddle\n```\n\n3. 
启动docker镜像\n\t- 编译**CPU版本**，使用命令\n\t```\n\tsudo docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash\n\t ```\n\t- 编译**GPU版本**，使用命令\n\t```\n\tsudo nvidia-docker run --name paddle-test -v $PWD:/paddle --network=host -it hub.baidubce.com/paddlepaddle/paddle:latest-dev /bin/bash\n\t```\n\n4. 进入Docker后进入paddle目录下：\n```\ncd paddle\n```\n\n5. 切换到较稳定release分支下进行编译，读者可以根据自己的情况选择其他版本：\n```\ngit checkout release/1.2\n```\n\n6. 创建并进入`/paddle/build`路径下：\n```\nmkdir -p /paddle/build && cd /paddle/build\n```\n\n7. 使用以下命令安装相关依赖：\n```\npip3 install protobuf==3.1.0\napt install patchelf\n```\n\n8. 执行cmake：\n\t- 编译**CPU版本**PaddlePaddle的命令。使用参数`-DPY_VERSION`指定编译的PaddlePaddle支持的Python版本，笔者这里选择的是Python 3.5。并且使用参数`-DWITH_FLUID_ONLY`指定不编译V2版本的PaddlePaddle代码。使用参数`-DWITH_GPU`指定不使用GPU，只编译支持CPU的PaddlePaddle：\n\t```\n\tcmake .. -DPY_VERSION=3.5 -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release\n\t```\n\t- 编译**GPU版本**PaddlePaddle的命令。使用参数`-DPY_VERSION`指定编译的PaddlePaddle支持的Python版本，笔者这里选择的是Python 3.5。并且使用参数`-DWITH_FLUID_ONLY`指定不编译V2版本的PaddlePaddle代码。使用参数`-DWITH_GPU`指定使用GPU，同时编译支持CPU和GPU版本的PaddlePaddle。这里要注意一下，我们拉取的这个镜像是CUDA 8.0的，不一定跟读者本地的CUDA版本对应，这可能导致编译的安装包在本地不可用：\n\t```\n\tcmake .. -DPY_VERSION=3.5 -DWITH_FLUID_ONLY=ON -DWITH_GPU=ON -DWITH_TESTING=OFF -DCMAKE_BUILD_TYPE=Release\n\t```\n\n9. 执行编译：\n```\nmake -j$(nproc)\n```\n\n10. 编译成功后，生成的安装包存放在`/paddle/build/python/dist`目录下，如果是想在docker中安装PaddlePaddle，可以直接在docker中打开这个目录。如果要在本地安装的话，还有先退出docker，并进入到这个目录：\n```python\n# 在docker镜像中安装\ncd /paddle/build/python/dist\n# 在Ubuntu本地安装】\nexit\ncd build/python/dist\n```\n\n11. 安装PaddlePaddle，执行以下命令：\n```\npip3.5 install （whl包的名字）\n```\n\n# 测试环境\n下面介绍在Windows测试PaddlePaddle的安装情况，Ubuntu环境类似。\n\n1. 
开发工具笔者喜欢使用PyCharm，下载地址：https://www.jetbrains.com/pycharm/download/#section=windows ， 笔者使用的是社区版本的PyCharm，因为这个是免费的[坏笑]。\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190124163830889.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n2. 创建一个新项目，并选择系统的Python环境，第一个是创建一个Python的虚拟环境，这里选择第二个外部的Python环境，点击`...`选择外部Python环境。\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190124164232564.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n3. 这里选择系统的Python环境，选择的路径是之前安装Python的路径。\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190124164050887.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n3. 创建一个Python程序文件，并命名为`test_paddle.py`，编写并执行以下测试代码，现在看不懂没有关系，跟着这个系列教程来学，我们会熟悉使用PaddlePaddle的：\n```python\n# Include libraries.\nimport paddle\nimport paddle.fluid as fluid\nimport numpy\nimport six\n\n# Configure the neural network.\ndef net(x, y):\n    y_predict = fluid.layers.fc(input=x, size=1, act=None)\n    cost = fluid.layers.square_error_cost(input=y_predict, label=y)\n    avg_cost = fluid.layers.mean(cost)\n    return y_predict, avg_cost\n\n                                \n# Define train function.\ndef train(save_dirname):\n    x = fluid.layers.data(name='x', shape=[13], dtype='float32')\n    y = fluid.layers.data(name='y', shape=[1], dtype='float32')\n    y_predict, avg_cost = net(x, y)\n    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)\n    sgd_optimizer.minimize(avg_cost)\n    train_reader = paddle.batch(\n        paddle.reader.shuffle(paddle.dataset.uci_housing.train(), buf_size=500),\n        batch_size=20)\n    place = fluid.CPUPlace()\n    exe = fluid.Executor(place)\n    def train_loop(main_program):\n        feeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n        
exe.run(fluid.default_startup_program())\n\n        PASS_NUM = 1000\n        for pass_id in range(PASS_NUM):\n            total_loss_pass = 0\n            for data in train_reader():\n                avg_loss_value, = exe.run(\n                    main_program, feed=feeder.feed(data), fetch_list=[avg_cost])\n                total_loss_pass += avg_loss_value\n                if avg_loss_value < 5.0:\n                    if save_dirname is not None:\n                        fluid.io.save_inference_model(\n                            save_dirname, ['x'], [y_predict], exe)\n                    return\n            print(\"Pass %d, total avg cost = %f\" % (pass_id, total_loss_pass))\n\n    train_loop(fluid.default_main_program())\n\n# Infer by using provided test data.\ndef infer(save_dirname=None):\n    place = fluid.CPUPlace()\n    exe = fluid.Executor(place)\n    inference_scope = fluid.core.Scope()\n    with fluid.scope_guard(inference_scope):\n        [inference_program, feed_target_names, fetch_targets] = (\n            fluid.io.load_inference_model(save_dirname, exe))\n        test_reader = paddle.batch(paddle.dataset.uci_housing.test(), batch_size=20)\n\n        test_data = six.next(test_reader())\n        test_feat = numpy.array(list(map(lambda x: x[0], test_data))).astype(\"float32\")\n        test_label = numpy.array(list(map(lambda x: x[1], test_data))).astype(\"float32\")\n\n        results = exe.run(inference_program,\n                          feed={feed_target_names[0]: numpy.array(test_feat)},\n                          fetch_list=fetch_targets)\n        print(\"infer results: \", results[0])\n        print(\"ground truth: \", test_label)\n\n                                \n# Run train and infer.\nif __name__ == \"__main__\":\n    save_dirname = \"fit_a_line.inference.model\"\n    train(save_dirname)\n    infer(save_dirname)\n```\n\n正常情况下会输出：\n```\nPass 0, total avg cost = 13527.760742\nPass 1, total avg cost = 12497.969727\nPass 2, total avg cost = 
11737.727539\nPass 3, total avg cost = 11017.893555\nPass 4, total avg cost = 9801.554688\nPass 5, total avg cost = 9150.510742\nPass 6, total avg cost = 8611.593750\nPass 7, total avg cost = 7924.654297\n......\n```\n\nPaddlePaddle的安装已经介绍完成，那我们开始进入深度学习的大门吧。本系列教程将会一步步介绍如何使用PaddlePaddle，并使用PaddlePaddle应用到实际项目中。\n\n项目代码GitHub地址：https://github.com/yeyupiaoling/LearnPaddle2/tree/master/note1\n\n**注意：** 最新代码以GitHub上的为准\n\n# 参考资料\n1.\thttp://www.paddlepaddle.org/documentation/docs/zh/1.2/beginners_guide/install/install_Ubuntu.html\n2.\thttp://www.paddlepaddle.org/documentation/docs/zh/1.2/beginners_guide/install/install_Windows.html\n3.\thttps://blog.csdn.net/cocoaqin/article/details/78163171\n\n"
  },
  {
    "path": "note1/test_paddle.py",
    "content": "# Include libraries.\nimport paddle\nimport paddle.fluid as fluid\nimport numpy\nimport six\n\n\n# Configure the neural network.\ndef net(x, y):\n    y_predict = fluid.layers.fc(input=x, size=1, act=None)\n    cost = fluid.layers.square_error_cost(input=y_predict, label=y)\n    avg_cost = fluid.layers.mean(cost)\n    return y_predict, avg_cost\n\n\n# Define train function.\ndef train(save_dirname):\n    x = fluid.layers.data(name='x', shape=[13], dtype='float32')\n    y = fluid.layers.data(name='y', shape=[1], dtype='float32')\n    y_predict, avg_cost = net(x, y)\n    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)\n    sgd_optimizer.minimize(avg_cost)\n    train_reader = paddle.batch(\n        paddle.reader.shuffle(paddle.dataset.uci_housing.train(), buf_size=500),\n        batch_size=20)\n    place = fluid.CPUPlace()\n    exe = fluid.Executor(place)\n\n    def train_loop(main_program):\n        feeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n        exe.run(fluid.default_startup_program())\n\n        PASS_NUM = 1000\n        for pass_id in range(PASS_NUM):\n            total_loss_pass = 0\n            for data in train_reader():\n                avg_loss_value, = exe.run(\n                    main_program, feed=feeder.feed(data), fetch_list=[avg_cost])\n                total_loss_pass += avg_loss_value\n                if avg_loss_value < 5.0:\n                    if save_dirname is not None:\n                        fluid.io.save_inference_model(\n                            save_dirname, ['x'], [y_predict], exe)\n                    return\n            print(\"Pass %d, total avg cost = %f\" % (pass_id, total_loss_pass))\n\n    train_loop(fluid.default_main_program())\n\n\n# Infer by using provided test data.\ndef infer(save_dirname=None):\n    place = fluid.CPUPlace()\n    exe = fluid.Executor(place)\n    inference_scope = fluid.core.Scope()\n    with fluid.scope_guard(inference_scope):\n        [inference_program, 
feed_target_names, fetch_targets] = (\n            fluid.io.load_inference_model(save_dirname, exe))\n        test_reader = paddle.batch(paddle.dataset.uci_housing.test(), batch_size=20)\n\n        test_data = six.next(test_reader())\n        test_feat = numpy.array(list(map(lambda x: x[0], test_data))).astype(\"float32\")\n        test_label = numpy.array(list(map(lambda x: x[1], test_data))).astype(\"float32\")\n\n        results = exe.run(inference_program,\n                          feed={feed_target_names[0]: numpy.array(test_feat)},\n                          fetch_list=fetch_targets)\n        print(\"infer results: \", results[0])\n        print(\"ground truth: \", test_label)\n\n\n# Run train and infer.\nif __name__ == \"__main__\":\n    save_dirname = \"fit_a_line.inference.model\"\n    train(save_dirname)\n    infer(save_dirname)"
  },
  {
    "path": "note10/README.md",
    "content": "﻿@[TOC]\n\n# 前言\nVisualDL是一个面向深度学习任务设计的可视化工具，包含了scalar、参数分布、模型结构、图像可视化等功能。可以这样说：“所见即所得”。我们可以借助VisualDL来观察我们训练的情况，方便我们对训练的模型进行分析，改善模型的收敛情况。\n\n 1. `scalar`，趋势图，可用于训练测试误差的展示 \n![这里写图片描述](//img-blog.csdn.net/20180314105807560?watermark/2/text/Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70)\n 2. `image`, 图片的可视化，可用于卷积层或者其他参数的图形化展示 \n![这里写图片描述](//img-blog.csdn.net/20180314105838309?watermark/2/text/Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70)\n 3. `histogram`, 用于参数分布及变化趋势的展示 \n![这里写图片描述](//img-blog.csdn.net/20180314105859971?watermark/2/text/Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70)\n 4. `graph`，用于训练模型结构的可视化\n![这里写图片描述](//img-blog.csdn.net/20180314105922862?watermark/2/text/Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70)\n以上的图像来自[VisualDL的Github](https://github.com/PaddlePaddle/VisualDL)\n\n既然那么方便，那么我们就来尝试一下吧。VisualDL底层采用C++编写，但是它在提供C++ SDK的同时，也支持Python SDK，我们主要是使用Python的SDK。顺便说一下，VisualDL除了支持PaddlePaddle,之外，还支持pytorch, mxnet在内的大部分主流DNN平台。\n\n# VisualDL的安装\n本章只讲述在Ubuntu系统上的安装和使用，Mac的操作应该也差不多。\n\n## 使用pip安装\n使用pip安装非常简单，只要一条命令就够了，如下：\n```shell\npip3 install --upgrade visualdl\n```\n测试一下是否安装成功了，运行一个例子下载日志文件：\n```shell\n# 在当前位置下载一个日志\nvdl_create_scratch_log\n```\n然后再输入，启动VisualDL并加载这个日志信息：\n```shell\nvisualdl --logdir=scratch_log/ --port=8080\n```\n这里说明一下，visualDL的参数：\n\n - `host` 设定IP\n - `port` 设定端口\n - `model_pb` 指定 ONNX 格式的模型文件，这木方我们还没要用到\n\n**注意：** 如果是报以下的错误，那是因为protobuf版本过低的原因。\n```\nroot@test:/home/test/VisualDL# visualdl --logdir ./scratch_log --port 8080\nTraceback (most recent call last):\n  File \"/usr/local/bin/visualdl\", line 29, in <module>\n    import visualdl.server.graph as vdl_graph\n  File \"/usr/local/lib/python2.7/dist-packages/visualdl/server/graph.py\", line 23, in <module>\n    from . 
import onnx\n  File \"/usr/local/lib/python2.7/dist-packages/visualdl/server/onnx/__init__.py\", line 8, in <module>\n    from .onnx_pb2 import ModelProto\n  File \"/usr/local/lib/python2.7/dist-packages/visualdl/server/onnx/onnx_pb2.py\", line 213, in <module>\n    options=None, file=DESCRIPTOR),\nTypeError: __init__() got an unexpected keyword argument 'file'\n```\n\nprotobuf的版本要不小于3.5.0，如何小于这个版本可以使用以下命令升级：\n```\npip3 install protobuf -U\n```\n\n然后在浏览器上输入：\n```\nhttp://127.0.0.1:8080\n```\n即可看到一个可视化的界面，如下：\n![这里写图片描述](//img-blog.csdn.net/20180314124348701?watermark/2/text/Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70)\n\n## 使用源码安装\n如果读者出于各种情况，使用pip安装不能满足需求，那可以考虑使用源码安装VisualDL，操作如下：\n首先要安装依赖库：\n```shell\n# 安装npm\napt install npm\n# 安装node\napt install nodejs-legacy\n# 安装cmake\napt install cmake\n# 安装unzip\napt install unzip\n```\n然后在GitHub上clone最新的源码并打开：\n```shell\ngit clone https://github.com/PaddlePaddle/VisualDL.git\ncd VisualDL\n```\n之后是编译生成`whl`安装包：\n```shell\npython3 setup.py bdist_wheel\n```\n生成`whl`安装包之后，就可以使用pip命令安装这个安装包了，`*`号对应的是visualdl版本号，读者要根据实际情况来安装：\n```shell\npip3 install --upgrade dist/visualdl-*.whl\n```\n安装完成之后，同样可以使用在上一部分的[使用pip安装](http://mp.csdn.net/mdeditor/79127175#%E4%BD%BF%E7%94%A8pip%E5%AE%89%E8%A3%85)的测试方法测试安装是否成功。\n\n# 简单使用VisualDL\n我们编写下面这一小段的代码来学习VisualDL的使用，`test_visualdl.py`的代码如下：\n```python\n# 导入VisualDL的包\nfrom visualdl import LogWriter\n\n# 创建一个LogWriter，第一个参数是指定存放数据的路径，\n# 第二个参数是指定多少次写操作执行一次内存到磁盘的数据持久化\nlogw = LogWriter(\"./random_log\", sync_cycle=10000)\n\n# 创建训练和测试的scalar图，\n# mode是标注线条的名称，\n# scalar标注的是指定这个组件的tag\nwith logw.mode('train') as logger:\n    scalar0 = logger.scalar(\"scratch/scalar\")\n\nwith logw.mode('test') as logger:\n    scalar1 = logger.scalar(\"scratch/scalar\")\n\n# 读取数据\nfor step in range(1000):\n    scalar0.add_record(step, step * 1. / 1000)\n    scalar1.add_record(step, 1. - step * 1. 
/ 1000)\n```\n运行Python代码之后，在终端上输入，从上面的代码可以看到我们定义的路径是`./random_log`：\n```shell\nvisualdl --logdir=random_log/ --port=8080\n```\n然后在浏览器上输入：\n```\nhttp://127.0.0.1:8080\n```\n然后就可以看到刚才编写Python代码生成的图像了：\n![这里写图片描述](//img-blog.csdn.net/20180314123345810?watermark/2/text/Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70)\n\n经过这个例子，读者对VisualDL有了进一步的了解了，那么在接下来的我们就在实际的PaddlePaddle例子中使用我们的VisualDL。\n\n# 在PaddlePaddle使用VisualDL\n下面就介绍在PaddlePaddle训练中使用VisualDL，通过在训练的时候使用VisualDL不断收集训练的数据集，最终通过可视化展示出来。\n\n\n## 定义MobileNet V2神经网络\n创建一个`mobilenet_v2.py`来定义一个MobileNet V2神经网络。MobileNet V2是MobileNet V1的升级版，从名字可以看出这个网络是为例移动设备而诞生的，它最大的特点就是模型小，预测速度快，适合部署在移动设备上。MobileNet V2是将MobileNet V1和残差网络ResNet的残差单元结合起来，用Depthwise Convolutions代替残差单元的bottleneck，最重要的是与residuals block相反，通常的residuals block是先经过1×1的卷积，降低feature map通道数，然后再通过3×3卷积，最后重新经过1×1卷积将feature map通道数扩张回去；而且为了避免ReLU对特征的破坏，用线性层替换channel数较少层后的ReLU非线性激活。\n```python\nimport paddle.fluid as fluid\n\ndef conv_bn_layer(input, filter_size, num_filters, stride, padding, num_groups=1, if_act=True, use_cudnn=True):\n    conv = fluid.layers.conv2d(input=input,\n                               num_filters=num_filters,\n                               filter_size=filter_size,\n                               stride=stride,\n                               padding=padding,\n                               groups=num_groups,\n                               use_cudnn=use_cudnn,\n                               bias_attr=False)\n    bn = fluid.layers.batch_norm(input=conv)\n    if if_act:\n        return fluid.layers.relu6(bn)\n    else:\n        return bn\n\n\ndef shortcut(input, data_residual):\n    return fluid.layers.elementwise_add(input, data_residual)\n\n\ndef inverted_residual_unit(input,\n                           num_in_filter,\n                           num_filters,\n                           ifshortcut,\n                           stride,\n                           filter_size,\n                    
       padding,\n                           expansion_factor):\n    num_expfilter = int(round(num_in_filter * expansion_factor))\n\n    channel_expand = conv_bn_layer(input=input,\n                                   num_filters=num_expfilter,\n                                   filter_size=1,\n                                   stride=1,\n                                   padding=0,\n                                   num_groups=1,\n                                   if_act=True)\n\n    bottleneck_conv = conv_bn_layer(input=channel_expand,\n                                    num_filters=num_expfilter,\n                                    filter_size=filter_size,\n                                    stride=stride,\n                                    padding=padding,\n                                    num_groups=num_expfilter,\n                                    if_act=True,\n                                    use_cudnn=False)\n\n    linear_out = conv_bn_layer(input=bottleneck_conv,\n                               num_filters=num_filters,\n                               filter_size=1,\n                               stride=1,\n                               padding=0,\n                               num_groups=1,\n                               if_act=False)\n    if ifshortcut:\n        out = shortcut(input=input, data_residual=linear_out)\n        return out\n    else:\n        return linear_out\n\ndef invresi_blocks(input, in_c, t, c, n, s, name=None):\n    first_block = inverted_residual_unit(input=input,\n                                         num_in_filter=in_c,\n                                         num_filters=c,\n                                         ifshortcut=False,\n                                         stride=s,\n                                         filter_size=3,\n                                         padding=1,\n                                         expansion_factor=t)\n\n    last_residual_block = first_block\n    last_c = 
c\n\n    for i in range(1, n):\n        last_residual_block = inverted_residual_unit(input=last_residual_block,\n                                                     num_in_filter=last_c,\n                                                     num_filters=c,\n                                                     ifshortcut=True,\n                                                     stride=1,\n                                                     filter_size=3,\n                                                     padding=1,\n                                                     expansion_factor=t)\n    return last_residual_block\n\n\ndef net(input, class_dim, scale=1.0):\n    bottleneck_params_list = [\n        (1, 16, 1, 1),\n        (6, 24, 2, 2),\n        (6, 32, 3, 2),\n        (6, 64, 4, 2),\n        (6, 96, 3, 1),\n        (6, 160, 3, 2),\n        (6, 320, 1, 1),\n    ]\n\n    # conv1\n    input = conv_bn_layer(input,\n                          num_filters=int(32 * scale),\n                          filter_size=3,\n                          stride=2,\n                          padding=1,\n                          if_act=True)\n\n    # bottleneck sequences\n    i = 1\n    in_c = int(32 * scale)\n    for layer_setting in bottleneck_params_list:\n        t, c, n, s = layer_setting\n        i += 1\n        input = invresi_blocks(input=input,\n                               in_c=in_c,\n                               t=t,\n                               c=int(c * scale),\n                               n=n,\n                               s=s,\n                               name='conv' + str(i))\n        in_c = int(c * scale)\n    # last_conv\n    input = conv_bn_layer(input=input,\n                          num_filters=int(1280 * scale) if scale > 1.0 else 1280,\n                          filter_size=1,\n                          stride=1,\n                          padding=0,\n                          if_act=True)\n\n    feature = fluid.layers.pool2d(input=input,\n 
                                 pool_size=7,\n                                  pool_stride=1,\n                                  pool_type='avg',\n                                  global_pooling=True)\n    net = fluid.layers.fc(input=feature,\n                          size=class_dim,\n                          act='softmax')\n    return net\n```\n\n\n创建一个`train.py`开始训练。首先导入相关的依赖包。\n```python\nimport paddle as paddle\nimport paddle.dataset.cifar as cifar\nimport paddle.fluid as fluid\nimport mobilenet_v2\nfrom visualdl import LogWriter\n```\n\n创建VisualDL的记录器，通过这个记录器可以记录每次训练的数据，并存储在`log/`目录下。\n```python\n# 创建记录器\nlog_writer = LogWriter(dir='log/', sync_cycle=10)\n\n# 创建训练和测试记录数据工具\nwith log_writer.mode('train') as writer:\n    train_cost_writer = writer.scalar('cost')\n    train_acc_writer = writer.scalar('accuracy')\n    histogram = writer.histogram('histogram', num_buckets=50)\n\nwith log_writer.mode('test') as writer:\n    test_cost_writer = writer.scalar('cost')\n    test_acc_writer = writer.scalar('accuracy')\n```\n\n这里是定义一系列的操作，如定义输入层，获取MobileNet V2的分类器，克隆预测程序，定义优化方法。\n```python\n# 定义输入层\nimage = fluid.layers.data(name='image', shape=[3, 32, 32], dtype='float32')\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n\n# 获取分类器\nmodel = mobilenet_v2.net(image, 10)\n\n# 获取损失函数和准确率函数\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 获取训练和测试程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-3)\nopts = optimizer.minimize(avg_cost)\n```\n\n获取CIFAR的训练数据和测试数据，并创建一个执行器，MobileNet V2这个模型虽然使用在手机上的，但是在训练起来却不是那么快，最好使用GPU进行训练，要不是相当的慢。\n```python\n# 获取CIFAR数据\ntrain_reader = paddle.batch(cifar.train10(), batch_size=32)\ntest_reader = paddle.batch(cifar.test10(), batch_size=32)\n\n# 定义一个使用CPU的执行器\nplace = fluid.CUDAPlace(0)\n# place = fluid.CPUPlace()\nexe = 
fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n```\n\n这里从初始化程序中获取全部参数的名称，用于之后训练过程中输出参数的值，并记录到VisualDL中。\n```python\n# 定义日志的开始位置和获取参数名称\ntrain_step = 0\ntest_step = 0\nparams_name = fluid.default_startup_program().global_block().all_parameters()[0].name\n```\n\n开始训练模型，在训练过程中，把训练时的损失值保存到`train_cost_writer`中，把训练时的准确率保存到`train_acc_writer`中，把训练过程中的参数变化保存到`histogram`中。把测试时的损失值保存到`test_cost_writer`中，把测试时的准确率保存到`test_acc_writer`中。\n```python\n# 训练10次\nfor pass_id in range(10):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc, params = exe.run(program=fluid.default_main_program(),\n                                                feed=feeder.feed(data),\n                                                fetch_list=[avg_cost, acc, params_name])\n        # 保存训练的日志数据\n        train_step += 1\n        train_cost_writer.add_record(train_step, train_cost[0])\n        train_acc_writer.add_record(train_step, train_acc[0])\n        histogram.add_record(train_step, params.flatten())\n\n        # 每100个batch打印一次信息\n        if batch_id % 100 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %\n                  (pass_id, batch_id, train_cost[0], train_acc[0]))\n\n    # 进行测试\n    test_accs = []\n    test_costs = []\n    for batch_id, data in enumerate(test_reader()):\n        test_cost, test_acc = exe.run(program=test_program,\n                                      feed=feeder.feed(data),\n                                      fetch_list=[avg_cost, acc])\n        # 保存测试的日志数据\n        test_step += 1\n        test_cost_writer.add_record(test_step, test_cost[0])\n        test_acc_writer.add_record(test_step, test_acc[0])\n\n        test_accs.append(test_acc[0])\n        test_costs.append(test_cost[0])\n    # 求测试结果的平均值\n    test_cost = (sum(test_costs) / len(test_costs))\n    test_acc = (sum(test_accs) / 
len(test_accs))\n    print('Test:%d, Cost:%0.5f, Accuracy:%0.5f' % (pass_id, test_cost, test_acc))\n```\n\n训练时输出的信息：\n```\nPass:0, Batch:0, Cost:2.79566, Accuracy:0.03125\nPass:0, Batch:100, Cost:2.48199, Accuracy:0.15625\nPass:0, Batch:200, Cost:2.49757, Accuracy:0.18750\nPass:0, Batch:300, Cost:2.10605, Accuracy:0.28125\nPass:0, Batch:400, Cost:2.24151, Accuracy:0.15625\nPass:0, Batch:500, Cost:1.99807, Accuracy:0.21875\nPass:0, Batch:600, Cost:1.92178, Accuracy:0.34375\nPass:0, Batch:700, Cost:1.81583, Accuracy:0.28125\nPass:0, Batch:800, Cost:2.22559, Accuracy:0.25000\nPass:0, Batch:900, Cost:1.79611, Accuracy:0.34375\nPass:0, Batch:1000, Cost:2.00520, Accuracy:0.25000\n```\n\n训练结束之后，启动VisualDL工具，指定日志文件的目录和端口号。\n```\nvisualdl --logdir=log/ --port=8080\n```\n\n访问网页地址：`http://localhost:8080/`，我们会得到以下的图片。\n\n - 训练时的准确率和损失值的变化，从这些图片可以看到模型正在收敛，准确率在不断提升。\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190128173923744.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n - 下图是使用测试集的准确率和损失值，从图中可以看出后期的测试情况准确率在下降，损失值在增大，也对比上图训练的准确率还在上升，证明模型出现过拟合的情况。\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190128173952966.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n - 下图是训练是参数的histogram图，从图中可以看出参数正在趋于稳定，同时的没有出现异常值，如极大值或者极小值。\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190128174012410.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n本章关于VisualDL的使用就介绍到这里，读者在实际开发中可以使用VisualDL，通过利用VisualDL给予的训练可视化，不断优化模型。\n\n同步到百度AI Studio平台：http://aistudio.baidu.com/#/projectdetail/38856\n同步到科赛网K-Lab平台：https://www.kesci.com/home/project/5c3f495589f4aa002b845d6b\n项目代码GitHub地址：https://github.com/yeyupiaoling/LearnPaddle2/tree/master/note10\n\n**注意：** 最新代码以GitHub上的为准\n\n# 参考资料\n1. 
https://blog.csdn.net/qq_33200967/article/details/79127175\n2. https://github.com/PaddlePaddle/VisualDL\n3. https://www.jianshu.com/p/4c9404d4998c\n"
  },
  {
    "path": "note10/mobilenet_v2.py",
    "content": "import paddle.fluid as fluid\n\n\ndef conv_bn_layer(input, filter_size, num_filters, stride, padding, num_groups=1, if_act=True, use_cudnn=True):\n    conv = fluid.layers.conv2d(input=input,\n                               num_filters=num_filters,\n                               filter_size=filter_size,\n                               stride=stride,\n                               padding=padding,\n                               groups=num_groups,\n                               use_cudnn=use_cudnn,\n                               bias_attr=False)\n    bn = fluid.layers.batch_norm(input=conv)\n    if if_act:\n        return fluid.layers.relu6(bn)\n    else:\n        return bn\n\n\ndef shortcut(input, data_residual):\n    return fluid.layers.elementwise_add(input, data_residual)\n\n\ndef inverted_residual_unit(input,\n                           num_in_filter,\n                           num_filters,\n                           ifshortcut,\n                           stride,\n                           filter_size,\n                           padding,\n                           expansion_factor):\n    num_expfilter = int(round(num_in_filter * expansion_factor))\n\n    channel_expand = conv_bn_layer(input=input,\n                                   num_filters=num_expfilter,\n                                   filter_size=1,\n                                   stride=1,\n                                   padding=0,\n                                   num_groups=1,\n                                   if_act=True)\n\n    bottleneck_conv = conv_bn_layer(input=channel_expand,\n                                    num_filters=num_expfilter,\n                                    filter_size=filter_size,\n                                    stride=stride,\n                                    padding=padding,\n                                    num_groups=num_expfilter,\n                                    if_act=True,\n                                    
use_cudnn=False)\n\n    linear_out = conv_bn_layer(input=bottleneck_conv,\n                               num_filters=num_filters,\n                               filter_size=1,\n                               stride=1,\n                               padding=0,\n                               num_groups=1,\n                               if_act=False)\n    if ifshortcut:\n        out = shortcut(input=input, data_residual=linear_out)\n        return out\n    else:\n        return linear_out\n\n\ndef invresi_blocks(input, in_c, t, c, n, s, name=None):\n    first_block = inverted_residual_unit(input=input,\n                                         num_in_filter=in_c,\n                                         num_filters=c,\n                                         ifshortcut=False,\n                                         stride=s,\n                                         filter_size=3,\n                                         padding=1,\n                                         expansion_factor=t)\n\n    last_residual_block = first_block\n    last_c = c\n\n    for i in range(1, n):\n        last_residual_block = inverted_residual_unit(input=last_residual_block,\n                                                     num_in_filter=last_c,\n                                                     num_filters=c,\n                                                     ifshortcut=True,\n                                                     stride=1,\n                                                     filter_size=3,\n                                                     padding=1,\n                                                     expansion_factor=t)\n    return last_residual_block\n\n\ndef net(input, class_dim, scale=1.0):\n    bottleneck_params_list = [\n        (1, 16, 1, 1),\n        (6, 24, 2, 2),\n        (6, 32, 3, 2),\n        (6, 64, 4, 2),\n        (6, 96, 3, 1),\n        (6, 160, 3, 2),\n        (6, 320, 1, 1),\n    ]\n\n    # conv1\n    input = 
conv_bn_layer(input,\n                          num_filters=int(32 * scale),\n                          filter_size=3,\n                          stride=2,\n                          padding=1,\n                          if_act=True)\n\n    # bottleneck sequences\n    i = 1\n    in_c = int(32 * scale)\n    for layer_setting in bottleneck_params_list:\n        t, c, n, s = layer_setting\n        i += 1\n        input = invresi_blocks(input=input,\n                               in_c=in_c,\n                               t=t,\n                               c=int(c * scale),\n                               n=n,\n                               s=s,\n                               name='conv' + str(i))\n        in_c = int(c * scale)\n    # last_conv\n    input = conv_bn_layer(input=input,\n                          num_filters=int(1280 * scale) if scale > 1.0 else 1280,\n                          filter_size=1,\n                          stride=1,\n                          padding=0,\n                          if_act=True)\n\n    feature = fluid.layers.pool2d(input=input,\n                                  pool_size=7,\n                                  pool_stride=1,\n                                  pool_type='avg',\n                                  global_pooling=True)\n    net = fluid.layers.fc(input=feature,\n                          size=class_dim,\n                          act='softmax')\n    return net\n"
  },
  {
    "path": "note10/test_visualdl.py",
    "content": "# 导入VisualDL的包\nfrom visualdl import LogWriter\n\n# 创建一个LogWriter，第一个参数是指定存放数据的路径，\n# 第二个参数是指定多少次写操作执行一次内存到磁盘的数据持久化\nlogw = LogWriter(\"./random_log\", sync_cycle=10000)\n\n# 创建训练和测试的scalar图，\n# mode是标注线条的名称，\n# scalar标注的是指定这个组件的tag\nwith logw.mode('train') as logger:\n    scalar0 = logger.scalar(\"scratch/scalar\")\n\nwith logw.mode('test') as logger:\n    scalar1 = logger.scalar(\"scratch/scalar\")\n\n# 读取数据\nfor step in range(1000):\n    scalar0.add_record(step, step * 1. / 1000)\n    scalar1.add_record(step, 1. - step * 1. / 1000)"
  },
  {
    "path": "note10/train.py",
    "content": "import mobilenet_v2\nimport paddle as paddle\nimport paddle.dataset.cifar as cifar\nimport paddle.fluid as fluid\nfrom visualdl import LogWriter\n\n# 创建记录器\nlog_writer = LogWriter(dir='log/', sync_cycle=10)\n\n# 创建训练和测试记录数据工具\nwith log_writer.mode('train') as writer:\n    train_cost_writer = writer.scalar('cost')\n    train_acc_writer = writer.scalar('accuracy')\n    histogram = writer.histogram('histogram', num_buckets=50)\n\nwith log_writer.mode('test') as writer:\n    test_cost_writer = writer.scalar('cost')\n    test_acc_writer = writer.scalar('accuracy')\n\n# 定义输入层\nimage = fluid.layers.data(name='image', shape=[3, 32, 32], dtype='float32')\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n\n# 获取分类器\nmodel = mobilenet_v2.net(image, 10)\n\n# 获取损失函数和准确率函数\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 获取训练和测试程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-3)\nopts = optimizer.minimize(avg_cost)\n\n# 获取CIFAR数据\ntrain_reader = paddle.batch(cifar.train10(), batch_size=32)\ntest_reader = paddle.batch(cifar.test10(), batch_size=32)\n\n# 定义一个使用CPU的执行器\nplace = fluid.CUDAPlace(0)\n# place = fluid.CPUPlace()\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n\n# 定义日志的开始位置和获取参数名称\ntrain_step = 0\ntest_step = 0\nparams_name = fluid.default_startup_program().global_block().all_parameters()[0].name\n\n# 训练10次\nfor pass_id in range(10):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc, params = exe.run(program=fluid.default_main_program(),\n                                                feed=feeder.feed(data),\n                                                fetch_list=[avg_cost, acc, 
params_name])\n        # 保存训练的日志数据\n        train_step += 1\n        train_cost_writer.add_record(train_step, train_cost[0])\n        train_acc_writer.add_record(train_step, train_acc[0])\n        histogram.add_record(train_step, params.flatten())\n\n        # 每100个batch打印一次信息\n        if batch_id % 100 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %\n                  (pass_id, batch_id, train_cost[0], train_acc[0]))\n\n    # 进行测试\n    test_accs = []\n    test_costs = []\n    for batch_id, data in enumerate(test_reader()):\n        test_cost, test_acc = exe.run(program=test_program,\n                                      feed=feeder.feed(data),\n                                      fetch_list=[avg_cost, acc])\n        # 保存测试的日志数据\n        test_step += 1\n        test_cost_writer.add_record(test_step, test_cost[0])\n        test_acc_writer.add_record(test_step, test_acc[0])\n\n        test_accs.append(test_acc[0])\n        test_costs.append(test_cost[0])\n    # 求测试结果的平均值\n    test_cost = (sum(test_costs) / len(test_costs))\n    test_acc = (sum(test_accs) / len(test_accs))\n    print('Test:%d, Cost:%0.5f, Accuracy:%0.5f' % (pass_id, test_cost, test_acc))\n"
  },
  {
    "path": "note11/README.md",
    "content": "﻿@[TOC]\n\nGitHub地址：https://github.com/yeyupiaoling/LearnPaddle2/tree/master/note11\n\n# 前言\n本章将介绍如何使用PaddlePaddle训练自己的图片数据集，在之前的图像数据集中，我们都是使用PaddlePaddle自带的数据集，本章我们就来学习如何让PaddlePaddle训练我们自己的图片数据集。\n\n# 爬取图像\n在本章中，我们使用的是自己的图片数据集，所以我们需要弄一堆图像来制作训练的数据集。下面我们就编写一个爬虫程序，让其帮我们从百度图片中爬取相应类别的图片。\n\n创建一个`download_image.py`文件用于编写爬取图片程序。首先导入所需的依赖包。\n```python\nimport re\nimport uuid\nimport requests\nimport os\nimport numpy\nimport imghdr\nfrom PIL import Image\n```\n\n然后编写一个下载图片的函数，这个是程序核心代码。参数是下载图片的关键、保存的名字、下载图片的数量。关键字是百度搜索图片的关键。\n```python\n# 获取百度图片下载图片\ndef download_image(key_word, save_name, download_max):\n    download_sum = 0\n    str_gsm = '80'\n    # 把每个类别的图片存放在单独一个文件夹中\n    save_path = 'images' + '/' + save_name\n    if not os.path.exists(save_path):\n        os.makedirs(save_path)\n    while download_sum < download_max:\n        # 下载次数超过指定值就停止下载\n        if download_sum >= download_max:\n            break\n        str_pn = str(download_sum)\n        # 定义百度图片的路径\n        url = 'http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&' \\\n              'word=' + key_word + '&pn=' + str_pn + '&gsm=' + str_gsm + '&ct=&ic=0&lm=-1&width=0&height=0'\n        print('正在下载 %s 的第 %d 张图片.....' 
% (key_word, download_sum))\n        try:\n            # 获取当前页面的源码\n            result = requests.get(url, timeout=30).text\n            # 获取当前页面的图片URL\n            img_urls = re.findall('\"objURL\":\"(.*?)\",', result, re.S)\n            if len(img_urls) < 1:\n                break\n            # 把这些图片URL一个个下载\n            for img_url in img_urls:\n                # 获取图片内容\n                img = requests.get(img_url, timeout=30)\n                img_name = save_path + '/' + str(uuid.uuid1()) + '.jpg'\n                # 保存图片\n                with open(img_name, 'wb') as f:\n                    f.write(img.content)\n                download_sum += 1\n                if download_sum >= download_max:\n                    break\n        except Exception as e:\n            print('【错误】当前图片无法下载，%s' % e)\n            download_sum += 1\n            continue\n    print('下载完成')\n```\n\n图片下载完成之后，需要删除一家损坏的图片，因为在下载的过程中，由于图片本身的问题或者下载过程造成的图片损坏，需要把这些已经损坏的图片上传。下面的函数就是删除所有损坏的图片，根据图像数据集的目录读取获取所有图片文件的路径，然后使用`imghdr`工具获取图片的类型是否为`png`或者`jpg`来判断图片文件是否完整，最后再删除根据图片的通道数据来删除灰度图片。\n```python\n# 删除不是JPEG或者PNG格式的图片\ndef delete_error_image(father_path):\n    # 获取父级目录的所有文件以及文件夹\n    try:\n        image_dirs = os.listdir(father_path)\n        for image_dir in image_dirs:\n            image_dir = os.path.join(father_path, image_dir)\n            # 如果是文件夹就继续获取文件夹中的图片\n            if os.path.isdir(image_dir):\n                images = os.listdir(image_dir)\n                for image in images:\n                    image = os.path.join(image_dir, image)\n                    try:\n                        # 获取图片的类型\n                        image_type = imghdr.what(image)\n                        # 如果图片格式不是JPEG同时也不是PNG就删除图片\n                        if image_type is not 'jpeg' and image_type is not 'png':\n                            os.remove(image)\n                            print('已删除：%s' % image)\n                            continue\n                        # 删除灰度图\n                        img = 
numpy.array(Image.open(image))\n                        if len(img.shape) is 2:\n                            os.remove(image)\n                            print('已删除：%s' % image)\n                    except:\n                        os.remove(image)\n                        print('已删除：%s' % image)\n    except:\n        pass\n```\n\n最后在main入口中通过调用两个函数来完成下载图像数据集，使用中文进行百度搜索图片，使用英文是为了出现中文路径导致图片读取错误。\n```python\nif __name__ == '__main__':\n    # 定义要下载的图片中文名称和英文名称，ps：英文名称主要是为了设置文件夹名\n    key_words = {'西瓜': 'watermelon', '哈密瓜': 'cantaloupe',\n                 '樱桃': 'cherry', '苹果': 'apple', '黄瓜': 'cucumber', '胡萝卜': 'carrot'}\n    # 每个类别下载一千个\n    max_sum = 500\n    for key_word in key_words:\n        save_name = key_words[key_word]\n        download_image(key_word, save_name, max_sum)\n\n    # 删除错误图片\n    delete_error_image('images/')\n```\n\n输出信息：\n```\n正在下载 哈密瓜 的第 0 张图片.....\n【错误】当前图片无法下载，HTTPConnectionPool(host='www.boyingsj.com', port=80): Read timed out.\n正在下载 哈密瓜 的第 10 张图片.....\n```\n\n**注意：** 下载处理完成之后，还可能存在其他杂乱的图片，所以还需要我们手动删除这些不属于这个类别的图片，这才算完成图像数据集的制作。\n\n\n# 创建图像列表\n创建一个`create_data_list.py`文件，在这个程序中，我们只要把爬取保存图片的路径的文件夹路径传进去就可以了，生成固定格式的列表，格式为`图片的路径 <Tab> 图片类别的标签`：\n```python\nimport json\nimport os\n\ndef create_data_list(data_root_path):\n    with open(data_root_path + \"test.list\", 'w') as f:\n        pass\n    with open(data_root_path + \"train.list\", 'w') as f:\n        pass\n    # 所有类别的信息\n    class_detail = []\n    # 获取所有类别\n    class_dirs = os.listdir(data_root_path)\n    # 类别标签\n    class_label = 0\n    # 获取总类别的名称\n    father_paths = data_root_path.split('/')\n    while True:\n        if father_paths[len(father_paths) - 1] == '':\n            del father_paths[len(father_paths) - 1]\n        else:\n            break\n    father_path = father_paths[len(father_paths) - 1]\n\n    all_class_images = 0\n    other_file = 0\n    # 读取每个类别\n    for class_dir in class_dirs:\n        if class_dir == 'test.list' or class_dir == \"train.list\" or class_dir == 
'readme.json':\n            other_file += 1\n            continue\n        print('正在读取类别：%s' % class_dir)\n        # 每个类别的信息\n        class_detail_list = {}\n        test_sum = 0\n        trainer_sum = 0\n        # 统计每个类别有多少张图片\n        class_sum = 0\n        # 获取类别路径\n        path = data_root_path + \"/\" + class_dir\n        # 获取所有图片\n        img_paths = os.listdir(path)\n        for img_path in img_paths:\n            # 每张图片的路径\n            name_path = class_dir + '/' + img_path\n            # 如果不存在这个文件夹,就创建\n            if not os.path.exists(data_root_path):\n                os.makedirs(data_root_path)\n            # 每10张图片取一个做测试数据\n            if class_sum % 10 == 0:\n                test_sum += 1\n                with open(data_root_path + \"test.list\", 'a') as f:\n                    f.write(name_path + \"\\t%d\" % class_label + \"\\n\")\n            else:\n                trainer_sum += 1\n                with open(data_root_path + \"train.list\", 'a') as f:\n                    f.write(name_path + \"\\t%d\" % class_label + \"\\n\")\n            class_sum += 1\n            all_class_images += 1\n        # 说明的json文件的class_detail数据\n        class_detail_list['class_name'] = class_dir\n        class_detail_list['class_label'] = class_label\n        class_detail_list['class_test_images'] = test_sum\n        class_detail_list['class_trainer_images'] = trainer_sum\n        class_detail.append(class_detail_list)\n        class_label += 1\n    # 获取类别数量\n    all_class_sum = len(class_dirs) - other_file\n    # 说明的json文件信息\n    readjson = {}\n    readjson['all_class_name'] = father_path\n    readjson['all_class_sum'] = all_class_sum\n    readjson['all_class_images'] = all_class_images\n    readjson['class_detail'] = class_detail\n    jsons = json.dumps(readjson, sort_keys=True, indent=4, separators=(',', ': '))\n    with open(data_root_path + \"readme.json\", 'w') as f:\n        f.write(jsons)\n    print('图像列表已生成')\n```\n\n最后执行就可以生成图像的列表。\n```python\nif __name__ == 
'__main__':\n    # 把生产的数据列表都放在自己的总类别文件夹中\n    data_root_path = \"images/\"\n    create_data_list(data_root_path)\n```\n\n输出信息：\n```\n正在读取类别：apple\n正在读取类别：cantaloupe\n正在读取类别：carrot\n正在读取类别：cherry\n正在读取类别：cucumber\n正在读取类别：watermelon\n图像列表已生成\n```\n\n运行这个程序之后，会生成在data文件夹中生成一个单独的大类文件夹，比如我们这次是使用到蔬菜类，所以我生成一个`vegetables`文件夹，在这个文件夹下有3个文件：\n|文件名|作用|\n|:---:|:---:|\n|trainer.list|用于训练的图像列表|\n|test.list|用于测试的图像列表|\n|readme.json|该数据集的json格式的说明,方便以后使用|\n\n`readme.json`文件的格式如下，可以很清楚看到整个数据的图像数量,总类别名称和类别数量，还有每个类对应的标签，类别的名字，该类别的测试数据和训练数据的数量：\n```json\n{\n    \"all_class_images\": 2200,\n    \"all_class_name\": \"images\",\n    \"all_class_sum\": 2,\n    \"class_detail\": [\n        {\n            \"class_label\": 1,\n            \"class_name\": \"watermelon\",\n            \"class_test_images\": 110,\n            \"class_trainer_images\": 990\n        },\n        {\n            \"class_label\": 2,\n            \"class_name\": \"cantaloupe\",\n            \"class_test_images\": 110,\n            \"class_trainer_images\": 990\n        }\n    ]\n}\n```\n\n# 定义模型\n创建一个`mobilenet_v1.py`文件，在本章我们使用的是MobileNet神经网络，MobileNet是Google针对手机等嵌入式设备提出的一种轻量级的深层神经网络，它的核心思想就是卷积核的巧妙分解，可以有效减少网络参数，从而达到减小训练时网络的模型。因为太大的模型模型文件是不利于移植到移动设备上的，比如我们把模型文件迁移到Android手机应用上，那么模型文件的大小就直接影响应用安装包的大小。以下就是使用PaddlePaddle定义的MobileNet神经网络：\n```python\nimport paddle.fluid as fluid\n\ndef conv_bn_layer(input, filter_size, num_filters, stride,\n                  padding, channels=None, num_groups=1, act='relu', use_cudnn=True):\n    conv = fluid.layers.conv2d(input=input,\n                               num_filters=num_filters,\n                               filter_size=filter_size,\n                               stride=stride,\n                               padding=padding,\n                               groups=num_groups,\n                               act=None,\n                               use_cudnn=use_cudnn,\n                               bias_attr=False)\n\n    return fluid.layers.batch_norm(input=conv, 
act=act)\n```\n\n```python\ndef depthwise_separable(input, num_filters1, num_filters2, num_groups, stride, scale):\n    depthwise_conv = conv_bn_layer(input=input,\n                                   filter_size=3,\n                                   num_filters=int(num_filters1 * scale),\n                                   stride=stride,\n                                   padding=1,\n                                   num_groups=int(num_groups * scale),\n                                   use_cudnn=False)\n\n    pointwise_conv = conv_bn_layer(input=depthwise_conv,\n                                   filter_size=1,\n                                   num_filters=int(num_filters2 * scale),\n                                   stride=1,\n                                   padding=0)\n    return pointwise_conv\n```\n\n```python\ndef net(input, class_dim, scale=1.0):\n    # conv1: 112x112\n    input = conv_bn_layer(input=input,\n                          filter_size=3,\n                          channels=3,\n                          num_filters=int(32 * scale),\n                          stride=2,\n                          padding=1)\n\n    # 56x56\n    input = depthwise_separable(input=input,\n                                num_filters1=32,\n                                num_filters2=64,\n                                num_groups=32,\n                                stride=1,\n                                scale=scale)\n\n    input = depthwise_separable(input=input,\n                                num_filters1=64,\n                                num_filters2=128,\n                                num_groups=64,\n                                stride=2,\n                                scale=scale)\n\n    # 28x28\n    input = depthwise_separable(input=input,\n                                num_filters1=128,\n                                num_filters2=128,\n                                num_groups=128,\n                                stride=1,\n          
                      scale=scale)\n\n    input = depthwise_separable(input=input,\n                                num_filters1=128,\n                                num_filters2=256,\n                                num_groups=128,\n                                stride=2,\n                                scale=scale)\n\n    # 14x14\n    input = depthwise_separable(input=input,\n                                num_filters1=256,\n                                num_filters2=256,\n                                num_groups=256,\n                                stride=1,\n                                scale=scale)\n\n    input = depthwise_separable(input=input,\n                                num_filters1=256,\n                                num_filters2=512,\n                                num_groups=256,\n                                stride=2,\n                                scale=scale)\n\n    # 14x14\n    for i in range(5):\n        input = depthwise_separable(input=input,\n                                    num_filters1=512,\n                                    num_filters2=512,\n                                    num_groups=512,\n                                    stride=1,\n                                    scale=scale)\n    # 7x7\n    input = depthwise_separable(input=input,\n                                num_filters1=512,\n                                num_filters2=1024,\n                                num_groups=512,\n                                stride=2,\n                                scale=scale)\n\n    input = depthwise_separable(input=input,\n                                num_filters1=1024,\n                                num_filters2=1024,\n                                num_groups=1024,\n                                stride=1,\n                                scale=scale)\n\n    feature = fluid.layers.pool2d(input=input,\n                                  pool_size=0,\n                                  pool_stride=1,\n 
                                 pool_type='avg',\n                                  global_pooling=True)\n\n    net = fluid.layers.fc(input=feature,\n                          size=class_dim,\n                          act='softmax')\n    return net\n```\n\n# 定义数据读取\n创建一个`reader.py`文件，这个程序就是用户训练和测试的使用读取数据的。训练的时候，通过这个程序从本地读取图片，然后通过一系列的预处理操作，最后转换成训练所需的Numpy数组。\n\n首先导入所需的包，其中`cpu_count`是获取当前计算机有多少个CPU，然后使用多线程读取数据。\n```python\nimport os\nimport random\nfrom multiprocessing import cpu_count\nimport numpy as np\nimport paddle\nfrom PIL import Image\n```\n\n首先定义一个`train_mapper()`函数，这个函数是根据传入进来的图片路径来对图片进行预处理，比如训练的时候需要统一图片的大小，同时也使用多种的数据增强的方式，如水平翻转、垂直翻转、角度翻转、随机裁剪，这些方式都可以让有限的图片数据集在训练的时候成倍的增加。最后因为PIL打开图片存储顺序为H(高度)，W(宽度)，C(通道)，PaddlePaddle要求数据顺序为CHW，所以需要转换顺序。最后返回的是处理后的图片数据和其对应的标签。\n```python\n# 训练图片的预处理\ndef train_mapper(sample):\n    img_path, label, crop_size, resize_size = sample\n    try:\n        img = Image.open(img_path)\n        # 统一图片大小\n        img = img.resize((resize_size, resize_size), Image.ANTIALIAS)\n        # 随机水平翻转\n        r1 = random.random()\n        if r1 > 0.5:\n            img = img.transpose(Image.FLIP_LEFT_RIGHT)\n        # 随机垂直翻转\n        r2 = random.random()\n        if r2 > 0.5:\n            img = img.transpose(Image.FLIP_TOP_BOTTOM)\n        # 随机角度翻转\n        r3 = random.randint(-3, 3)\n        img = img.rotate(r3, expand=False)\n        # 随机裁剪\n        r4 = random.randint(0, int(resize_size - crop_size))\n        r5 = random.randint(0, int(resize_size - crop_size))\n        box = (r4, r5, r4 + crop_size, r5 + crop_size)\n        img = img.crop(box)\n        # 把图片转换成numpy值\n        img = np.array(img).astype(np.float32)\n        # 转换成CHW\n        img = img.transpose((2, 0, 1))\n        # 转换成BGR\n        img = img[(2, 1, 0), :, :] / 255.0\n        return img, int(label)\n    except:\n        print(\"%s 该图片错误，请删除该图片并重新创建图像数据列表\" % 
img_path)\n```\n\n这个`train_reader()`函数是根据已经创建的图像列表解析得到每张图片的路径和其他对应的标签，然后使用`paddle.reader.xmap_readers()`把数据传递给上面定义的`train_mapper()`函数进行处理，最后得到一个训练所需的reader。\n```python\n# 获取训练的reader\ndef train_reader(train_list_path, crop_size, resize_size):\n    father_path = os.path.dirname(train_list_path)\n\n    def reader():\n        with open(train_list_path, 'r') as f:\n            lines = f.readlines()\n            # 打乱图像列表\n            np.random.shuffle(lines)\n            # 开始获取每张图像和标签\n            for line in lines:\n                img, label = line.split('\\t')\n                img = os.path.join(father_path, img)\n                yield img, label, crop_size, resize_size\n\n    return paddle.reader.xmap_readers(train_mapper, reader, cpu_count(), 102400)\n```\n\n这是一个测试数据的预处理函数`test_mapper()`，这个没有做太多处理，因为测试的数据不需要数据增强操作，只需统一图片大小和设置好图片的通过顺序和数据类型即可。\n```python\n# 测试图片的预处理\ndef test_mapper(sample):\n    img, label, crop_size = sample\n    img = Image.open(img)\n    # 统一图像大小\n    img = img.resize((crop_size, crop_size), Image.ANTIALIAS)\n    # 转换成numpy值\n    img = np.array(img).astype(np.float32)\n    # 转换成CHW\n    img = img.transpose((2, 0, 1))\n    # 转换成BGR\n    img = img[(2, 1, 0), :, :] / 255.0\n    return img, int(label)\n```\n\n这个是测试的reader函数`test_reader()`，这个跟训练的reader函数定义一样。\n```python\n# 测试的图片reader\ndef test_reader(test_list_path, crop_size):\n    father_path = os.path.dirname(test_list_path)\n\n    def reader():\n        with open(test_list_path, 'r') as f:\n            lines = f.readlines()\n            for line in lines:\n                img, label = line.split('\\t')\n                img = os.path.join(father_path, img)\n                yield img, label, crop_size\n\n    return paddle.reader.xmap_readers(test_mapper, reader, cpu_count(), 1024)\n```\n\n# 训练模型\n万事俱备，只等训练了。关于PaddlePaddle训练流程，我们已经非常熟悉了，那么我们就简单地过一遍。\n\n创建`train.py`文件，首先导入所需的包，其中包括我们定义的MobileNet模型和数据读取程序：\n```python\nimport os\nimport shutil\nimport mobilenet_v1\nimport paddle as paddle\nimport 
reader\nimport paddle.fluid as fluid\n```\n\n然后定义数据输入层，这次我们使用的是图片大小是224，这比之前使用的CIFAR数据集的32大小要大很多，所以训练其他会慢不少。至于`resize_size`是用于统一缩放到这个大小，然后再随机裁剪成`crop_size`大小，`crop_size`才是最终训练图片的大小。\n```python\ncrop_size = 224\nresize_size = 250\n\n# 定义输入层\nimage = fluid.layers.data(name='image', shape=[3, crop_size, crop_size], dtype='float32')\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n```\n\n接着获取MobileNet网络的分类器，传入的第一个参数就是上面定义的输入层，第二个是分类的类别大小，比如我们这次爬取的图像类别数量是6个。\n```python\n# 获取分类器，因为这次只爬取了6个类别的图片，所以分类器的类别大小为6\nmodel = mobilenet_v1.net(image, 6)\n```\n\n再接着是获取损失函数和平均准确率函数，还有测试程序和优化方法，这个优化方法我加了正则，因为爬取的图片数量太少，在训练容易过拟合，所以加上正则一定程度上可以抑制过拟合。\n```python\n# 获取损失函数和准确率函数\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 获取训练和测试程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-3,\n                                          regularization=fluid.regularizer.L2DecayRegularizer(1e-4))\nopts = optimizer.minimize(avg_cost)\n```\n\n这里就是获取训练测试是所以想的数据读取reader，通过使用`paddle.batch()`函数可以把多条数据打包成一个批次，训练的时候是按照一个个批次训练的。\n```python\n# 获取自定义数据\ntrain_reader = paddle.batch(reader=reader.train_reader('images/train.list', crop_size, resize_size), batch_size=32)\ntest_reader = paddle.batch(reader=reader.test_reader('images/test.list', crop_size), batch_size=32)\n```\n\n执行训练之前，还需要创建一个执行器，建议使用GPU进行训练，因为我们训练的图片比较大，所以使用CPU训练速度会相当的慢。\n```python\n# 定义一个使用GPU的执行器\nplace = fluid.CUDAPlace(0)\n# place = fluid.CPUPlace()\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n```\n\n最后终于可以执行训练了，这里跟在前些章节都几乎一样，就不重复介绍了。\n```python\n# 训练100次\nfor pass_id in range(100):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc = 
exe.run(program=fluid.default_main_program(),\n                                        feed=feeder.feed(data),\n                                        fetch_list=[avg_cost, acc])\n\n        # 每100个batch打印一次信息\n        if batch_id % 100 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %\n                  (pass_id, batch_id, train_cost[0], train_acc[0]))\n\n    # 进行测试\n    test_accs = []\n    test_costs = []\n    for batch_id, data in enumerate(test_reader()):\n        test_cost, test_acc = exe.run(program=test_program,\n                                      feed=feeder.feed(data),\n                                      fetch_list=[avg_cost, acc])\n        test_accs.append(test_acc[0])\n        test_costs.append(test_cost[0])\n    # 求测试结果的平均值\n    test_cost = (sum(test_costs) / len(test_costs))\n    test_acc = (sum(test_accs) / len(test_accs))\n    print('Test:%d, Cost:%0.5f, Accuracy:%0.5f' % (pass_id, test_cost, test_acc))\n```\n\n训练的过程中可以保存预测模型，用于之后的预测。笔者一般是每一个pass保存一次模型。\n```python\n    # 保存预测模型\n    save_path = 'infer_model/'\n    # 删除旧的模型文件\n    shutil.rmtree(save_path, ignore_errors=True)\n    # 创建保持模型文件目录\n    os.makedirs(save_path)\n    # 保存预测模型\n    fluid.io.save_inference_model(save_path, feeded_var_names=[image.name], target_vars=[model], executor=exe)\n```\n\n训练输出的信息：\n```\nPass:0, Batch:0, Cost:1.84754, Accuracy:0.15625\nTest:0, Cost:4.66276, Accuracy:0.17857\nPass:1, Batch:0, Cost:1.04008, Accuracy:0.59375\nTest:1, Cost:1.23828, Accuracy:0.54464\nPass:2, Batch:0, Cost:1.04778, Accuracy:0.65625\nTest:2, Cost:0.99189, Accuracy:0.64286\nPass:3, Batch:0, Cost:1.21555, Accuracy:0.65625\nTest:3, Cost:1.01552, Accuracy:0.57589\nPass:4, Batch:0, Cost:0.64620, Accuracy:0.81250\nTest:4, Cost:1.19264, Accuracy:0.63393\n```\n\n# 预测图片\n经过上面训练后，得到了一个预测模型，下面我们就使用一个预测模型来预测一些图片。\n\n创建一个`infer.py`文件作为预测程序。首先导入所需的依赖包。\n```python\nimport paddle.fluid as fluid\nfrom PIL import Image\nimport numpy as 
np\n```\n\n创建一个执行器，这些不需要训练，所以可以使用CPU进行预测，速度不会太慢，当然，使用GPU的预测速度会更快一些。\n```python\n# 创建执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\nexe.run(fluid.default_startup_program())\n```\n\n然后加载预测模型，获取预测程序和输入层的名字，还有网络的分类器。\n```python\n# 保存预测模型路径\nsave_path = 'infer_model/'\n# 从模型中获取预测程序、输入数据名称列表、分类器\n[infer_program, feeded_var_names, target_var] = fluid.io.load_inference_model(dirname=save_path, executor=exe)\n```\n\n预测图片之前，还需要对图片进行预处理，处理的方式跟测试的时候处理的方式一样。\n```python\n# 预处理图片\ndef load_image(file):\n    img = Image.open(file)\n    # 统一图像大小\n    img = img.resize((224, 224), Image.ANTIALIAS)\n    # 转换成numpy值\n    img = np.array(img).astype(np.float32)\n    # 转换成CHW\n    img = img.transpose((2, 0, 1))\n    # 转换成BGR\n    img = img[(2, 1, 0), :, :] / 255.0\n    img = np.expand_dims(img, axis=0)\n    return img\n```\n\n最后获取经过预处理的图片数据，再使用这些图像数据进行预测，得到分类结果。\n```python\n# 获取图片数据\nimg = load_image('images/apple/0fdd5422-31e0-11e9-9cfd-3c970e769528.jpg')\n\n# 执行预测\nresult = exe.run(program=infer_program,\n                 feed={feeded_var_names[0]: img},\n                 fetch_list=target_var)\n```\n\n我们可以通过解析分类的结果，获取概率最大类别标签。关于预测输出的`result`是数据，它是3维的，第一层是输出本身就是一个数组，第二层图片的数量，因为PaddlePaddle支持多张图片同时预测，最后一层就是每个类别的概率，这个概率的总和为1，概率最大的标签就是预测结果。\n```python\n# 显示图片并输出结果最大的label\nlab = np.argsort(result)[0][0][-1]\n\nnames = ['苹果', '哈密瓜', '胡萝卜', '樱桃', '黄瓜', '西瓜']\n\nprint('预测结果标签为：%d， 名称为：%s， 概率为：%f' % (lab, names[lab], result[0][0][lab]))\n```\n\n预测输出的结果：\n```\n预测结果标签为：0， 名称为：苹果， 概率为：0.948698\n```\n\n# 参考资料\n1. https://yeyupiaoling.blog.csdn.net/article/details/79095265\n"
  },
  {
    "path": "note11/create_data_list.py",
    "content": "import json\nimport os\n\n\ndef create_data_list(data_root_path):\n    with open(data_root_path + \"test.list\", 'w') as f:\n        pass\n    with open(data_root_path + \"train.list\", 'w') as f:\n        pass\n    # 所有类别的信息\n    class_detail = []\n    # 获取所有类别\n    class_dirs = os.listdir(data_root_path)\n    # 类别标签\n    class_label = 0\n    # 获取总类别的名称\n    father_paths = data_root_path.split('/')\n    while True:\n        if father_paths[len(father_paths) - 1] == '':\n            del father_paths[len(father_paths) - 1]\n        else:\n            break\n    father_path = father_paths[len(father_paths) - 1]\n\n    all_class_images = 0\n    other_file = 0\n    # 读取每个类别\n    for class_dir in class_dirs:\n        if class_dir == 'test.list' or class_dir == \"train.list\" or class_dir == 'readme.json':\n            other_file += 1\n            continue\n        print('正在读取类别：%s' % class_dir)\n        # 每个类别的信息\n        class_detail_list = {}\n        test_sum = 0\n        trainer_sum = 0\n        # 统计每个类别有多少张图片\n        class_sum = 0\n        # 获取类别路径\n        path = data_root_path + \"/\" + class_dir\n        # 获取所有图片\n        img_paths = os.listdir(path)\n        for img_path in img_paths:\n            # 每张图片的路径\n            name_path = class_dir + '/' + img_path\n            # 如果不存在这个文件夹,就创建\n            if not os.path.exists(data_root_path):\n                os.makedirs(data_root_path)\n            # 每10张图片取一个做测试数据\n            if class_sum % 10 == 0:\n                test_sum += 1\n                with open(data_root_path + \"test.list\", 'a') as f:\n                    f.write(name_path + \"\\t%d\" % class_label + \"\\n\")\n            else:\n                trainer_sum += 1\n                with open(data_root_path + \"train.list\", 'a') as f:\n                    f.write(name_path + \"\\t%d\" % class_label + \"\\n\")\n            class_sum += 1\n            all_class_images += 1\n        # 说明的json文件的class_detail数据\n        
class_detail_list['class_name'] = class_dir\n        class_detail_list['class_label'] = class_label\n        class_detail_list['class_test_images'] = test_sum\n        class_detail_list['class_trainer_images'] = trainer_sum\n        class_detail.append(class_detail_list)\n        class_label += 1\n    # 获取类别数量\n    all_class_sum = len(class_dirs) - other_file\n    # 说明的json文件信息\n    readjson = {}\n    readjson['all_class_name'] = father_path\n    readjson['all_class_sum'] = all_class_sum\n    readjson['all_class_images'] = all_class_images\n    readjson['class_detail'] = class_detail\n    jsons = json.dumps(readjson, sort_keys=True, indent=4, separators=(',', ': '))\n    with open(data_root_path + \"readme.json\", 'w') as f:\n        f.write(jsons)\n    print('图像列表已生成')\n\n\nif __name__ == '__main__':\n    # 把生产的数据列表都放在自己的总类别文件夹中\n    data_root_path = \"images/\"\n    create_data_list(data_root_path)"
  },
  {
    "path": "note11/download_image.py",
    "content": "import re\nimport uuid\nimport requests\nimport os\nimport numpy\nimport imghdr\nfrom PIL import Image\n\n\n# 获取百度图片下载图片\ndef download_image(key_word, save_name, download_max):\n    download_sum = 0\n    str_gsm = '80'\n    # 把每个类别的图片存放在单独一个文件夹中\n    save_path = 'images' + '/' + save_name\n    if not os.path.exists(save_path):\n        os.makedirs(save_path)\n    while download_sum < download_max:\n        # 下载次数超过指定值就停止下载\n        if download_sum >= download_max:\n            break\n        str_pn = str(download_sum)\n        # 定义百度图片的路径\n        url = 'http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&' \\\n              'word=' + key_word + '&pn=' + str_pn + '&gsm=' + str_gsm + '&ct=&ic=0&lm=-1&width=0&height=0'\n        print('正在下载 %s 的第 %d 张图片.....' % (key_word, download_sum))\n        try:\n            # 获取当前页面的源码\n            result = requests.get(url, timeout=30).text\n            # 获取当前页面的图片URL\n            img_urls = re.findall('\"objURL\":\"(.*?)\",', result, re.S)\n            if len(img_urls) < 1:\n                break\n            # 把这些图片URL一个个下载\n            for img_url in img_urls:\n                # 获取图片内容\n                img = requests.get(img_url, timeout=30)\n                img_name = save_path + '/' + str(uuid.uuid1()) + '.jpg'\n                # 保存图片\n                with open(img_name, 'wb') as f:\n                    f.write(img.content)\n                download_sum += 1\n                if download_sum >= download_max:\n                    break\n        except Exception as e:\n            print('【错误】当前图片无法下载，%s' % e)\n            download_sum += 1\n            continue\n    print('下载完成')\n\n\n# 删除不是JPEG或者PNG格式的图片\ndef delete_error_image(father_path):\n    # 获取父级目录的所有文件以及文件夹\n    try:\n        image_dirs = os.listdir(father_path)\n        for image_dir in image_dirs:\n            image_dir = os.path.join(father_path, image_dir)\n            # 如果是文件夹就继续获取文件夹中的图片\n            if os.path.isdir(image_dir):\n          
      images = os.listdir(image_dir)\n                for image in images:\n                    image = os.path.join(image_dir, image)\n                    try:\n                        # 获取图片的类型\n                        image_type = imghdr.what(image)\n                        # 如果图片格式不是JPEG同时也不是PNG就删除图片\n                        if image_type is not 'jpeg' and image_type is not 'png':\n                            os.remove(image)\n                            print('已删除：%s' % image)\n                            continue\n                        # 删除灰度图\n                        img = numpy.array(Image.open(image))\n                        if len(img.shape) is 2:\n                            os.remove(image)\n                            print('已删除：%s' % image)\n                    except:\n                        os.remove(image)\n                        print('已删除：%s' % image)\n    except:\n        pass\n\n\nif __name__ == '__main__':\n    # 定义要下载的图片中文名称和英文名称，ps：英文名称主要是为了设置文件夹名\n    key_words = {'西瓜': 'watermelon', '哈密瓜': 'cantaloupe',\n                 '樱桃': 'cherry', '苹果': 'apple', '黄瓜': 'cucumber', '胡萝卜': 'carrot'}\n    # 每个类别下载一千个\n    max_sum = 500\n    for key_word in key_words:\n        save_name = key_words[key_word]\n        download_image(key_word, save_name, max_sum)\n\n    # 删除错误图片\n    delete_error_image('images/')\n"
  },
  {
    "path": "note11/infer.py",
    "content": "import paddle.fluid as fluid\nfrom PIL import Image\nimport numpy as np\n\n# 创建执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\nexe.run(fluid.default_startup_program())\n\n# 保存预测模型路径\nsave_path = 'infer_model/'\n# 从模型中获取预测程序、输入数据名称列表、分类器\n[infer_program, feeded_var_names, target_var] = fluid.io.load_inference_model(dirname=save_path, executor=exe)\n\n\n# 预处理图片\ndef load_image(file):\n    img = Image.open(file)\n    # 统一图像大小\n    img = img.resize((224, 224), Image.ANTIALIAS)\n    # 转换成numpy值\n    img = np.array(img).astype(np.float32)\n    # 转换成CHW\n    img = img.transpose((2, 0, 1))\n    # 转换成BGR\n    img = img[(2, 1, 0), :, :] / 255.0\n    img = np.expand_dims(img, axis=0)\n    return img\n\n\n# 获取图片数据\nimg = load_image('images/apple/0fdd5422-31e0-11e9-9cfd-3c970e769528.jpg')\n\n# 执行预测\nresult = exe.run(program=infer_program,\n                 feed={feeded_var_names[0]: img},\n                 fetch_list=target_var)\n\n# 显示图片并输出结果最大的label\nlab = np.argsort(result)[0][0][-1]\n\nnames = ['苹果', '哈密瓜', '胡萝卜', '樱桃', '黄瓜', '西瓜']\n\nprint('预测结果标签为：%d， 名称为：%s， 概率为：%f' % (lab, names[lab], result[0][0][lab]))\n"
  },
  {
    "path": "note11/mobilenet_v1.py",
    "content": "import paddle.fluid as fluid\n\n\ndef net(input, class_dim, scale=1.0):\n    # conv1: 112x112\n    input = conv_bn_layer(input=input,\n                          filter_size=3,\n                          channels=3,\n                          num_filters=int(32 * scale),\n                          stride=2,\n                          padding=1)\n\n    # 56x56\n    input = depthwise_separable(input=input,\n                                num_filters1=32,\n                                num_filters2=64,\n                                num_groups=32,\n                                stride=1,\n                                scale=scale)\n\n    input = depthwise_separable(input=input,\n                                num_filters1=64,\n                                num_filters2=128,\n                                num_groups=64,\n                                stride=2,\n                                scale=scale)\n\n    # 28x28\n    input = depthwise_separable(input=input,\n                                num_filters1=128,\n                                num_filters2=128,\n                                num_groups=128,\n                                stride=1,\n                                scale=scale)\n\n    input = depthwise_separable(input=input,\n                                num_filters1=128,\n                                num_filters2=256,\n                                num_groups=128,\n                                stride=2,\n                                scale=scale)\n\n    # 14x14\n    input = depthwise_separable(input=input,\n                                num_filters1=256,\n                                num_filters2=256,\n                                num_groups=256,\n                                stride=1,\n                                scale=scale)\n\n    input = depthwise_separable(input=input,\n                                num_filters1=256,\n                                num_filters2=512,\n             
                   num_groups=256,\n                                stride=2,\n                                scale=scale)\n\n    # 14x14\n    for i in range(5):\n        input = depthwise_separable(input=input,\n                                    num_filters1=512,\n                                    num_filters2=512,\n                                    num_groups=512,\n                                    stride=1,\n                                    scale=scale)\n    # 7x7\n    input = depthwise_separable(input=input,\n                                num_filters1=512,\n                                num_filters2=1024,\n                                num_groups=512,\n                                stride=2,\n                                scale=scale)\n\n    input = depthwise_separable(input=input,\n                                num_filters1=1024,\n                                num_filters2=1024,\n                                num_groups=1024,\n                                stride=1,\n                                scale=scale)\n\n    feature = fluid.layers.pool2d(input=input,\n                                  pool_size=0,\n                                  pool_stride=1,\n                                  pool_type='avg',\n                                  global_pooling=True)\n\n    net = fluid.layers.fc(input=feature,\n                          size=class_dim,\n                          act='softmax')\n    return net\n\n\ndef conv_bn_layer(input, filter_size, num_filters, stride,\n                  padding, channels=None, num_groups=1, act='relu', use_cudnn=True):\n    conv = fluid.layers.conv2d(input=input,\n                               num_filters=num_filters,\n                               filter_size=filter_size,\n                               stride=stride,\n                               padding=padding,\n                               groups=num_groups,\n                               act=None,\n                               
use_cudnn=use_cudnn,\n                               bias_attr=False)\n\n    return fluid.layers.batch_norm(input=conv, act=act)\n\n\ndef depthwise_separable(input, num_filters1, num_filters2, num_groups, stride, scale):\n    depthwise_conv = conv_bn_layer(input=input,\n                                   filter_size=3,\n                                   num_filters=int(num_filters1 * scale),\n                                   stride=stride,\n                                   padding=1,\n                                   num_groups=int(num_groups * scale),\n                                   use_cudnn=False)\n\n    pointwise_conv = conv_bn_layer(input=depthwise_conv,\n                                   filter_size=1,\n                                   num_filters=int(num_filters2 * scale),\n                                   stride=1,\n                                   padding=0)\n    return pointwise_conv\n"
  },
  {
    "path": "note11/reader.py",
    "content": "import os\nimport random\nfrom multiprocessing import cpu_count\nimport numpy as np\nimport paddle\nfrom PIL import Image\n\n\n# 训练图片的预处理\ndef train_mapper(sample):\n    img_path, label, crop_size, resize_size = sample\n    try:\n        img = Image.open(img_path)\n        # 统一图片大小\n        img = img.resize((resize_size, resize_size), Image.ANTIALIAS)\n        # 随机水平翻转\n        r1 = random.random()\n        if r1 > 0.5:\n            img = img.transpose(Image.FLIP_LEFT_RIGHT)\n        # 随机垂直翻转\n        r2 = random.random()\n        if r2 > 0.5:\n            img = img.transpose(Image.FLIP_TOP_BOTTOM)\n        # 随机角度翻转\n        r3 = random.randint(-3, 3)\n        img = img.rotate(r3, expand=False)\n        # 随机裁剪\n        r4 = random.randint(0, int(resize_size - crop_size))\n        r5 = random.randint(0, int(resize_size - crop_size))\n        box = (r4, r5, r4 + crop_size, r5 + crop_size)\n        img = img.crop(box)\n        # 把图片转换成numpy值\n        img = np.array(img).astype(np.float32)\n        # 转换成CHW\n        img = img.transpose((2, 0, 1))\n        # 转换成BGR\n        img = img[(2, 1, 0), :, :] / 255.0\n        return img, int(label)\n    except:\n        print(\"%s 该图片错误，请删除该图片并重新创建图像数据列表\" % img_path)\n\n\n# 获取训练的reader\ndef train_reader(train_list_path, crop_size, resize_size):\n    father_path = os.path.dirname(train_list_path)\n\n    def reader():\n        with open(train_list_path, 'r') as f:\n            lines = f.readlines()\n            # 打乱图像列表\n            np.random.shuffle(lines)\n            # 开始获取每张图像和标签\n            for line in lines:\n                img, label = line.split('\\t')\n                img = os.path.join(father_path, img)\n                yield img, label, crop_size, resize_size\n\n    return paddle.reader.xmap_readers(train_mapper, reader, cpu_count(), 102400)\n\n\n# 测试图片的预处理\ndef test_mapper(sample):\n    img, label, crop_size = sample\n    img = Image.open(img)\n    # 统一图像大小\n    img = img.resize((crop_size, 
crop_size), Image.ANTIALIAS)\n    # 转换成numpy值\n    img = np.array(img).astype(np.float32)\n    # 转换成CHW\n    img = img.transpose((2, 0, 1))\n    # 转换成BGR\n    img = img[(2, 1, 0), :, :] / 255.0\n    return img, int(label)\n\n\n# 测试的图片reader\ndef test_reader(test_list_path, crop_size):\n    father_path = os.path.dirname(test_list_path)\n\n    def reader():\n        with open(test_list_path, 'r') as f:\n            lines = f.readlines()\n            for line in lines:\n                img, label = line.split('\\t')\n                img = os.path.join(father_path, img)\n                yield img, label, crop_size\n\n    return paddle.reader.xmap_readers(test_mapper, reader, cpu_count(), 1024)"
  },
  {
    "path": "note11/train.py",
    "content": "import os\nimport shutil\nimport mobilenet_v1\nimport paddle as paddle\nimport reader\nimport paddle.fluid as fluid\n\ncrop_size = 224\nresize_size = 250\n\n# 定义输入层\nimage = fluid.layers.data(name='image', shape=[3, crop_size, crop_size], dtype='float32')\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n\n# 获取分类器，因为这次只爬取了6个类别的图片，所以分类器的类别大小为6\nmodel = mobilenet_v1.net(image, 6)\n\n# 获取损失函数和准确率函数\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 获取训练和测试程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-3,\n                                          regularization=fluid.regularizer.L2DecayRegularizer(1e-4))\nopts = optimizer.minimize(avg_cost)\n\n# 获取自定义数据\ntrain_reader = paddle.batch(reader=reader.train_reader('images/train.list', crop_size, resize_size), batch_size=32)\ntest_reader = paddle.batch(reader=reader.test_reader('images/test.list', crop_size), batch_size=32)\n\n# 定义一个使用GPU的执行器\nplace = fluid.CUDAPlace(0)\n# place = fluid.CPUPlace()\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n\n# 训练10次\nfor pass_id in range(100):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc = exe.run(program=fluid.default_main_program(),\n                                        feed=feeder.feed(data),\n                                        fetch_list=[avg_cost, acc])\n\n        # 每100个batch打印一次信息\n        if batch_id % 100 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %\n                  (pass_id, batch_id, train_cost[0], train_acc[0]))\n\n    # 进行测试\n    test_accs = []\n    test_costs = []\n    for batch_id, data in enumerate(test_reader()):\n        test_cost, 
test_acc = exe.run(program=test_program,\n                                      feed=feeder.feed(data),\n                                      fetch_list=[avg_cost, acc])\n        test_accs.append(test_acc[0])\n        test_costs.append(test_cost[0])\n    # 求测试结果的平均值\n    test_cost = (sum(test_costs) / len(test_costs))\n    test_acc = (sum(test_accs) / len(test_accs))\n    print('Test:%d, Cost:%0.5f, Accuracy:%0.5f' % (pass_id, test_cost, test_acc))\n\n    # 保存预测模型\n    save_path = 'infer_model/'\n    # 删除旧的模型文件\n    shutil.rmtree(save_path, ignore_errors=True)\n    # 创建保持模型文件目录\n    os.makedirs(save_path)\n    # 保存预测模型\n    fluid.io.save_inference_model(save_path, feeded_var_names=[image.name], target_vars=[model], executor=exe)\n"
  },
  {
    "path": "note12/README.md",
    "content": "﻿@[TOC]\n\n# 前言\n我们在第五章学习了循环神经网络，在第五章中我们使用循环神经网络实现了一个文本分类的模型，不过使用的数据集是PaddlePaddle自带的一个数据集，我们并没有了解到PaddlePaddle是如何使用读取文本数据集的，那么本章我们就来学习一下如何使用PaddlePaddle训练自己的文本数据集。我们将会从中文文本数据集的制作开始介绍，一步步讲解如何使用训练一个中文文本分类神经网络模型。\n\nGitHub地址：https://github.com/yeyupiaoling/LearnPaddle2/tree/master/note12\n\n# 爬取文本数据集\n网络上一些高质量的中文文本分类数据集相当少，经过充分考虑之后，绝对自己从网络中爬取自己的中文文本数据集。在GitHub中有一个开源的爬取今日头条中文新闻标题的代码，链接地址请查看最后的参考资料。我们在这个开源代码上做一些简单修改后，就使用他来爬取数据。\n\n\n创建一个`download_text_data.py`文件，这个就是爬取数据集的程序。首先导入相应的依赖包。\n```python\nimport os\nimport random\nimport requests\nimport json\nimport time\n```\n\n然后设置新闻的分类列表，这些是我们将要爬取的新闻类别。第一个值是分类的标签，第二个值是分类的中文名称，第三个是网络访问的请求头，通过值获取相应类别的新闻。\n```python\n# 分类新闻参数\nnews_classify = [\n    [0, '民生', 'news_story'],\n    [1, '文化', 'news_culture'],\n    [2, '娱乐', 'news_entertainment'],\n    [3, '体育', 'news_sports'],\n    [4, '财经', 'news_finance'],\n    [5, '房产', 'news_house'],\n    [6, '汽车', 'news_car'],\n    [7, '教育', 'news_edu'],\n    [8, '科技', 'news_tech'],\n    [9, '军事', 'news_military'],\n    [10, '旅游', 'news_travel'],\n    [11, '国际', 'news_world'],\n    [12, '证券', 'stock'],\n    [13, '农业', 'news_agriculture'],\n    [14, '游戏', 'news_game']\n]\n```\n\n以下代码片段是爬取数据的核心代码。`get_data`函数的`tup`参数是上面定义的新闻类别，`data_path`参数是保存爬取的文本数据。为了让爬取的程序更像正常的网络访问，这里还设置了一个访问请求头参数`querystring`和请求头`headers`，然后通过`requests.request`进行网络访问，爬取新闻数据，并对其进行解析，最后把需要的数据保存到本地文件中。\n```python\n# 已经下载的新闻标题的ID\ndownloaded_data_id = []\n# 已经下载新闻标题的数量\ndownloaded_sum = 0\n\n\ndef get_data(tup, data_path):\n    global downloaded_data_id\n    global downloaded_sum\n    print('============%s============' % tup[1])\n    url = \"http://it.snssdk.com/api/news/feed/v63/\"\n    # 分类新闻的访问参数，模仿正常网络访问\n    t = int(time.time() / 10000)\n    t = random.randint(6 * t, 10 * t)\n    querystring = {\"category\": tup[2], \"max_behot_time\": t, \"last_refresh_sub_entrance_interval\": \"1524907088\",\n                   \"loc_mode\": \"5\",\n                   \"tt_from\": \"pre_load_more\", \"cp\": 
\"51a5ee4f38c50q1\", \"plugin_enable\": \"0\", \"iid\": \"31047425023\",\n                   \"device_id\": \"51425358841\", \"ac\": \"wifi\", \"channel\": \"tengxun\", \"aid\": \"13\",\n                   \"app_name\": \"news_article\", \"version_code\": \"631\", \"version_name\": \"6.3.1\",\n                   \"device_platform\": \"android\",\n                   \"ab_version\": \"333116,297979,317498,336556,295827,325046,239097,324283,170988,335432,332098,325198,336443,330632,297058,276203,286212,313219,328615,332041,329358,322321,327537,335710,333883,335102,334828,328670,324007,317077,334305,280773,335671,319960,333985,331719,336452,214069,31643,332881,333968,318434,207253,266310,321519,247847,281298,328218,335998,325618,333327,336199,323429,287591,288418,260650,326188,324614,335477,271178,326588,326524,326532\",\n                   \"ab_client\": \"a1,c4,e1,f2,g2,f7\", \"ab_feature\": \"94563,102749\", \"abflag\": \"3\", \"ssmix\": \"a\",\n                   \"device_type\": \"MuMu\", \"device_brand\": \"Android\", \"language\": \"zh\", \"os_api\": \"19\",\n                   \"os_version\": \"4.4.4\", \"uuid\": \"008796762094657\", \"openudid\": \"b7215ea70ca32066\",\n                   \"manifest_version_code\": \"631\", \"resolution\": \"1280*720\", \"dpi\": \"240\",\n                   \"update_version_code\": \"6310\", \"_rticket\": \"1524907088018\", \"plugin\": \"256\"}\n\n    headers = {\n        'cache-control': \"no-cache\",\n        'postman-token': \"26530547-e697-1e8b-fd82-7c6014b3ee86\",\n        'User-Agent': 'Dalvik/1.6.0 (Linux; U; Android 4.4.4; MuMu Build/V417IR) NewsArticle/6.3.1 okhttp/3.7.0.2'\n    }\n\n    # 进行网络请求\n    response = requests.request(\"GET\", url, headers=headers, params=querystring)\n    # 获取返回的数据\n    new_data = json.loads(response.text)\n    with open(data_path, 'a', encoding='utf-8') as fp:\n        for item in new_data['data']:\n            item = item['content']\n            item = item.replace('\\\"', '\"')\n         
   item = json.loads(item)\n            # 判断数据中是否包含id和新闻标题\n            if 'item_id' in item.keys() and 'title' in item.keys():\n                item_id = item['item_id']\n                print(downloaded_sum, tup[0], tup[1], item['item_id'], item['title'])\n                # 通过新闻id判断是否已经下载过\n                if item_id not in downloaded_data_id:\n                    downloaded_data_id.append(item_id)\n                    # 安装固定格式追加写入文件中\n                    line = u\"{}_!_{}_!_{}_!_{}\".format(item['item_id'], tup[0], tup[1], item['title'])\n                    line = line.replace('\\n', '').replace('\\r', '')\n                    line = line + '\\n'\n                    fp.write(line)\n                    downloaded_sum += 1\n```\n\n\n有时候爬取时间比较长，可能中途需要中断。所以就需要以下的代码进行处理，读取已经保存的文本数据的文件中的数据ID，通过使用这个数据集，在爬取数据的时候就不再重复保存数据了。\n```python\ndef get_routine(data_path):\n    global downloaded_sum\n    # 从文件中读取已经有的数据，避免数据重复\n    if os.path.exists(data_path):\n        with open(data_path, 'r', encoding='utf-8') as fp:\n            lines = fp.readlines()\n            downloaded_sum = len(lines)\n            for line in lines:\n                item_id = int(line.split('_!_')[0])\n                downloaded_data_id.append(item_id)\n            print('在文件中已经读起了%d条数据' % downloaded_sum)\n    else:\n        os.makedirs(os.path.dirname(data_path))\n\n    while 1:\n        # 　开始下载数据\n        time.sleep(10)\n        for classify in news_classify:\n            get_data(classify, data_path)\n        # 当下载量超过300000就停止下载\n        if downloaded_sum >= 300000:\n            break\n```\n\n最后在main入口中启动爬取文本数据的函数。\n```python\nif __name__ == '__main__':\n    data_path = 'datasets/news_classify_data.txt'\n    dict_path = \"datasets/dict_txt.txt\"\n    # 下载数据集\n    get_routine(data_path)\n```\n\n在爬取过程中，输出信息：\n```\n============文化============\n17 1 文化 6646565189942510093 世界第一豪宅，坐落于北京，一根柱子27个亿，世界首富都买不起！\n18 1 文化 6658382232383652104 俗语讲：“男怕初一，女怕十五”，这话什么意思？有道理吗？\n19 1 文化 6636596124998173192 
浙江一员工请假条火了，内容令人狂笑不止，字迹却让人念念不忘\n20 1 文化 6658848073562718734 难怪悟空被赶下山后菩提神秘消失，你看看方寸山门口对联写了啥？\n21 1 文化 6658952207871771140 他把183件国宝无偿捐给美国，捐回中国却收了450万美元\n```\n\n\n# 制作训练数据\n上面爬取的文本数据并不能直接拿来训练，因为PaddlePaddle训练的数据不能是字符串的，所以需要对这些文本数据转换成整型类型的数据。就是把一个字对应上唯一的数字，最后把全部的文字转换成数字。\n\n创建`create_data.py`文件。创建`create_dict()`函数，这个函数用来创建一个数据字典。数字字典就是把每个字都对应一个一个数字，包括标点符号。\n```python\nimport os\n\n# 把下载得数据生成一个字典\ndef create_dict(data_path, dict_path):\n    dict_set = set()\n    # 读取已经下载得数据\n    with open(data_path, 'r', encoding='utf-8') as f:\n        lines = f.readlines()\n    # 把数据生成一个元组\n    for line in lines:\n        title = line.split('_!_')[-1].replace('\\n', '')\n        for s in title:\n            dict_set.add(s)\n    # 把元组转换成字典，一个字对应一个数字\n    dict_list = []\n    i = 0\n    for s in dict_set:\n        dict_list.append([s, i])\n        i += 1\n    # 添加未知字符\n    dict_txt = dict(dict_list)\n    end_dict = {\"<unk>\": i}\n    dict_txt.update(end_dict)\n    # 把这些字典保存到本地中\n    with open(dict_path, 'w', encoding='utf-8') as f:\n        f.write(str(dict_txt))\n\n    print(\"数据字典生成完成！\")\n```\n\n生成的数据字典类型如下：\n```\n{'港': 712, '选': 367, '所': 0, '斯': 1,\n```\n\n\n创建一个数据自己之后，就使用这个数据字典把下载数据转换成数字，还有标签。\n```python\ndef create_data_list(data_root_path):\n    with open(data_root_path + 'test_list.txt', 'w') as f:\n        pass\n    with open(data_root_path + 'train_list.txt', 'w') as f:\n        pass\n\n    with open(os.path.join(data_root_path, 'dict_txt.txt'), 'r', encoding='utf-8') as f_data:\n        dict_txt = eval(f_data.readlines()[0])\n\n    with open(os.path.join(data_root_path, 'news_classify_data.txt'), 'r', encoding='utf-8') as f_data:\n        lines = f_data.readlines()\n    i = 0\n    for line in lines:\n        title = line.split('_!_')[-1].replace('\\n', '')\n        l = line.split('_!_')[1]\n        labs = \"\"\n        if i % 10 == 0:\n            with open(os.path.join(data_root_path, 'test_list.txt'), 'a', encoding='utf-8') as f_test:\n                for s in title:\n       
             lab = str(dict_txt[s])\n                    labs = labs + lab + ','\n                labs = labs[:-1]\n                labs = labs + '\\t' + l + '\\n'\n                f_test.write(labs)\n        else:\n            with open(os.path.join(data_root_path, 'train_list.txt'), 'a', encoding='utf-8') as f_train:\n                for s in title:\n                    lab = str(dict_txt[s])\n                    labs = labs + lab + ','\n                labs = labs[:-1]\n                labs = labs + '\\t' + l + '\\n'\n                f_train.write(labs)\n        i += 1\n    print(\"数据列表生成完成！\")\n```\n\n转换后的数据如下：\n```\n321,364,535,897,322,263,354,337,441,815,943\t12\n540,299,884,1092,671,938\t13\n```\n\n这里顺便增加获取字典长度的函数，因为在训练的时候获取神经网络分类器的时候需要用到。\n```python\n# 获取字典的长度\ndef get_dict_len(dict_path):\n    with open(dict_path, 'r', encoding='utf-8') as f:\n        line = eval(f.readlines()[0])\n\n    return len(line.keys())\n```\n\n最后执行创建数据字典和生成数据列表的函数就可以生成待训练的数据了。\n```python\nif __name__ == '__main__':\n    # 把生产的数据列表都放在自己的总类别文件夹中\n    data_root_path = \"datasets/\"\n    data_path = os.path.join(data_root_path, 'news_classify_data.txt')\n    dict_path = os.path.join(data_root_path, \"dict_txt.txt\")\n    # 创建数据字典\n    create_dict(data_path, dict_path)\n    # 创建数据列表\n    create_data_list(data_root_path)\n```\n\n在执行的过程中会输出信息：\n```\n数据字典生成完成！\n数据列表生成完成！\n```\n\n# 定义模型\n然后我们定义一个文本分类模型，这里使用的是双向单层LSTM模型，据说百度的情感分析也是使用这个模型的。我们创建一个`bilstm_net.py`文件，用于定义双向单层LSTM模型。\n```python\nimport paddle.fluid as fluid\n\ndef bilstm_net(data, dict_dim, class_dim, emb_dim=128, hid_dim=128, hid_dim2=96, emb_lr=30.0):\n    # embedding layer\n    emb = fluid.layers.embedding(input=data,\n                                 size=[dict_dim, emb_dim],\n                                 param_attr=fluid.ParamAttr(learning_rate=emb_lr))\n\n    # bi-lstm layer\n    fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4)\n\n    rfc0 = fluid.layers.fc(input=emb, size=hid_dim * 4)\n\n    lstm_h, c = 
fluid.layers.dynamic_lstm(input=fc0, size=hid_dim * 4, is_reverse=False)\n\n    rlstm_h, c = fluid.layers.dynamic_lstm(input=rfc0, size=hid_dim * 4, is_reverse=True)\n\n    # extract last layer\n    lstm_last = fluid.layers.sequence_last_step(input=lstm_h)\n    rlstm_last = fluid.layers.sequence_last_step(input=rlstm_h)\n\n    # concat layer\n    lstm_concat = fluid.layers.concat(input=[lstm_last, rlstm_last], axis=1)\n\n    # full connect layer\n    fc1 = fluid.layers.fc(input=lstm_concat, size=hid_dim2, act='tanh')\n    # softmax layer\n    prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')\n    return prediction\n```\n\n# 定义数据读取\n接下来我们定义`text_reader.py`文件，用于读取文本数据集。这相对图片读取来说，这比较简单。\n\n首先导入相应的依赖包。\n```python\nfrom multiprocessing import cpu_count\nimport numpy as np\nimport paddle\n```\n\n因为在上一个程序已经把文本转换成PaddlePaddle可读数据，所以直接就可以在文件中读取数据成了。\n```python\n# 训练数据的预处理\ndef train_mapper(sample):\n    data, label = sample\n    data = [int(data) for data in data.split(',')]\n    return data, int(label)\n\n# 训练数据的reader\ndef train_reader(train_list_path):\n\n    def reader():\n        with open(train_list_path, 'r') as f:\n            lines = f.readlines()\n            # 打乱数据\n            np.random.shuffle(lines)\n            # 开始获取每张图像和标签\n            for line in lines:\n                data, label = line.split('\\t')\n                yield data, label\n\n    return paddle.reader.xmap_readers(train_mapper, reader, cpu_count(), 1024)\n```\n\n这里跟训练的读取方式一样，只是没有一个打乱数据的操作。\n```python\n# 测试数据的预处理\ndef test_mapper(sample):\n    data, label = sample\n    data = [int(data) for data in data.split(',')]\n    return data, int(label)\n\n# 测试数据的reader\ndef test_reader(test_list_path):\n\n    def reader():\n        with open(test_list_path, 'r') as f:\n            lines = f.readlines()\n            for line in lines:\n                data, label = line.split('\\t')\n                yield data, label\n```\n\n# 
训练模型\n然后编写`train.py`文件，开始训练文本分类模型。首先到如相应的依赖包。\n```python\nimport os\nimport shutil\nimport paddle\nimport paddle.fluid as fluid\nimport create_data\nimport text_reader\nimport bilstm_net\n```\n\n定义网络输入层，数据是一条文本数据，所以只有一个维度。\n```python\n# 定义输入数据， lod_level不为0指定输入数据为序列数据\nwords = fluid.layers.data(name='words', shape=[1], dtype='int64', lod_level=1)\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n```\n\n接着是获取双向单层LSTM模型的分类器，这里需要用到文本数据集的字典大小，然后还需要分类器的大小，因为我们的文本数据有15个类别，所以这里分类器的大小是15。\n```python\n# 获取数据字典长度\ndict_dim = create_data.get_dict_len('datasets/dict_txt.txt')\n# 获取长短期记忆网络\nmodel = bilstm_net.bilstm_net(words, dict_dim, 15)\n```\n\n然后是定义一系列的损失函数，准确率函数，克隆预测程序和优化方法。这里使用的优化方法是Adagrad优化方法，Adagrad优化方法多用于处理稀疏数据。\n```python\n# 获取损失函数和准确率\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 获取预测程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.002)\nopt = optimizer.minimize(avg_cost)\n\n# 创建一个执行器，CPU训练速度比较慢\n# place = fluid.CPUPlace()\nplace = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n```\n\n这里就是获取我们在上一个文件中定义读取数据的reader，根据不同的文本文件加载训练和预测的数据，准备进行训练。\n```python\n# 获取训练和预测数据\ntrain_reader = paddle.batch(reader=text_reader.train_reader('datasets/train_list.txt'), batch_size=128)\ntest_reader = paddle.batch(reader=text_reader.test_reader('datasets/test_list.txt'), batch_size=128)\n```\n\n最后在这里进行训练和测试，我们然执行器在训练的过程中输出训练时的是损失值和准确率。然后每40个batch打印一次信息和执行一次测试操作，查看网络模型在测试集中的准确率。\n```python\n# 定义输入数据的维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[words, label])\n\n# 开始训练\nfor pass_id in range(10):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc = exe.run(program=fluid.default_main_program(),\n                             feed=feeder.feed(data),\n              
               fetch_list=[avg_cost, acc])\n\n        if batch_id % 40 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Acc:%0.5f' % (pass_id, batch_id, train_cost[0], train_acc[0]))\n            # 进行测试\n            test_costs = []\n            test_accs = []\n            for batch_id, data in enumerate(test_reader()):\n                test_cost, test_acc = exe.run(program=test_program,\n                                              feed=feeder.feed(data),\n                                              fetch_list=[avg_cost, acc])\n                test_costs.append(test_cost[0])\n                test_accs.append(test_acc[0])\n            # 计算平均预测损失在和准确率\n            test_cost = (sum(test_costs) / len(test_costs))\n            test_acc = (sum(test_accs) / len(test_accs))\n            print('Test:%d, Cost:%0.5f, ACC:%0.5f' % (pass_id, test_cost, test_acc))\n```\n\n我可以在每pass训练结束之后保存一次预测模型，可以用于之后的预测。\n```python\n    # 保存预测模型\n    save_path = 'infer_model/'\n    # 删除旧的模型文件\n    shutil.rmtree(save_path, ignore_errors=True)\n    # 创建保持模型文件目录\n    os.makedirs(save_path)\n    # 保存预测模型\n    fluid.io.save_inference_model(save_path, feeded_var_names=[words.name], target_vars=[model], executor=exe)\n```\n\n训练输出的信息：\n```\nPass:0, Batch:0, Cost:2.70816, Acc:0.07812\nTest:0, Cost:2.68423, ACC:0.14427\nPass:0, Batch:40, Cost:2.01647, Acc:0.34375\nTest:0, Cost:1.99191, ACC:0.34301\nPass:0, Batch:80, Cost:1.61981, Acc:0.47656\nTest:0, Cost:1.69227, ACC:0.46456\nPass:0, Batch:120, Cost:1.40459, Acc:0.57812\nTest:0, Cost:1.47188, ACC:0.53961\nPass:0, Batch:160, Cost:1.15466, Acc:0.65625\nTest:0, Cost:1.32585, ACC:0.59393\nPass:0, Batch:200, Cost:1.08597, Acc:0.67188\nTest:0, Cost:1.20917, ACC:0.63793\nPass:0, Batch:240, Cost:1.08081, Acc:0.66406\nTest:0, Cost:1.14794, ACC:0.66145\n```\n\n# 预测文本\n在上面的训练中，我们已经训练到了一个文本分类预测模型。接下来我们就使用这个模型来预测我们想要预测文本。\n\n创建`infer.py`文件开始进行预测，首先导入依赖包。\n```python\nimport numpy as np\nimport paddle.fluid as 
fluid\n```\n\n然后创建执行器，并加载预测模型文件，获取到预测程序和输入数据的名称和网络分类器。\n```python\n# 创建执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\nexe.run(fluid.default_startup_program())\n\n# 保存预测模型路径\nsave_path = 'infer_model/'\n# 从模型中获取预测程序、输入数据名称列表、分类器\n[infer_program, feeded_var_names, target_var] = fluid.io.load_inference_model(dirname=save_path, executor=exe)\n```\n\n因为我们输入的是文本数据，但是PaddlePaddle读取的数据是整型数据，所以我们需要一个函数帮助我们把文本字符根据数据集的字典转换成整型数据。\n```python\n# 获取数据\ndef get_data(sentence):\n    # 读取数据字典\n    with open('datasets/dict_txt.txt', 'r', encoding='utf-8') as f_data:\n        dict_txt = eval(f_data.readlines()[0])\n    dict_txt = dict(dict_txt)\n    # 把字符串数据转换成列表数据\n    keys = dict_txt.keys()\n    data = []\n    for s in sentence:\n        # 判断是否存在未知字符\n        if not s in keys:\n            s = '<unk>'\n        data.append(int(dict_txt[s]))\n    return data\n```\n\n然后在这里获取数据。\n```python\ndata = []\n# 获取图片数据\ndata1 = get_data('京城最值得你来场文化之旅的博物馆')\ndata2 = get_data('谢娜为李浩菲澄清网络谣言，之后她的两个行为给自己加分')\ndata.append(data1)\ndata.append(data2)\n```\n\n因为输入的不定长度的文本数据，所以我们需要根据不同的输入数据的长度创建张量数据。\n```python\n# 获取每句话的单词数量\nbase_shape = [[len(c) for c in data]]\n\n# 生成预测数据\ntensor_words = fluid.create_lod_tensor(data, base_shape, place)\n```\n\n最后执行预测程序，获取预测结果。\n```python\n# 执行预测\nresult = exe.run(program=infer_program,\n                 feed={feeded_var_names[0]: tensor_words},\n                 fetch_list=target_var)\n```\n\n获取预测结果之后，获取预测结果的最大概率的标签，然后根据这个标签获取类别的名字。\n```python\n# 分类名称\nnames = ['民生', '文化', '娱乐', '体育', '财经',\n         '房产', '汽车', '教育', '科技', '军事',\n         '旅游', '国际', '证券', '农业', '游戏']\n\n# 获取结果概率最大的label\nfor i in range(len(data)):\n    lab = np.argsort(result)[0][i][-1]\n    print('预测结果标签为：%d， 名称为：%s， 概率为：%f' % (lab, names[lab], result[0][i][lab]))\n```\n\n预测输出的信息：\n```\n预测结果标签为：10， 名称为：旅游， 概率为：0.848075\n预测结果标签为：2， 名称为：娱乐， 概率为：0.894570\n```\n\n\n# 参考资料\n1. https://github.com/fate233/toutiao-text-classfication-dataset\n2. https://github.com/baidu/Senta"
  },
  {
    "path": "note12/bilstm_net.py",
    "content": "import paddle.fluid as fluid\n\n\ndef bilstm_net(data, dict_dim, class_dim, emb_dim=128, hid_dim=128, hid_dim2=96, emb_lr=30.0):\n    # embedding layer\n    emb = fluid.layers.embedding(input=data,\n                                 size=[dict_dim, emb_dim],\n                                 param_attr=fluid.ParamAttr(learning_rate=emb_lr))\n\n    # bi-lstm layer\n    fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4)\n\n    rfc0 = fluid.layers.fc(input=emb, size=hid_dim * 4)\n\n    lstm_h, c = fluid.layers.dynamic_lstm(input=fc0, size=hid_dim * 4, is_reverse=False)\n\n    rlstm_h, c = fluid.layers.dynamic_lstm(input=rfc0, size=hid_dim * 4, is_reverse=True)\n\n    # extract last layer\n    lstm_last = fluid.layers.sequence_last_step(input=lstm_h)\n    rlstm_last = fluid.layers.sequence_last_step(input=rlstm_h)\n\n    # concat layer\n    lstm_concat = fluid.layers.concat(input=[lstm_last, rlstm_last], axis=1)\n\n    # full connect layer\n    fc1 = fluid.layers.fc(input=lstm_concat, size=hid_dim2, act='tanh')\n    # softmax layer\n    prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')\n    return prediction\n"
  },
  {
    "path": "note12/create_data.py",
    "content": "import os\n\n\ndef create_data_list(data_root_path):\n    with open(data_root_path + 'test_list.txt', 'w') as f:\n        pass\n    with open(data_root_path + 'train_list.txt', 'w') as f:\n        pass\n\n    with open(os.path.join(data_root_path, 'dict_txt.txt'), 'r', encoding='utf-8') as f_data:\n        dict_txt = eval(f_data.readlines()[0])\n\n    with open(os.path.join(data_root_path, 'news_classify_data.txt'), 'r', encoding='utf-8') as f_data:\n        lines = f_data.readlines()\n    i = 0\n    for line in lines:\n        title = line.split('_!_')[-1].replace('\\n', '')\n        l = line.split('_!_')[1]\n        labs = \"\"\n        if i % 10 == 0:\n            with open(os.path.join(data_root_path, 'test_list.txt'), 'a', encoding='utf-8') as f_test:\n                for s in title:\n                    lab = str(dict_txt[s])\n                    labs = labs + lab + ','\n                labs = labs[:-1]\n                labs = labs + '\\t' + l + '\\n'\n                f_test.write(labs)\n        else:\n            with open(os.path.join(data_root_path, 'train_list.txt'), 'a', encoding='utf-8') as f_train:\n                for s in title:\n                    lab = str(dict_txt[s])\n                    labs = labs + lab + ','\n                labs = labs[:-1]\n                labs = labs + '\\t' + l + '\\n'\n                f_train.write(labs)\n        i += 1\n    print(\"数据列表生成完成！\")\n\n\n# 把下载得数据生成一个字典\ndef create_dict(data_path, dict_path):\n    dict_set = set()\n    # 读取已经下载得数据\n    with open(data_path, 'r', encoding='utf-8') as f:\n        lines = f.readlines()\n    # 把数据生成一个元组\n    for line in lines:\n        title = line.split('_!_')[-1].replace('\\n', '')\n        for s in title:\n            dict_set.add(s)\n    # 把元组转换成字典，一个字对应一个数字\n    dict_list = []\n    i = 0\n    for s in dict_set:\n        dict_list.append([s, i])\n        i += 1\n    # 添加未知字符\n    dict_txt = dict(dict_list)\n    end_dict = {\"<unk>\": i}\n    
dict_txt.update(end_dict)\n    # 把这些字典保存到本地中\n    with open(dict_path, 'w', encoding='utf-8') as f:\n        f.write(str(dict_txt))\n\n    print(\"数据字典生成完成！\")\n\n\n# 获取字典的长度\ndef get_dict_len(dict_path):\n    with open(dict_path, 'r', encoding='utf-8') as f:\n        line = eval(f.readlines()[0])\n\n    return len(line.keys())\n\n\nif __name__ == '__main__':\n    # 把生产的数据列表都放在自己的总类别文件夹中\n    data_root_path = \"datasets/\"\n    data_path = os.path.join(data_root_path, 'news_classify_data.txt')\n    dict_path = os.path.join(data_root_path, \"dict_txt.txt\")\n    # 创建数据字典\n    create_dict(data_path, dict_path)\n    # 创建数据列表\n    create_data_list(data_root_path)\n"
  },
  {
    "path": "note12/download_text_data.py",
    "content": "import os\nimport random\nimport requests\nimport json\nimport time\n\n# 分类新闻参数\nnews_classify = [\n    [0, '民生', 'news_story'],\n    [1, '文化', 'news_culture'],\n    [2, '娱乐', 'news_entertainment'],\n    [3, '体育', 'news_sports'],\n    [4, '财经', 'news_finance'],\n    [5, '房产', 'news_house'],\n    [6, '汽车', 'news_car'],\n    [7, '教育', 'news_edu'],\n    [8, '科技', 'news_tech'],\n    [9, '军事', 'news_military'],\n    [10, '旅游', 'news_travel'],\n    [11, '国际', 'news_world'],\n    [12, '证券', 'stock'],\n    [13, '农业', 'news_agriculture'],\n    [14, '游戏', 'news_game']\n]\n\n# 已经下载的新闻标题的ID\ndownloaded_data_id = []\n# 已经下载新闻标题的数量\ndownloaded_sum = 0\n\n\ndef get_data(tup, data_path):\n    global downloaded_data_id\n    global downloaded_sum\n    print('============%s============' % tup[1])\n    url = \"http://it.snssdk.com/api/news/feed/v63/\"\n    # 分类新闻的访问参数，模仿正常网络访问\n    t = int(time.time() / 10000)\n    t = random.randint(6 * t, 10 * t)\n    querystring = {\"category\": tup[2], \"max_behot_time\": t, \"last_refresh_sub_entrance_interval\": \"1524907088\",\n                   \"loc_mode\": \"5\",\n                   \"tt_from\": \"pre_load_more\", \"cp\": \"51a5ee4f38c50q1\", \"plugin_enable\": \"0\", \"iid\": \"31047425023\",\n                   \"device_id\": \"51425358841\", \"ac\": \"wifi\", \"channel\": \"tengxun\", \"aid\": \"13\",\n                   \"app_name\": \"news_article\", \"version_code\": \"631\", \"version_name\": \"6.3.1\",\n                   \"device_platform\": \"android\",\n                   \"ab_version\": 
\"333116,297979,317498,336556,295827,325046,239097,324283,170988,335432,332098,325198,336443,330632,297058,276203,286212,313219,328615,332041,329358,322321,327537,335710,333883,335102,334828,328670,324007,317077,334305,280773,335671,319960,333985,331719,336452,214069,31643,332881,333968,318434,207253,266310,321519,247847,281298,328218,335998,325618,333327,336199,323429,287591,288418,260650,326188,324614,335477,271178,326588,326524,326532\",\n                   \"ab_client\": \"a1,c4,e1,f2,g2,f7\", \"ab_feature\": \"94563,102749\", \"abflag\": \"3\", \"ssmix\": \"a\",\n                   \"device_type\": \"MuMu\", \"device_brand\": \"Android\", \"language\": \"zh\", \"os_api\": \"19\",\n                   \"os_version\": \"4.4.4\", \"uuid\": \"008796762094657\", \"openudid\": \"b7215ea70ca32066\",\n                   \"manifest_version_code\": \"631\", \"resolution\": \"1280*720\", \"dpi\": \"240\",\n                   \"update_version_code\": \"6310\", \"_rticket\": \"1524907088018\", \"plugin\": \"256\"}\n\n    headers = {\n        'cache-control': \"no-cache\",\n        'postman-token': \"26530547-e697-1e8b-fd82-7c6014b3ee86\",\n        'User-Agent': 'Dalvik/1.6.0 (Linux; U; Android 4.4.4; MuMu Build/V417IR) NewsArticle/6.3.1 okhttp/3.7.0.2'\n    }\n\n    # 进行网络请求\n    response = requests.request(\"GET\", url, headers=headers, params=querystring)\n    # 获取返回的数据\n    new_data = json.loads(response.text)\n    with open(data_path, 'a', encoding='utf-8') as fp:\n        for item in new_data['data']:\n            item = item['content']\n            item = item.replace('\\\"', '\"')\n            item = json.loads(item)\n            # 判断数据中是否包含id和新闻标题\n            if 'item_id' in item.keys() and 'title' in item.keys():\n                item_id = item['item_id']\n                print(downloaded_sum, tup[0], tup[1], item['item_id'], item['title'])\n                # 通过新闻id判断是否已经下载过\n                if item_id not in downloaded_data_id:\n                    
downloaded_data_id.append(item_id)\n                    # 安装固定格式追加写入文件中\n                    line = u\"{}_!_{}_!_{}_!_{}\".format(item['item_id'], tup[0], tup[1], item['title'])\n                    line = line.replace('\\n', '').replace('\\r', '')\n                    line = line + '\\n'\n                    fp.write(line)\n                    downloaded_sum += 1\n\n\ndef get_routine(data_path):\n    global downloaded_sum\n    # 从文件中读取已经有的数据，避免数据重复\n    if os.path.exists(data_path):\n        with open(data_path, 'r', encoding='utf-8') as fp:\n            lines = fp.readlines()\n            downloaded_sum = len(lines)\n            for line in lines:\n                item_id = int(line.split('_!_')[0])\n                downloaded_data_id.append(item_id)\n            print('在文件中已经读起了%d条数据' % downloaded_sum)\n    else:\n        os.makedirs(os.path.dirname(data_path))\n\n    while 1:\n        # 　开始下载数据\n        time.sleep(10)\n        for classify in news_classify:\n            get_data(classify, data_path)\n        # 当下载量超过300000就停止下载\n        if downloaded_sum >= 300000:\n            break\n\n\nif __name__ == '__main__':\n    data_path = 'datasets/news_classify_data.txt'\n    dict_path = \"datasets/dict_txt.txt\"\n    # 下载数据集\n    get_routine(data_path)"
  },
  {
    "path": "note12/infer.py",
    "content": "import numpy as np\nimport paddle.fluid as fluid\n\n# 创建执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\nexe.run(fluid.default_startup_program())\n\n# 保存预测模型路径\nsave_path = 'infer_model/'\n# 从模型中获取预测程序、输入数据名称列表、分类器\n[infer_program, feeded_var_names, target_var] = fluid.io.load_inference_model(dirname=save_path, executor=exe)\n\n\n# 获取数据\ndef get_data(sentence):\n    # 读取数据字典\n    with open('datasets/dict_txt.txt', 'r', encoding='utf-8') as f_data:\n        dict_txt = eval(f_data.readlines()[0])\n    dict_txt = dict(dict_txt)\n    # 把字符串数据转换成列表数据\n    keys = dict_txt.keys()\n    data = []\n    for s in sentence:\n        # 判断是否存在未知字符\n        if not s in keys:\n            s = '<unk>'\n        data.append(np.int64(dict_txt[s]))\n    return data\n\n\ndata = []\n# 获取图片数据\ndata1 = get_data('京城最值得你来场文化之旅的博物馆')\ndata2 = get_data('谢娜为李浩菲澄清网络谣言，之后她的两个行为给自己加分')\ndata.append(data1)\ndata.append(data2)\n\n# 获取每句话的单词数量\nbase_shape = [[len(c) for c in data]]\n\n# 生成预测数据\ntensor_words = fluid.create_lod_tensor(data, base_shape, place)\n\n# 执行预测\nresult = exe.run(program=infer_program,\n                 feed={feeded_var_names[0]: tensor_words},\n                 fetch_list=target_var)\n\n# 分类名称\nnames = ['民生', '文化', '娱乐', '体育', '财经',\n         '房产', '汽车', '教育', '科技', '军事',\n         '旅游', '国际', '证券', '农业', '游戏']\n\n# 获取结果概率最大的label\nfor i in range(len(data)):\n    lab = np.argsort(result)[0][i][-1]\n    print('预测结果标签为：%d， 名称为：%s， 概率为：%f' % (lab, names[lab], result[0][i][lab]))\n"
  },
  {
    "path": "note12/text_reader.py",
    "content": "from multiprocessing import cpu_count\nimport numpy as np\nimport paddle\n\n\n# 训练数据的预处理\ndef train_mapper(sample):\n    data, label = sample\n    data = [int(data) for data in data.split(',')]\n    return data, int(label)\n\n\n# 训练数据的reader\ndef train_reader(train_list_path):\n\n    def reader():\n        with open(train_list_path, 'r') as f:\n            lines = f.readlines()\n            # 打乱数据\n            np.random.shuffle(lines)\n            # 开始获取每张图像和标签\n            for line in lines:\n                data, label = line.split('\\t')\n                yield data, label\n\n    return paddle.reader.xmap_readers(train_mapper, reader, cpu_count(), 1024)\n\n\n# 测试数据的预处理\ndef test_mapper(sample):\n    data, label = sample\n    data = [int(data) for data in data.split(',')]\n    return data, int(label)\n\n\n# 测试数据的reader\ndef test_reader(test_list_path):\n\n    def reader():\n        with open(test_list_path, 'r') as f:\n            lines = f.readlines()\n            for line in lines:\n                data, label = line.split('\\t')\n                yield data, label\n\n    return paddle.reader.xmap_readers(test_mapper, reader, cpu_count(), 1024)"
  },
  {
    "path": "note12/train.py",
    "content": "import os\nimport shutil\n\nimport paddle\nimport paddle.fluid as fluid\n\nimport create_data\nimport text_reader\nimport bilstm_net\n\n# 定义输入数据， lod_level不为0指定输入数据为序列数据\nwords = fluid.layers.data(name='words', shape=[1], dtype='int64', lod_level=1)\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n\n# 获取数据字典长度\ndict_dim = create_data.get_dict_len('datasets/dict_txt.txt')\n# 获取长短期记忆网络\nmodel = bilstm_net.bilstm_net(words, dict_dim, 15)\n\n# 获取损失函数和准确率\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 获取预测程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.002)\nopt = optimizer.minimize(avg_cost)\n\n# 创建一个执行器，CPU训练速度比较慢\n# place = fluid.CPUPlace()\nplace = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 获取训练和预测数据\ntrain_reader = paddle.batch(reader=text_reader.train_reader('datasets/train_list.txt'), batch_size=128)\ntest_reader = paddle.batch(reader=text_reader.test_reader('datasets/test_list.txt'), batch_size=128)\n\n# 定义输入数据的维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[words, label])\n\n# 开始训练\nfor pass_id in range(10):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc = exe.run(program=fluid.default_main_program(),\n                             feed=feeder.feed(data),\n                             fetch_list=[avg_cost, acc])\n\n        if batch_id % 40 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Acc:%0.5f' % (pass_id, batch_id, train_cost[0], train_acc[0]))\n            # 进行测试\n            test_costs = []\n            test_accs = []\n            for batch_id, data in enumerate(test_reader()):\n                test_cost, test_acc = exe.run(program=test_program,\n                                              
feed=feeder.feed(data),\n                                              fetch_list=[avg_cost, acc])\n                test_costs.append(test_cost[0])\n                test_accs.append(test_acc[0])\n            # 计算平均预测损失在和准确率\n            test_cost = (sum(test_costs) / len(test_costs))\n            test_acc = (sum(test_accs) / len(test_accs))\n            print('Test:%d, Cost:%0.5f, ACC:%0.5f' % (pass_id, test_cost, test_acc))\n\n    # 保存预测模型\n    save_path = 'infer_model/'\n    # 删除旧的模型文件\n    shutil.rmtree(save_path, ignore_errors=True)\n    # 创建保持模型文件目录\n    os.makedirs(save_path)\n    # 保存预测模型\n    fluid.io.save_inference_model(save_path, feeded_var_names=[words.name], target_vars=[model], executor=exe)\n"
  },
  {
    "path": "note13/README.md",
    "content": "﻿@[TOC]\n\n# 前言\n我们在第六章介绍了生成对抗网络，并使用生成对抗网络训练mnist数据集，生成手写数字图片。那么本章我们将使用对抗生成网络训练我们自己的图片数据集，并生成图片。在第六章中我们使用的黑白的单通道图片，在这一章中，我们使用的是3通道的彩色图。\n\nGitHub地址：https://github.com/yeyupiaoling/LearnPaddle2/tree/master/note13\n\n# 定义数据读取\n我们首先创建一个`image_reader.py`文件，用于读取我们自己定义的图片数据集。首先导入所需的依赖包。\n```python\nimport os\nimport random\nfrom multiprocessing import cpu_count\nimport numpy as np\nimport paddle\nfrom PIL import Image\n```\n\n这里的图片预处理主要是对图片进行等比例压缩和中心裁剪，这里为了避免图片在图片在resize时出现变形的情况，导致训练生成的图片不是我们真实图片的样子。这里为了增强数据集，做了随机水平翻转。最后在处理图片的时候，为了避免数据集中有单通道图片导致训练中断，所以还把单通道图转成3通道图片。\n```python\n# 测试图片的预处理\ndef train_mapper(sample):\n    img, crop_size = sample\n    img = Image.open(img)\n    # 随机水平翻转\n    r1 = random.random()\n    if r1 > 0.5:\n        img = img.transpose(Image.FLIP_LEFT_RIGHT)\n    # 等比例缩放和中心裁剪\n    width = img.size[0]\n    height = img.size[1]\n    if width < height:\n        ratio = width / crop_size\n        width = width / ratio\n        height = height / ratio\n        img = img.resize((int(width), int(height)), Image.ANTIALIAS)\n        height = height / 2\n        crop_size2 = crop_size / 2\n        box = (0, int(height - crop_size2), int(width), int(height + crop_size2))\n    else:\n        ratio = height / crop_size\n        height = height / ratio\n        width = width / ratio\n        img = img.resize((int(width), int(height)), Image.ANTIALIAS)\n        width = width / 2\n        crop_size2 = crop_size / 2\n        box = (int(width - crop_size2), 0, int(width + crop_size2), int(height))\n    img = img.crop(box)\n    img = img.resize((crop_size, crop_size), Image.ANTIALIAS)\n\n    # 把单通道图变成3通道\n    if len(img.getbands()) == 1:\n        img1 = img2 = img3 = img\n        img = Image.merge('RGB', (img1, img2, img3))\n\n    # 转换成numpy值\n    img = np.array(img).astype(np.float32)\n    # 转换成CHW\n    img = img.transpose((2, 0, 1))\n    # 转换成BGR\n    img = img[(2, 1, 0), :, :] / 255.0\n    return 
img\n```\n\n在这篇文章中，我们读取数据集不需要使用到数据列表，因为我们并没有进行分类，只是把所有的图片用于训练并生成图片。所有这里只需要把文件中的所有图片都读取进行训练就 可以了。\n```python\n# 测试的图片reader\ndef train_reader(train_image_path, crop_size):\n    pathss = []\n    for root, dirs, files in os.walk(train_image_path):\n        path = [os.path.join(root, name) for name in files]\n        pathss.extend(path)\n\n    def reader():\n        for line in pathss:\n            yield line, crop_size\n\n    return paddle.reader.xmap_readers(train_mapper, reader, cpu_count(), 1024)\n```\n\n# 训练生成模型\n下面创建`train.py`文件，用于训练对抗生成模型，并在训练过程中生成图片和保存预测模型。首先导入所需的依赖包。\n```python\nimport os\nimport shutil\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nimport matplotlib.pyplot as plt\nimport image_reader\n```\n\n下面时定义生成器的，我们在第六章也介绍过。生成器的作用是尽可能生成满足判别器条件的图像。随着以上训练的进行，判别器不断增强自身的判别能力，而生成器也不断生成越来越逼真的图片，以欺骗判别器。生成器主要由两组全连接和BN层、两组转置卷积运算组成。唯一不同的时在生成器最后输出的大小是3，因为我们生成的图片是3通道的彩色图片，而且使用的激活函数是sigmoid，保证了输出的结果都是在0到1范围之内，这是彩色图片的颜色范围。\n```python\n# 训练的图片大小\nimage_size = 112\n\n# 定义生成器\ndef Generator(y, name=\"G\"):\n    def deconv(x, num_filters, filter_size=5, stride=2, dilation=1, padding=2, output_size=None, act=None):\n        return fluid.layers.conv2d_transpose(input=x,\n                                             num_filters=num_filters,\n                                             output_size=output_size,\n                                             filter_size=filter_size,\n                                             stride=stride,\n                                             dilation=dilation,\n                                             padding=padding,\n                                             act=act)\n\n    with fluid.unique_name.guard(name + \"/\"):\n        # 第一组全连接和BN层\n        y = fluid.layers.fc(y, size=2048)\n        y = fluid.layers.batch_norm(y)\n        # 第二组全连接和BN层\n        y = fluid.layers.fc(y, size=int(128 * (image_size / 4) * (image_size / 4)))\n        y = fluid.layers.batch_norm(y)\n        # 进行形状变换\n        y = 
fluid.layers.reshape(y, shape=[-1, 128, int((image_size / 4)), int((image_size / 4))])\n        # 第一组转置卷积运算\n        y = deconv(x=y, num_filters=128, act='relu', output_size=[int((image_size / 2)), int((image_size / 2))])\n        # 第二组转置卷积运算\n        y = deconv(x=y, num_filters=3, act='sigmoid', output_size=[image_size, image_size])\n    return y\n```\n\n判别器的作用是训练真实的数据集，然后使用训练真实数据集模型去判别生成器生成的假图片。这一过程可以理解判别器为一个二分类问题，判别器在训练真实数据集时，尽量让其输出概率为1，而训练生成器生成的假图片输出概率为0。这样不断给生成器压力，让其生成的图片尽量逼近真实图片，以至于真实到连判别器也无法判断这是真实图像还是假图片。以下判别器由三组卷积池化层和一个最后全连接层组成，全连接层的大小为1，输入一个二分类的结果。\n```python\n# 判别器 Discriminator\ndef Discriminator(images, name=\"D\"):\n    # 定义一个卷积池化组\n    def conv_pool(input, num_filters, act=None):\n        return fluid.nets.simple_img_conv_pool(input=input,\n                                               filter_size=3,\n                                               num_filters=num_filters,\n                                               pool_size=2,\n                                               pool_stride=2,\n                                               act=act)\n\n    with fluid.unique_name.guard(name + \"/\"):\n        y = fluid.layers.reshape(x=images, shape=[-1, 3, image_size, image_size])\n        # 第一个卷积池化组\n        y = conv_pool(input=y, num_filters=64, act='leaky_relu')\n        # 第一个卷积池化加回归层\n        y = conv_pool(input=y, num_filters=128)\n        y = fluid.layers.batch_norm(input=y, act='leaky_relu')\n        # 第二个卷积池化加回归层\n        y = fluid.layers.fc(input=y, size=1024)\n        y = fluid.layers.batch_norm(input=y, act='leaky_relu')\n        # 最后一个分类器输出\n        y = fluid.layers.fc(input=y, size=1, act='sigmoid')\n    return y\n```\n\n然后在这里获取所需的程序，如判别器D识别生成器G生成的假图片程序，判别器D识别真实图片程序，生成器G生成符合判别器D的程序和初始化的程序。最后定义一个`get_params()`函数用于获取参数名称。\n```python\n# 创建判别器D识别生成器G生成的假图片程序\ntrain_d_fake = fluid.Program()\n# 创建判别器D识别真实图片程序\ntrain_d_real = fluid.Program()\n# 创建生成器G生成符合判别器D的程序\ntrain_g = fluid.Program()\n\n# 创建共同的一个初始化的程序\nstartup = fluid.Program()\n\n# 
噪声维度\nz_dim = 100\n\n# 从Program获取prefix开头的参数名字\ndef get_params(program, prefix):\n    all_params = program.global_block().all_parameters()\n    return [t.name for t in all_params if t.name.startswith(prefix)]\n```\n\n定义一个判别器识别真实图片的程序，这里判别器传入的数据是真实的图片数据，这里的输出图片是3通道的。这里使用的损失函数是fluid.layers.sigmoid_cross_entropy_with_logits()，这个损失函数是求它们在任务上的错误率，他们的类别是互不排斥的。所以无论真实图片的标签是什么，都不会影响模型识别为真实图片。这里更新的也只有判别器模型的参数，使用的优化方法是Adam。\n```python\n# 训练判别器D识别真实图片\nwith fluid.program_guard(train_d_real, startup):\n    # 创建读取真实数据集图片的data，并且label为1\n    real_image = fluid.layers.data('image', shape=[3, image_size, image_size])\n    ones = fluid.layers.fill_constant_batch_size_like(real_image, shape=[-1, 1], dtype='float32', value=1)\n\n    # 判别器D判断真实图片的概率\n    p_real = Discriminator(real_image)\n    # 获取损失函数\n    real_cost = fluid.layers.sigmoid_cross_entropy_with_logits(p_real, ones)\n    real_avg_cost = fluid.layers.mean(real_cost)\n\n    # 获取判别器D的参数\n    d_params = get_params(train_d_real, \"D\")\n\n    # 创建优化方法\n    optimizer = fluid.optimizer.Adam(learning_rate=2e-4)\n    optimizer.minimize(real_avg_cost, parameter_list=d_params)\n```\n\n这里定义一个判别器识别生成器生成的图片的程序，这里是使用噪声的维度进行输入。这里判别器识别的是生成器生成的图片，这里使用的损失函数同样是fluid.layers.sigmoid_cross_entropy_with_logits()。这里更新的参数还是判别器模型的参数，也是使用Adam优化方法。\n```python\n# 训练判别器D识别生成器G生成的图片为假图片\nwith fluid.program_guard(train_d_fake, startup):\n    # 利用创建假的图片data，并且label为0\n    z = fluid.layers.data(name='z', shape=[z_dim])\n    zeros = fluid.layers.fill_constant_batch_size_like(z, shape=[-1, 1], dtype='float32', value=0)\n\n    # 判别器D判断假图片的概率\n    p_fake = Discriminator(Generator(z))\n\n    # 获取损失函数\n    fake_cost = fluid.layers.sigmoid_cross_entropy_with_logits(p_fake, zeros)\n    fake_avg_cost = fluid.layers.mean(fake_cost)\n\n    # 获取判别器D的参数\n    d_params = get_params(train_d_fake, \"D\")\n\n    # 创建优化方法\n    optimizer = fluid.optimizer.Adam(learning_rate=2e-4)\n    optimizer.minimize(fake_avg_cost, 
parameter_list=d_params)\n```\n\n最后定义一个训练生成器生成图片的模型，这里也克隆一个预测程序，用于之后在训练的时候输出预测的图片。损失函数和优化方法都一样，但是要更新的参数是生成器的模型参。\n```python\n# 训练生成器G生成符合判别器D标准的假图片\nfake = None\nwith fluid.program_guard(train_g, startup):\n    # 噪声生成图片为真实图片的概率，Label为1\n    z = fluid.layers.data(name='z', shape=[z_dim])\n    ones = fluid.layers.fill_constant_batch_size_like(z, shape=[-1, 1], dtype='float32', value=1)\n\n    # 生成图片\n    fake = Generator(z)\n    # 克隆预测程序\n    infer_program = train_g.clone(for_test=True)\n\n    # 生成符合判别器的假图片\n    p = Discriminator(fake)\n\n    # 获取损失函数\n    g_cost = fluid.layers.sigmoid_cross_entropy_with_logits(p, ones)\n    g_avg_cost = fluid.layers.mean(g_cost)\n\n    # 获取G的参数\n    g_params = get_params(train_g, \"G\")\n\n    # 只训练G\n    optimizer = fluid.optimizer.Adam(learning_rate=2e-4)\n    optimizer.minimize(g_avg_cost, parameter_list=g_params)\n```\n\n这里创建一个可以生成训练噪声数据的reader函数。\n```python\n# 噪声生成\ndef z_reader():\n    while True:\n        yield np.random.uniform(-1.0, 1.0, (z_dim)).astype('float32')\n```\n\n这里定义一个保存在训练过程生成的图片，通过观察生成图片的情况，可以了解到训练的效果。\n```python\n# 保存图片\ndef show_image_grid(images):\n    for i, image in enumerate(images):\n        image = image.transpose((2, 1, 0))\n        save_image_path = 'train_image'\n        if not os.path.exists(save_image_path):\n            os.makedirs(save_image_path)\n        plt.imsave(os.path.join(save_image_path, \"test_%d.png\" % i), image)\n```\n\n这里就开始获取自定义的图片数据集，这里只需要把存放图片数据集的文件夹传进去就可以了。\n```python\n# 生成真实图片reader\nmydata_generator = paddle.batch(reader=image_reader.train_reader('datasets', image_size), batch_size=32)\n# 生成假图片的reader\nz_generator = paddle.batch(z_reader, batch_size=32)()\ntest_z = np.array(next(z_generator))\n```\n\n接着获取执行器，准备进行训练，这里笔者建议最好使用GPU，因为CPU贼慢。\n```python\n# 创建执行器，最好使用GPU，CPU速度太慢了\n# place = fluid.CPUPlace()\nplace = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\n# 初始化参数\nexe.run(startup)\n```\n\n最好就可以开始训练啦，我们可以在训练的时候输出训练的损失值。在训练每一个Pass之后又可以使用预测程序生成图片并进行保存到本地。\n```python\n# 
开始训练\nfor pass_id in range(100):\n    for i, real_image in enumerate(mydata_generator()):\n        # 训练判别器D识别真实图片\n        r_fake = exe.run(program=train_d_fake,\n                         fetch_list=[fake_avg_cost],\n                         feed={'z': test_z})\n\n        # 训练判别器D识别生成器G生成的假图片\n        r_real = exe.run(program=train_d_real,\n                         fetch_list=[real_avg_cost],\n                         feed={'image': np.array(real_image)})\n\n        # 训练生成器G生成符合判别器D标准的假图片\n        r_g = exe.run(program=train_g,\n                      fetch_list=[g_avg_cost],\n                      feed={'z': test_z})\n\n        if i % 100 == 0:\n            print(\"Pass：%d, Batch：%d, 训练判别器D识别真实图片Cost：%0.5f, \"\n                  \"训练判别器D识别生成器G生成的假图片Cost：%0.5f, \"\n                  \"训练生成器G生成符合判别器D标准的假图片Cost：%0.5f\" % (pass_id, i, r_fake[0], r_real[0], r_g[0]))\n\n    # 测试生成的图片\n    r_i = exe.run(program=infer_program,\n                  fetch_list=[fake],\n                  feed={'z': test_z})\n\n    r_i = np.array(r_i).astype(np.float32)\n    # 显示生成的图片\n    show_image_grid(r_i[0])\n```\n\n同时在每个Pass之后又可以保存预测函数，用于之后预测生成图片使用。\n```python\n   # 保存预测模型\n    save_path = 'infer_model/'\n    # 删除旧的模型文件\n    shutil.rmtree(save_path, ignore_errors=True)\n    # 创建保持模型文件目录\n    os.makedirs(save_path)\n    # 保存预测模型\n    fluid.io.save_inference_model(save_path, feeded_var_names=[z.name], target_vars=[fake], executor=exe, main_program=train_g)\n```\n\n在训练的过程可以输出每一个训练程序输出的损失值：\n```\nPass：0, Batch：0, 训练判别器D识别真实图片Cost：1.03734, 训练判别器D识别生成器G生成的假图片Cost：0.46931, 训练生成器G生成符合判别器D标准的假图片Cost：0.54236\nPass：1, Batch：0, 训练判别器D识别真实图片Cost：1.09766, 训练判别器D识别生成器G生成的假图片Cost：0.32896, 训练生成器G生成符合判别器D标准的假图片Cost：0.44473\nPass：2, Batch：0, 训练判别器D识别真实图片Cost：1.17703, 训练判别器D识别生成器G生成的假图片Cost：0.38643, 训练生成器G生成符合判别器D标准的假图片Cost：0.39445\n```\n\n# 使用模型生成图片\n在上一个文件中，我们已经训练得到一个预测模型，下面我们将使用这个预测模型直接生成图片。创建`infer.py`文件用于预测生成图片。首先导入相应的依赖包。\n```python\nimport os\nimport paddle\nimport matplotlib.pyplot as plt\nimport 
numpy as np\nimport paddle.fluid as fluid\n```\n\n然后创建执行器，这里可以使用CPU进行预测可以，因为预测并不需要太大的计算。然后加载上一步训练保存的预测模型，获取预测程序，输入层的名称，和生成器。\n```python\n# 创建执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\nexe.run(fluid.default_startup_program())\n\n# 保存预测模型路径\nsave_path = 'infer_model/'\n# 从模型中获取预测程序、输入数据名称列表、分类器\n[infer_program, feeded_var_names, target_var] = fluid.io.load_inference_model(dirname=save_path, executor=exe)\n```\n\n跟训练的时候一样，需要生成噪声数据作为输入数据。这里说明一下，输入数据`z_generator `的batch大小就是生成图片的数量。\n```python\n# 噪声维度\nz_dim = 100\n\n# 噪声生成\ndef z_reader():\n    while True:\n        yield np.random.uniform(-1.0, 1.0, (z_dim)).astype('float32')\n\nz_generator = paddle.batch(z_reader, batch_size=32)()\ntest_z = np.array(next(z_generator))\n```\n\n这里创建一个保存生成图片的函数，用于保存预测生成的图片。\n```python\n# 保存图片\ndef save_image(images):\n    for i, image in enumerate(images):\n        image = image.transpose((2, 1, 0))\n        save_image_path = 'infer_image'\n        if not os.path.exists(save_image_path):\n            os.makedirs(save_image_path)\n        plt.imsave(os.path.join(save_image_path, \"test_%d.png\" % i), image)\n```\n\n最后执行预测程序，开始生成图片。预测输出的结果就是图片的数据，通过保存这些数据就是保存图片了。\n```python\n# 测试生成的图片\nr_i = exe.run(program=infer_program,\n              feed={feeded_var_names[0]: test_z},\n              fetch_list=target_var)\n\nr_i = np.array(r_i).astype(np.float32)\n\n# 显示生成的图片\nsave_image(r_i[0])\n\nprint('生成图片完成')\n```\n\n目前这个网络在训练比较复杂的图片时，模型的拟合效果并不太好，也就是说生成的图片没有我们想象那么好。所以这个网络还需要不断调整，如果读者有更好的建议，欢迎交流一下。\n\n# 参考资料\n1. https://github.com/oraoto/learn_ml/blob/master/paddle/gan-mnist-split.ipynb\n2. https://www.cnblogs.com/max-hu/p/7129188.html\n3. https://blog.csdn.net/somtian/article/details/72126328"
  },
  {
    "path": "note13/image_reader.py",
    "content": "import os\nimport random\nfrom multiprocessing import cpu_count\nimport numpy as np\nimport paddle\nfrom PIL import Image\n\n\n# 测试图片的预处理\ndef train_mapper(sample):\n    img, crop_size = sample\n    img = Image.open(img)\n    # 随机水平翻转\n    r1 = random.random()\n    if r1 > 0.5:\n        img = img.transpose(Image.FLIP_LEFT_RIGHT)\n    # 等比例缩放和中心裁剪\n    width = img.size[0]\n    height = img.size[1]\n    if width < height:\n        ratio = width / crop_size\n        width = width / ratio\n        height = height / ratio\n        img = img.resize((int(width), int(height)), Image.ANTIALIAS)\n        height = height / 2\n        crop_size2 = crop_size / 2\n        box = (0, int(height - crop_size2), int(width), int(height + crop_size2))\n    else:\n        ratio = height / crop_size\n        height = height / ratio\n        width = width / ratio\n        img = img.resize((int(width), int(height)), Image.ANTIALIAS)\n        width = width / 2\n        crop_size2 = crop_size / 2\n        box = (int(width - crop_size2), 0, int(width + crop_size2), int(height))\n    img = img.crop(box)\n    img = img.resize((crop_size, crop_size), Image.ANTIALIAS)\n\n    # 把单通道图变成3通道\n    if len(img.getbands()) == 1:\n        img1 = img2 = img3 = img\n        img = Image.merge('RGB', (img1, img2, img3))\n\n    # 转换成numpy值\n    img = np.array(img).astype(np.float32)\n    # 转换成CHW\n    img = img.transpose((2, 0, 1))\n    # 转换成BGR\n    img = img[(2, 1, 0), :, :] / 255.0\n    return img\n\n\n# 测试的图片reader\ndef train_reader(train_image_path, crop_size):\n    pathss = []\n    for root, dirs, files in os.walk(train_image_path):\n        path = [os.path.join(root, name) for name in files]\n        pathss.extend(path)\n\n    def reader():\n        for line in pathss:\n            yield line, crop_size\n\n    return paddle.reader.xmap_readers(train_mapper, reader, cpu_count(), 1024)\n"
  },
  {
    "path": "note13/infer.py",
    "content": "import os\nimport paddle\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport paddle.fluid as fluid\n\n# 创建执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\nexe.run(fluid.default_startup_program())\n\n# 保存预测模型路径\nsave_path = 'infer_model/'\n# 从模型中获取预测程序、输入数据名称列表、分类器\n[infer_program, feeded_var_names, target_var] = fluid.io.load_inference_model(dirname=save_path, executor=exe)\n\n# 噪声维度\nz_dim = 100\n\n\n# 噪声生成\ndef z_reader():\n    while True:\n        yield np.random.uniform(-1.0, 1.0, (z_dim)).astype('float32')\n\n\nz_generator = paddle.batch(z_reader, batch_size=32)()\ntest_z = np.array(next(z_generator))\n\n\n# 保存图片\ndef save_image(images):\n    for i, image in enumerate(images):\n        image = image.transpose((2, 1, 0))\n        save_image_path = 'infer_image'\n        if not os.path.exists(save_image_path):\n            os.makedirs(save_image_path)\n        plt.imsave(os.path.join(save_image_path, \"test_%d.png\" % i), image)\n\n\n# 测试生成的图片\nr_i = exe.run(program=infer_program,\n              feed={feeded_var_names[0]: test_z},\n              fetch_list=target_var)\n\nr_i = np.array(r_i).astype(np.float32)\n\n# 显示生成的图片\nsave_image(r_i[0])\n\nprint('生成图片完成')\n"
  },
  {
    "path": "note13/train.py",
    "content": "import os\nimport shutil\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nimport matplotlib.pyplot as plt\nimport image_reader\n\n# 训练的图片大小\nimage_size = 112\n\n# 定义生成器\ndef Generator(y, name=\"G\"):\n    def deconv(x, num_filters, filter_size=5, stride=2, dilation=1, padding=2, output_size=None, act=None):\n        return fluid.layers.conv2d_transpose(input=x,\n                                             num_filters=num_filters,\n                                             output_size=output_size,\n                                             filter_size=filter_size,\n                                             stride=stride,\n                                             dilation=dilation,\n                                             padding=padding,\n                                             act=act)\n\n    with fluid.unique_name.guard(name + \"/\"):\n        # 第一组全连接和BN层\n        y = fluid.layers.fc(y, size=2048)\n        y = fluid.layers.batch_norm(y)\n        # 第二组全连接和BN层\n        y = fluid.layers.fc(y, size=int(128 * (image_size / 4) * (image_size / 4)))\n        y = fluid.layers.batch_norm(y)\n        # 进行形状变换\n        y = fluid.layers.reshape(y, shape=[-1, 128, int((image_size / 4)), int((image_size / 4))])\n        # 第一组转置卷积运算\n        y = deconv(x=y, num_filters=128, act='relu', output_size=[int((image_size / 2)), int((image_size / 2))])\n        # 第二组转置卷积运算\n        y = deconv(x=y, num_filters=3, act='sigmoid', output_size=[image_size, image_size])\n    return y\n\n\n# 判别器 Discriminator\ndef Discriminator(images, name=\"D\"):\n    # 定义一个卷积池化组\n    def conv_pool(input, num_filters, act=None):\n        return fluid.nets.simple_img_conv_pool(input=input,\n                                               filter_size=3,\n                                               num_filters=num_filters,\n                                               pool_size=2,\n                                               pool_stride=2,\n        
                                       act=act)\n\n    with fluid.unique_name.guard(name + \"/\"):\n        y = fluid.layers.reshape(x=images, shape=[-1, 3, image_size, image_size])\n        # 第一个卷积池化组\n        y = conv_pool(input=y, num_filters=64, act='leaky_relu')\n        # 第一个卷积池化加回归层\n        y = conv_pool(input=y, num_filters=128)\n        y = fluid.layers.batch_norm(input=y, act='leaky_relu')\n        # 第二个卷积池化加回归层\n        y = fluid.layers.fc(input=y, size=1024)\n        y = fluid.layers.batch_norm(input=y, act='leaky_relu')\n        # 最后一个分类器输出\n        y = fluid.layers.fc(input=y, size=1, act='sigmoid')\n    return y\n\n\n# 创建判别器D识别生成器G生成的假图片程序\ntrain_d_fake = fluid.Program()\n# 创建判别器D识别真实图片程序\ntrain_d_real = fluid.Program()\n# 创建生成器G生成符合判别器D的程序\ntrain_g = fluid.Program()\n\n# 创建共同的一个初始化的程序\nstartup = fluid.Program()\n\n# 噪声维度\nz_dim = 100\n\n\n# 从Program获取prefix开头的参数名字\ndef get_params(program, prefix):\n    all_params = program.global_block().all_parameters()\n    return [t.name for t in all_params if t.name.startswith(prefix)]\n\n\n# 训练判别器D识别真实图片\nwith fluid.program_guard(train_d_real, startup):\n    # 创建读取真实数据集图片的data，并且label为1\n    real_image = fluid.layers.data('image', shape=[3, image_size, image_size])\n    ones = fluid.layers.fill_constant_batch_size_like(real_image, shape=[-1, 1], dtype='float32', value=1)\n\n    # 判别器D判断真实图片的概率\n    p_real = Discriminator(real_image)\n    # 获取损失函数\n    real_cost = fluid.layers.sigmoid_cross_entropy_with_logits(p_real, ones)\n    real_avg_cost = fluid.layers.mean(real_cost)\n\n    # 获取判别器D的参数\n    d_params = get_params(train_d_real, \"D\")\n\n    # 创建优化方法\n    optimizer = fluid.optimizer.Adam(learning_rate=2e-4)\n    optimizer.minimize(real_avg_cost, parameter_list=d_params)\n\n# 训练判别器D识别生成器G生成的图片为假图片\nwith fluid.program_guard(train_d_fake, startup):\n    # 利用创建假的图片data，并且label为0\n    z = fluid.layers.data(name='z', shape=[z_dim])\n    zeros = fluid.layers.fill_constant_batch_size_like(z, shape=[-1, 1], 
dtype='float32', value=0)\n\n    # 判别器D判断假图片的概率\n    p_fake = Discriminator(Generator(z))\n\n    # 获取损失函数\n    fake_cost = fluid.layers.sigmoid_cross_entropy_with_logits(p_fake, zeros)\n    fake_avg_cost = fluid.layers.mean(fake_cost)\n\n    # 获取判别器D的参数\n    d_params = get_params(train_d_fake, \"D\")\n\n    # 创建优化方法\n    optimizer = fluid.optimizer.Adam(learning_rate=2e-4)\n    optimizer.minimize(fake_avg_cost, parameter_list=d_params)\n\n# 训练生成器G生成符合判别器D标准的假图片\nfake = None\nwith fluid.program_guard(train_g, startup):\n    # 噪声生成图片为真实图片的概率，Label为1\n    z = fluid.layers.data(name='z', shape=[z_dim])\n    ones = fluid.layers.fill_constant_batch_size_like(z, shape=[-1, 1], dtype='float32', value=1)\n\n    # 生成图片\n    fake = Generator(z)\n    # 克隆预测程序\n    infer_program = train_g.clone(for_test=True)\n\n    # 生成符合判别器的假图片\n    p = Discriminator(fake)\n\n    # 获取损失函数\n    g_cost = fluid.layers.sigmoid_cross_entropy_with_logits(p, ones)\n    g_avg_cost = fluid.layers.mean(g_cost)\n\n    # 获取G的参数\n    g_params = get_params(train_g, \"G\")\n\n    # 只训练G\n    optimizer = fluid.optimizer.Adam(learning_rate=2e-4)\n    optimizer.minimize(g_avg_cost, parameter_list=g_params)\n\n\n# 噪声生成\ndef z_reader():\n    while True:\n        yield np.random.uniform(-1.0, 1.0, (z_dim)).astype('float32')\n\n\n# 读取cifar数据集，不使用label\ndef cifar_reader(reader):\n    def r():\n        for img, label in reader():\n            yield img.reshape(3, 32, 32)\n\n    return r\n\n\n# 保存图片\ndef show_image_grid(images):\n    for i, image in enumerate(images):\n        image = image.transpose((2, 1, 0))\n        save_image_path = 'train_image'\n        if not os.path.exists(save_image_path):\n            os.makedirs(save_image_path)\n        plt.imsave(os.path.join(save_image_path, \"test_%d.png\" % i), image)\n\n\n# 生成真实图片reader\nmydata_generator = paddle.batch(reader=image_reader.train_reader('datasets', image_size), batch_size=32)\n# 使用CIFAR数据集\n# mydata_generator = 
paddle.batch(reader=cifar_reader(paddle.dataset.cifar.train10()), batch_size=128)\n# 生成假图片的reader\nz_generator = paddle.batch(z_reader, batch_size=32)()\n# 测试噪声\ntest_z = np.array(next(z_generator))\n\n# 创建执行器，最好使用GPU，CPU速度太慢了\n# place = fluid.CPUPlace()\nplace = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\n# 初始化参数\nexe.run(startup)\n\n# 开始训练\nfor pass_id in range(100):\n    for i, real_image in enumerate(mydata_generator()):\n        # 训练判别器D识别真实图片\n        r_fake = exe.run(program=train_d_fake,\n                         fetch_list=[fake_avg_cost],\n                         feed={'z': test_z})\n\n        # 训练判别器D识别生成器G生成的假图片\n        r_real = exe.run(program=train_d_real,\n                         fetch_list=[real_avg_cost],\n                         feed={'image': np.array(real_image)})\n\n        # 训练生成器G生成符合判别器D标准的假图片\n        r_g = exe.run(program=train_g,\n                      fetch_list=[g_avg_cost],\n                      feed={'z': test_z})\n\n        if i % 100 == 0:\n            print(\"Pass：%d, Batch：%d, 训练判别器D识别真实图片Cost：%0.5f, \"\n                  \"训练判别器D识别生成器G生成的假图片Cost：%0.5f, \"\n                  \"训练生成器G生成符合判别器D标准的假图片Cost：%0.5f\" % (pass_id, i, r_fake[0], r_real[0], r_g[0]))\n\n    # 测试生成的图片\n    r_i = exe.run(program=infer_program,\n                  fetch_list=[fake],\n                  feed={'z': test_z})\n\n    r_i = np.array(r_i).astype(np.float32)\n    # 显示生成的图片\n    show_image_grid(r_i[0])\n\n    # 保存预测模型\n    save_path = 'infer_model/'\n    # 删除旧的模型文件\n    shutil.rmtree(save_path, ignore_errors=True)\n    # 创建保持模型文件目录\n    os.makedirs(save_path)\n    # 保存预测模型\n    fluid.io.save_inference_model(save_path, feeded_var_names=[z.name], target_vars=[fake], executor=exe, main_program=train_g)\n"
  },
  {
    "path": "note14/README.md",
    "content": "﻿暂时这样凑合着看，之后有时间再补充文字说明。[微笑]\n\n@[TOC]\n\n# 前言\n如果读者使用过百度等的一些图像识别的接口，比如百度的细粒度图像识别接口，应该了解这个过程，省略其他的安全方面的考虑。这个接口大体的流程是，我们把图像上传到百度的网站上，然后服务器把这些图像转换成功矢量数据，最后就是拿这些数据传给深度学习的预测接口，比如是PaddlePaddle的预测接口，获取到预测结果，返回给客户端。这个只是简单的流程，真实的复杂性远远不止这些，但是我们只需要了解这些，然后去搭建属于我们的图像识别接口。\n\n# 了解Flask\n安装flask很简单，只要一条命令就可以了：\n```\npip install flask\n```\n同时我们也使用到flask_cors，所以我们也要安装这个库\n```\npip install flask_cors\n```\n\n创建一个`paddle_server.py`文件，然后编写一个简单的程序，了解一些如何使用这个Flask框架，首先导入所需的依赖库：\n```python\nimport os\nimport uuid\nimport numpy as np\nimport paddle.fluid as fluid\nfrom PIL import Image\nfrom flask import Flask, request\nfrom flask_cors import CORS\nfrom werkzeug.utils import secure_filename\n```\n\n编写一个`hello_world()`函数，使用`@app.route('/')`是指定访问的路径，该函数的返回值是一个字符串`Welcome to PaddlePaddle`：\n```python\n# 根路径，返回一个字符串\n@app.route('/')\ndef hello_world():\n    return 'Welcome to PaddlePaddle'\n```\n\n然后启动这个服务，如果是在Ubuntu的话，可能是需要在root下执行这个程序。\n```python\nif __name__ == '__main__':\n    # 启动服务，并指定端口号\n    app.run(port=80)\n```\n\n然后浏览器访问`http://127.0.0.1`，返回之前写好的字符串：\n```\nWelcome to PaddlePaddle\n```\n\n要预测图片，上传图片是首要的，所以我们来学习如何使用Flask来上传图片。\n\n - `secure_filename`是为了能够正常获取到上传文件的文件名\n - `/upload`指定该函数的访问地址\n - `methods=['POST']`指定该路径只能使用POST方法访问\n - `f = request.files['img']`读取表单名称为img的文件\n - `f.save(img_path)`在指定路径保存该文件\n\n```python\n# 上传文件\n@app.route('/upload', methods=['POST'])\ndef upload_file():\n    f = request.files['img']\n    # 设置保存路径\n    save_father_path = 'images'\n    img_path = os.path.join(save_father_path, str(uuid.uuid1()) + secure_filename(f.filename).split('.')[-1])\n    if not os.path.exists(save_father_path):\n        os.makedirs(save_father_path)\n    f.save(img_path)\n    return 'success, save path: ' + img_path\n```\n\n然后再次启动服务\n```python\nif __name__ == '__main__':\n    # 启动服务，并指定端口号\n    app.run(port=80)\n```\n\n然后再创建`index.html`文件，编写一个表单，指定表单提交的路径`http://127.0.0.1/upload`，并设置表单提交数据的格式`multipart/form-data`，而且支持表单提交方式是POST。\n```xml\n<!DOCTYPE html>\n<html 
lang=\"en\">\n<head>\n    <meta charset=\"UTF-8\">\n    <title>预测图像</title>\n</head>\n<body>\n<!--上传图片的表单-->\n<form action=\"http://127.0.0.1/upload\" enctype=\"multipart/form-data\" method=\"post\">\n    选择上传的图像：<input type=\"file\" name=\"img\"><br>\n    <input type=\"submit\" value=\"上传\">\n</form>\n</body>\n</html>\n```\n\n\n\n# 预测服务\n在`paddle_server.py`中添加：\n```python\n# 预处理图片\ndef load_image(file):\n    img = Image.open(file)\n    # 统一图像大小\n    img = img.resize((224, 224), Image.ANTIALIAS)\n    # 转换成numpy值\n    img = np.array(img).astype(np.float32)\n    # 转换成CHW\n    img = img.transpose((2, 0, 1))\n    # 转换成BGR\n    img = img[(2, 1, 0), :, :] / 255.0\n    img = np.expand_dims(img, axis=0)\n    return img\n```\n\n```python\n# 创建执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\nexe.run(fluid.default_startup_program())\n\n# 保存预测模型路径\nsave_path = 'infer_model/'\n# 从模型中获取预测程序、输入数据名称列表、分类器\n[infer_program, feeded_var_names, target_var] = fluid.io.load_inference_model(dirname=save_path, executor=exe)\n```\n\n```python\n@app.route('/infer', methods=['POST'])\ndef infer():\n    f = request.files['img']\n\n    # 保存图片\n    save_father_path = 'images'\n    img_path = os.path.join(save_father_path, str(uuid.uuid1()) + '.' 
+ secure_filename(f.filename).split('.')[-1])\n    if not os.path.exists(save_father_path):\n        os.makedirs(save_father_path)\n    f.save(img_path)\n\n    # 开始预测图片\n    img = load_image(img_path)\n    result = exe.run(program=infer_program,\n                     feed={feeded_var_names[0]: img},\n                     fetch_list=target_var)\n\n    # 显示图片并输出结果最大的label\n    lab = np.argsort(result)[0][0][-1]\n\n    names = ['苹果', '哈密瓜', '胡萝卜', '樱桃', '黄瓜', '西瓜']\n\n    # 打印和返回预测结果\n    r = '{\"label\":%d, \"name\":\"%s\", \"possibility\":%f}' % (lab, names[lab], result[0][0][lab])\n    print(r)\n    return r\n```\n\n```python\nif __name__ == '__main__':\n    # 启动服务，并指定端口号\n    app.run(port=80)\n```\n\n在`index.html`文件增加一个表单：\n```xml\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n    <meta charset=\"UTF-8\">\n    <title>预测图像</title>\n</head>\n<body>\n<!--调用服务器预测接口的表单-->\n<form action=\"http://127.0.0.1/infer\" enctype=\"multipart/form-data\" method=\"post\">\n    选择预测的图像：<input type=\"file\" name=\"img\"><br>\n    <input type=\"submit\" value=\"预测\">\n</form>\n</body>\n</html>\n```\n\n\n\n\n"
  },
  {
    "path": "note14/index.html",
    "content": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n    <meta charset=\"UTF-8\">\n    <title>预测图像</title>\n</head>\n<body>\n<!--上传图片的表单-->\n<form action=\"http://127.0.0.1/upload\" enctype=\"multipart/form-data\" method=\"post\">\n    选择上传的图像：<input type=\"file\" name=\"img\"><br>\n    <input type=\"submit\" value=\"上传\">\n</form>\n\n<br><br>\n\n<!--调用服务器预测接口的表单-->\n<form action=\"http://127.0.0.1/infer\" enctype=\"multipart/form-data\" method=\"post\">\n    选择预测的图像：<input type=\"file\" name=\"img\"><br>\n    <input type=\"submit\" value=\"预测\">\n</form>\n</body>\n</html>"
  },
  {
    "path": "note14/paddle_server.py",
    "content": "import os\nimport uuid\nimport numpy as np\nimport paddle.fluid as fluid\nfrom PIL import Image\nfrom flask import Flask, request\nfrom flask_cors import CORS\nfrom werkzeug.utils import secure_filename\n\napp = Flask(__name__)\n# 允许跨越访问\nCORS(app)\n\n\n# 根路径，返回一个字符串\n@app.route('/')\ndef hello_world():\n    return 'Welcome to PaddlePaddle'\n\n\n# 上传文件\n@app.route('/upload', methods=['POST'])\ndef upload_file():\n    f = request.files['img']\n    # 设置保存路径\n    save_father_path = 'images'\n    img_path = os.path.join(save_father_path, str(uuid.uuid1()) + secure_filename(f.filename).split('.')[-1])\n    if not os.path.exists(save_father_path):\n        os.makedirs(save_father_path)\n    f.save(img_path)\n    return 'success, save path: ' + img_path\n\n\n# 预处理图片\ndef load_image(file):\n    img = Image.open(file)\n    # 统一图像大小\n    img = img.resize((224, 224), Image.ANTIALIAS)\n    # 转换成numpy值\n    img = np.array(img).astype(np.float32)\n    # 转换成CHW\n    img = img.transpose((2, 0, 1))\n    # 转换成BGR\n    img = img[(2, 1, 0), :, :] / 255.0\n    img = np.expand_dims(img, axis=0)\n    return img\n\n\n# 创建执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\nexe.run(fluid.default_startup_program())\n\n# 保存预测模型路径\nsave_path = 'infer_model/'\n# 从模型中获取预测程序、输入数据名称列表、分类器\n[infer_program, feeded_var_names, target_var] = fluid.io.load_inference_model(dirname=save_path, executor=exe)\n\n\n@app.route('/infer', methods=['POST'])\ndef infer():\n    f = request.files['img']\n\n    # 保存图片\n    save_father_path = 'images'\n    img_path = os.path.join(save_father_path, str(uuid.uuid1()) + '.' 
+ secure_filename(f.filename).split('.')[-1])\n    if not os.path.exists(save_father_path):\n        os.makedirs(save_father_path)\n    f.save(img_path)\n\n    # 开始预测图片\n    img = load_image(img_path)\n    result = exe.run(program=infer_program,\n                     feed={feeded_var_names[0]: img},\n                     fetch_list=target_var)\n\n    # 显示图片并输出结果最大的label\n    lab = np.argsort(result)[0][0][-1]\n\n    names = ['苹果', '哈密瓜', '胡萝卜', '樱桃', '黄瓜', '西瓜']\n\n    # 打印和返回预测结果\n    r = '{\"label\":%d, \"name\":\"%s\", \"possibility\":%f}' % (lab, names[lab], result[0][0][lab])\n    print(r)\n    return r\n\n\nif __name__ == '__main__':\n    # 启动服务，并指定端口号\n    app.run(port=80)\n"
  },
  {
    "path": "note15/.gitignore",
    "content": "*.iml\n.gradle\n/local.properties\n/.idea/libraries\n/.idea/modules.xml\n/.idea/workspace.xml\n.DS_Store\n/build\n/captures\n.externalNativeBuild\n/app/src/androidTest\n/app/src/test\n"
  },
  {
    "path": "note15/README.md",
    "content": "﻿\n# 目录\n@[toc]\n# 前言\n现在越来越多的手机要使用到深度学习了，比如一些图像分类，目标检测，风格迁移等等，之前都是把数据提交给服务器完成的。但是提交给服务器有几点不好，首先是速度问题，图片上传到服务器需要时间，客户端接收结果也需要时间，这一来回就占用了一大半的时间，会使得整体的预测速度都变慢了，再且现在手机的性能不断提高，足以做深度学习的预测。其二是隐私问题，如果只是在本地预测，那么用户根本就不用上传图片，安全性也大大提高了。所以本章我们就来学如何包我们训练的PaddlePaddle预测模型部署到Android手机上。\n\n\n# 编译paddle-mobile库\n想要把PaddlePaddle训练好的预测库部署到Android手机上，还需要借助paddle-mobile框架。paddle-mobile框架主要是为了方便PaddlePaddle训练好的模型部署到移动设备上，比如Android手机，苹果手机，树莓派等等这些移动设备，有了paddle-mobile框架大大方便了把PaddlePaddle的预测库部署到移动设备上，而且paddle-mobile框架针对移动设备做了大量的优化，使用这些预测库在移动设备上有了更好的预测性能。\n\n想要在Android手机上使用paddle-mobile，就要编译Android能够使用的CPP库，在这一部分中，我们介绍两种编译Android的paddle-mobile库，分别是使用Docker编译paddle-mobile库、使用Ubuntu交叉编译paddle-mobile库。\n\n## 使用Docker编译\n\n为了方便操作，以下的操作都是在root用户的执行的：\n\n1、安装Docker，以下是在Ubuntu下安装的的方式，只要一条命令就可以了：\n```\napt-get install docker.io\n```\n\n2、克隆paddle-mobile源码：\n```\ngit clone https://github.com/PaddlePaddle/paddle-mobile.git\n```\n\n3、进入到paddle-mobile根目录下编译docker镜像：\n```\ncd paddle-mobile\n# 编译生成进行，编译时间可能要很长\ndocker build -t paddle-mobile:dev - < Dockerfile\n```\n\n编译完成可以使用`docker images`命令查看是否已经生成进行：\n```\nroot@test:/home/test# docker images\nREPOSITORY                          TAG                 IMAGE ID            CREATED             SIZE\npaddle-mobile                       dev                 fffbd8779c68        20 hours ago        3.76 GB\n```\n\n4、运行镜像并进入到容器里面，当前目录还是在paddle-mobile根目录下：\n```\ndocker run -it -v $PWD:/paddle-mobile paddle-mobile:dev\n```\n\n5、在容器里面执行以下两条命令：\n```\nroot@fc6f7e9ebdf1:/# cd paddle-mobile/\nroot@fc6f7e9ebdf1:/paddle-mobile# cmake -DCMAKE_TOOLCHAIN_FILE=tools/toolchains/arm-android-neon.cmake\n```\n\n6、（可选）可以使用命令`ccmake .`配置一些信息，比如可以设置`NET`仅支持`googlenet`，这样便于得到的paddle-mobile库会更小一些，修改完成之后，使用`c`命令保存，使用`g`退出。笔者一般跳过这个步骤。\n```\n                                                    Page 1 of 1\n CMAKE_ASM_FLAGS                                                                                                                                                           
                     \n CMAKE_ASM_FLAGS_DEBUG                                                                                                                                                                          \n CMAKE_ASM_FLAGS_RELEASE                                                                                                                                                                        \n CMAKE_BUILD_TYPE                                                                                                                                                                               \n CMAKE_INSTALL_PREFIX             /usr/local                                                                                                                                                    \n CMAKE_TOOLCHAIN_FILE             /paddle-mobile/tools/toolchains/arm-android-neon.cmake                                                                                                        \n CPU                              ON                                                                                                                                                            \n DEBUGING                         ON                                                                                                                                                            \n FPGA                             OFF                                                                                                                                                           \n LOG_PROFILE                      ON                                                                                                                                                            \n MALI_GPU                         OFF                                                                                                                                                           \n NET                              
defult                                                                                                                                                        \n USE_EXCEPTION                    ON                                                                                                                                                            \n USE_OPENMP                       ON                                                       \n```\n\n7、最后执行一下`make`就可以了，到这一步就完成了paddle-mobile的编译。\n```\nroot@fc6f7e9ebdf1:/paddle-mobile# make\n```\n\n8、使用`exit`命令退出容器，回到Ubuntu本地上。\n```\nroot@fc6f7e9ebdf1:/paddle-mobile# exit\n```\n\n9、在paddle-mobile根目录下，有一个build目录，我们编译好的paddle-mobile库就在这里。\n```\nroot@test:/home/test/paddle-mobile/build# ls\nlibpaddle-mobile.so\n```\n`libpaddle-mobile.so`就是我们在开发Android项目的时候使用到的paddle-mobile库。\n\n\n## 使用Ubuntu编译\n\n1、首先要下载和解压NDK。\n```\nwget https://dl.google.com/android/repository/android-ndk-r17b-linux-x86_64.zip\nunzip android-ndk-r17b-linux-x86_64.zip\n```\n\n2、设置NDK环境变量，目录是NDK的解压目录。\n```\nexport NDK_ROOT=\"/home/test/paddlepaddle/android-ndk-r17b\"\n```\n\n设置好之后，可以使用以下的命令查看配置情况。\n```\nroot@test:/home/test/paddlepaddle# echo $NDK_ROOT\n/home/test/paddlepaddle/android-ndk-r17b\n```\n\n3、安装cmake，需要安装较高版本的，笔者的cmake版本是3.11.2。\n\n - 下载cmake源码\n```\nwget https://cmake.org/files/v3.11/cmake-3.11.2.tar.gz\n```\n\n - 解压cmake源码\n```\ntar -zxvf cmake-3.11.2.tar.gz\n```\n\n - 进入到cmake源码根目录，并执行`bootstrap`。\n```\ncd cmake-3.11.2\n./bootstrap\n```\n\n - 最后执行以下两条命令开始安装cmake。\n```\nmake\nmake install\n```\n\n - 安装完成之后，可以使用`cmake --version`是否安装成功.\n```\nroot@test:/home/test/paddlepaddle# cmake --version\ncmake version 3.11.2\n\nCMake suite maintained and supported by Kitware (kitware.com/cmake).\n```\n\n4、克隆paddle-mobile源码。\n```\ngit clone https://github.com/PaddlePaddle/paddle-mobile.git\n```\n\n5、进入到paddle-mobile的tools目录下，执行编译。\n```\ncd paddle-mobile/tools/\nsh build.sh android\n```\n\n（可选）如果想编译针对某一个网络编译更小的库时，可以在命令后面加上相应的参数，如下：\n```\nsh build.sh 
android mobilenet\n```\n\n6、最后会在`paddle-mobile/build/release/arm-v7a/build`目录下生产paddle-mobile库。\n```\nroot@test:/home/test/paddlepaddle/paddle-mobile/build/release/arm-v7a/build# ls\nlibpaddle-mobile.so\n```\n`libpaddle-mobile.so`就是我们在开发Android项目的时候使用到的paddle-mobile库。\n\n\n# 创建Android项目\n\n首先使用Android Studio创建一个普通的Android项目，我们可以不用选择CPP的支持，因为我们已经编译好了CPP。之后按照以下的步骤开始执行：\n\n1、在`main`目录下创建两个`assets/infer_model`文件夹，这个文件夹我们将会使用它来存放PaddlePaddle训练好的预测模型，本章我们使用的预测模型是[《PaddlePaddle从入门到炼丹》十一——自定义图像数据集识别](https://blog.csdn.net/qq_33200967/article/details/87895105)训练得到的预测模型，我们训练好的模型复制到这个文件夹下。\n\n2、在`main`目录下创建一个`jniLibs`文件夹，这个文件夹是存放CPP编译库的，就是**编译paddle-mobile库**部分编译的`libpaddle-mobile.so`\n\n3、在Android项目的配置文件夹中加上权限声明，因为我们要使用到读取相册和使用相机，所以加上以下的权限声明：\n```xml\n<uses-permission android:name=\"android.permission.WRITE_EXTERNAL_STORAGE\" />\n<uses-permission android:name=\"android.permission.READ_EXTERNAL_STORAGE\" />\n```\n\n4、修改`activity_main.xml`界面，修改成如下：\n```xml\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<RelativeLayout xmlns:android=\"http://schemas.android.com/apk/res/android\"\n    xmlns:app=\"http://schemas.android.com/apk/res-auto\"\n    xmlns:tools=\"http://schemas.android.com/tools\"\n    android:layout_width=\"match_parent\"\n    android:layout_height=\"match_parent\"\n    tools:context=\".MainActivity\">\n\n    <LinearLayout\n        android:id=\"@+id/ll\"\n        android:orientation=\"horizontal\"\n        android:layout_alignParentBottom=\"true\"\n        android:layout_width=\"match_parent\"\n        android:layout_height=\"50dp\">\n\n        <Button\n            android:layout_weight=\"1\"\n            android:id=\"@+id/load\"\n            android:text=\"加载模型\"\n            android:layout_width=\"0dp\"\n            android:layout_height=\"match_parent\" />\n\n        <Button\n            android:id=\"@+id/clear\"\n            android:layout_weight=\"1\"\n            android:text=\"清空模型\"\n            android:layout_width=\"0dp\"\n            
android:layout_height=\"match_parent\" />\n\n        <Button\n            android:id=\"@+id/infer\"\n            android:layout_weight=\"1\"\n            android:text=\"预测图片\"\n            android:layout_width=\"0dp\"\n            android:layout_height=\"match_parent\" />\n    </LinearLayout>\n\n    <TextView\n        android:layout_above=\"@id/ll\"\n        android:id=\"@+id/show\"\n        android:hint=\"这里显示预测结果\"\n        android:layout_width=\"match_parent\"\n        android:layout_height=\"100dp\" />\n\n    <ImageView\n        android:id=\"@+id/image_view\"\n        android:layout_above=\"@id/show\"\n        android:layout_width=\"match_parent\"\n        android:layout_height=\"match_parent\" />\n</RelativeLayout>\n```\n\n5、创建一个`com.baidu.paddle`包，在这个包下创建的Java程序，这个Java程序就是用于调用paddle-mobile的CPP动态库的。它提供了多种方法给我们使用，我们主要使用到加载模型的方法`load(String modelDir)`，清空已加载的方法`clear()`，还有最最重要的预测方法`predictImage(float[] buf, int[]ddims)`。\n```java\npackage com.baidu.paddle;\n\npublic class PML {\n    // set thread num\n    public static native void setThread(int threadCount);\n\n    //Load seperated parameters\n    public static native boolean load(String modelDir);\n\n    // load qualified model\n    public static native boolean loadQualified(String modelDir);\n\n    // Load combined parameters\n    public static native boolean loadCombined(String modelPath, String paramPath);\n\n    // load qualified model\n    public static native boolean loadCombinedQualified(String modelPath, String paramPath);\n\n    // object detection\n    public static native float[] predictImage(float[] buf, int[]ddims);\n\n    // predict yuv image\n    public static native float[] predictYuv(byte[] buf, int imgWidth, int imgHeight, int[] ddims, float[]meanValues);\n\n    // clear model\n    public static native void 
clear();\n}\n```\n\n6、然后在项目的主要包下创建一个`Utils.java`的工具类。这个工具类主要编写一些图像的处理方法，和一些模型复制方法等，我们下面将一一介绍这些方法。\n\n该方法是用于获取预测结果中概率最大的标签，参数是执行预测的结果，这个结果是对应没有类别的概率，这个方法就判断哪个类别的概率最大，然后就返回概率最大的标签。\n```java\n// 获取预测值中最大概率的标签\npublic static int getMaxResult(float[] result) {\n    float probability = result[0];\n    int r = 0;\n    for (int i = 0; i < result.length; i++) {\n        if (probability < result[i]) {\n            probability = result[i];\n            r = i;\n        }\n    }\n    return r;\n}\n```\n\n该方法是把图片转换成预测需要用的数据格式浮点数组。在转换的过程中也对图像做了预处理，这个预处理需要跟训练的预处理的方式一样，否则无法正确预测。还有指定了处理后图片的大小，根据参数输入的宽度和高度，把图片压缩到这些自定的大小。还有把图片的通道顺序改为RGB，同时每个像素除以255，这个操作跟训练的时候一样。\n```java\n// 对将要预测的图片进行预处理\npublic static float[] getScaledMatrix(Bitmap bitmap, int desWidth, int desHeight) {\n    float[] dataBuf = new float[3 * desWidth * desHeight];\n    int rIndex;\n    int gIndex;\n    int bIndex;\n    int[] pixels = new int[desWidth * desHeight];\n    Bitmap bm = Bitmap.createScaledBitmap(bitmap, desWidth, desHeight, false);\n    bm.getPixels(pixels, 0, desWidth, 0, 0, desWidth, desHeight);\n    int j = 0;\n    int k = 0;\n    for (int i = 0; i < pixels.length; i++) {\n        int clr = pixels[i];\n        j = i / desHeight;\n        k = i % desWidth;\n        rIndex = j * desWidth + k;\n        gIndex = rIndex + desHeight * desWidth;\n        bIndex = gIndex + desHeight * desWidth;\n        // 转成RGB通道顺序，并除以255，跟训练的预处理一样\n        dataBuf[rIndex] = (float) (((clr & 0x00ff0000) >> 16) / 255.0);\n        dataBuf[gIndex] = (float) (((clr & 0x0000ff00) >> 8) / 255.0);\n        dataBuf[bIndex] = (float) (((clr & 0x000000ff)) / 255.0);\n\n    }\n    if (bm.isRecycled()) {\n        bm.recycle();\n    }\n    return dataBuf;\n}\n```\n\n该方法是对图片进行压缩，避免图片过大，超过内存支出。把图片的最大长度压缩到500以内。\n```java\n// 压缩图片，避免图片过大\npublic static Bitmap getScaleBitmap(String filePath) {\n    BitmapFactory.Options opt = new BitmapFactory.Options();\n    opt.inJustDecodeBounds = true;\n    BitmapFactory.decodeFile(filePath, opt);\n\n    int 
bmpWidth = opt.outWidth;\n    int bmpHeight = opt.outHeight;\n\n    int maxSize = 500;\n\n    // compress picture with inSampleSize\n    opt.inSampleSize = 1;\n    while (true) {\n        if (bmpWidth / opt.inSampleSize < maxSize || bmpHeight / opt.inSampleSize < maxSize) {\n            break;\n        }\n        opt.inSampleSize *= 2;\n    }\n    opt.inJustDecodeBounds = false;\n    return BitmapFactory.decodeFile(filePath, opt);\n}\n```\n\n该方法是根据相册返回的URI转换为图片的绝对路径，用于之后使用这个路径获取图片内容。\n```java\n// 根据相册返回的URI返回图片的绝对路径\npublic static String getPathFromURI(Context context, Uri uri) {\n    String result;\n    Cursor cursor = context.getContentResolver().query(uri, null, null, null, null);\n    if (cursor == null) {\n        result = uri.getPath();\n    } else {\n        cursor.moveToFirst();\n        int idx = cursor.getColumnIndex(MediaStore.Images.ImageColumns.DATA);\n        result = cursor.getString(idx);\n        cursor.close();\n    }\n    return result;\n}\n```\n\n该方法是把`assets`资源文件下的预测文件复制到缓存目录，用于之后加载模型文件。\n```java\n// 复制莫模型文件到缓存目录\npublic static void copyFileFromAsset(Context context, String oldPath, String newPath) {\n    try {\n        // 预测模型文件在assets中的位置\n        String[] fileNames = context.getAssets().list(oldPath);\n        if (fileNames.length > 0) {\n            // directory\n            File file = new File(newPath);\n            if (!file.exists()) {\n                file.mkdirs();\n            }\n            // copy recursivelyC\n            for (String fileName : fileNames) {\n                copyFileFromAsset(context, oldPath + \"/\" + fileName, newPath + \"/\" + fileName);\n            }\n        } else {\n            // file\n            File file = new File(newPath);\n            // if file exists will never copy\n            if (file.exists()) {\n                return;\n            }\n\n            // copy file to new path\n            InputStream is = context.getAssets().open(oldPath);\n            FileOutputStream fos = new 
FileOutputStream(file);\n            byte[] buffer = new byte[1024];\n            int byteCount;\n            while ((byteCount = is.read(buffer)) != -1) {\n                fos.write(buffer, 0, byteCount);\n            }\n            fos.flush();\n            is.close();\n            fos.close();\n        }\n    } catch (Exception e) {\n        e.printStackTrace();\n    }\n}\n```\n\n\n7、最后修改`MainActivity.java`，修改如下：\n\n这里做一些初始化操作，如加载PaddleMobile的动态库，指定图片的形状。\n```java\n    private String model_path;\n    // 模型文件夹\n    private String assets_path = \"infer_model\";\n    private boolean load_result = false;\n    // 输入图片的形状，分别是：batch size、通道数、宽度、高度\n    private int[] ddims = {1, 3, 224, 224};\n    private ImageView imageView;\n    private TextView showTv;\n\n    // 加载PaddleMobile的动态库\n    static {\n        try {\n            System.loadLibrary(\"paddle-mobile\");\n\n        } catch (Exception e) {\n            e.printStackTrace();\n\n        }\n    }\n```\n\n该方法是初始化控件，和定义按钮的点击事件，如加载模型点击事件，清空模型点击事件，打开相册预测图片点击事件。\n```java\n    // 初始化控件\n    private void initView(){\n        Button loadBtn = findViewById(R.id.load);\n        Button clearBtn = findViewById(R.id.clear);\n        Button inferBtn = findViewById(R.id.infer);\n        showTv = findViewById(R.id.show);\n        imageView = findViewById(R.id.image_view);\n\n        // 加载模型点击事件\n        loadBtn.setOnClickListener(new View.OnClickListener() {\n            @Override\n            public void onClick(View v) {\n                load_result = PML.load(model_path);\n                if (load_result) {\n                    Toast.makeText(MainActivity.this, \"模型加载成功\", Toast.LENGTH_SHORT).show();\n                } else {\n                    Toast.makeText(MainActivity.this, \"模型加载失败\", Toast.LENGTH_SHORT).show();\n                }\n            }\n        });\n\n        // 清空模型点击事件\n        clearBtn.setOnClickListener(new View.OnClickListener() {\n            @Override\n            public void onClick(View v) {\n           
     PML.clear();\n                load_result = false;\n                Toast.makeText(MainActivity.this, \"模型已清空\", Toast.LENGTH_SHORT).show();\n            }\n        });\n\n        // 打开相册选择图片预测点击事件\n        inferBtn.setOnClickListener(new View.OnClickListener() {\n            @Override\n            public void onClick(View v) {\n                if (load_result){\n                    Intent intent = new Intent(Intent.ACTION_PICK);\n                    intent.setType(\"image/*\");\n                    startActivityForResult(intent, 1);\n                } else {\n                    Toast.makeText(MainActivity.this, \"模型未加载\", Toast.LENGTH_SHORT).show();\n                }\n            }\n        });\n\n    }\n```\n\n该方法是一个回调方法，主要是打开相册后的回调预测操作。使用返回的URI转换为绝对路径，然后使用这个图片路径转换成Bitmap用于显示，同时也使用这个路径执行预测操作。\n```java\n    // 回调事件\n    @Override\n    protected void onActivityResult(int requestCode, int resultCode, @Nullable Intent data) {\n        String image_path;\n        if (resultCode == Activity.RESULT_OK) {\n            switch (requestCode) {\n                case 1:\n                    if (data == null) {\n                        return;\n                    }\n                    // 获取相册返回的URI\n                    Uri image_uri = data.getData();\n                    // 根据图片的URI获取绝对路径\n                    image_path = Utils.getPathFromURI(MainActivity.this, image_uri);\n                    // 压缩图片用于显示\n                    Bitmap bitmap = Utils.getScaleBitmap(image_path);\n                    imageView.setImageBitmap(bitmap);\n                    // 开始预测图片\n                    predictImage(image_path);\n                    break;\n            }\n        }\n    }\n```\n\n\n该方法是预测操作的方法，参数是图片的绝对路径，首先根据图片获取已经压缩过的Bitmap，然后使用这个Bitmap转换成预处理后的浮点数组，最后执行预测操作。再根据预测结果提取最大概率的标签，并获取该标签的类别名称。\n```java\n    // 根据图片的路径预测图片\n    private void predictImage(String image_path) {\n        // 把图片进行压缩\n        Bitmap bmp = Utils.getScaleBitmap(image_path);\n        // 把图片转换成浮点数组，用于预测\n     
   float[] inputData = Utils.getScaledMatrix(bmp, ddims[2], ddims[3]);\n        try {\n            long start = System.currentTimeMillis();\n            // 执行预测，获取预测结果\n            float[] result = PML.predictImage(inputData, ddims);\n            long end = System.currentTimeMillis();\n            // 获取概率最大的标签\n            int r = Utils.getMaxResult(result);\n            // 获取标签对应的类别名称\n            String[] names = {\"苹果\", \"哈密瓜\", \"胡萝卜\", \"樱桃\", \"黄瓜\", \"西瓜\"};\n            String show_text = \"标签：\" + r + \"\\n名称：\" + names[r] + \"\\n概率：\" + result[r] + \"\\n时间：\" + (end - start) + \"ms\";\n            // 显示预测结果\n            showTv.setText(show_text);\n        } catch (Exception e) {\n            e.printStackTrace();\n        }\n    }\n```\n\n\n这主要是用于动态获取权限，因为读取外部文件需要读取外部文件的权限，又因为读取外部文件权限是属于危险权限，需要动态获取。\n```java\n    // 多权限动态申请\n    private void requestPermissions() {\n        List<String> permissionList = new ArrayList<>();\n        if (ContextCompat.checkSelfPermission(this, Manifest.permission.WRITE_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {\n            permissionList.add(Manifest.permission.WRITE_EXTERNAL_STORAGE);\n        }\n\n        if (ContextCompat.checkSelfPermission(this, Manifest.permission.READ_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {\n            permissionList.add(Manifest.permission.READ_EXTERNAL_STORAGE);\n        }\n\n        // if list is not empty will request permissions\n        if (!permissionList.isEmpty()) {\n            ActivityCompat.requestPermissions(this, permissionList.toArray(new String[permissionList.size()]), 1);\n        }\n    }\n\n    @Override\n    public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {\n        super.onRequestPermissionsResult(requestCode, permissions, grantResults);\n        switch (requestCode) {\n            case 1:\n                if (grantResults.length > 0) {\n                    for (int i = 0; i < 
grantResults.length; i++) {\n\n                        int grantResult = grantResults[i];\n                        if (grantResult == PackageManager.PERMISSION_DENIED) {\n                            String s = permissions[i];\n                            Toast.makeText(this, s + \" permission was denied\", Toast.LENGTH_SHORT).show();\n                        }\n                    }\n                }\n                break;\n        }\n    }\n```\n\n然后修改`onCreate`，首先获取缓存文件路径，然后初始化视图控件和动态获取权限，最后把预测模型文件复制到缓存路径下。\n```java\n    @Override\n    protected void onCreate(Bundle savedInstanceState) {\n        super.onCreate(savedInstanceState);\n        setContentView(R.layout.activity_main);\n\n        model_path = getCacheDir().getAbsolutePath() + File.separator + \"infer_model\";\n        // 初始化控件\n        initView();\n        // 动态请求权限\n        requestPermissions();\n        // 从assets中复制模型文件到缓存目录下\n        Utils.copyFileFromAsset(this, assets_path, model_path);\n    }\n```\n\n\n8、最后运行项目，选择图片预测会得到以下的效果：\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190223192610299.jpg?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n# 参考资料\n\n 1. https://github.com/PaddlePaddle/paddle-mobile\n 2. https://blog.csdn.net/qq_33200967/article/details/81066970\n\n\n"
  },
  {
    "path": "note15/app/.gitignore",
    "content": "/build\n"
  },
  {
    "path": "note15/app/build.gradle",
    "content": "apply plugin: 'com.android.application'\n\nandroid {\n    compileSdkVersion 28\n    defaultConfig {\n        applicationId \"com.yeyupiaoling.note15\"\n        minSdkVersion 19\n        targetSdkVersion 28\n        versionCode 1\n        versionName \"1.0\"\n        testInstrumentationRunner \"android.support.test.runner.AndroidJUnitRunner\"\n    }\n    buildTypes {\n        release {\n            minifyEnabled false\n            proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'\n        }\n    }\n}\n\ndependencies {\n    implementation fileTree(dir: 'libs', include: ['*.jar'])\n    implementation 'com.android.support:appcompat-v7:28.0.0'\n    implementation 'com.android.support.constraint:constraint-layout:1.1.3'\n    testImplementation 'junit:junit:4.12'\n    androidTestImplementation 'com.android.support.test:runner:1.0.2'\n    androidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.2'\n}\n"
  },
  {
    "path": "note15/app/proguard-rules.pro",
    "content": "# Add project specific ProGuard rules here.\n# You can control the set of applied configuration files using the\n# proguardFiles setting in build.gradle.\n#\n# For more details, see\n#   http://developer.android.com/guide/developing/tools/proguard.html\n\n# If your project uses WebView with JS, uncomment the following\n# and specify the fully qualified class name to the JavaScript interface\n# class:\n#-keepclassmembers class fqcn.of.javascript.interface.for.webview {\n#   public *;\n#}\n\n# Uncomment this to preserve the line number information for\n# debugging stack traces.\n#-keepattributes SourceFile,LineNumberTable\n\n# If you keep the line number information, uncomment this to\n# hide the original source file name.\n#-renamesourcefileattribute SourceFile\n"
  },
  {
    "path": "note15/app/src/main/AndroidManifest.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\"\n    package=\"com.yeyupiaoling.note15\">\n\n    <uses-permission android:name=\"android.permission.WRITE_EXTERNAL_STORAGE\" />\n    <uses-permission android:name=\"android.permission.READ_EXTERNAL_STORAGE\" />\n\n    <application\n        android:allowBackup=\"true\"\n        android:icon=\"@mipmap/ic_launcher\"\n        android:label=\"@string/app_name\"\n        android:roundIcon=\"@mipmap/ic_launcher_round\"\n        android:supportsRtl=\"true\"\n        android:theme=\"@style/AppTheme\">\n        <activity android:name=\".MainActivity\">\n            <intent-filter>\n                <action android:name=\"android.intent.action.MAIN\" />\n\n                <category android:name=\"android.intent.category.LAUNCHER\" />\n            </intent-filter>\n        </activity>\n    </application>\n\n</manifest>"
  },
  {
    "path": "note15/app/src/main/java/com/baidu/paddle/PML.java",
    "content": "package com.baidu.paddle;\n\npublic class PML {\n    // set thread num\n    public static native void setThread(int threadCount);\n\n    //Load seperated parameters\n    public static native boolean load(String modelDir);\n\n    // load qualified model\n    public static native boolean loadQualified(String modelDir);\n\n    // Load combined parameters\n    public static native boolean loadCombined(String modelPath, String paramPath);\n\n    // load qualified model\n    public static native boolean loadCombinedQualified(String modelPath, String paramPath);\n\n    // object detection\n    public static native float[] predictImage(float[] buf, int[]ddims);\n\n    // predict yuv image\n    public static native float[] predictYuv(byte[] buf, int imgWidth, int imgHeight, int[] ddims, float[]meanValues);\n\n    // clear model\n    public static native void clear();\n}\n"
  },
  {
    "path": "note15/app/src/main/java/com/yeyupiaoling/note15/MainActivity.java",
    "content": "package com.yeyupiaoling.note15;\n\nimport android.Manifest;\nimport android.app.Activity;\nimport android.content.Intent;\nimport android.content.pm.PackageManager;\nimport android.graphics.Bitmap;\nimport android.net.Uri;\nimport android.os.Bundle;\nimport android.support.annotation.NonNull;\nimport android.support.annotation.Nullable;\nimport android.support.v4.app.ActivityCompat;\nimport android.support.v4.content.ContextCompat;\nimport android.support.v7.app.AppCompatActivity;\nimport android.view.View;\nimport android.widget.Button;\nimport android.widget.ImageView;\nimport android.widget.TextView;\nimport android.widget.Toast;\n\nimport com.baidu.paddle.PML;\n\nimport java.io.File;\nimport java.util.ArrayList;\nimport java.util.List;\n\npublic class MainActivity extends AppCompatActivity {\n    private String model_path;\n    // 模型文件夹\n    private String assets_path = \"infer_model\";\n    private boolean load_result = false;\n    // 输入图片的形状，分别是：batch size、通道数、宽度、高度\n    private int[] ddims = {1, 3, 224, 224};\n    private ImageView imageView;\n    private TextView showTv;\n\n    // 加载PaddlePaddle的动态库\n    static {\n        try {\n            System.loadLibrary(\"paddle-mobile\");\n\n        } catch (Exception e) {\n            e.printStackTrace();\n\n        }\n    }\n\n\n    @Override\n    protected void onCreate(Bundle savedInstanceState) {\n        super.onCreate(savedInstanceState);\n        setContentView(R.layout.activity_main);\n\n        model_path = getCacheDir().getAbsolutePath() + File.separator + \"infer_model\";\n        // 初始化控件\n        initView();\n        // 动态请求权限\n        requestPermissions();\n        // 从assets中复制模型文件到缓存目录下\n        Utils.copyFileFromAsset(this, assets_path, model_path);\n    }\n\n    // 初始化控件\n    private void initView(){\n        Button loadBtn = findViewById(R.id.load);\n        Button clearBtn = findViewById(R.id.clear);\n        Button inferBtn = findViewById(R.id.infer);\n        showTv = 
findViewById(R.id.show);\n        imageView = findViewById(R.id.image_view);\n\n        // 加载模型点击事件\n        loadBtn.setOnClickListener(new View.OnClickListener() {\n            @Override\n            public void onClick(View v) {\n                load_result = PML.load(model_path);\n                if (load_result) {\n                    Toast.makeText(MainActivity.this, \"模型加载成功\", Toast.LENGTH_SHORT).show();\n                } else {\n                    Toast.makeText(MainActivity.this, \"模型加载失败\", Toast.LENGTH_SHORT).show();\n                }\n            }\n        });\n\n        // 清空模型点击事件\n        clearBtn.setOnClickListener(new View.OnClickListener() {\n            @Override\n            public void onClick(View v) {\n                PML.clear();\n                load_result = false;\n                Toast.makeText(MainActivity.this, \"模型已清空\", Toast.LENGTH_SHORT).show();\n            }\n        });\n\n        // 打开相册选择图片预测点击事件\n        inferBtn.setOnClickListener(new View.OnClickListener() {\n            @Override\n            public void onClick(View v) {\n                if (load_result){\n                    Intent intent = new Intent(Intent.ACTION_PICK);\n                    intent.setType(\"image/*\");\n                    startActivityForResult(intent, 1);\n                } else {\n                    Toast.makeText(MainActivity.this, \"模型未加载\", Toast.LENGTH_SHORT).show();\n                }\n            }\n        });\n\n    }\n\n\n    // 回调事件\n    @Override\n    protected void onActivityResult(int requestCode, int resultCode, @Nullable Intent data) {\n        String image_path;\n        if (resultCode == Activity.RESULT_OK) {\n            switch (requestCode) {\n                case 1:\n                    if (data == null) {\n                        return;\n                    }\n                    // 获取相册返回的URI\n                    Uri image_uri = data.getData();\n                    // 根据图片的URI获取绝对路径\n                    image_path = 
Utils.getPathFromURI(MainActivity.this, image_uri);\n                    // 压缩图片用于显示\n                    Bitmap bitmap = Utils.getScaleBitmap(image_path);\n                    imageView.setImageBitmap(bitmap);\n                    // 开始预测图片\n                    predictImage(image_path);\n                    break;\n            }\n        }\n    }\n\n\n    // 根据图片的路径预测图片\n    private void predictImage(String image_path) {\n        // 把图片进行压缩\n        Bitmap bmp = Utils.getScaleBitmap(image_path);\n        // 把图片转换成浮点数组，用于预测\n        float[] inputData = Utils.getScaledMatrix(bmp, ddims[2], ddims[3]);\n        try {\n            long start = System.currentTimeMillis();\n            // 执行预测，获取预测结果\n            float[] result = PML.predictImage(inputData, ddims);\n            long end = System.currentTimeMillis();\n            // 获取概率最大的标签\n            int r = Utils.getMaxResult(result);\n            // 获取标签对应的类别名称\n            String[] names = {\"苹果\", \"哈密瓜\", \"胡萝卜\", \"樱桃\", \"黄瓜\", \"西瓜\"};\n            String show_text = \"标签：\" + r + \"\\n名称：\" + names[r] + \"\\n概率：\" + result[r] + \"\\n时间：\" + (end - start) + \"ms\";\n            // 显示预测结果\n            showTv.setText(show_text);\n        } catch (Exception e) {\n            e.printStackTrace();\n        }\n    }\n\n    // 多权限动态申请\n    private void requestPermissions() {\n        List<String> permissionList = new ArrayList<>();\n        if (ContextCompat.checkSelfPermission(this, Manifest.permission.WRITE_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {\n            permissionList.add(Manifest.permission.WRITE_EXTERNAL_STORAGE);\n        }\n\n        if (ContextCompat.checkSelfPermission(this, Manifest.permission.READ_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {\n            permissionList.add(Manifest.permission.READ_EXTERNAL_STORAGE);\n        }\n\n        // if list is not empty will request permissions\n        if (!permissionList.isEmpty()) {\n            
ActivityCompat.requestPermissions(this, permissionList.toArray(new String[permissionList.size()]), 1);\n        }\n    }\n\n    @Override\n    public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {\n        super.onRequestPermissionsResult(requestCode, permissions, grantResults);\n        switch (requestCode) {\n            case 1:\n                if (grantResults.length > 0) {\n                    for (int i = 0; i < grantResults.length; i++) {\n\n                        int grantResult = grantResults[i];\n                        if (grantResult == PackageManager.PERMISSION_DENIED) {\n                            String s = permissions[i];\n                            Toast.makeText(this, s + \" permission was denied\", Toast.LENGTH_SHORT).show();\n                        }\n                    }\n                }\n                break;\n        }\n    }\n}\n"
  },
  {
    "path": "note15/app/src/main/java/com/yeyupiaoling/note15/Utils.java",
    "content": "package com.yeyupiaoling.note15;\n\nimport android.content.Context;\nimport android.database.Cursor;\nimport android.graphics.Bitmap;\nimport android.graphics.BitmapFactory;\nimport android.net.Uri;\nimport android.provider.MediaStore;\n\nimport java.io.File;\nimport java.io.FileOutputStream;\nimport java.io.InputStream;\n\npublic class Utils {\n\n    // 获取预测值中最大概率的标签\n    public static int getMaxResult(float[] result) {\n        float probability = result[0];\n        int r = 0;\n        for (int i = 0; i < result.length; i++) {\n            if (probability < result[i]) {\n                probability = result[i];\n                r = i;\n            }\n        }\n        return r;\n    }\n\n    // 对将要预测的图片进行预处理\n    public static float[] getScaledMatrix(Bitmap bitmap, int desWidth, int desHeight) {\n        float[] dataBuf = new float[3 * desWidth * desHeight];\n        int rIndex;\n        int gIndex;\n        int bIndex;\n        int[] pixels = new int[desWidth * desHeight];\n        Bitmap bm = Bitmap.createScaledBitmap(bitmap, desWidth, desHeight, false);\n        bm.getPixels(pixels, 0, desWidth, 0, 0, desWidth, desHeight);\n        int j = 0;\n        int k = 0;\n        for (int i = 0; i < pixels.length; i++) {\n            int clr = pixels[i];\n            j = i / desHeight;\n            k = i % desWidth;\n            rIndex = j * desWidth + k;\n            gIndex = rIndex + desHeight * desWidth;\n            bIndex = gIndex + desHeight * desWidth;\n            // 转成RGB通道顺序，并除以255，跟训练的预处理一样\n            dataBuf[rIndex] = (float) (((clr & 0x00ff0000) >> 16) / 255.0);\n            dataBuf[gIndex] = (float) (((clr & 0x0000ff00) >> 8) / 255.0);\n            dataBuf[bIndex] = (float) (((clr & 0x000000ff)) / 255.0);\n\n        }\n        if (bm.isRecycled()) {\n            bm.recycle();\n        }\n        return dataBuf;\n    }\n\n    // 压缩图片，避免图片过大\n    public static Bitmap getScaleBitmap(String filePath) {\n        BitmapFactory.Options opt = 
new BitmapFactory.Options();\n        opt.inJustDecodeBounds = true;\n        BitmapFactory.decodeFile(filePath, opt);\n\n        int bmpWidth = opt.outWidth;\n        int bmpHeight = opt.outHeight;\n\n        int maxSize = 500;\n\n        // compress picture with inSampleSize\n        opt.inSampleSize = 1;\n        while (true) {\n            if (bmpWidth / opt.inSampleSize < maxSize || bmpHeight / opt.inSampleSize < maxSize) {\n                break;\n            }\n            opt.inSampleSize *= 2;\n        }\n        opt.inJustDecodeBounds = false;\n        return BitmapFactory.decodeFile(filePath, opt);\n    }\n\n\n    // 根据相册返回的URI返回图片的绝对路径\n    public static String getPathFromURI(Context context, Uri uri) {\n        String result;\n        Cursor cursor = context.getContentResolver().query(uri, null, null, null, null);\n        if (cursor == null) {\n            result = uri.getPath();\n        } else {\n            cursor.moveToFirst();\n            int idx = cursor.getColumnIndex(MediaStore.Images.ImageColumns.DATA);\n            result = cursor.getString(idx);\n            cursor.close();\n        }\n        return result;\n    }\n\n\n    // 复制莫模型文件到缓存目录\n    public static void copyFileFromAsset(Context context, String oldPath, String newPath) {\n        try {\n            // 预测模型文件在assets中的位置\n            String[] fileNames = context.getAssets().list(oldPath);\n            if (fileNames.length > 0) {\n                // directory\n                File file = new File(newPath);\n                if (!file.exists()) {\n                    file.mkdirs();\n                }\n                // copy recursivelyC\n                for (String fileName : fileNames) {\n                    copyFileFromAsset(context, oldPath + \"/\" + fileName, newPath + \"/\" + fileName);\n                }\n            } else {\n                // file\n                File file = new File(newPath);\n                // if file exists will never copy\n                if 
(file.exists()) {\n                    return;\n                }\n\n                // copy file to new path\n                InputStream is = context.getAssets().open(oldPath);\n                FileOutputStream fos = new FileOutputStream(file);\n                byte[] buffer = new byte[1024];\n                int byteCount;\n                while ((byteCount = is.read(buffer)) != -1) {\n                    fos.write(buffer, 0, byteCount);\n                }\n                fos.flush();\n                is.close();\n                fos.close();\n            }\n        } catch (Exception e) {\n            e.printStackTrace();\n        }\n    }\n}\n"
  },
  {
    "path": "note15/app/src/main/res/drawable/ic_launcher_background.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<vector xmlns:android=\"http://schemas.android.com/apk/res/android\"\n    android:width=\"108dp\"\n    android:height=\"108dp\"\n    android:viewportHeight=\"108\"\n    android:viewportWidth=\"108\">\n    <path\n        android:fillColor=\"#26A69A\"\n        android:pathData=\"M0,0h108v108h-108z\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M9,0L9,108\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M19,0L19,108\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M29,0L29,108\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M39,0L39,108\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M49,0L49,108\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M59,0L59,108\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M69,0L69,108\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M79,0L79,108\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M89,0L89,108\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        
android:fillColor=\"#00000000\"\n        android:pathData=\"M99,0L99,108\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M0,9L108,9\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M0,19L108,19\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M0,29L108,29\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M0,39L108,39\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M0,49L108,49\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M0,59L108,59\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M0,69L108,69\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M0,79L108,79\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M0,89L108,89\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M0,99L108,99\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        
android:pathData=\"M19,29L89,29\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M19,39L89,39\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M19,49L89,49\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M19,59L89,59\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M19,69L89,69\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M19,79L89,79\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M29,19L29,89\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M39,19L39,89\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M49,19L49,89\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M59,19L59,89\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M69,19L69,89\"\n        android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n    <path\n        android:fillColor=\"#00000000\"\n        android:pathData=\"M79,19L79,89\"\n       
 android:strokeColor=\"#33FFFFFF\"\n        android:strokeWidth=\"0.8\" />\n</vector>\n"
  },
  {
    "path": "note15/app/src/main/res/drawable-v24/ic_launcher_foreground.xml",
    "content": "<vector xmlns:android=\"http://schemas.android.com/apk/res/android\"\n    xmlns:aapt=\"http://schemas.android.com/aapt\"\n    android:width=\"108dp\"\n    android:height=\"108dp\"\n    android:viewportHeight=\"108\"\n    android:viewportWidth=\"108\">\n    <path\n        android:fillType=\"evenOdd\"\n        android:pathData=\"M32,64C32,64 38.39,52.99 44.13,50.95C51.37,48.37 70.14,49.57 70.14,49.57L108.26,87.69L108,109.01L75.97,107.97L32,64Z\"\n        android:strokeColor=\"#00000000\"\n        android:strokeWidth=\"1\">\n        <aapt:attr name=\"android:fillColor\">\n            <gradient\n                android:endX=\"78.5885\"\n                android:endY=\"90.9159\"\n                android:startX=\"48.7653\"\n                android:startY=\"61.0927\"\n                android:type=\"linear\">\n                <item\n                    android:color=\"#44000000\"\n                    android:offset=\"0.0\" />\n                <item\n                    android:color=\"#00000000\"\n                    android:offset=\"1.0\" />\n            </gradient>\n        </aapt:attr>\n    </path>\n    <path\n        android:fillColor=\"#FFFFFF\"\n        android:fillType=\"nonZero\"\n        android:pathData=\"M66.94,46.02L66.94,46.02C72.44,50.07 76,56.61 76,64L32,64C32,56.61 35.56,50.11 40.98,46.06L36.18,41.19C35.45,40.45 35.45,39.3 36.18,38.56C36.91,37.81 38.05,37.81 38.78,38.56L44.25,44.05C47.18,42.57 50.48,41.71 54,41.71C57.48,41.71 60.78,42.57 63.68,44.05L69.11,38.56C69.84,37.81 70.98,37.81 71.71,38.56C72.44,39.3 72.44,40.45 71.71,41.19L66.94,46.02ZM62.94,56.92C64.08,56.92 65,56.01 65,54.88C65,53.76 64.08,52.85 62.94,52.85C61.8,52.85 60.88,53.76 60.88,54.88C60.88,56.01 61.8,56.92 62.94,56.92ZM45.06,56.92C46.2,56.92 47.13,56.01 47.13,54.88C47.13,53.76 46.2,52.85 45.06,52.85C43.92,52.85 43,53.76 43,54.88C43,56.01 43.92,56.92 45.06,56.92Z\"\n        android:strokeColor=\"#00000000\"\n        android:strokeWidth=\"1\" />\n</vector>\n"
  },
  {
    "path": "note15/app/src/main/res/layout/activity_main.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<RelativeLayout xmlns:android=\"http://schemas.android.com/apk/res/android\"\n    xmlns:app=\"http://schemas.android.com/apk/res-auto\"\n    xmlns:tools=\"http://schemas.android.com/tools\"\n    android:layout_width=\"match_parent\"\n    android:layout_height=\"match_parent\"\n    tools:context=\".MainActivity\">\n\n    <LinearLayout\n        android:id=\"@+id/ll\"\n        android:orientation=\"horizontal\"\n        android:layout_alignParentBottom=\"true\"\n        android:layout_width=\"match_parent\"\n        android:layout_height=\"50dp\">\n\n        <Button\n            android:layout_weight=\"1\"\n            android:id=\"@+id/load\"\n            android:text=\"加载模型\"\n            android:layout_width=\"0dp\"\n            android:layout_height=\"match_parent\" />\n\n        <Button\n            android:id=\"@+id/clear\"\n            android:layout_weight=\"1\"\n            android:text=\"清空模型\"\n            android:layout_width=\"0dp\"\n            android:layout_height=\"match_parent\" />\n\n        <Button\n            android:id=\"@+id/infer\"\n            android:layout_weight=\"1\"\n            android:text=\"预测图片\"\n            android:layout_width=\"0dp\"\n            android:layout_height=\"match_parent\" />\n    </LinearLayout>\n\n    <TextView\n        android:layout_above=\"@id/ll\"\n        android:id=\"@+id/show\"\n        android:hint=\"这里显示预测结果\"\n        android:layout_width=\"match_parent\"\n        android:layout_height=\"100dp\" />\n\n    <ImageView\n        android:id=\"@+id/image_view\"\n        android:layout_above=\"@id/show\"\n        android:layout_width=\"match_parent\"\n        android:layout_height=\"match_parent\" />\n</RelativeLayout>"
  },
  {
    "path": "note15/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<adaptive-icon xmlns:android=\"http://schemas.android.com/apk/res/android\">\n    <background android:drawable=\"@drawable/ic_launcher_background\" />\n    <foreground android:drawable=\"@drawable/ic_launcher_foreground\" />\n</adaptive-icon>"
  },
  {
    "path": "note15/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<adaptive-icon xmlns:android=\"http://schemas.android.com/apk/res/android\">\n    <background android:drawable=\"@drawable/ic_launcher_background\" />\n    <foreground android:drawable=\"@drawable/ic_launcher_foreground\" />\n</adaptive-icon>"
  },
  {
    "path": "note15/app/src/main/res/values/colors.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<resources>\n    <color name=\"colorPrimary\">#3F51B5</color>\n    <color name=\"colorPrimaryDark\">#303F9F</color>\n    <color name=\"colorAccent\">#FF4081</color>\n</resources>\n"
  },
  {
    "path": "note15/app/src/main/res/values/strings.xml",
    "content": "<resources>\n    <string name=\"app_name\">note15</string>\n</resources>\n"
  },
  {
    "path": "note15/app/src/main/res/values/styles.xml",
    "content": "<resources>\n\n    <!-- Base application theme. -->\n    <style name=\"AppTheme\" parent=\"Base.Theme.AppCompat.Light.DarkActionBar\">\n        <!-- Customize your theme here. -->\n        <item name=\"colorPrimary\">@color/colorPrimary</item>\n        <item name=\"colorPrimaryDark\">@color/colorPrimaryDark</item>\n        <item name=\"colorAccent\">@color/colorAccent</item>\n    </style>\n\n</resources>\n"
  },
  {
    "path": "note15/build.gradle",
    "content": "// Top-level build file where you can add configuration options common to all sub-projects/modules.\n\nbuildscript {\n    \n    repositories {\n        google()\n        jcenter()\n    }\n    dependencies {\n        classpath 'com.android.tools.build:gradle:3.1.3'\n        \n\n        // NOTE: Do not place your application dependencies here; they belong\n        // in the individual module build.gradle files\n    }\n}\n\nallprojects {\n    repositories {\n        google()\n        jcenter()\n    }\n}\n\ntask clean(type: Delete) {\n    delete rootProject.buildDir\n}\n"
  },
  {
    "path": "note15/gradle/wrapper/gradle-wrapper.properties",
    "content": "#Fri Feb 22 11:28:20 CST 2019\ndistributionBase=GRADLE_USER_HOME\ndistributionPath=wrapper/dists\nzipStoreBase=GRADLE_USER_HOME\nzipStorePath=wrapper/dists\ndistributionUrl=https\\://services.gradle.org/distributions/gradle-4.4-all.zip\n"
  },
  {
    "path": "note15/gradle.properties",
    "content": "# Project-wide Gradle settings.\n# IDE (e.g. Android Studio) users:\n# Gradle settings configured through the IDE *will override*\n# any settings specified in this file.\n# For more details on how to configure your build environment visit\n# http://www.gradle.org/docs/current/userguide/build_environment.html\n# Specifies the JVM arguments used for the daemon process.\n# The setting is particularly useful for tweaking memory settings.\norg.gradle.jvmargs=-Xmx1536m\n# When configured, Gradle will run in incubating parallel mode.\n# This option should only be used with decoupled projects. More details, visit\n# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects\n# org.gradle.parallel=true\n"
  },
  {
    "path": "note15/gradlew",
    "content": "#!/usr/bin/env sh\n\n##############################################################################\n##\n##  Gradle start up script for UN*X\n##\n##############################################################################\n\n# Attempt to set APP_HOME\n# Resolve links: $0 may be a link\nPRG=\"$0\"\n# Need this for relative symlinks.\nwhile [ -h \"$PRG\" ] ; do\n    ls=`ls -ld \"$PRG\"`\n    link=`expr \"$ls\" : '.*-> \\(.*\\)$'`\n    if expr \"$link\" : '/.*' > /dev/null; then\n        PRG=\"$link\"\n    else\n        PRG=`dirname \"$PRG\"`\"/$link\"\n    fi\ndone\nSAVED=\"`pwd`\"\ncd \"`dirname \\\"$PRG\\\"`/\" >/dev/null\nAPP_HOME=\"`pwd -P`\"\ncd \"$SAVED\" >/dev/null\n\nAPP_NAME=\"Gradle\"\nAPP_BASE_NAME=`basename \"$0\"`\n\n# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.\nDEFAULT_JVM_OPTS=\"\"\n\n# Use the maximum available, or set MAX_FD != -1 to use that value.\nMAX_FD=\"maximum\"\n\nwarn () {\n    echo \"$*\"\n}\n\ndie () {\n    echo\n    echo \"$*\"\n    echo\n    exit 1\n}\n\n# OS specific support (must be 'true' or 'false').\ncygwin=false\nmsys=false\ndarwin=false\nnonstop=false\ncase \"`uname`\" in\n  CYGWIN* )\n    cygwin=true\n    ;;\n  Darwin* )\n    darwin=true\n    ;;\n  MINGW* )\n    msys=true\n    ;;\n  NONSTOP* )\n    nonstop=true\n    ;;\nesac\n\nCLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar\n\n# Determine the Java command to use to start the JVM.\nif [ -n \"$JAVA_HOME\" ] ; then\n    if [ -x \"$JAVA_HOME/jre/sh/java\" ] ; then\n        # IBM's JDK on AIX uses strange locations for the executables\n        JAVACMD=\"$JAVA_HOME/jre/sh/java\"\n    else\n        JAVACMD=\"$JAVA_HOME/bin/java\"\n    fi\n    if [ ! 
-x \"$JAVACMD\" ] ; then\n        die \"ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME\n\nPlease set the JAVA_HOME variable in your environment to match the\nlocation of your Java installation.\"\n    fi\nelse\n    JAVACMD=\"java\"\n    which java >/dev/null 2>&1 || die \"ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.\n\nPlease set the JAVA_HOME variable in your environment to match the\nlocation of your Java installation.\"\nfi\n\n# Increase the maximum file descriptors if we can.\nif [ \"$cygwin\" = \"false\" -a \"$darwin\" = \"false\" -a \"$nonstop\" = \"false\" ] ; then\n    MAX_FD_LIMIT=`ulimit -H -n`\n    if [ $? -eq 0 ] ; then\n        if [ \"$MAX_FD\" = \"maximum\" -o \"$MAX_FD\" = \"max\" ] ; then\n            MAX_FD=\"$MAX_FD_LIMIT\"\n        fi\n        ulimit -n $MAX_FD\n        if [ $? -ne 0 ] ; then\n            warn \"Could not set maximum file descriptor limit: $MAX_FD\"\n        fi\n    else\n        warn \"Could not query maximum file descriptor limit: $MAX_FD_LIMIT\"\n    fi\nfi\n\n# For Darwin, add options to specify how the application appears in the dock\nif $darwin; then\n    GRADLE_OPTS=\"$GRADLE_OPTS \\\"-Xdock:name=$APP_NAME\\\" \\\"-Xdock:icon=$APP_HOME/media/gradle.icns\\\"\"\nfi\n\n# For Cygwin, switch paths to Windows format before running java\nif $cygwin ; then\n    APP_HOME=`cygpath --path --mixed \"$APP_HOME\"`\n    CLASSPATH=`cygpath --path --mixed \"$CLASSPATH\"`\n    JAVACMD=`cygpath --unix \"$JAVACMD\"`\n\n    # We build the pattern for arguments to be converted via cygpath\n    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`\n    SEP=\"\"\n    for dir in $ROOTDIRSRAW ; do\n        ROOTDIRS=\"$ROOTDIRS$SEP$dir\"\n        SEP=\"|\"\n    done\n    OURCYGPATTERN=\"(^($ROOTDIRS))\"\n    # Add a user-defined pattern to the cygpath arguments\n    if [ \"$GRADLE_CYGPATTERN\" != \"\" ] ; then\n        OURCYGPATTERN=\"$OURCYGPATTERN|($GRADLE_CYGPATTERN)\"\n    fi\n    # 
Now convert the arguments - kludge to limit ourselves to /bin/sh\n    i=0\n    for arg in \"$@\" ; do\n        CHECK=`echo \"$arg\"|egrep -c \"$OURCYGPATTERN\" -`\n        CHECK2=`echo \"$arg\"|egrep -c \"^-\"`                                 ### Determine if an option\n\n        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then                    ### Added a condition\n            eval `echo args$i`=`cygpath --path --ignore --mixed \"$arg\"`\n        else\n            eval `echo args$i`=\"\\\"$arg\\\"\"\n        fi\n        i=$((i+1))\n    done\n    case $i in\n        (0) set -- ;;\n        (1) set -- \"$args0\" ;;\n        (2) set -- \"$args0\" \"$args1\" ;;\n        (3) set -- \"$args0\" \"$args1\" \"$args2\" ;;\n        (4) set -- \"$args0\" \"$args1\" \"$args2\" \"$args3\" ;;\n        (5) set -- \"$args0\" \"$args1\" \"$args2\" \"$args3\" \"$args4\" ;;\n        (6) set -- \"$args0\" \"$args1\" \"$args2\" \"$args3\" \"$args4\" \"$args5\" ;;\n        (7) set -- \"$args0\" \"$args1\" \"$args2\" \"$args3\" \"$args4\" \"$args5\" \"$args6\" ;;\n        (8) set -- \"$args0\" \"$args1\" \"$args2\" \"$args3\" \"$args4\" \"$args5\" \"$args6\" \"$args7\" ;;\n        (9) set -- \"$args0\" \"$args1\" \"$args2\" \"$args3\" \"$args4\" \"$args5\" \"$args6\" \"$args7\" \"$args8\" ;;\n    esac\nfi\n\n# Escape application args\nsave () {\n    for i do printf %s\\\\n \"$i\" | sed \"s/'/'\\\\\\\\''/g;1s/^/'/;\\$s/\\$/' \\\\\\\\/\" ; done\n    echo \" \"\n}\nAPP_ARGS=$(save \"$@\")\n\n# Collect all arguments for the java command, following the shell quoting and substitution rules\neval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS \"\\\"-Dorg.gradle.appname=$APP_BASE_NAME\\\"\" -classpath \"\\\"$CLASSPATH\\\"\" org.gradle.wrapper.GradleWrapperMain \"$APP_ARGS\"\n\n# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong\nif [ \"$(uname)\" = \"Darwin\" ] && [ \"$HOME\" = \"$PWD\" ]; then\n  cd \"$(dirname \"$0\")\"\nfi\n\nexec 
\"$JAVACMD\" \"$@\"\n"
  },
  {
    "path": "note15/gradlew.bat",
    "content": "@if \"%DEBUG%\" == \"\" @echo off\n@rem ##########################################################################\n@rem\n@rem  Gradle startup script for Windows\n@rem\n@rem ##########################################################################\n\n@rem Set local scope for the variables with windows NT shell\nif \"%OS%\"==\"Windows_NT\" setlocal\n\nset DIRNAME=%~dp0\nif \"%DIRNAME%\" == \"\" set DIRNAME=.\nset APP_BASE_NAME=%~n0\nset APP_HOME=%DIRNAME%\n\n@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.\nset DEFAULT_JVM_OPTS=\n\n@rem Find java.exe\nif defined JAVA_HOME goto findJavaFromJavaHome\n\nset JAVA_EXE=java.exe\n%JAVA_EXE% -version >NUL 2>&1\nif \"%ERRORLEVEL%\" == \"0\" goto init\n\necho.\necho ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.\necho.\necho Please set the JAVA_HOME variable in your environment to match the\necho location of your Java installation.\n\ngoto fail\n\n:findJavaFromJavaHome\nset JAVA_HOME=%JAVA_HOME:\"=%\nset JAVA_EXE=%JAVA_HOME%/bin/java.exe\n\nif exist \"%JAVA_EXE%\" goto init\n\necho.\necho ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%\necho.\necho Please set the JAVA_HOME variable in your environment to match the\necho location of your Java installation.\n\ngoto fail\n\n:init\n@rem Get command-line arguments, handling Windows variants\n\nif not \"%OS%\" == \"Windows_NT\" goto win9xME_args\n\n:win9xME_args\n@rem Slurp the command line arguments.\nset CMD_LINE_ARGS=\nset _SKIP=2\n\n:win9xME_args_slurp\nif \"x%~1\" == \"x\" goto execute\n\nset CMD_LINE_ARGS=%*\n\n:execute\n@rem Setup the command line\n\nset CLASSPATH=%APP_HOME%\\gradle\\wrapper\\gradle-wrapper.jar\n\n@rem Execute Gradle\n\"%JAVA_EXE%\" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% \"-Dorg.gradle.appname=%APP_BASE_NAME%\" -classpath \"%CLASSPATH%\" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%\n\n:end\n@rem End local scope for the 
variables with windows NT shell\nif \"%ERRORLEVEL%\"==\"0\" goto mainEnd\n\n:fail\nrem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of\nrem the _cmd.exe /c_ return code!\nif  not \"\" == \"%GRADLE_EXIT_CONSOLE%\" exit 1\nexit /b 1\n\n:mainEnd\nif \"%OS%\"==\"Windows_NT\" endlocal\n\n:omega\n"
  },
  {
    "path": "note15/settings.gradle",
    "content": "include ':app'\n"
  },
  {
    "path": "note2/README.md",
    "content": "﻿@[TOC]\n\n# 前言\n在第一章介绍了PaddlePaddle的安装，接下来我们将介绍如何使用PaddlePaddle。PaddlePaddle是百度在2016年9月27日开源的一个深度学习框架，也是目前国内唯一一个开源的深度学习框架。PaddlePaddle在0.11.0版本之后，开始推出Fluid版本，Fluid版本相对之前的V2版本，Fluid的代码结构更加清晰，使用起来更加方便。这本章中我们将会介绍如何使用PaddlePaddle来计算1+1，选择这个简单的例子主要是为了让读者了解PaddlePaddle的Fluid版本的使用，掌握PaddlePaddle的使用流程。我们讲过介绍如何使用PaddlePaddle定义一个张量和如何对张量进行计算。\n\n# 计算常量的1+1\nPaddlePaddle类似一个科学计算库，比如Python下我们使用的numpy，提供的大量的计算操作，但是PaddlePaddle的计算对象是张量。我们下面就编写一个`constant_sum.py`Python文件，使用PaddlePaddle计算一个`[[1, 1], [1, 1]] * [[1, 1], [1, 1]]`。\n\n首先导入PaddlePaddle库，大部分的API都在`paddle.fluid`下。\n```python \nimport paddle.fluid as fluid\n```\n\n定义两个张量的常量x1和x2，并指定它们的形状是[2, 2]，并赋值为1铺满整个张量，类型为int64.\n```python\n# 定义两个张量\nx1 = fluid.layers.fill_constant(shape=[2, 2], value=1, dtype='int64')\nx2 = fluid.layers.fill_constant(shape=[2, 2], value=1, dtype='int64')\n```\n\n接着定义一个操作，该计算是将上面两个张量进行加法计算，并返回一个求和的算子。PaddlePaddle提供了大量的操作，比如加减乘除、三角函数等，读者可以在`fluid.layers`找到。\n```python\n# 将两个张量求和\ny1 = fluid.layers.sum(x=[x1, x2])\n```\n\n然后创建一个执行器，可以在这里指定计算使用CPU或GPU。当使用`CPUPlace()`时使用的是CPU，如果是`CUDAPlace()`使用的是GPU。解析器是之后使用它来进行计算过的，比如在执行计算之前我们要先执行参数初始化的`program`也是要使用到解析器的，因为只有解析器才能执行`program`。\n```python\n# 创建一个使用CPU的执行器\nplace = fluid.CPUPlace()\nexe = fluid.executor.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n```\n\n最后执行计算，`program`的参数值是主程序，不是上一步使用的是初始化参数的程序，`program`默认一共有两个，分别是`default_startup_program()`和`default_main_program()`。`fetch_list`参数的值是在解析器在run之后要输出的值，我们要输出计算加法之后输出结果值。最后计算得到的也是一个张量。\n```python\n# 进行运算，并把y的结果输出\nresult = exe.run(program=fluid.default_main_program(),\n                 fetch_list=[y1])\nprint(result)\n```\n\n输出信息：\n```\n[array([[2, 2],\n       [2, 2]], dtype=int64)]\n```\n\n# 计算变量的1+1\n上面计算的是张量常量的1+1，并不能随意修改常量的值，所以下面我们要编写一个`variable_sum.py`程序文件，使用张量变量作为乘数的程序，类似是一个占位符，等到将要计算时，再把要计算的值添加到占位符中进行计算。\n\n导入PaddlePaddle库和numpy的库。\n```python\nimport paddle.fluid as fluid\nimport numpy as 
np\n```\n\n定义两个张量，并不指定该张量的形状和值，它们是之后动态赋值的。这里只是指定它们的类型和名字，这个名字是我们之后赋值的关键。\n```python\n# 定义两个张量\na = fluid.layers.create_tensor(dtype='int64', name='a')\nb = fluid.layers.create_tensor(dtype='int64', name='b')\n```\n\n使用同样的方式，定义这个两个张量的加法操作。\n```python\n# 将两个张量求和\ny = fluid.layers.sum(x=[a, b])\n```\n\n这里我们同样是创建一个使用CPU的执行器，和进行参数初始化。\n```python\n# 创建一个使用CPU的执行器\nplace = fluid.CPUPlace()\nexe = fluid.executor.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n```\n\n然后使用numpy创建两个张量值，之后我们要计算的就是这两个值。\n```python\n# 定义两个要计算的变量\na1 = np.array([3, 2]).astype('int64')\nb1 = np.array([1, 1]).astype('int64')\n```\n\n这次`exe.run()`的参数有点不一样了，多了一个`feed`参数，这个就是要对张量变量进行赋值的。赋值的方式是使用了键值对的格式，key是定义张量变量是指定的名称，value就是要传递的值。在`fetch_list`参数中，笔者希望把`a, b, y`的值都输出来，所以要使用3个变量来接受返回值。\n```python\n# 进行运算，并把y的结果输出\nout_a, out_b, result = exe.run(program=fluid.default_main_program(),\n                               feed={'a': a1, 'b': b1},\n                               fetch_list=[a, b, y])\nprint(out_a, \" + \", out_b,\" = \", result)\n```\n\n输出信息：\n```\n[3 2]  +  [1 1]  =  [4 3]\n```\n\n\n到处为止，本章就结束了。在本章我们学会了PaddlePaddle的使用方式，那在下一章我们使用PaddlePaddle完成我们的第一个安装——线性回归，我们下章见。\n\n同步到百度AI Studio平台：http://aistudio.baidu.com/aistudio/#/projectdetail/29339\n同步到科赛网K-Lab平台：https://www.kesci.com/home/project/5bf75387954d6e0010668f76\n项目代码GitHub地址：https://github.com/yeyupiaoling/LearnPaddle2/tree/master/note2\n\n**注意：** 最新代码以GitHub上的为准\n\n# 参考资料\n1. http://www.paddlepaddle.org/documentation/api/zh/1.0/layers.html\n"
  },
  {
    "path": "note2/constant_sum.py",
    "content": "import paddle.fluid as fluid\n\n# 定义两个张量\nx1 = fluid.layers.fill_constant(shape=[2, 2], value=1, dtype='int64')\nx2 = fluid.layers.fill_constant(shape=[2, 2], value=1, dtype='int64')\n\n# 将两个张量求和\ny1 = fluid.layers.sum(x=[x1, x2])\n\n# 创建一个使用CPU的执行器\nplace = fluid.CPUPlace()\nexe = fluid.executor.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 进行运算，并把y的结果输出\nresult = exe.run(program=fluid.default_main_program(),\n                 fetch_list=[y1])\nprint(result)\n"
  },
  {
    "path": "note2/variable_sum.py",
    "content": "import paddle.fluid as fluid\nimport numpy as np\n\n# 定义两个张量\na = fluid.layers.create_tensor(dtype='int64', name='a')\nb = fluid.layers.create_tensor(dtype='int64', name='b')\n\n# 将两个张量求和\ny = fluid.layers.sum(x=[a, b])\n\n# 创建一个使用CPU的执行器\nplace = fluid.CPUPlace()\nexe = fluid.executor.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 定义两个要计算的变量\na1 = np.array([3, 2]).astype('int64')\nb1 = np.array([1, 1]).astype('int64')\n\n# 进行运算，并把y的结果输出\nout_a, out_b, result = exe.run(program=fluid.default_main_program(),\n                               feed={'a': a1, 'b': b1},\n                               fetch_list=[a, b, y])\nprint(out_a,\" + \", out_b,\" = \", result)\n"
  },
  {
    "path": "note3/README.md",
    "content": "﻿@[TOC]\n\n# 前言\n在第二章，我们已经学习了如何使用PaddlePaddle来进行加法计算，从这个小小的例子中，我们掌握了PaddlePaddle的使用方式。在本章中，我们将介绍使用PaddlePaddle完成一个深度学习非常常见的入门例子——线性回归，我们将分别使用自定义数据集和使用PaddlePaddle提供的数据集接口来训练一个线性回归模型。\n\n# 使用自定义数据\n在这一部分，我们将介绍整个线性回归从定义网络到使用自定义的数据进行训练，最后验证我们网络的预测能力。\n\n首先导入PaddlePaddle库和一些工具类库。\n```python\nimport paddle.fluid as fluid\nimport paddle\nimport numpy as np\n```\n\n定义一个简单的线性网络，这个网络非常简单，结构是：`输出层-->>隐层-->>输出层`，这个网络一共有2层，因为输入层不算网络的层数。更具体的就是一个大小为100，激活函数是ReLU的全连接层和一个输出大小为1的全连接层，就这样构建了一个非常简单的网络。这里使用输入`fluid.layers.data()`定义的输入层类似`fluid.layers.create_tensor()`，也是有`name`属性，之后也是根据这个属性来填充数据的。这里定义输入层的形状为13，这是因为波士顿房价数据集的每条数据有13个属性，我们之后自定义的数据集也是为了符合这一个维度。\n```python\n# 定义一个简单的线性网络\nx = fluid.layers.data(name='x', shape=[13], dtype='float32')\nhidden = fluid.layers.fc(input=x, size=100, act='relu')\nnet = fluid.layers.fc(input=hidden, size=1, act=None)\n```\n\n接着定义神经网络的损失函数，这里同样使用了`fluid.layers.data()`这个接口，这个可以理解为数据对应的结果，上面`name`为`x`的`fluid.layers.data()`为属性数据。这里使用了平方差损失函数(square_error_cost)，PaddlePaddle提供了很多的损失函数的接口，比如交叉熵损失函数(cross_entropy)。因为本项目是一个线性回归任务，所以我们使用的是平方差损失函数。因为`fluid.layers.square_error_cost()`求的是一个Batch的损失值，所以我们还要对他求一个平均值。\n```python\n# 定义损失函数\ny = fluid.layers.data(name='y', shape=[1], dtype='float32')\ncost = fluid.layers.square_error_cost(input=net, label=y)\navg_cost = fluid.layers.mean(cost)\n```\n\n定义损失函数之后，可以在主程序（fluid.default_main_program）中克隆一个程序作为预测程序，用于训练完成之后使用这个预测程序进行预测数据。这个定义的顺序不能错，因为我们定义的网络结构，损失函数等等都是更加顺序记录到PaddlePaddle的主程序中的。主程序定义了神经网络模型，前向反向计算，以及优化算法对网络中可学习参数的更新，是我们整个程序的核心，这个是PaddlePaddle已经帮我们实现的了，我们只需注重网络的构建和训练即可。\n```python\n# 复制一个主程序，方便之后使用\ntest_program = fluid.default_main_program().clone(for_test=True)\n```\n\n接着是定义训练使用的优化方法，这里使用的是随机梯度下降优化方法。PaddlePaddle提供了大量的优化函数接口，除了本项目使用的随机梯度下降法（SGD），还有Momentum、Adagrad、Adagrad等等，读者可以更加自己项目的需求使用不同的优化方法。\n```python\n# 定义优化方法\noptimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.01)\nopts = 
optimizer.minimize(avg_cost)\n```\n\n然后是创建一个解析器，我们同样是使用CPU来进行训练。创建解析器之后，使用解析器来执行`fluid.default_startup_program()`初始化参数。\n```python\n# 创建一个使用CPU的执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n```\n\n我们使用numpy定义一组数据，这组数据的每一条数据有13个，这是因为我们在定义网络的输入层时，`shape`是13，但是每条数据的后面12个数据是没意义的，因为笔者全部都是使用0来填充，纯粹是为了符合数据的格式而已。这组数据是符合`y = 2 * x + 1`，但是程序是不知道的，我们之后使用这组数据进行训练，看看强大的神经网络是否能够训练出一个拟合这个函数的模型。最后定义了一个预测数据，是在训练完成，使用这个数据作为`x`输入，看是否能够预测于正确值相近结果。\n```python\n# 定义训练和测试数据\nx_data = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], \n                   [2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], \n                   [3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], \n                   [4.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], \n                   [5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]).astype('float32')\ny_data = np.array([[3.0], [5.0], [7.0], [9.0], [11.0]]).astype('float32')\ntest_data = np.array([[6.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]).astype('float32')\n```\n\n定义数据之后，我们就可以使用数据进行训练了。我们这次训练了10个pass，读者可根据情况设置更多的训练轮数，通常来说训练的次数越多，模型收敛的越好。同样我们使用的时`profram`是`fluid.default_main_program()`，`feed`中是在训练时把数据传入`fluid.layers.data()`定义的变量中，及那个键值对的`key`对用的就是`fluid.layers.data()`中的`name`的值。我们让训练过程中输出avg_cost的值。\n\n在训练过程中，我们可以看到输出的损失值在不断减小，证明我们的模型在不断收敛。\n```python\n# 开始训练100个pass\nfor pass_id in range(10):\n    train_cost = exe.run(program=fluid.default_main_program(),\n                         feed={'x': x_data, 'y': y_data},\n                         fetch_list=[avg_cost])\n    print(\"Pass:%d, Cost:%0.5f\" % (pass_id, train_cost[0]))\n```\n\n输出信息：\n```\nPass:0, Cost:65.61024\nPass:1, Cost:26.62285\nPass:2, Cost:7.78299\nPass:3, Cost:0.59838\nPass:4, Cost:0.02781\nPass:5, Cost:0.02600\nPass:6, Cost:0.02548\nPass:7, Cost:0.02496\nPass:8, Cost:0.02446\nPass:9, 
Cost:0.02396\n```\n\n训练完成之后，我们使用上面克隆主程序得到的预测程序来预测我们刚才定义的预测数据。预测数据同样作为`x`在`feed`输入，在预测时，理论上是不用输入`y`的，但是要符合输入格式，我们模拟一个`y`的数据值，这个值并不会影响我们的预测结果。`fetch_list`的值，也就是我们执行预测之后要输出的结果，这是网络的最后一层，而不是平均损失函数（avg_cost），因为我们是想要预测程序输出预测结果。根据我们上面定义数据时，满足规律`y = 2 * x + 1`，所以当x为6时，y应该是13，最后输出的结果也是应该接近13的。\n```python\n# 开始预测\nresult = exe.run(program=test_program,\n                 feed={'x': test_data, 'y': np.array([[0.0]]).astype('float32')},\n                 fetch_list=[net])\nprint(\"当x为6.0时，y为：%0.5f:\" % result[0][0][0])\n```\n\n输出信息：\n```\n当x为6.0时，y为：13.23651:\n```\n\n# 使用房价数据集训练\n在这一部分，我们还是使用上面定义的网络结构，使用波士顿房价数据集进行训练。\n\n在此之前，我们已经完整训练深度学习模型，并使用这个模型来进行预测。而上面使用的是我们自己定义的数据，而且这个数据非常小。PaddlePaddle提供了大量的数据集API，我们可使用这些API来使用一些比较常用的数据集，比如在深度学习中，线性回归最常用的是波士顿房价数据集（UCI Housing Data Set），`uci_housing`就是PaddlePaddle提供的一个波士顿房价数据集。\n\n而且这次我们的数据集不是一下子全部都丢入到训练中，而是把它们分成一个个Batch的小数据集，而每个Batch的大小我们都可以通过`batch_size`进行设置，这个大小一般是2的N次方。这里定义了训练和测试两个数据集。\n```python\nimport paddle.dataset.uci_housing as uci_housing\n# 使用房价数据进行训练和测试\n# 从paddle接口中获取房价数据集\ntrain_reader = paddle.batch(reader=uci_housing.train(), batch_size=128)\ntest_reader = paddle.batch(reader=uci_housing.test(), batch_size=128)\n```\n\n接着定义数据的维度，在使用自定义数据的时候，我们是使用键值对的方式添加数据的，但是我们调用API来获取数据集时，已经是将属性数据和结果放在一个Batch中，如果再对数据拆分在训练进行填充，那就更麻烦了，所以PaddlePaddle提供了一个`fluid.DataFeeder()`这个接口，这里可以定义输入数据每个维度是属于哪一个`fluid.layers.data()`.\n```python\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n```\n\n接下来我们是使用波士顿房价数据集来进行训练，在训练时，我们是通过一个循环迭代器把reader中的数据按照一个个Batch提取出来加入到训练中。加入训练时使用上面定义的数据维度feeder.feed()添加的。\n\n当每一个Pass训练完成之后，都执行一次测试，测试与预测的作用不同，测试是为了使用测试数据集预测并与真实结果对比，评估当前模型的好坏。因为测试集不属于训练集，所以测试集的预测结果的好坏能够体现模型的泛化能力。\n```python\n# 开始训练和测试\nfor pass_id in range(10):\n    # 开始训练并输出最后一个batch的损失值\n    train_cost = 0\n    for batch_id, data in enumerate(train_reader()):\n        train_cost = exe.run(program=fluid.default_main_program(),\n                             feed=feeder.feed(data),\n                             fetch_list=[avg_cost])\n  
  print(\"Pass:%d, Cost:%0.5f\" % (pass_id, train_cost[0][0]))\n\n    # 开始测试并输出最后一个batch的损失值\n    test_cost = 0\n    for batch_id, data in enumerate(test_reader()):\n        test_cost = exe.run(program=fluid.default_main_program(),\n                            feed=feeder.feed(data),\n                            fetch_list=[avg_cost])\n    print('Test:%d, Cost:%0.5f' % (pass_id, test_cost[0][0]))\n```\n\n输出信息：\n```\nPass:0, Cost:35.61119\nTest:0, Cost:92.18690\nPass:1, Cost:121.56089\nTest:1, Cost:51.94175\nPass:2, Cost:44.66270\nTest:2, Cost:34.46148\nPass:3, Cost:33.25787\nTest:3, Cost:30.89449\nPass:4, Cost:29.12044\nTest:4, Cost:28.29573\nPass:5, Cost:26.75469\nTest:5, Cost:26.66773\nPass:6, Cost:24.98260\nTest:6, Cost:25.11611\nPass:7, Cost:23.55230\nTest:7, Cost:23.84240\nPass:8, Cost:22.41704\nTest:8, Cost:22.65791\nPass:9, Cost:21.51291\nTest:9, Cost:21.71775\n```\n\n到此为止，本章知识已经学完。本章我们学会了如何使用PaddlePaddle完成了深度学习入门的常见例子，相信读者经过学习本章之后，对深度学习和PaddlePaddle的使用有了非常深刻的了解，也恭喜读者正式加入到人工智能行列中，希望读者能够坚定信心，在自己喜欢的领域一直走下去。在下一章，我们将会介绍使用卷积神经网络进行训练MNIST图像数据集，相信下一章你更加喜欢深度学习的，准备学习下一章了吗。\n\n同步到百度AI Studio平台：http://aistudio.baidu.com/#/projectdetail/29342\n同步到科赛网K-Lab平台：https://www.kesci.com/home/project/5bf8c7a6954d6e001066d72c\n项目代码GitHub地址：https://github.com/yeyupiaoling/LearnPaddle2/tree/master/note3\n\n**注意：** 最新代码以GitHub上的为准\n\n# 参考资料\n1. http://www.paddlepaddle.org/documentation/docs/zh/1.0/beginners_guide/quick_start/fit_a_line/README.cn.html\n"
  },
  {
    "path": "note3/linear_regression.py",
    "content": "import paddle.fluid as fluid\nimport numpy as np\n\n# 定义一个简单的线性网络\nx = fluid.layers.data(name='x', shape=[1], dtype='float32')\nhidden = fluid.layers.fc(input=x, size=100, act='relu')\nnet = fluid.layers.fc(input=hidden, size=1, act=None)\n\n# 获取预测程序\ninfer_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义损失函数\ny = fluid.layers.data(name='y', shape=[1], dtype='float32')\ncost = fluid.layers.square_error_cost(input=net, label=y)\navg_cost = fluid.layers.mean(cost)\n\n# 复制一个主程序，方便之后使用\ntest_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.01)\nopts = optimizer.minimize(avg_cost)\n\n# 创建一个使用CPU的执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 定义训练和测试数据\nx_data = np.array([[1.0], [2.0], [3.0], [4.0], [5.0]]).astype('float32')\ny_data = np.array([[3.0], [5.0], [7.0], [9.0], [11.0]]).astype('float32')\ntest_data = np.array([[6.0]]).astype('float32')\n\n# 开始训练100个pass\nfor pass_id in range(100):\n    train_cost = exe.run(program=fluid.default_main_program(),\n                         feed={'x': x_data, 'y': y_data},\n                         fetch_list=[avg_cost])\n    print(\"Pass:%d, Cost:%0.5f\" % (pass_id, train_cost[0]))\n\n# 开始预测\nresult = exe.run(program=infer_program,\n                 feed={'x': test_data},\n                 fetch_list=[net])\nprint(\"当x为6.0时，y为：%0.5f:\" % result[0][0][0])\n"
  },
  {
    "path": "note3/uci_housing_linear.py",
    "content": "import paddle.fluid as fluid\nimport paddle\nimport paddle.dataset.uci_housing as uci_housing\n\n# 定义一个简单的线性网络\nx = fluid.layers.data(name='x', shape=[13], dtype='float32')\nhidden = fluid.layers.fc(input=x, size=100, act='relu')\nnet = fluid.layers.fc(input=hidden, size=1, act=None)\n\n# 定义损失函数\ny = fluid.layers.data(name='y', shape=[1], dtype='float32')\ncost = fluid.layers.square_error_cost(input=net, label=y)\navg_cost = fluid.layers.mean(cost)\n\n# 复制一个主程序，方便之后使用\ntest_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.01)\nopts = optimizer.minimize(avg_cost)\n\n# 创建一个使用CPU的执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 从paddle接口中获取房价数据集，使用房价数据进行训练和测试\ntrain_reader = paddle.batch(reader=uci_housing.train(), batch_size=128)\ntest_reader = paddle.batch(reader=uci_housing.test(), batch_size=128)\n\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n\n# 开始训练和测试\nfor pass_id in range(100):\n    # 开始训练并输出最后一个batch的损失值\n    train_cost = 0\n    for batch_id, data in enumerate(train_reader()):\n        train_cost = exe.run(program=fluid.default_main_program(),\n                             feed=feeder.feed(data),\n                             fetch_list=[avg_cost])\n    print(\"Pass:%d, Cost:%0.5f\" % (pass_id, train_cost[0][0]))\n\n    # 开始测试并输出最后一个batch的损失值\n    test_cost = 0\n    for batch_id, data in enumerate(test_reader()):\n        test_cost = exe.run(program=fluid.default_main_program(),\n                            feed=feeder.feed(data),\n                            fetch_list=[avg_cost])\n    print('Test:%d, Cost:%0.5f' % (pass_id, test_cost[0][0]))\n"
  },
  {
    "path": "note4/README.md",
    "content": "﻿@[TOC]\n\n# 前言\n上一章我们通过学习线性回归例子入门了深度学习，同时也熟悉了PaddlePaddle的使用方式，那么我们在本章学习更有趣的知识点卷积神经网络。深度学习之所以那么流行，很大程度上是得益于它在计算机视觉上得到非常好的效果，而在深度学习上几乎是使用卷积神经网络来提取图像的特征的。在PaddlePaddle上如何定义一个卷积神经网络，并使用它来完成一个图像识别的任务呢。在本章我们通过学习MNIST图像数据集的分类例子，来掌握卷积神经网络的使用。\n\n# 训练模型\n创建一个`mnist_classification.py`文件，首先导入所需得包，这次使用到了MNIST数据集接口，也使用了处理图像得工具包。\n```python\nimport numpy as np\nimport paddle as paddle\nimport paddle.dataset.mnist as mnist\nimport paddle.fluid as fluid\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n```\n\n在图像识别上，使用得算法也经过了多次的迭代更新，比如多层感知器，在卷积神经网络广泛使用之前，多层感知器在图像识别上是非常流行的，从这方面来看，多层感知器在当时也是有一定的优势的。那么如下使用PaddlePaddle来定义一个多层感知器呢，我们可以来学习一下。以下的代码判断就是定义一个简单的多层感知器，一共有三层，两个大小为100的隐层和一个大小为10的输出层，因为MNIST数据集是手写0到9的灰度图像，类别有10个，所以最后的输出大小是10。最后输出层的激活函数是Softmax，所以最后的输出层相当于一个分类器。加上一个输入层的话，多层感知器的结构是：`输入层-->>隐层-->>隐层-->>输出层`。\n```python\n# 定义多层感知器\ndef multilayer_perceptron(input):\n    # 第一个全连接层，激活函数为ReLU\n    hidden1 = fluid.layers.fc(input=input, size=100, act='relu')\n    # 第二个全连接层，激活函数为ReLU\n    hidden2 = fluid.layers.fc(input=hidden1, size=100, act='relu')\n    # 以softmax为激活函数的全连接输出层，大小为label大小\n    fc = fluid.layers.fc(input=hidden2, size=10, act='softmax')\n    return fc\n```\n\n卷积神经网络普遍用在图像特征提取上，一些图像分类、目标检测、文字识别几乎都回使用到卷积神经网络作为图像的特征提取方式。卷积神经网络通常由卷积层、池化层和全连接层，有时还有Batch Normalization层和Dropout层。下面我们就创建一个简单卷积神经网络，一共定义了5层，加上输入层的话，它的结构是：`输入层-->>卷积层-->>池化层-->>卷积层-->>池化层-->>输出层`。我们可以通过调用PaddlePaddle的接口`fluid.layers.conv2d()`来做一次卷积操作，我们可以通过`num_filters`参数设置卷积核的数量，通过`filter_size`设置卷积核的大小，还有通过`stride`来设置卷积操作时移动的步长。使用`fluid.layers.pool2d()`接口做一次池化操作，通过参数`pool_size`可以设置池化的大小，通过参数`pool_stride`设置池化滑动的步长，通过参数`pool_type`设置池化的类型，目前有最大池化和平均池化，下面使用的时最大池化，当值为`avg`时是平均池化。\n```python\n# 卷积神经网络\ndef convolutional_neural_network(input):\n    # 第一个卷积层，卷积核大小为3*3，一共有32个卷积核\n    conv1 = fluid.layers.conv2d(input=input,\n                                num_filters=32,\n                                filter_size=3,\n                                stride=1)\n\n    # 第一个池化层，池化大小为2*2，步长为1，最大池化\n   
 pool1 = fluid.layers.pool2d(input=conv1,\n                                pool_size=2,\n                                pool_stride=1,\n                                pool_type='max')\n\n    # 第二个卷积层，卷积核大小为3*3，一共有64个卷积核\n    conv2 = fluid.layers.conv2d(input=pool1,\n                                num_filters=64,\n                                filter_size=3,\n                                stride=1)\n\n    # 第二个池化层，池化大小为2*2，步长为1，最大池化\n    pool2 = fluid.layers.pool2d(input=conv2,\n                                pool_size=2,\n                                pool_stride=1,\n                                pool_type='max')\n\n    # 以softmax为激活函数的全连接输出层，大小为label大小\n    fc = fluid.layers.fc(input=pool2, size=10, act='softmax')\n    return fc\n```\n\n定义输入层，输入的是图像数据。图像是`28*28`的灰度图，所以输入的形状是`[1, 28, 28]`，如果图像是`32*32`的彩色图，那么输入的形状是`[3. 32, 32]`，因为灰度图只有一个通道，而彩色图有RGB三个通道。理论上它还有一个维度是Batch的，不过这个是PaddlePaddle帮我们默认设置的，我们可以不用理会。\n```python\n# 定义输入层\nimage = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32')\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n```\n\n上面定义了多层感机器和卷积神经网络，我们可以在这里调用定义好的网络来获取分类器，读者可以尝试这两种不同的网络进行训练，观察一下他们的准确率如何。\n```python\n# 获取分类器\n# model = multilayer_perceptron(image)\nmodel = convolutional_neural_network(image)\n```\n\n接着是定义损失函数，这次使用的是交叉熵损失函数，该函数在分类任务上比较常用。定义了一个损失函数之后，还有对它求平均值，因为定义的是一个Batch的损失值。同时我们还可以定义一个准确率函数，这个可以在我们训练的时候输出分类的准确率。\n```python\n# 获取损失函数和准确率函数\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n```\n\n然后我们从主程序中克隆一个程序作为预测程序，之后可以使用这个预测程序预测测试的准确率和预测自己的图像。\n```python\n# 获取测试程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n```\n\n接着是定义优化方法，这次我们使用的是Adam优化方法，同时指定学习率为0.001。\n```python\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=0.001)\nopts = optimizer.minimize(avg_cost)\n```\n\n定义读取MNIST数据集的reader，指定一个Batch的大小为128，也就是一次训练128张图像。\n```python\n# 获取MNIST数据\ntrain_reader 
= paddle.batch(mnist.train(), batch_size=128)\ntest_reader = paddle.batch(mnist.test(), batch_size=128)\n```\n\n接着也是定义一个执行器和初始化参数，Fluid版本使用的流程都差不多。\n```python\n# 定义一个使用CPU的执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n```\n\n输入的数据维度是图像数据和图像对应的标签，每个类别的图像都要对应一个标签，这个标签是从0递增的整型数值。\n```python\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n```\n\n最后就可以开始训练了，我们这次训练5个Pass，读者可以根据自己的情况自由设置。在上面我们已经定义了一个求准确率的函数，所以我们在训练的时候让它输出当前的准确率，计算准确率的原理很简单，就是把训练是预测的结果和真实的值比较，求出准确率。每一个Pass训练结束之后，再进行一次测试，使用测试集进行测试，并求出当前的Cost和准确率的平均值。\n```python\n# 开始训练和测试\nfor pass_id in range(5):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc = exe.run(program=fluid.default_main_program(),\n                                        feed=feeder.feed(data),\n                                        fetch_list=[avg_cost, acc])\n        # 每100个batch打印一次信息\n        if batch_id % 100 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %\n                  (pass_id, batch_id, train_cost[0], train_acc[0]))\n\n    # 进行测试\n    test_accs = []\n    test_costs = []\n    for batch_id, data in enumerate(test_reader()):\n        test_cost, test_acc = exe.run(program=test_program,\n                                      feed=feeder.feed(data),\n                                      fetch_list=[avg_cost, acc])\n        test_accs.append(test_acc[0])\n        test_costs.append(test_cost[0])\n    # 求测试结果的平均值\n    test_cost = (sum(test_costs) / len(test_costs))\n    test_acc = (sum(test_accs) / len(test_accs))\n    print('Test:%d, Cost:%0.5f, Accuracy:%0.5f' % (pass_id, test_cost, test_acc))\n```\n\n输出信息：\n```\nPass:0, Batch:0, Cost:3.50138, Accuracy:0.07812\nPass:0, Batch:100, Cost:0.14832, Accuracy:0.96875\nPass:0, Batch:200, Cost:0.13408, Accuracy:0.96875\nPass:0, Batch:300, Cost:0.11601, Accuracy:0.97656\nPass:0, Batch:400, Cost:0.27977, 
Accuracy:0.92969\nTest:0, Cost:0.08879, Accuracy:0.97379\nPass:1, Batch:0, Cost:0.11175, Accuracy:0.96875\nPass:1, Batch:100, Cost:0.07854, Accuracy:0.97656\nPass:1, Batch:200, Cost:0.04025, Accuracy:0.99219\nPass:1, Batch:300, Cost:0.09936, Accuracy:0.98438\nPass:1, Batch:400, Cost:0.19245, Accuracy:0.95312\nTest:1, Cost:0.10123, Accuracy:0.97241\nPass:2, Batch:0, Cost:0.13749, Accuracy:0.96094\nPass:2, Batch:100, Cost:0.06074, Accuracy:0.98438\nPass:2, Batch:200, Cost:0.01982, Accuracy:0.99219\nPass:2, Batch:300, Cost:0.06725, Accuracy:0.97656\nPass:2, Batch:400, Cost:0.10043, Accuracy:0.96875\nTest:2, Cost:0.13354, Accuracy:0.96776\nPass:3, Batch:0, Cost:0.08895, Accuracy:0.98438\nPass:3, Batch:100, Cost:0.06339, Accuracy:0.96875\nPass:3, Batch:200, Cost:0.05107, Accuracy:0.98438\nPass:3, Batch:300, Cost:0.08062, Accuracy:0.97656\nPass:3, Batch:400, Cost:0.07631, Accuracy:0.96875\nTest:3, Cost:0.11465, Accuracy:0.97449\nPass:4, Batch:0, Cost:0.01259, Accuracy:1.00000\nPass:4, Batch:100, Cost:0.01203, Accuracy:1.00000\nPass:4, Batch:200, Cost:0.08451, Accuracy:0.97656\nPass:4, Batch:300, Cost:0.16532, Accuracy:0.98438\nPass:4, Batch:400, Cost:0.09657, Accuracy:0.98438\nTest:4, Cost:0.14624, Accuracy:0.97211\n```\n\n# 预测图像\n训练完成之后，我们可以使用从主程序中克隆的`test_program`来预测我们自己的图像。再预测之前，要对图像进行预处理，处理方式要跟训练的时候一样。首先进行灰度化，然后压缩图像大小为`28*28`，接着将图像转换成一维向量，最后再对一维向量进行归一化处理。\n```python\n# 对图片进行预处理\ndef load_image(file):\n    im = Image.open(file).convert('L')\n    im = im.resize((28, 28), Image.ANTIALIAS)\n    im = np.array(im).reshape(1, 1, 28, 28).astype(np.float32)\n    im = im / 255.0 * 2.0 - 1.0\n    return im\n```\n\n我们从网上下载一张图像，并将它命名为`infer_3.png`。\n```python\n!wget https://github.com/yeyupiaoling/LearnPaddle2/blob/master/note4/infer_3.png?raw=true -O 'infer_3.png'\n```\n\n我们可以使用Matplotlib工具显示这张图像。\n```python\nimg = 
Image.open('infer_3.png')\nplt.imshow(img)\nplt.show()\n```\n\n输出的图片：\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20181207115540368.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n最后把图像转换成一维向量并进行预测，数据从`feed`中的`image`传入，`label`设置一个假的label值传进去。`fetch_list`的值是网络模型的最后一层分类器，所以输出的结果是10个标签的概率值，这些概率值的总和为1。\n```python\n# 加载数据并开始预测\nimg = load_image('./infer_3.png')\nresults = exe.run(program=test_program,\n                  feed={'image': img, \"label\": np.array([[1]]).astype(\"int64\")},\n                  fetch_list=[model])\n```\n\n拿到每个标签的概率值之后，我们要获取概率最大的标签，并打印出来。\n```python\n# 获取概率最大的label\nlab = np.argsort(results)\nprint(\"该图片的预测结果的label为: %d\" % lab[0][0][-1])\n```\n\n输出信息：\n```\n该图片的预测结果的label为: 3\n```\n\n到处为止，本章就结束了。经过学完这一章节，是不是觉得PaddlePaddle非常好用呢，借助PaddlePaddle我们很容易就定义了一个卷积神经网络，并完成了图像分类的训练和预测。卷积神经网络在图像识别上发挥着巨大的作用，而在自然语言处理上，循环神经网络同样起着巨大的作用，我们下一章就学习一下循环神经网络。\n\n同步到百度AI Studio平台：http://aistudio.baidu.com/aistudio/#/projectdetail/29346\n同步到科赛网K-Lab平台：https://www.kesci.com/home/project/5bf8c998954d6e001066d780\n项目代码GitHub地址：https://github.com/yeyupiaoling/LearnPaddle2/tree/master/note4\n\n**注意：** 最新代码以GitHub上的为准\n\n# 参考资料\n1. https://blog.csdn.net/m_buddy/article/details/80224409\n2. http://www.paddlepaddle.org/documentation/docs/zh/1.0/beginners_guide/quick_start/recognize_digits/README.cn.html\n\n"
  },
  {
    "path": "note4/mnist_classification.py",
    "content": "import numpy as np\nimport paddle as paddle\nimport paddle.dataset.mnist as mnist\nimport paddle.fluid as fluid\nfrom PIL import Image\n\n\n# 定义多层感知器\ndef multilayer_perceptron(input):\n    # 第一个全连接层，激活函数为ReLU\n    hidden1 = fluid.layers.fc(input=input, size=100, act='relu')\n    # 第二个全连接层，激活函数为ReLU\n    hidden2 = fluid.layers.fc(input=hidden1, size=100, act='relu')\n    # 以softmax为激活函数的全连接输出层，大小为label大小\n    fc = fluid.layers.fc(input=hidden2, size=10, act='softmax')\n    return fc\n\n\n# 卷积神经网络\ndef convolutional_neural_network(input):\n    # 第一个卷积层，卷积核大小为3*3，一共有32个卷积核\n    conv1 = fluid.layers.conv2d(input=input,\n                                num_filters=32,\n                                filter_size=3,\n                                stride=1)\n\n    # 第一个池化层，池化大小为2*2，步长为1，最大池化\n    pool1 = fluid.layers.pool2d(input=conv1,\n                                pool_size=2,\n                                pool_stride=1,\n                                pool_type='max')\n\n    # 第二个卷积层，卷积核大小为3*3，一共有64个卷积核\n    conv2 = fluid.layers.conv2d(input=pool1,\n                                num_filters=64,\n                                filter_size=3,\n                                stride=1)\n\n    # 第二个池化层，池化大小为2*2，步长为1，最大池化\n    pool2 = fluid.layers.pool2d(input=conv2,\n                                pool_size=2,\n                                pool_stride=1,\n                                pool_type='max')\n\n    # 以softmax为激活函数的全连接输出层，大小为label大小\n    fc = fluid.layers.fc(input=pool2, size=10, act='softmax')\n    return fc\n\n\n# 定义输入层\nimage = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32')\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n\n# 获取分类器\n# model = multilayer_perceptron(image)\nmodel = convolutional_neural_network(image)\n\n# 获取预测程序\ninfer_program = fluid.default_main_program().clone(for_test=True)\n\n# 获取损失函数和准确率函数\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = 
fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 获取测试程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=0.001)\nopts = optimizer.minimize(avg_cost)\n\n# 获取MNIST数据\ntrain_reader = paddle.batch(mnist.train(), batch_size=128)\ntest_reader = paddle.batch(mnist.test(), batch_size=128)\n\n# 定义一个使用CPU的执行器\nplace = fluid.CPUPlace()\n# place = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n\n# 开始训练和测试\nfor pass_id in range(1):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc = exe.run(program=fluid.default_main_program(),\n                                        feed=feeder.feed(data),\n                                        fetch_list=[avg_cost, acc])\n        # 每100个batch打印一次信息\n        if batch_id % 100 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %\n                  (pass_id, batch_id, train_cost[0], train_acc[0]))\n\n    # 进行测试\n    test_accs = []\n    test_costs = []\n    for batch_id, data in enumerate(test_reader()):\n        test_cost, test_acc = exe.run(program=test_program,\n                                      feed=feeder.feed(data),\n                                      fetch_list=[avg_cost, acc])\n        test_accs.append(test_acc[0])\n        test_costs.append(test_cost[0])\n    # 求测试结果的平均值\n    test_cost = (sum(test_costs) / len(test_costs))\n    test_acc = (sum(test_accs) / len(test_accs))\n    print('Test:%d, Cost:%0.5f, Accuracy:%0.5f' % (pass_id, test_cost, test_acc))\n\n\n# 对图片进行预处理\ndef load_image(file):\n    im = Image.open(file).convert('L')\n    im = im.resize((28, 28), Image.ANTIALIAS)\n    im = np.array(im).reshape(1, 1, 28, 28).astype(np.float32)\n    im = im / 255.0 * 2.0 - 1.0\n    return im\n\n\n# 加载数据并开始预测\nimg 
= load_image('image/infer_3.png')\nresults = exe.run(program=infer_program,\n                  feed={'image': img},\n                  fetch_list=[model])\n# 获取概率最大的标签\nlab = np.argsort(results)[0][0][-1]\nprint('infer_3.png infer result: %d' % lab)\n"
  },
  {
    "path": "note5/README.md",
    "content": "﻿@[TOC]\n\n# 前言\n除了卷积神经网络，深度学习中还有循环神经网络也是很常用的，循环神经网络更常用于自然语言处理任务上。我们在这一章中，我们就来学习如何使用PaddlePaddle来实现一个循环神经网络，并使用该网络完成情感分析的模型训练。\n\n# 训练模型\n创建一个`text_classification.py`的Python文件。首先导入Python库，fluid和numpy库我们在前几章都有使用过，这里就不重复了。这里主要结束是imdb库，这个是一个数据集的库，这个是数据集是一个英文的电影评论数据集，每一条数据都会有两个分类，分别是正面和负面。\n```python\nimport paddle\nimport paddle.dataset.imdb as imdb\nimport paddle.fluid as fluid\nimport numpy as np\n```\n\n循环神经网络发展到现在，已经有不少性能很好的升级版的循环神经网络，比如长短期记忆网络等。一下的代码片段是一个比较简单的循环神经网络，首先是经过一个`fluid.layers.embedding()`，这个是接口是接受数据的ID输入，因为输入数据时一个句子，但是在训练的时候我们是把每个单词转换成对应的ID，再输入到网络中，所以这里使用到了`embedding`接口。然后是一个全连接层，接着是一个循环神经网络块，在循环神经网络块之后再经过一个`sequence_last_step`接口，这个接口通常是使用在序列函数的最后一步。最后的输出层的激活函数是Softmax，大小为2，因为数据的结果有2个，为正负面。\n```python\ndef rnn_net(ipt, input_dim):\n    # 以数据的IDs作为输入\n    emb = fluid.layers.embedding(input=ipt, size=[input_dim, 128], is_sparse=True)\n    sentence = fluid.layers.fc(input=emb, size=128, act='tanh')\n\n    rnn = fluid.layers.DynamicRNN()\n    with rnn.block():\n        word = rnn.step_input(sentence)\n        prev = rnn.memory(shape=[128])\n        hidden = fluid.layers.fc(input=[word, prev], size=128, act='relu')\n        rnn.update_memory(prev, hidden)\n        rnn.output(hidden)\n\n    last = fluid.layers.sequence_last_step(rnn())\n    out = fluid.layers.fc(input=last, size=2, act='softmax')\n    return out\n```\n\n下面的代码片段是一个简单的长短期记忆网络，这个网络是有循环神经网络演化过来的。当较长的序列数据，循环神经网络的训练过程中容易出现梯度消失或爆炸现象，而长短期记忆网络就可以解决这个问题。在网络的开始同样是经过一个`embedding`接口，接着是一个全连接层，紧接的是一个`dynamic_lstm`长短期记忆操作接口，有这个接口，我们很容易就搭建一个长短期记忆网络。然后是经过两个序列池操作，该序列池的类型是最大化。最后也是一个大小为2的输出层。\n```python\n# 定义长短期记忆网络\ndef lstm_net(ipt, input_dim):\n    # 以数据的IDs作为输入\n    emb = fluid.layers.embedding(input=ipt, size=[input_dim, 128], is_sparse=True)\n\n    # 第一个全连接层\n    fc1 = fluid.layers.fc(input=emb, size=128)\n    # 进行一个长短期记忆操作\n    lstm1, _ = fluid.layers.dynamic_lstm(input=fc1, size=128)\n\n    # 第一个最大序列池操作\n    fc2 = fluid.layers.sequence_pool(input=fc1, pool_type='max')\n    # 第二个最大序列池操作\n 
   lstm2 = fluid.layers.sequence_pool(input=lstm1, pool_type='max')\n\n    # 以softmax作为全连接的输出层，大小为2,也就是正负面\n    out = fluid.layers.fc(input=[fc2, lstm2], size=2, act='softmax')\n    return out\n```\n\n这里可以先定义一个输入层，这样要注意的是我们使用的数据属于序列数据，所以我们可以设置`lod_level`为1，当该参数不为0时，表示输入的数据为序列数据，默认`lod_level`的值是0.\n```python\n# 定义输入数据， lod_level不为0指定输入数据为序列数据\nwords = fluid.layers.data(name='words', shape=[1], dtype='int64', lod_level=1)\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n```\n\n然后是读取数据字典，因为我们的数据是以数据标签的放方式表示数据一个句子。所以每个句子都是以一串整数来表示的，每个数字都是对应一个单词。所以这个数据集就会有一个数据集字典，这个字典是训练数据中出现单词对应的数字标签。\n```python\n# 获取数据字典\nprint(\"加载数据字典中...\")\nword_dict = imdb.word_dict()\n# 获取数据字典长度\ndict_dim = len(word_dict)\n```\n\n输出信息：\n```\n加载数据字典中...\n```\n\n这里可以获取我们上面定义的网络作为我们之后训练的网络模型，这两个网络读者都可以试试，可以对比它们的差别。\n```python\n# 获取长短期记忆网络\nmodel = lstm_net(words, dict_dim)\n# 获取循环神经网络\n# model = rnn_net(words, dict_dim)\n```\n\n接着定义损失函数，这里同样是一个分类任务，所以使用的损失函数也是交叉熵损失函数。这里也可以使用`fluid.layers.accuracy()`接口定义一个输出分类准确率的函数，可以方便在训练的时候，输出测试时的分类准确率，观察模型收敛的情况。\n```python\n# 获取损失函数和准确率\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n```\n\n这里克隆一个测试测试程序，用于之后的测试和预测数据使用的。\n```python\n# 获取预测程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n```\n\n然后是定义优化方法，这里使用的时Adagrad优化方法，Adagrad优化方法多用于处理稀疏数据，设置学习率为0.002。\n```python\n# 定义优化方法\noptimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.002)\nopt = optimizer.minimize(avg_cost)\n```\n\n接着创建一个执行器，这次是的数据集比之前使用的数据集要大不少，所以训练起来先对比较慢，如果读取有GPU环境，可以尝试使用GPU来训练，使用方式是使用`fluid.CUDAPlace(0)`来创建执行器。\n```python\n# 创建一个执行器\nplace = fluid.CPUPlace()\n# place = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n```\n\n然后把训练数据和测试数据读取到内存中，因为数据集比较大，为了加快数据的数据，使用`paddle.reader.shuffle()`接口来将数据先按照设置的大小读取到缓存中。读入缓存的大小可以根据硬件环境内存大小来设置。\n```python\n# 获取训练和预测数据\nprint(\"加载训练数据中...\")\ntrain_reader = 
paddle.batch(paddle.reader.shuffle(imdb.train(word_dict), 25000), batch_size=128)\nprint(\"加载测试数据中...\")\ntest_reader = paddle.batch(imdb.test(word_dict), batch_size=128)\n```\n\n输出信息：\n```\n加载训练数据中...\n\n加载测试数据中...\n```\n\n定义数据数据的维度，数据的顺序是一条句子数据对应一个标签。\n```python\n# 定义输入数据的维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[words, label])\n```\n\n现在就可以开始训练了，这里设置训练的循环是1次，读者可以根据情况设置更多的训练轮数，来让模型完全收敛。我们在训练中，每40个Batch打印一层训练信息和进行一次测试，测试是使用测试集进行预测并输出损失值和准确率，测试完成之后，对之前预测的结果进行求平均值。\n```python\n# 开始训练\nfor pass_id in range(1):\n    # 进行训练\n    train_cost = 0\n    for batch_id, data in enumerate(train_reader()):\n        train_cost = exe.run(program=fluid.default_main_program(),\n                             feed=feeder.feed(data),\n                             fetch_list=[avg_cost])\n\n        if batch_id % 40 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f' % (pass_id, batch_id, train_cost[0]))\n            # 进行测试\n            test_costs = []\n            test_accs = []\n            for batch_id, data in enumerate(test_reader()):\n                test_cost, test_acc = exe.run(program=test_program,\n                                              feed=feeder.feed(data),\n                                              fetch_list=[avg_cost, acc])\n                test_costs.append(test_cost[0])\n                test_accs.append(test_acc[0])\n            # 计算平均预测损失在和准确率\n            test_cost = (sum(test_costs) / len(test_costs))\n            test_acc = (sum(test_accs) / len(test_accs))\n            print('Test:%d, Cost:%0.5f, ACC:%0.5f' % (pass_id, test_cost, test_acc))\n```\n\n输出信息：\n```\nPass:0, Batch:0, Cost:0.69274\nTest:0, Cost:0.69329, ACC:0.50175\nPass:0, Batch:40, Cost:0.61183\nTest:0, Cost:0.61142, ACC:0.82659\nPass:0, Batch:80, Cost:0.55504\nTest:0, Cost:0.54904, ACC:0.83959\nPass:0, Batch:120, Cost:0.51100\nTest:0, Cost:0.50026, ACC:0.84318\nPass:0, Batch:160, Cost:0.46800\nTest:0, Cost:0.46199, ACC:0.84533\n```\n\n\n# 
预测数据\n\n我们先定义三个句子，第一句是中性的，第二句偏向正面，第三句偏向负面。然后把这些句子读取到一个列表中。\n```python\n# 定义预测数据\nreviews_str = ['read the book forget the movie', 'this is a great movie', 'this is very bad']\n# 把每个句子拆成一个个单词\nreviews = [c.split() for c in reviews_str]\n```\n\n然后把句子转换成编码，根据数据集的字典，把句子中的单词转换成对应标签。\n```python\n# 获取结束符号的标签\nUNK = word_dict['<unk>']\n# 获取每句话对应的标签\nlod = []\nfor c in reviews:\n    # 需要把单词进行字符串编码转换\n    lod.append([word_dict.get(words.encode('utf-8'), UNK) for words in c])\n```\n\n获取输入数据的维度和大小。\n```python\n# 获取每句话的单词数量\nbase_shape = [[len(c) for c in lod]]\n```\n\n将要预测的数据转换成张量，准备开始预测。\n```python\n# 生成预测数据\ntensor_words = fluid.create_lod_tensor(lod, base_shape, place)\n```\n\n开始预测，使用的`program`是克隆的测试程序。预测数据是通过`feed`键值对的方式传入到预测程序中，为了符合输入数据的格式，label中使用了一个假的label输入到程序中。`fetch_list`的值是网络的分类器。\n```python\n# 预测获取预测结果,因为输入的是3个数据，所以要模拟3个label的输入\nresults = exe.run(program=test_program,\n                  feed={'words': tensor_words, 'label': np.array([[0], [0], [0]]).astype('int64')},\n                  fetch_list=[model])\n```\n\n最后可以把预测结果输出，因为我们使用了3条数据进行预测，所以输出也会有3个结果。每个结果是类别的概率。\n```python\n# 打印每句话的正负面概率\nfor i, r in enumerate(results[0]):\n    print(\"\\'%s\\'的预测结果为：正面概率为：%0.5f，负面概率为：%0.5f\" % (reviews_str[i], r[0], r[1]))\n```\n\n输出信息：\n```\n'read the book forget the movie'的预测结果为：正面概率为：0.53604，负面概率为：0.46396\n'this is a great movie'的预测结果为：正面概率为：0.67564，负面概率为：0.32436\n'this is very bad'的预测结果为：正面概率为：0.35406，负面概率为：0.64594\n```\n\n到处为止，本章就结束了。希望读者经过学习完这一章，可以对PaddlePaddle的使用有更深一步的认识。在下一章中，我们来使用PaddlePaddle实现一个生成对抗网络，生成对抗网络这一两年中可以说时非常火的，同样也非长有趣。那么我们下一章见吧。\n\n\n同步到百度AI Studio平台：http://aistudio.baidu.com/aistudio/#/projectdetail/29347\n同步到科赛网K-Lab平台：https://www.kesci.com/home/project/5bf8cb78954d6e001066d7d8\n项目代码GitHub地址：https://github.com/yeyupiaoling/LearnPaddle2/tree/master/note5\n\n**注意：** 最新代码以GitHub上的为准\n\n# 参考资料\n1. https://blog.csdn.net/u010089444/article/details/76725843\n2. http://ai.stanford.edu/~amaas/data/sentiment/\n3. 
https://github.com/PaddlePaddle/book/tree/develop/06.understand_sentiment\n"
  },
  {
    "path": "note5/text_classification.py",
    "content": "import paddle\nimport paddle.dataset.imdb as imdb\nimport paddle.fluid as fluid\nimport numpy as np\n\n\ndef rnn_net(ipt, input_dim):\n    emb = fluid.layers.embedding(input=ipt, size=[input_dim, 128], is_sparse=True)\n    sentence = fluid.layers.fc(input=emb, size=128, act='tanh')\n\n    rnn = fluid.layers.DynamicRNN()\n    with rnn.block():\n        word = rnn.step_input(sentence)\n        prev = rnn.memory(shape=[128])\n        hidden = fluid.layers.fc(input=[word, prev], size=128, act='relu')\n        rnn.update_memory(prev, hidden)\n        rnn.output(hidden)\n\n    last = fluid.layers.sequence_last_step(rnn())\n    out = fluid.layers.fc(input=last, size=2, act='softmax')\n    return out\n\n\n# 定义长短期记忆网络\ndef lstm_net(ipt, input_dim):\n    # 以数据的IDs作为输入\n    emb = fluid.layers.embedding(input=ipt, size=[input_dim, 128], is_sparse=True)\n\n    # 第一个全连接层\n    fc1 = fluid.layers.fc(input=emb, size=128)\n    # 进行一个长短期记忆操作\n    lstm1, _ = fluid.layers.dynamic_lstm(input=fc1, size=128)\n\n    # 第一个最大序列池操作\n    fc2 = fluid.layers.sequence_pool(input=fc1, pool_type='max')\n    # 第二个最大序列池操作\n    lstm2 = fluid.layers.sequence_pool(input=lstm1, pool_type='max')\n\n    # 以softmax作为全连接的输出层，大小为2,也就是正负面\n    out = fluid.layers.fc(input=[fc2, lstm2], size=2, act='softmax')\n    return out\n\n\n# 定义输入数据， lod_level不为0指定输入数据为序列数据\nwords = fluid.layers.data(name='words', shape=[1], dtype='int64', lod_level=1)\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n\n# 获取数据字典\nprint(\"加载数据字典中...\")\nword_dict = imdb.word_dict()\n# 获取数据字典长度\ndict_dim = len(word_dict)\n# 获取长短期记忆网络\nmodel = lstm_net(words, dict_dim)\n# model = rnn_net(words, dict_dim)\n\n# 获取预测程序\ninfer_program = fluid.default_main_program().clone(for_test=True)\n\n# 获取损失函数和准确率\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 获取预测程序\ntest_program = 
fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.002)\nopt = optimizer.minimize(avg_cost)\n\n# 创建一个执行器，CPU训练速度比较慢\n# place = fluid.CPUPlace()\nplace = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 获取训练和预测数据\nprint(\"加载训练数据中...\")\ntrain_reader = paddle.batch(\n    paddle.reader.shuffle(imdb.train(word_dict), 25000), batch_size=128)\nprint(\"加载测试数据中...\")\ntest_reader = paddle.batch(imdb.test(word_dict), batch_size=128)\n\n# 定义输入数据的维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[words, label])\n\n# 开始训练\nfor pass_id in range(1):\n    # 进行训练\n    train_cost = 0\n    for batch_id, data in enumerate(train_reader()):\n        train_cost = exe.run(program=fluid.default_main_program(),\n                             feed=feeder.feed(data),\n                             fetch_list=[avg_cost])\n\n        if batch_id % 40 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f' % (pass_id, batch_id, train_cost[0]))\n            # 进行测试\n            test_costs = []\n            test_accs = []\n            for batch_id, data in enumerate(test_reader()):\n                test_cost, test_acc = exe.run(program=test_program,\n                                              feed=feeder.feed(data),\n                                              fetch_list=[avg_cost, acc])\n                test_costs.append(test_cost[0])\n                test_accs.append(test_acc[0])\n            # 计算平均预测损失在和准确率\n            test_cost = (sum(test_costs) / len(test_costs))\n            test_acc = (sum(test_accs) / len(test_accs))\n            print('Test:%d, Cost:%0.5f, ACC:%0.5f' % (pass_id, test_cost, test_acc))\n\n\n# 定义预测数据\nreviews_str = ['read the book forget the movie', 'this is a great movie', 'this is very bad']\n# 把每个句子拆成一个个单词\nreviews = [c.split() for c in reviews_str]\n\n# 获取结束符号的标签\nUNK = word_dict['<unk>']\n# 获取每句话对应的标签\nlod = []\nfor c in reviews:\n    
# 需要把单词进行字符串编码转换\n    lod.append([np.int64(word_dict.get(words.encode('utf-8'), UNK)) for words in c])\n\n# 获取每句话的单词数量\nbase_shape = [[len(c) for c in lod]]\n\n# 生成预测数据\ntensor_words = fluid.create_lod_tensor(lod, base_shape, place)\n\n# 预测获取预测结果,因为输入的是3个数据，所以要模拟3个label的输入\nresults = exe.run(program=infer_program,\n                  feed={'words': tensor_words},\n                  fetch_list=[model])\n# 打印每句话的正负面概率\nfor i, r in enumerate(results[0]):\n    print(\"\\'%s\\'的预测结果为：正面概率为：%0.5f，负面概率为：%0.5f\" % (reviews_str[i], r[0], r[1]))\n"
  },
  {
    "path": "note6/GAN.py",
    "content": "import numpy as np\nimport paddle\nimport paddle.fluid as fluid\nimport matplotlib.pyplot as plt\n\n\n# 定义生成器\ndef Generator(y, name=\"G\"):\n    def deconv(x, num_filters, filter_size=5, stride=2, dilation=1, padding=2, output_size=None, act=None):\n        return fluid.layers.conv2d_transpose(input=x,\n                                             num_filters=num_filters,\n                                             output_size=output_size,\n                                             filter_size=filter_size,\n                                             stride=stride,\n                                             dilation=dilation,\n                                             padding=padding,\n                                             act=act)\n\n    with fluid.unique_name.guard(name + \"/\"):\n        # 第一组全连接和BN层\n        y = fluid.layers.fc(y, size=2048)\n        y = fluid.layers.batch_norm(y)\n        # 第二组全连接和BN层\n        y = fluid.layers.fc(y, size=128 * 7 * 7)\n        y = fluid.layers.batch_norm(y)\n        # 进行形状变换\n        y = fluid.layers.reshape(y, shape=(-1, 128, 7, 7))\n        # 第一组转置卷积运算\n        y = deconv(x=y, num_filters=128, act='relu', output_size=[14, 14])\n        # 第二组转置卷积运算\n        y = deconv(x=y, num_filters=1, act='tanh', output_size=[28, 28])\n    return y\n\n\n# 判别器 Discriminator\ndef Discriminator(images, name=\"D\"):\n    # 定义一个卷积池化组\n    def conv_pool(input, num_filters, act=None):\n        return fluid.nets.simple_img_conv_pool(input=input,\n                                               filter_size=5,\n                                               num_filters=num_filters,\n                                               pool_size=2,\n                                               pool_stride=2,\n                                               act=act)\n\n    with fluid.unique_name.guard(name + \"/\"):\n        y = fluid.layers.reshape(x=images, shape=[-1, 1, 28, 28])\n        # 第一个卷积池化组\n        y = 
conv_pool(input=y, num_filters=64, act='leaky_relu')\n        # 第一个卷积池化加回归层\n        y = conv_pool(input=y, num_filters=128)\n        y = fluid.layers.batch_norm(input=y, act='leaky_relu')\n        # 第二个卷积池化加回归层\n        y = fluid.layers.fc(input=y, size=1024)\n        y = fluid.layers.batch_norm(input=y, act='leaky_relu')\n        # 最后一个分类器输出\n        y = fluid.layers.fc(input=y, size=1, act='sigmoid')\n    return y\n\n\n# 创建判别器D识别生成器G生成的假图片程序\ntrain_d_fake = fluid.Program()\n# 创建判别器D识别真实图片程序\ntrain_d_real = fluid.Program()\n# 创建生成器G生成符合判别器D的程序\ntrain_g = fluid.Program()\n\n# 创建共同的一个初始化的程序\nstartup = fluid.Program()\n\n# 噪声维度\nz_dim = 100\n\n\n# 从Program获取prefix开头的参数名字\ndef get_params(program, prefix):\n    all_params = program.global_block().all_parameters()\n    return [t.name for t in all_params if t.name.startswith(prefix)]\n\n\n# 训练判别器D识别真实图片\nwith fluid.program_guard(train_d_real, startup):\n    # 创建读取真实数据集图片的data，并且label为1\n    real_image = fluid.layers.data('image', shape=[1, 28, 28])\n    ones = fluid.layers.fill_constant_batch_size_like(real_image, shape=[-1, 1], dtype='float32', value=1)\n\n    # 判别器D判断真实图片的概率\n    p_real = Discriminator(real_image)\n    # 获取损失函数\n    real_cost = fluid.layers.sigmoid_cross_entropy_with_logits(p_real, ones)\n    real_avg_cost = fluid.layers.mean(real_cost)\n\n    # 获取判别器D的参数\n    d_params = get_params(train_d_real, \"D\")\n\n    # 创建优化方法\n    optimizer = fluid.optimizer.Adam(learning_rate=2e-4)\n    optimizer.minimize(real_avg_cost, parameter_list=d_params)\n\n# 训练判别器D识别生成器G生成的图片为假图片\nwith fluid.program_guard(train_d_fake, startup):\n    # 利用创建假的图片data，并且label为0\n    z = fluid.layers.data(name='z', shape=[z_dim])\n    zeros = fluid.layers.fill_constant_batch_size_like(z, shape=[-1, 1], dtype='float32', value=0)\n\n    # 判别器D判断假图片的概率\n    p_fake = Discriminator(Generator(z))\n\n    # 获取损失函数\n    fake_cost = fluid.layers.sigmoid_cross_entropy_with_logits(p_fake, zeros)\n    fake_avg_cost = fluid.layers.mean(fake_cost)\n\n  
  # 获取判别器D的参数\n    d_params = get_params(train_d_fake, \"D\")\n\n    # 创建优化方法\n    optimizer = fluid.optimizer.Adam(learning_rate=2e-4)\n    optimizer.minimize(fake_avg_cost, parameter_list=d_params)\n\n# 训练生成器G生成符合判别器D标准的假图片\nwith fluid.program_guard(train_g, startup):\n    # 噪声生成图片为真实图片的概率，Label为1\n    z = fluid.layers.data(name='z', shape=[z_dim])\n    ones = fluid.layers.fill_constant_batch_size_like(z, shape=[-1, 1], dtype='float32', value=1)\n\n    # 生成图片\n    fake = Generator(z)\n    # 克隆预测程序\n    infer_program = train_g.clone(for_test=True)\n\n    # 生成符合判别器的假图片\n    p = Discriminator(fake)\n\n    # 获取损失函数\n    g_cost = fluid.layers.sigmoid_cross_entropy_with_logits(p, ones)\n    g_avg_cost = fluid.layers.mean(g_cost)\n\n    # 获取G的参数\n    g_params = get_params(train_g, \"G\")\n\n    # 只训练G\n    optimizer = fluid.optimizer.Adam(learning_rate=2e-4)\n    optimizer.minimize(g_avg_cost, parameter_list=g_params)\n\n\n# 噪声生成\ndef z_reader():\n    while True:\n        yield np.random.uniform(-1.0, 1.0, (z_dim)).astype('float32')\n\n\n# 读取MNIST数据集，不使用label\ndef mnist_reader(reader):\n    def r():\n        for img, label in reader():\n            yield img.reshape(1, 28, 28)\n\n    return r\n\n\n# 保存图片\ndef show_image_grid(images):\n    for i, image in enumerate(images[:64]):\n        image = image[0]\n        plt.imsave(\"image/test_%d.png\" % i, image, cmap='Greys_r')\n\n\n# 生成真实图片reader\nmnist_generator = paddle.batch(\n    paddle.reader.shuffle(mnist_reader(paddle.dataset.mnist.train()), 30000), batch_size=128)\n# 生成假图片的reader\nz_generator = paddle.batch(z_reader, batch_size=128)()\n\n# 创建执行器，最好使用GPU，CPU速度太慢了\n# place = fluid.CPUPlace()\nplace = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\n# 初始化参数\nexe.run(startup)\n\n# 测试噪声\ntest_z = np.array(next(z_generator))\n\n# 开始训练\nfor pass_id in range(20):\n    for i, real_image in enumerate(mnist_generator()):\n        # 训练判别器D识别真实图片\n        r_fake = exe.run(program=train_d_fake,\n                         
fetch_list=[fake_avg_cost],\n                         feed={'z': test_z})\n\n        # 训练判别器D识别生成器G生成的假图片\n        r_real = exe.run(program=train_d_real,\n                         fetch_list=[real_avg_cost],\n                         feed={'image': np.array(real_image)})\n\n        # 训练生成器G生成符合判别器D标准的假图片\n        r_g = exe.run(program=train_g,\n                      fetch_list=[g_avg_cost],\n                      feed={'z': test_z})\n\n        if i % 100 == 0:\n            print(\"Pass：%d, Batch：%d, 训练判别器D识别真实图片Cost：%0.5f, \"\n                  \"训练判别器D识别生成器G生成的假图片Cost：%0.5f, \"\n                  \"训练生成器G生成符合判别器D标准的假图片Cost：%0.5f\" % (pass_id, i, r_fake[0], r_real[0], r_g[0]))\n\n    # 测试生成的图片\n    r_i = exe.run(program=infer_program,\n                  fetch_list=[fake],\n                  feed={'z': test_z})\n\n    # 显示生成的图片\n    show_image_grid(r_i[0])\n"
  },
  {
    "path": "note6/README.md",
    "content": "﻿@[TOC]\n\n# 前言\n我们上一章使用MNIST数据集进行训练，获得一个可以分类手写字体的模型。如果我们数据集的数量不够，不足于让模型收敛，最直接的是增加数据集。但是我们收集数据并进行标注是非常消耗时间了，而最近非常火的生成对抗网络就非常方便我们数据的收集。对抗生成网络可以根据之前的图片训练生成更多的图像，已达到以假乱真的目的。\n\n# 训练并预测\n创建一个`GAN.py`文件。首先导入所需要的Python包，其中matplotlib包是之后用于展示出生成的图片。\n```python\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\nimport matplotlib.pyplot as plt\n```\n\n## 定义网络\n生成对抗网络由生成器和判别器组合，下面的代码片段就是一个生成器，生成器的作用是尽可能生成满足判别器条件的图像。随着以上训练的进行，判别器不断增强自身的判别能力，而生成器也不断生成越来越逼真的图片，以欺骗判别器。生成器主要由两组全连接和BN层、两组转置卷积运算组成，其中最后一层的卷积层的卷积核数量是1，因为输出的图像是一个灰度图的手写字体图片。\n```python\n# 定义生成器\ndef Generator(y, name=\"G\"):\n    def deconv(x, num_filters, filter_size=5, stride=2, dilation=1, padding=2, output_size=None, act=None):\n        return fluid.layers.conv2d_transpose(input=x,\n                                             num_filters=num_filters,\n                                             output_size=output_size,\n                                             filter_size=filter_size,\n                                             stride=stride,\n                                             dilation=dilation,\n                                             padding=padding,\n                                             act=act)\n    with fluid.unique_name.guard(name + \"/\"):\n        # 第一组全连接和BN层\n        y = fluid.layers.fc(y, size=2048)\n        y = fluid.layers.batch_norm(y)\n        # 第二组全连接和BN层\n        y = fluid.layers.fc(y, size=128 * 7 * 7)\n        y = fluid.layers.batch_norm(y)\n        # 进行形状变换\n        y = fluid.layers.reshape(y, shape=(-1, 128, 7, 7))\n        # 第一组转置卷积运算\n        y = deconv(x=y, num_filters=128, act='relu', output_size=[14, 14])\n        # 第二组转置卷积运算\n        y = deconv(x=y, num_filters=1, act='tanh', output_size=[28, 28])\n    return 
y\n```\n\n判别器的作用是训练真实的数据集，然后使用训练真实数据集模型去判别生成器生成的假图片。这一过程可以理解判别器为一个二分类问题，判别器在训练真实数据集时，尽量让其输出概率为1，而训练生成器生成的假图片输出概率为0。这样不断给生成器压力，让其生成的图片尽量逼近真实图片，以至于真实到连判别器也无法判断这是真实图像还是假图片。以下判别器由三组卷积池化层和一个最后全连接层组成，全连接层的大小为1，输入一个二分类的结果。\n```python\n# 判别器 Discriminator\ndef Discriminator(images, name=\"D\"):\n    # 定义一个卷积池化组\n    def conv_pool(input, num_filters, act=None):\n        return fluid.nets.simple_img_conv_pool(input=input,\n                                               filter_size=5,\n                                               num_filters=num_filters,\n                                               pool_size=2,\n                                               pool_stride=2,\n                                               act=act)\n\n    with fluid.unique_name.guard(name + \"/\"):\n        y = fluid.layers.reshape(x=images, shape=[-1, 1, 28, 28])\n        # 第一个卷积池化组\n        y = conv_pool(input=y, num_filters=64, act='leaky_relu')\n        # 第一个卷积池化加回归层\n        y = conv_pool(input=y, num_filters=128)\n        y = fluid.layers.batch_norm(input=y, act='leaky_relu')\n        # 第二个卷积池化加回归层\n        y = fluid.layers.fc(input=y, size=1024)\n        y = fluid.layers.batch_norm(input=y, act='leaky_relu')\n        # 最后一个分类器输出\n        y = fluid.layers.fc(input=y, size=1, act='sigmoid')\n    return y\n```\n\n## 定义训练程序\n\n定义四个Program和一个噪声维度，其中使用三个Program分别进行训练生成器生成图片、训练判别器识别真实图片、训练判别器识别生成器生成的假图片，还要一个Program是用于初始化参数的。噪声的作用是初始化生成图片。\n```python\n# 创建判别器D识别生成器G生成的假图片程序\ntrain_d_fake = fluid.Program()\n# 创建判别器D识别真实图片程序\ntrain_d_real = fluid.Program()\n# 创建生成器G生成符合判别器D的程序\ntrain_g = fluid.Program()\n# 创建共同的一个初始化的程序\nstartup = fluid.Program()\n# 噪声维度\nz_dim = 100\n```\n\n获取Program中的独立参数，因为我们同时训练3个Program，其中训练生成器或训练判别器时，它们参数的更新不应该互相影响。就是训练判别器识别真实图片时，在更新判别器模型参数时，不要更新生成器模型的参数，同理更新生成器模型参数时，不要更新判别器的模型参数。\n```python\n# 从Program获取prefix开头的参数名字\ndef get_params(program, prefix):\n    all_params = program.global_block().all_parameters()\n    return [t.name for t in all_params if 
t.name.startswith(prefix)]\n```\n\n定义一个判别器识别真实图片的程序，这里判别器传入的数据是真实的图片数据。这里使用的损失函数是`fluid.layers.sigmoid_cross_entropy_with_logits()`，这个损失函数是求它们在任务上的错误率，他们的类别是互不排斥的。所以无论真实图片的标签是什么，都不会影响模型识别为真实图片。这里更新的也只有判别器模型的参数，使用的优化方法是Adam。\n```python\n# 训练判别器D识别真实图片\nwith fluid.program_guard(train_d_real, startup):\n    # 创建读取真实数据集图片的data，并且label为1\n    real_image = fluid.layers.data('image', shape=[1, 28, 28])\n    ones = fluid.layers.fill_constant_batch_size_like(real_image, shape=[-1, 1], dtype='float32', value=1)\n\n    # 判别器D判断真实图片的概率\n    p_real = Discriminator(real_image)\n    # 获取损失函数\n    real_cost = fluid.layers.sigmoid_cross_entropy_with_logits(p_real, ones)\n    real_avg_cost = fluid.layers.mean(real_cost)\n\n    # 获取判别器D的参数\n    d_params = get_params(train_d_real, \"D\")\n\n    # 创建优化方法\n    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=2e-4)\n    optimizer.minimize(real_avg_cost, parameter_list=d_params)\n```\n\n这里定义一个判别器识别生成器生成的图片的程序，这里是使用噪声的维度进行输入。这里判别器识别的是生成器生成的图片，这里使用的损失函数同样是`fluid.layers.sigmoid_cross_entropy_with_logits()`。这里更新的参数还是判别器模型的参数，也是使用Adam优化方法。\n```python\n# 训练判别器D识别生成器G生成的图片为假图片\nwith fluid.program_guard(train_d_fake, startup):\n    # 利用创建假的图片data，并且label为0\n    z = fluid.layers.data(name='z', shape=[z_dim, 1, 1])\n    zeros = fluid.layers.fill_constant_batch_size_like(z, shape=[-1, 1], dtype='float32', value=0)\n\n    # 判别器D判断假图片的概率\n    p_fake = Discriminator(Generator(z))\n\n    # 获取损失函数\n    fake_cost = fluid.layers.sigmoid_cross_entropy_with_logits(p_fake, zeros)\n    fake_avg_cost = fluid.layers.mean(fake_cost)\n\n    # 获取判别器D的参数\n    d_params = get_params(train_d_fake, \"D\")\n\n    # 创建优化方法\n    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=2e-4)\n    optimizer.minimize(fake_avg_cost, parameter_list=d_params)\n```\n\n最后定义一个训练生成器生成图片的模型，这里也克隆一个预测程序，用于之后在训练的时候输出预测的图片。损失函数和优化方法都一样，但是要更新的参数是生成器的模型参。\n```python\n# 训练生成器G生成符合判别器D标准的假图片\nwith fluid.program_guard(train_g, startup):\n    # 噪声生成图片为真实图片的概率，Label为1\n    z = 
fluid.layers.data(name='z', shape=[z_dim, 1, 1])\n    ones = fluid.layers.fill_constant_batch_size_like(z, shape=[-1, 1], dtype='float32', value=1)\n\n    # 生成图片\n    fake = Generator(z)\n    # 克隆预测程序\n    infer_program = train_g.clone(for_test=True)\n\n    # 生成符合判别器的假图片\n    p = Discriminator(fake)\n\n    # 获取损失函数\n    g_cost = fluid.layers.sigmoid_cross_entropy_with_logits(p, ones)\n    g_avg_cost = fluid.layers.mean(g_cost)\n\n    # 获取G的参数\n    g_params = get_params(train_g, \"G\")\n\n    # 只训练G\n    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=2e-4)\n    optimizer.minimize(g_avg_cost, parameter_list=g_params)\n```\n\n## 训练并预测\n\n通过由噪声来生成假的图片数据输入。\n```python\n# 噪声生成\ndef z_reader():\n    while True:\n        yield np.random.normal(0.0, 1.0, (z_dim, 1, 1)).astype('float32')\n```\n\n读取真实图片的数据集，这里去除了数据集中的label数据，因为label在这里使用不上，这里不考虑标签分类问题。\n```python\n# 读取MNIST数据集，不使用label\ndef mnist_reader(reader):\n    def r():\n        for img, label in reader():\n            yield img.reshape(1, 28, 28)\n    return r\n```\n\n把预测的图片保存到本地目录上，如果使用jupyter，可用把图片打印到页面上。\n```python\n# 显示图片\ndef show_image_grid(images, pass_id=None):\n    # fig = plt.figure(figsize=(5, 5))\n    # fig.suptitle(\"Pass {}\".format(pass_id))\n    # gs = plt.GridSpec(8, 8)\n    # gs.update(wspace=0.05, hspace=0.05)\n\n    for i, image in enumerate(images[:64]):\n        # 保存生成的图片\n        plt.imsave(\"image/test_%d.png\" % i, image[0])\n    # 以下代码在jupyter可用\n    #     ax = plt.subplot(gs[i])\n    #     plt.axis('off')\n    #     ax.set_xticklabels([])\n    #     ax.set_yticklabels([])\n    #     ax.set_aspect('equal')\n    #     plt.imshow(image[0], cmap='Greys_r')\n    # plt.show()\n\n```\n\n将真实数据和噪声生成的数据的生成一个reader。\n```python\n# 生成真实图片reader\nmnist_generator = paddle.batch(\n    paddle.reader.shuffle(mnist_reader(paddle.dataset.mnist.train()), 30000), batch_size=128)\n# 生成假图片的reader\nz_generator = paddle.batch(z_reader, 
batch_size=128)()\n```\n\n创建一个执行器，这里使用的GPU进行训练，因为该网络比较大，使用CPU训练速度会非常慢。如果读者没有GPU只有，可以取消注释`place = fluid.CPUPlace()`这行代码，并注释`place = fluid.CUDAPlace(0)`这行代码，就可以使用CPU进行训练了。\n```python\n# 创建执行器\n# place = fluid.CPUPlace()\nplace = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\n# 初始化参数\nexe.run(startup)\n```\n\n获取测试需要的噪声数据，使用这些数据进行预测，获取预测的图片。\n```python\n# 测试噪声数据\ntest_z = np.array(next(z_generator))\n```\n\n开始训练，这里同时训练了3个程序，分别是训练判别器D识别生成器G生成的假图片、训练判别器D识别真实图片、训练生成器G生成符合判别器D标准的假图片。通过不断更新判别器的参数，使得判别器的识别能力越来越强。不断更新生成器的参数，使得生成器生成的图像越来越逼近真实图像。在每一轮训练结束后，进行一次预测，输入生成器生成的图片并显示出来。\n```python\n# 开始训练\nfor pass_id in range(5):\n    for i, real_image in enumerate(mnist_generator()):\n        # 训练判别器D识别生成器G生成的假图片\n        r_fake = exe.run(program=train_d_fake,\n                         fetch_list=[fake_avg_cost],\n                         feed={'z': np.array(next(z_generator))})\n\n        # 训练判别器D识别真实图片\n        r_real = exe.run(program=train_d_real,\n                         fetch_list=[real_avg_cost],\n                         feed={'image': np.array(real_image)})\n\n        # 训练生成器G生成符合判别器D标准的假图片\n        r_g = exe.run(program=train_g,\n                      fetch_list=[g_avg_cost],\n                      feed={'z': np.array(next(z_generator))})\n    print(\"Pass：%d，fake_avg_cost：%f, real_avg_cost：%f, g_avg_cost：%f\" % (pass_id, r_fake[0][0], r_real[0][0], r_g[0][0]))\n\n    # 测试生成的图片\n    r_i = exe.run(program=infer_program,\n                  fetch_list=[fake],\n                  feed={'z': test_z})\n\n    # 显示生成的图片\n    show_image_grid(r_i[0], 
pass_id)\n```\n\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20181207120414333.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20181207120439588.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20181207120450489.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3FxXzMzMjAwOTY3,size_16,color_FFFFFF,t_70)\n\n到处为止，本章就结束了。通过学习本章，是不是觉得生成对抗网络非常神奇呢，读者可以参数一下其他的数据，通过生成对抗网络生成更多有趣的图像数据集。从本章可以了解到深度学习的强大，但深度学习远远不止这些，在下一章，我们使用深度学习中的强化学习，通过训练获取模型，使用模型来自己玩一个小游戏。\n\n同步到百度AI Studio平台：http://aistudio.baidu.com/aistudio/#/projectdetail/29365\n同步到科赛网K-Lab平台：https://www.kesci.com/home/project/5bf8cd7c954d6e001066d82e\n项目代码GitHub地址：https://github.com/yeyupiaoling/LearnPaddle2/tree/master/note6\n\n**注意：** 最新代码以GitHub上的为准\n\n# 参考资料\n1. https://www.cnblogs.com/max-hu/p/7129188.html\n2. https://github.com/oraoto/learn_ml/blob/master/paddle/gan-mnist-split.ipynb\n3. https://blog.csdn.net/somtian/article/details/72126328\n4. http://www.paddlepaddle.org/documentation/api/zh/1.1/layers.html#sigmoid-cross-entropy-with-logits\n"
  },
  {
    "path": "note7/DQN.py",
    "content": "import numpy as np\r\nimport paddle.fluid as fluid\r\nimport random\r\nimport gym\r\nfrom collections import deque\r\nfrom paddle.fluid.param_attr import ParamAttr\r\n\r\n\r\n# 定义一个深度神经网络，通过指定参数名称，用于之后更新指定的网络参数\r\ndef DQNetWork(ipt, variable_field):\r\n    fc1 = fluid.layers.fc(input=ipt,\r\n                          size=24,\r\n                          act='relu',\r\n                          param_attr=ParamAttr(name='{}_fc1'.format(variable_field)),\r\n                          bias_attr=ParamAttr(name='{}_fc1_b'.format(variable_field)))\r\n    fc2 = fluid.layers.fc(input=fc1,\r\n                          size=24,\r\n                          act='relu',\r\n                          param_attr=ParamAttr(name='{}_fc2'.format(variable_field)),\r\n                          bias_attr=ParamAttr(name='{}_fc2_b'.format(variable_field)))\r\n    out = fluid.layers.fc(input=fc2,\r\n                          size=2,\r\n                          param_attr=ParamAttr(name='{}_fc3'.format(variable_field)),\r\n                          bias_attr=ParamAttr(name='{}_fc3_b'.format(variable_field)))\r\n    return out\r\n\r\n\r\n# 定义更新参数程序\r\ndef _build_sync_target_network():\r\n    # 获取所有的参数\r\n    vars = list(fluid.default_main_program().list_vars())\r\n    # 把两个网络的参数分别过滤出来\r\n    policy_vars = list(filter(lambda x: 'GRAD' not in x.name and 'policy' in x.name, vars))\r\n    target_vars = list(filter(lambda x: 'GRAD' not in x.name and 'target' in x.name, vars))\r\n    policy_vars.sort(key=lambda x: x.name)\r\n    target_vars.sort(key=lambda x: x.name)\r\n\r\n    # 从主程序中克隆一个程序用于更新参数\r\n    sync_program = fluid.default_main_program().clone()\r\n    with fluid.program_guard(sync_program):\r\n        sync_ops = []\r\n        for i, var in enumerate(policy_vars):\r\n            sync_op = fluid.layers.assign(policy_vars[i], target_vars[i])\r\n            sync_ops.append(sync_op)\r\n    # 修剪第二个玩了个的参数，完成更新参数\r\n    sync_program = sync_program._prune(sync_ops)\r\n    
return sync_program\r\n\r\n\r\n# 定义输入数据\r\nstate_data = fluid.layers.data(name='state', shape=[4], dtype='float32')\r\naction_data = fluid.layers.data(name='action', shape=[1], dtype='int64')\r\nreward_data = fluid.layers.data(name='reward', shape=[], dtype='float32')\r\nnext_state_data = fluid.layers.data(name='next_state', shape=[4], dtype='float32')\r\ndone_data = fluid.layers.data(name='done', shape=[], dtype='float32')\r\n\r\n# 定义训练的参数\r\nbatch_size = 32\r\nnum_episodes = 300\r\nnum_exploration_episodes = 100\r\nmax_len_episode = 1000\r\nlearning_rate = 1e-3\r\ngamma = 1.0\r\ninitial_epsilon = 1.0\r\nfinal_epsilon = 0.01\r\n\r\n# 实例化一个游戏环境，参数为游戏名称\r\nenv = gym.make(\"CartPole-v1\")\r\nreplay_buffer = deque(maxlen=10000)\r\n\r\n# 获取网络\r\nstate_model = DQNetWork(state_data, 'policy')\r\n\r\n# 克隆预测程序\r\npredict_program = fluid.default_main_program().clone()\r\n\r\n# 定义损失函数\r\naction_onehot = fluid.layers.one_hot(action_data, 2)\r\naction_value = fluid.layers.elementwise_mul(action_onehot, state_model)\r\npred_action_value = fluid.layers.reduce_sum(action_value, dim=1)\r\n\r\ntargetQ_predict_value = DQNetWork(next_state_data, 'target')\r\nbest_v = fluid.layers.reduce_max(targetQ_predict_value, dim=1)\r\nbest_v.stop_gradient = True\r\ntarget = reward_data + gamma * best_v * (1.0 - done_data)\r\n\r\ncost = fluid.layers.square_error_cost(pred_action_value, target)\r\navg_cost = fluid.layers.reduce_mean(cost)\r\n\r\n# 获取更新参数程序\r\n_sync_program = _build_sync_target_network()\r\n\r\n# 定义优化方法\r\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=learning_rate, epsilon=1e-3)\r\nopt = optimizer.minimize(avg_cost)\r\n\r\n# 创建执行器并进行初始化\r\nplace = fluid.CPUPlace()\r\nexe = fluid.Executor(place)\r\nexe.run(fluid.default_startup_program())\r\nepsilon = initial_epsilon\r\n\r\nupdate_num = 0\r\n# 开始玩游戏\r\nfor epsilon_id in range(num_episodes):\r\n    # 初始化环境，获得初始状态\r\n    state = env.reset()\r\n    epsilon = max(initial_epsilon * (num_exploration_episodes - epsilon_id) /\r\n 
                 num_exploration_episodes, final_epsilon)\r\n    for t in range(max_len_episode):\r\n        # 显示游戏界面\r\n        # env.render()\r\n        state = np.expand_dims(state, axis=0)\r\n        # epsilon-greedy 探索策略\r\n        if random.random() < epsilon:\r\n            # 以 epsilon 的概率选择随机下一步动作\r\n            action = env.action_space.sample()\r\n        else:\r\n            # 使用模型预测作为结果下一步动作\r\n            action = exe.run(predict_program,\r\n                             feed={'state': state.astype('float32')},\r\n                             fetch_list=[state_model])[0]\r\n            action = np.squeeze(action, axis=0)\r\n            action = np.argmax(action)\r\n\r\n        # 让游戏执行动作，获得执行完 动作的下一个状态，动作的奖励，游戏是否已结束以及额外信息\r\n        next_state, reward, done, info = env.step(action)\r\n\r\n        # 如果游戏结束，就进行惩罚\r\n        reward = -10 if done else reward\r\n        # 记录游戏输出的结果，作为之后训练的数据\r\n        replay_buffer.append((state, action, reward, next_state, done))\r\n        state = next_state\r\n\r\n        # 如果游戏结束，就重新玩游戏\r\n        if done:\r\n            print('Pass:%d, epsilon:%f, score:%d' % (epsilon_id, epsilon, t))\r\n            break\r\n\r\n        # 如果收集的数据大于Batch的大小，就开始训练\r\n        if len(replay_buffer) >= batch_size:\r\n            batch_state, batch_action, batch_reward, batch_next_state, batch_done = \\\r\n                [np.array(a, np.float32) for a in zip(*random.sample(replay_buffer, batch_size))]\r\n\r\n            # 更新参数\r\n            if update_num % 200 == 0:\r\n                exe.run(program=_sync_program)\r\n            update_num += 1\r\n\r\n            # 调整数据维度\r\n            batch_action = np.expand_dims(batch_action, axis=-1)\r\n            batch_next_state = np.expand_dims(batch_next_state, axis=1)\r\n\r\n            # 执行训练\r\n            exe.run(program=fluid.default_main_program(),\r\n                    feed={'state': batch_state,\r\n                          'action': batch_action.astype('int64'),\r\n                      
    'reward': batch_reward,\r\n                          'next_state': batch_next_state,\r\n                          'done': batch_done})\r\n"
  },
  {
    "path": "note7/README.md",
    "content": "﻿@[TOC]\n\n# 前言\n本章介绍使用PaddlePaddle实现强化学习，通过自我学习，完成一个经典控制类的游戏，相关游戏介绍可以在[Gym官网](https://gym.openai.com/envs/#classic_control)上了解。我们这次玩的是一个`CartPole-v1`游戏，操作就是通过控制滑块的左右移动，不让竖着的柱子掉下来。利用强化学习的方法，不断自我学习，通过在玩游戏的过程中获取到奖励或者惩罚，学习到一个模型。在王者荣耀中的超强人机使用的AI技术也类似这样。\n![在这里插入图片描述](https://img-blog.csdnimg.cn/20190131173040228.gif)\n\n# PaddlePaddle程序\n创建一个`DQN.py`的Python文件。导入项目所需的依赖库，如果还没安装gym的话，可以通过命令`pip3 install gym`安装。\n```python\nimport numpy as np\nimport paddle.fluid as fluid\nimport random\nimport gym\nfrom collections import deque\nfrom paddle.fluid.param_attr import ParamAttr\n```\n\n定义一个简单的网络，这个网络只是由4个全连接层组成，并为每个全连接层指定参数的名称。指定参数的作用是为了之后更新模型参数使用的，因为之后会通过这个网络生成两个模型，而且没有模型参数更新不一样。\n```python\n# 定义一个深度神经网络，通过指定参数名称，用于之后更新指定的网络参数\ndef DQNetWork(ipt, variable_field):\n    fc1 = fluid.layers.fc(input=ipt,\n                          size=24,\n                          act='relu',\n                          param_attr=ParamAttr(name='{}_fc1'.format(variable_field)),\n                          bias_attr=ParamAttr(name='{}_fc1_b'.format(variable_field)))\n    fc2 = fluid.layers.fc(input=fc1,\n                          size=24,\n                          act='relu',\n                          param_attr=ParamAttr(name='{}_fc2'.format(variable_field)),\n                          bias_attr=ParamAttr(name='{}_fc2_b'.format(variable_field)))\n    out = fluid.layers.fc(input=fc2,\n                          size=2,\n                          param_attr=ParamAttr(name='{}_fc3'.format(variable_field)),\n                          bias_attr=ParamAttr(name='{}_fc3_b'.format(variable_field)))\n    return out\n```\n\n定义一个更新参数的函数，这个函数是通过指定的参数名称，通过修剪参数的方式完成模型更新。\n```python\n# 定义更新参数程序\ndef _build_sync_target_network():\n    # 获取所有的参数\n    vars = list(fluid.default_main_program().list_vars())\n    # 把两个网络的参数分别过滤出来\n    policy_vars = list(filter(lambda x: 'GRAD' not in x.name and 'policy' in x.name, vars))\n    target_vars = list(filter(lambda x: 'GRAD' not in x.name and 'target' in 
x.name, vars))\n    policy_vars.sort(key=lambda x: x.name)\n    target_vars.sort(key=lambda x: x.name)\n\n    # 从主程序中克隆一个程序用于更新参数\n    sync_program = fluid.default_main_program().clone()\n    with fluid.program_guard(sync_program):\n        sync_ops = []\n        for i, var in enumerate(policy_vars):\n            sync_op = fluid.layers.assign(policy_vars[i], target_vars[i])\n            sync_ops.append(sync_op)\n    # 修剪第二个玩了个的参数，完成更新参数\n    sync_program = sync_program._prune(sync_ops)\n    return sync_program\n```\n\n定义5个数据输出层，`state_data`是当前游戏状态的数据输入层，`action_data`是对游戏操作动作的数据输入层，只有两个动作0和1，`reward_data`是当前游戏给出的奖励的数据输入层，`next_state_data`是游戏下一个状态的数据输入层，`done_data`是游戏是否结束的数据输入层。\n```python\n# 定义输入数据\nstate_data = fluid.layers.data(name='state', shape=[4], dtype='float32')\naction_data = fluid.layers.data(name='action', shape=[1], dtype='int64')\nreward_data = fluid.layers.data(name='reward', shape=[], dtype='float32')\nnext_state_data = fluid.layers.data(name='next_state', shape=[4], dtype='float32')\ndone_data = fluid.layers.data(name='done', shape=[], dtype='float32') \n```\n\n定义一些必要的训练参数，比如epsilon-greedy 探索策略参数。\n```python\n# 定义训练的参数\nbatch_size = 32\nnum_episodes = 300\nnum_exploration_episodes = 100\nmax_len_episode = 1000\nlearning_rate = 1e-3\ngamma = 1.0\ninitial_epsilon = 1.0\nfinal_epsilon = 0.01\n```\n\n创建一个游戏，通过指定游戏的名称`CartPole-v1`就可以获取前言部分所说的游戏。也可以创建其他更多的有些，具体可以参照官方的游戏名称。\n```python\n# 实例化一个游戏环境，参数为游戏名称\nenv = gym.make(\"CartPole-v1\")\nreplay_buffer = deque(maxlen=10000)\n```\n\n获取第一个网络模型，并指定参数名称内包含`policy`字符串。\n```python\n# 获取网络\nstate_model = DQNetWork(state_data, 'policy')\n```\n\n这里从主程序中克隆一个预测程序，这个预测程序是之后预测游戏的下一个动作的，也就是说它在操作游戏。\n```python\n# 克隆预测程序\npredict_program = fluid.default_main_program().clone()\n```\n\n这里定义损失函数，强化学习中的损失函数跟之后我们使用的损失函数有点不一样。虽然最终还是使用平方差损失函数，但是输入的不只是普通的输入数据和标签。\n```python\n# 定义损失函数\naction_onehot = fluid.layers.one_hot(action_data, 2)\naction_value = fluid.layers.elementwise_mul(action_onehot, state_model)\npred_action_value = 
fluid.layers.reduce_sum(action_value, dim=1)\n\ntargetQ_predict_value = DQNetWork(next_state_data, 'target')\nbest_v = fluid.layers.reduce_max(targetQ_predict_value, dim=1)\nbest_v.stop_gradient = True\ntarget = reward_data + gamma * best_v * (1.0 - done_data)\n\ncost = fluid.layers.square_error_cost(pred_action_value, target)\navg_cost = fluid.layers.reduce_mean(cost)\n```\n\n这里获取一个更新参数的程序，用于之后执行更新参数。\n```python\n# 获取更新参数程序\n_sync_program = _build_sync_target_network()\n```\n\n定义一个优化方法，这里还是用AdamOptimizer，笔者也是比较喜欢使用这个优化方法。\n```python\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=learning_rate, epsilon=1e-3)\nopt = optimizer.minimize(avg_cost)\n```\n\n开始创建执行器\n```python\n# 创建执行器并进行初始化\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\nexe.run(fluid.default_startup_program())\nepsilon = initial_epsilon\n```\n\n这个循环有点大，不过因为是一个整体，不好拆分出来，所以就一起介绍吧。\n\n - 在每次循环开始，就开始获取游戏的状态，这个是游戏结束之后再执行的。\n - 定义一个epsilon-greedy探索策略，这个策略是根据训练的进度，开始选择自动操作的动作或者是模型预测的动作的概率。\n - 接下来就是一局游戏的的循环，在这里可以显示游戏的界面\n - 下面就是通过使用epsilon-greedy探索策略，决定使用随机生成动作，还是预测生成动作，使用随机动作可以增加数据的多样性，通过使用模型预测就是让模型根据当前的游戏状态来预测下一步动作是怎么才是正确的，随着模型的不断训练，这个预测也是越来越正确。\n - 然后更加随机生成的动作，或者模型预测的动作，传递个游戏，得到游戏的相关输出，比如游戏的下一个状态，游戏的奖励，游戏是否结束等等。\n - 如果游戏结束了，应当给游戏一个负奖励，惩罚它做出了一个错误的操作。\n - 然后把这些数据存储起来，用于之后训练使用。\n - 当存储的数据大于或等于Batch size，就可以开始训练。\n```python\nupdate_num = 0\n# 开始玩游戏\nfor epsilon_id in range(num_episodes):\n    # 初始化环境，获得初始状态\n    state = env.reset()\n    epsilon = max(initial_epsilon * (num_exploration_episodes - epsilon_id) /\n                  num_exploration_episodes, final_epsilon)\n    for t in range(max_len_episode):\n        # 显示游戏界面\n        # env.render()\n        state = np.expand_dims(state, axis=0)\n        # epsilon-greedy 探索策略\n        if random.random() < epsilon:\n            # 以 epsilon 的概率选择随机下一步动作\n            action = env.action_space.sample()\n        else:\n            # 使用模型预测作为结果下一步动作\n            action = exe.run(predict_program,\n                             feed={'state': 
state.astype('float32')},\n                             fetch_list=[state_model])[0]\n            action = np.squeeze(action, axis=0)\n            action = np.argmax(action)\n\n        # 让游戏执行动作，获得执行完 动作的下一个状态，动作的奖励，游戏是否已结束以及额外信息\n        next_state, reward, done, info = env.step(action)\n\n        # 如果游戏结束，就进行惩罚\n        reward = -10 if done else reward\n        # 记录游戏输出的结果，作为之后训练的数据\n        replay_buffer.append((state, action, reward, next_state, done))\n        state = next_state\n\n        # 如果游戏结束，就重新玩游戏\n        if done:\n            print('Pass:%d, epsilon:%f, score:%d' % (epsilon_id, epsilon, t))\n            break\n\n        # 如果收集的数据大于Batch的大小，就开始训练\n        if len(replay_buffer) >= batch_size:\n            batch_state, batch_action, batch_reward, batch_next_state, batch_done = \\\n                [np.array(a, np.float32) for a in zip(*random.sample(replay_buffer, batch_size))]\n\n            # 更新参数\n            if update_num % 200 == 0:\n                exe.run(program=_sync_program)\n            update_num += 1\n\n            # 调整数据维度\n            batch_action = np.expand_dims(batch_action, axis=-1)\n            batch_next_state = np.expand_dims(batch_next_state, axis=1)\n\n            # 执行训练\n            exe.run(program=fluid.default_main_program(),\n                    feed={'state': batch_state,\n                          'action': batch_action.astype('int64'),\n                          'reward': batch_reward,\n                          'next_state': batch_next_state,\n                          'done': batch_done})\n```\n\n输出训练信息：\n```\n......\nPass:70, epsilon:0.300000, score:234\nPass:71, epsilon:0.290000, score:272\nPass:72, epsilon:0.280000, score:254\nPass:73, epsilon:0.270000, score:148\nPass:74, epsilon:0.260000, score:147\nPass:75, epsilon:0.250000, score:342\nPass:76, epsilon:0.240000, score:295\nPass:77, epsilon:0.230000, score:290\nPass:78, epsilon:0.220000, score:276\nPass:79, epsilon:0.210000, 
score:279\n......\n```\n\n关于通过使用PaddlePaddle实现强化学习，并玩一个小游戏就介绍完成了。强化学习还有很多好玩的地方，比如应用于机器人的避障等一些智能控制上。\n\n同步到百度AI Studio平台：http://aistudio.baidu.com/aistudio/#/projectdetail/31310\n同步到科赛网K-Lab平台：https://www.kesci.com/home/project/5c3eaac54223d9002bfef5ae\n项目代码GitHub地址：https://github.com/yeyupiaoling/LearnPaddle2/tree/master/note7\n\n**注意：** 最新代码以GitHub上的为准\n\n# 参考资料\n1. https://github.com/PaddlePaddle/models/blob/develop/fluid/DeepQNetwork/README_cn.md\n2. https://github.com/snowkylin/TensorFlow-cn\n"
  },
  {
    "path": "note8/README.md",
    "content": "﻿@[TOC]\n\n# 前言\n本系列教程中，前面介绍的都没有保存模型，训练之后也就结束了。那么本章就介绍如果在训练过程中保存模型，用于之后预测或者恢复训练，又或者由于其他数据集的预训练模型。本章会介绍三种保存模型和使用模型的方式。\n\n# 训练模型\n在训练模型的过程中我们可以随时保存模型，当时也可以在训练开始之前加载之前训练过程的模型。为了介绍这三个保存模型的方式，一共编写了三个Python程序进行介绍，分别是`save_infer_model.py`、\t`save_use_params_model.py`、`save_use_persistables_model.py`。\n\n导入相关的依赖库\n```python\nimport os\nimport shutil\nimport paddle as paddle\nimport paddle.dataset.cifar as cifar\nimport paddle.fluid as fluid\n```\n\n定义一个残差神经网络，这个是目前比较常用的一个网络。该神经模型可以通过增加网络的深度达到提高识别率，而不会像其他过去的神经模型那样，当网络继续加深时,反而会损失精度。\n```python\n# 定义残差神经网络（ResNet）\ndef resnet_cifar10(ipt, class_dim):\n    def conv_bn_layer(input,\n                      ch_out,\n                      filter_size,\n                      stride,\n                      padding,\n                      act='relu',\n                      bias_attr=False):\n        tmp = fluid.layers.conv2d(\n            input=input,\n            filter_size=filter_size,\n            num_filters=ch_out,\n            stride=stride,\n            padding=padding,\n            bias_attr=bias_attr)\n        return fluid.layers.batch_norm(input=tmp, act=act)\n\n    def shortcut(input, ch_in, ch_out, stride):\n        if ch_in != ch_out:\n            return conv_bn_layer(input, ch_out, 1, stride, 0, None)\n        else:\n            return input\n\n    def basicblock(input, ch_in, ch_out, stride):\n        tmp = conv_bn_layer(input, ch_out, 3, stride, 1)\n        tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True)\n        short = shortcut(input, ch_in, ch_out, stride)\n        return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')\n\n    # 残差块\n    def layer_warp(block_func, input, ch_in, ch_out, count, stride):\n        tmp = block_func(input, ch_in, ch_out, stride)\n        for i in range(1, count):\n            tmp = block_func(tmp, ch_out, ch_out, 1)\n        return tmp\n\n    conv1 = conv_bn_layer(ipt, ch_out=16, filter_size=3, stride=1, padding=1)\n    res1 = 
layer_warp(basicblock, conv1, 16, 16, 5, 1)\n    res2 = layer_warp(basicblock, res1, 16, 32, 5, 2)\n    res3 = layer_warp(basicblock, res2, 32, 64, 5, 2)\n    pool = fluid.layers.pool2d(input=res3, pool_size=8, pool_type='avg', pool_stride=1)\n    predict = fluid.layers.fc(input=pool, size=class_dim, act='softmax')\n    return predict\n```\n\n定义输出成，这里使用的数据集是cifar数据集，这个数据集的图片是宽高都为32的3通道图片，所以这里定义的图片输入层的shape是`[3, 32, 32]`。\n```python\n# 定义输入层\nimage = fluid.layers.data(name='image', shape=[3, 32, 32], dtype='float32')\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n```\n\n获取残差神经网络的分类器，并指定分类大小是10，因为这个数据集有10个类别。\n```python\n# 获取分类器\nmodel = resnet_cifar10(image, 10)\n```\n\n获取交叉熵损失函数和平均准确率，模型获取的准确率是Top1的。\n```python\n# 获取损失函数和准确率函数\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n```\n\n获取测试程序，用于之后的测试使。\n```python\n# 获取训练和测试程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n```\n\n定义优化方法。\n```python\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-3)\nopts = optimizer.minimize(avg_cost)\n```\n\n获取训练和测试数据，使用的是cifar数据集，cifar数据集有两种，一种是100个类别的，一种是10个类别的，这里使用的是10个类别的。\n```python\n# 获取CIFART数据\ntrain_reader = paddle.batch(cifar.train10(), batch_size=32)\ntest_reader = paddle.batch(cifar.test10(), batch_size=32)\n```\n\n创建执行器，因为我们使用的网络是一个比较大的网络，而且图片也比之前的灰度图要大很多。之前的MNIST数据集的每张图片大小784，而现在的是3072。当然主要是网络比之前的要大很多很多，如果使用CPU训练，速度是非常慢的，所以最好使用GPU进行训练。\n```python\n# 创建执行器，最好使用GPU，CPU速度太慢了\n# place = fluid.CPUPlace()\nplace = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n```\n\n## 加载模型\n创建执行器之后，就可以加载之前训练的模型了，有两种加载模型的方式，对应着两种保存模型的方式。这两种模型，可以只使用一种就可以。\n\n - `save_use_params_model.py`加载之前训练保存的参数模型，对应的保存接口是`fluid.io.save_params`。使用这些模型参数初始化网络参数，进行训练\n```python\n# 加载之前训练过的参数模型\nsave_path = 'models/params_model/'\nif os.path.exists(save_path):\n    print('使用参数模型作为预训练模型')\n    
fluid.io.load_params(executor=exe, dirname=save_path)\n```\n\n - `save_use_persistables_model.py`加载之前训练保存的持久化变量模型，对应的保存接口是`fluid.io.save_persistables`。使用这些模型参数初始化网络参数，进行训练。\n```python\n# 加载之前训练过的检查点模型\nsave_path = 'models/persistables_model/'\nif os.path.exists(save_path):\n    print('使用持久化变量模型作为预训练模型')\n    fluid.io.load_persistables(executor=exe, dirname=save_path)\n```\n\n\n开始训练模型。\n```python\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n\nfor pass_id in range(10):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc = exe.run(program=fluid.default_main_program(),\n                                        feed=feeder.feed(data),\n                                        fetch_list=[avg_cost, acc])\n        # 每100个batch打印一次信息\n        if batch_id % 100 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %\n                  (pass_id, batch_id, train_cost[0], train_acc[0]))\n\n    # 进行测试\n    test_accs = []\n    test_costs = []\n    for batch_id, data in enumerate(test_reader()):\n        test_cost, test_acc = exe.run(program=test_program,\n                                      feed=feeder.feed(data),\n                                      fetch_list=[avg_cost, acc])\n        test_accs.append(test_acc[0])\n        test_costs.append(test_cost[0])\n    # 求测试结果的平均值\n    test_cost = (sum(test_costs) / len(test_costs))\n    test_acc = (sum(test_accs) / len(test_accs))\n    print('Test:%d, Cost:%0.5f, Accuracy:%0.5f' % (pass_id, test_cost, test_acc))\n```\n\n没有加载之前保存的模型\n```\nPass:0, Batch:0, Cost:2.73460, Accuracy:0.03125\nPass:0, Batch:100, Cost:1.93663, Accuracy:0.25000\nPass:0, Batch:200, Cost:2.02943, Accuracy:0.12500\nPass:0, Batch:300, Cost:1.94425, Accuracy:0.25000\nPass:0, Batch:400, Cost:1.87802, Accuracy:0.21875\nPass:0, Batch:500, Cost:1.71312, Accuracy:0.25000\nPass:0, Batch:600, Cost:1.94090, Accuracy:0.18750\nPass:0, Batch:700, Cost:2.08904, 
Accuracy:0.12500\nPass:0, Batch:800, Cost:1.89128, Accuracy:0.12500\nPass:0, Batch:900, Cost:1.95716, Accuracy:0.21875\nPass:0, Batch:1000, Cost:1.65181, Accuracy:0.34375\n```\n\n使用参数模型作为预训练模型训练时输出的信息：\n```\n使用参数模型作为预训练模型\nPass:0, Batch:0, Cost:0.27627, Accuracy:0.90625\nPass:0, Batch:100, Cost:0.40026, Accuracy:0.87500\nPass:0, Batch:200, Cost:0.54928, Accuracy:0.78125\nPass:0, Batch:300, Cost:0.56526, Accuracy:0.84375\nPass:0, Batch:400, Cost:0.53501, Accuracy:0.78125\nPass:0, Batch:500, Cost:0.18596, Accuracy:0.93750\nPass:0, Batch:600, Cost:0.23747, Accuracy:0.96875\nPass:0, Batch:700, Cost:0.45520, Accuracy:0.84375\nPass:0, Batch:800, Cost:0.86205, Accuracy:0.71875\nPass:0, Batch:900, Cost:0.36981, Accuracy:0.87500\nPass:0, Batch:1000, Cost:0.37483, Accuracy:0.81250\n```\n\n持久性变量模型作为预训练模型训练时输出的信息：\n```\n使用持久性变量模型作为预训练模型\nPass:0, Batch:0, Cost:0.51357, Accuracy:0.81250\nPass:0, Batch:100, Cost:0.64380, Accuracy:0.78125\nPass:0, Batch:200, Cost:0.69049, Accuracy:0.62500\nPass:0, Batch:300, Cost:0.52201, Accuracy:0.87500\nPass:0, Batch:400, Cost:0.47289, Accuracy:0.81250\nPass:0, Batch:500, Cost:0.15821, Accuracy:1.00000\nPass:0, Batch:600, Cost:0.36470, Accuracy:0.87500\nPass:0, Batch:700, Cost:0.25326, Accuracy:0.90625\nPass:0, Batch:800, Cost:0.92556, Accuracy:0.78125\nPass:0, Batch:900, Cost:0.27470, Accuracy:0.93750\nPass:0, Batch:1000, Cost:0.34562, Accuracy:0.87500\n```\n\n## 保存模型\n训练结束之后，就可以进行保存模型。当然也不一样要全部训练结束才保存模型，我们可以在每一个Pass训练结束之后保存一次模型。这里使用三个程序分别保存，当然也可以一次全部保存。\n\n - `save_infer_model.py`保存预测模型，之后用于预测图像。通过使用这个方式保存的模型，之后预测是非常方便的，具体可以阅读预测部分。\n```python\n# 保存预测模型\nsave_path = 'models/infer_model/'\n# 删除旧的模型文件\nshutil.rmtree(save_path, ignore_errors=True)\n# 创建保持模型文件目录\nos.makedirs(save_path)\n# 保存预测模型\nfluid.io.save_inference_model(save_path, feeded_var_names=[image.name], target_vars=[model], executor=exe)\n```\n\n - `save_use_params_model.py`保存参数模型，之后用于初始化模型，进行训练。\n```python\n# 保存参数模型\nsave_path = 'models/params_model/'\n# 
删除旧的模型文件\nshutil.rmtree(save_path, ignore_errors=True)\n# 创建保持模型文件目录\nos.makedirs(save_path)\n# 保存参数模型\nfluid.io.save_params(executor=exe, dirname=save_path)\n```\n\n - `save_use_persistables_model.py`保存持久化变量模型，之后用于初始化模型，进行训练。\n```python\n# 保存持久化变量模型\nsave_path = 'models/persistables_model/'\n# 删除旧的模型文件\nshutil.rmtree(save_path, ignore_errors=True)\n# 创建保持模型文件目录\nos.makedirs(save_path)\n# 保存持久化变量模型\nfluid.io.save_persistables(executor=exe, dirname=save_path)\n```\n\n\n# 预测\n在训练的时候使用`fluid.io.save_inference_model`接口保存的模型，可以通过以下`use_infer_model.py`程序预测，通过这个程序，读者会发现通过这个接口保存的模型，再次预测是非常简单的。\n\n导入相关的依赖库\n```python\nimport paddle.fluid as fluid\nfrom PIL import Image\nimport numpy as np\n```\n\n创建一个执行器，预测图片可以使用CPU执行，这个速度不会太慢。\n```python\n# 创建执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\nexe.run(fluid.default_startup_program())\n```\n\n加载模型，这个是整个预测程序的重点，通过加载预测模型我们就可以轻松获取得到一个预测程序，输出参数的名称，以及分类器的输出。\n```python\n# 保存预测模型路径\nsave_path = 'models/infer_model/'\n# 从模型中获取预测程序、输入数据名称列表、分类器\n[infer_program, feeded_var_names, target_var] = fluid.io.load_inference_model(dirname=save_path, executor=exe)\n```\n\n定义一个图像预处理的函数，这个函数可以统一图像大小，修改图像的存储顺序和图片的通道顺序，转换成numpy数据。\n```python\n# 预处理图片\ndef load_image(file):\n    im = Image.open(file)\n    im = im.resize((32, 32), Image.ANTIALIAS)\n    im = np.array(im).astype(np.float32)\n    # PIL打开图片存储顺序为H(高度)，W(宽度)，C(通道)。\n    # PaddlePaddle要求数据顺序为CHW，所以需要转换顺序。\n    im = im.transpose((2, 0, 1))\n    # CIFAR训练图片通道顺序为B(蓝),G(绿),R(红),\n    # 而PIL打开图片默认通道顺序为RGB,因为需要交换通道。\n    im = im[(2, 1, 0), :, :]  # BGR\n    im = im / 255.0\n    im = np.expand_dims(im, axis=0)\n    return im\n```\n\n获取数据并进行预测。这里对比之前的预测方式，不需要再输入一个模拟的标签，因为在保存模型的时候，已经对这部分进行修剪，去掉了这部分不必要的输入。\n```python\n# 获取图片数据\nimg = load_image('image/cat.png')\n\n# 执行预测\nresult = exe.run(program=infer_program,\n                 feed={feeded_var_names[0]: img},\n                 fetch_list=target_var)\n```\n\n执行预测之后，得到一个数组，这个数组是表示每个类别的概率，获取最大概率的标签，并根据标签获取获取该类的名称。\n```python\n# 
显示图片并输出结果最大的label\nlab = np.argsort(result)[0][0][-1]\n\nnames = ['飞机', '汽车', '鸟', '猫', '鹿', '狗', '青蛙', '马', '船', '卡车']\n\nprint('预测结果标签为：%d， 名称为：%s， 概率为：%f' % (lab, names[lab], result[0][0][lab]))\n```\n\n预测输出结果：\n```\n预测结果标签为：3， 名称为：猫， 概率为：0.864919\n```\n\n关于模型的保存和使用就介绍到这里，读者可以使用这个方式保存之前学过的模型。在这个基础上，下一章我们介绍如何使用预训练模型。\n\n同步到百度AI Studio平台：http://aistudio.baidu.com/?_=1548666175806#/projectdetail/38741\n同步到科赛网K-Lab平台：https://www.kesci.com/home/project/5c3f495589f4aa002b845d6b\n项目代码GitHub地址：https://github.com/yeyupiaoling/LearnPaddle2/tree/master/note7\n\n**注意：** 最新代码以GitHub上的为准\n\n# 参考资料\n1. https://blog.csdn.net/qq_33200967/article/details/79095224\n2. http://www.paddlepaddle.org/documentation/docs/zh/1.2/api_cn/io_cn.html\n"
  },
  {
    "path": "note8/save_infer_model.py",
    "content": "import os\nimport shutil\n\nimport paddle as paddle\nimport paddle.dataset.cifar as cifar\nimport paddle.fluid as fluid\n\n\n# 定义VGG16神经网络\ndef vgg16(input, class_dim=1000):\n    def conv_block(conv, num_filter, groups):\n        for i in range(groups):\n            conv = fluid.layers.conv2d(input=conv,\n                                       num_filters=num_filter,\n                                       filter_size=3,\n                                       stride=1,\n                                       padding=1,\n                                       act='relu')\n        return fluid.layers.pool2d(input=conv, pool_size=2, pool_type='max', pool_stride=2)\n\n    conv1 = conv_block(input, 64, 2)\n    conv2 = conv_block(conv1, 128, 2)\n    conv3 = conv_block(conv2, 256, 3)\n    conv4 = conv_block(conv3, 512, 3)\n    conv5 = conv_block(conv4, 512, 3)\n\n    fc1 = fluid.layers.fc(input=conv5, size=512)\n    dp1 = fluid.layers.dropout(x=fc1, dropout_prob=0.5)\n    fc2 = fluid.layers.fc(input=dp1, size=512)\n    bn1 = fluid.layers.batch_norm(input=fc2, act='relu')\n    fc2 = fluid.layers.dropout(x=bn1, dropout_prob=0.5)\n    out = fluid.layers.fc(input=fc2, size=class_dim, act='softmax')\n\n    return out\n\n\n# 定义输入层\nimage = fluid.layers.data(name='image', shape=[3, 32, 32], dtype='float32')\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n\n# 获取分类器\nmodel = vgg16(image, 10)\n\n# 获取损失函数和准确率函数\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 获取训练和测试程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-3)\nopts = optimizer.minimize(avg_cost)\n\n# 获取CIFAR数据\ntrain_reader = paddle.batch(cifar.train10(), batch_size=32)\ntest_reader = paddle.batch(cifar.test10(), batch_size=32)\n\n# 定义一个使用CPU的执行器\nplace = fluid.CUDAPlace(0)\n# place = 
fluid.CPUPlace()\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n\n# 开始训练和测试\nfor pass_id in range(10):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc = exe.run(program=fluid.default_main_program(),\n                                        feed=feeder.feed(data),\n                                        fetch_list=[avg_cost, acc])\n        # 每100个batch打印一次信息\n        if batch_id % 100 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %\n                  (pass_id, batch_id, train_cost[0], train_acc[0]))\n\n    # 进行测试\n    test_accs = []\n    test_costs = []\n    for batch_id, data in enumerate(test_reader()):\n        test_cost, test_acc = exe.run(program=test_program,\n                                      feed=feeder.feed(data),\n                                      fetch_list=[avg_cost, acc])\n        test_accs.append(test_acc[0])\n        test_costs.append(test_cost[0])\n    # 求测试结果的平均值\n    test_cost = (sum(test_costs) / len(test_costs))\n    test_acc = (sum(test_accs) / len(test_accs))\n    print('Test:%d, Cost:%0.5f, Accuracy:%0.5f' % (pass_id, test_cost, test_acc))\n\n    # 保存预测模型\n    save_path = 'models/infer_model/'\n    # 删除旧的模型文件\n    shutil.rmtree(save_path, ignore_errors=True)\n    # 创建保持模型文件目录\n    os.makedirs(save_path)\n    # 保存预测模型\n    fluid.io.save_inference_model(save_path, feeded_var_names=[image.name], target_vars=[model], executor=exe)\n"
  },
  {
    "path": "note8/save_use_params_model.py",
    "content": "import os\nimport shutil\n\nimport paddle as paddle\nimport paddle.dataset.cifar as cifar\nimport paddle.fluid as fluid\n\n\n# 定义VGG16神经网络\ndef vgg16(input, class_dim=1000):\n    def conv_block(conv, num_filter, groups):\n        for i in range(groups):\n            conv = fluid.layers.conv2d(input=conv,\n                                       num_filters=num_filter,\n                                       filter_size=3,\n                                       stride=1,\n                                       padding=1,\n                                       act='relu')\n        return fluid.layers.pool2d(input=conv, pool_size=2, pool_type='max', pool_stride=2)\n\n    conv1 = conv_block(input, 64, 2)\n    conv2 = conv_block(conv1, 128, 2)\n    conv3 = conv_block(conv2, 256, 3)\n    conv4 = conv_block(conv3, 512, 3)\n    conv5 = conv_block(conv4, 512, 3)\n\n    fc1 = fluid.layers.fc(input=conv5, size=512)\n    dp1 = fluid.layers.dropout(x=fc1, dropout_prob=0.5)\n    fc2 = fluid.layers.fc(input=dp1, size=512)\n    bn1 = fluid.layers.batch_norm(input=fc2, act='relu')\n    fc2 = fluid.layers.dropout(x=bn1, dropout_prob=0.5)\n    out = fluid.layers.fc(input=fc2, size=class_dim, act='softmax')\n\n    return out\n\n\n# 定义输入层\nimage = fluid.layers.data(name='image', shape=[3, 32, 32], dtype='float32')\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n\n# 获取分类器\nmodel = vgg16(image, 10)\n\n# 获取损失函数和准确率函数\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 获取训练和测试程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-3)\nopts = optimizer.minimize(avg_cost)\n\n# 获取CIFAR数据\ntrain_reader = paddle.batch(cifar.train10(), batch_size=32)\ntest_reader = paddle.batch(cifar.test10(), batch_size=32)\n\n# 定义一个使用CPU的执行器\nplace = fluid.CUDAPlace(0)\n# place = 
fluid.CPUPlace()\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 加载之前训练过的参数模型\nsave_path = 'models/params_model/'\nif os.path.exists(save_path):\n    print('使用参数模型作为预训练模型')\n    fluid.io.load_params(executor=exe, dirname=save_path)\n\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n\n# 开始训练和测试\nfor pass_id in range(10):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc = exe.run(program=fluid.default_main_program(),\n                                        feed=feeder.feed(data),\n                                        fetch_list=[avg_cost, acc])\n        # 每100个batch打印一次信息\n        if batch_id % 100 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %\n                  (pass_id, batch_id, train_cost[0], train_acc[0]))\n\n    # 进行测试\n    test_accs = []\n    test_costs = []\n    for batch_id, data in enumerate(test_reader()):\n        test_cost, test_acc = exe.run(program=test_program,\n                                      feed=feeder.feed(data),\n                                      fetch_list=[avg_cost, acc])\n        test_accs.append(test_acc[0])\n        test_costs.append(test_cost[0])\n    # 求测试结果的平均值\n    test_cost = (sum(test_costs) / len(test_costs))\n    test_acc = (sum(test_accs) / len(test_accs))\n    print('Test:%d, Cost:%0.5f, Accuracy:%0.5f' % (pass_id, test_cost, test_acc))\n\n    # 保存参数模型\n    save_path = 'models/params_model/'\n    # 删除旧的模型文件\n    shutil.rmtree(save_path, ignore_errors=True)\n    # 创建保持模型文件目录\n    os.makedirs(save_path)\n    # 保存参数模型\n    fluid.io.save_params(executor=exe, dirname=save_path)\n"
  },
  {
    "path": "note8/save_use_persistables_model.py",
    "content": "import os\nimport shutil\n\nimport paddle as paddle\nimport paddle.dataset.cifar as cifar\nimport paddle.fluid as fluid\n\n\n# 定义VGG16神经网络\ndef vgg16(input, class_dim=1000):\n    def conv_block(conv, num_filter, groups):\n        for i in range(groups):\n            conv = fluid.layers.conv2d(input=conv,\n                                       num_filters=num_filter,\n                                       filter_size=3,\n                                       stride=1,\n                                       padding=1,\n                                       act='relu')\n        return fluid.layers.pool2d(input=conv, pool_size=2, pool_type='max', pool_stride=2)\n\n    conv1 = conv_block(input, 64, 2)\n    conv2 = conv_block(conv1, 128, 2)\n    conv3 = conv_block(conv2, 256, 3)\n    conv4 = conv_block(conv3, 512, 3)\n    conv5 = conv_block(conv4, 512, 3)\n\n    fc1 = fluid.layers.fc(input=conv5, size=512)\n    dp1 = fluid.layers.dropout(x=fc1, dropout_prob=0.5)\n    fc2 = fluid.layers.fc(input=dp1, size=512)\n    bn1 = fluid.layers.batch_norm(input=fc2, act='relu')\n    fc2 = fluid.layers.dropout(x=bn1, dropout_prob=0.5)\n    out = fluid.layers.fc(input=fc2, size=class_dim, act='softmax')\n\n    return out\n\n\n# 定义输入层\nimage = fluid.layers.data(name='image', shape=[3, 32, 32], dtype='float32')\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n\n# 获取分类器\nmodel = vgg16(image, 10)\n\n# 获取损失函数和准确率函数\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 获取训练和测试程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-3)\nopts = optimizer.minimize(avg_cost)\n\n# 获取CIFAR数据\ntrain_reader = paddle.batch(cifar.train10(), batch_size=32)\ntest_reader = paddle.batch(cifar.test10(), batch_size=32)\n\n# 定义一个使用CPU的执行器\nplace = fluid.CUDAPlace(0)\n# place = 
fluid.CPUPlace()\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 加载之前训练过的持久性变量模型\nsave_path = 'models/persistables_model/'\nif os.path.exists(save_path):\n    print('使用持久性变量模型作为预训练模型')\n    fluid.io.load_persistables(executor=exe, dirname=save_path)\n\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n\n# 开始训练和测试\nfor pass_id in range(10):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc = exe.run(program=fluid.default_main_program(),\n                                        feed=feeder.feed(data),\n                                        fetch_list=[avg_cost, acc])\n        # 每100个batch打印一次信息\n        if batch_id % 100 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %\n                  (pass_id, batch_id, train_cost[0], train_acc[0]))\n\n    # 进行测试\n    test_accs = []\n    test_costs = []\n    for batch_id, data in enumerate(test_reader()):\n        test_cost, test_acc = exe.run(program=test_program,\n                                      feed=feeder.feed(data),\n                                      fetch_list=[avg_cost, acc])\n        test_accs.append(test_acc[0])\n        test_costs.append(test_cost[0])\n    # 求测试结果的平均值\n    test_cost = (sum(test_costs) / len(test_costs))\n    test_acc = (sum(test_accs) / len(test_accs))\n    print('Test:%d, Cost:%0.5f, Accuracy:%0.5f' % (pass_id, test_cost, test_acc))\n\n    # 保存持久性变量模型\n    save_path = 'models/persistables_model/'\n    # 删除旧的模型文件\n    shutil.rmtree(save_path, ignore_errors=True)\n    # 创建保持模型文件目录\n    os.makedirs(save_path)\n    # 保存持久性变量模型\n    fluid.io.save_persistables(executor=exe, dirname=save_path)\n"
  },
  {
    "path": "note8/use_infer_model.py",
    "content": "import paddle.fluid as fluid\nfrom PIL import Image\nimport numpy as np\n\n# 创建执行器\nplace = fluid.CPUPlace()\nexe = fluid.Executor(place)\nexe.run(fluid.default_startup_program())\n\n# 保存预测模型路径\nsave_path = 'models/infer_model/'\n# 从模型中获取预测程序、输入数据名称列表、分类器\n[infer_program, feeded_var_names, target_var] = fluid.io.load_inference_model(dirname=save_path, executor=exe)\n\n\n# 预处理图片\ndef load_image(file):\n    im = Image.open(file)\n    im = im.resize((32, 32), Image.ANTIALIAS)\n    im = np.array(im).astype(np.float32)\n    # PIL打开图片存储顺序为H(高度)，W(宽度)，C(通道)。\n    # PaddlePaddle要求数据顺序为CHW，所以需要转换顺序。\n    im = im.transpose((2, 0, 1))\n    # CIFAR训练图片通道顺序为B(蓝),G(绿),R(红),\n    # 而PIL打开图片默认通道顺序为RGB,因为需要交换通道。\n    im = im[(2, 1, 0), :, :]  # BGR\n    im = im / 255.0\n    im = np.expand_dims(im, axis=0)\n    return im\n\n\n# 获取图片数据\nimg = load_image('image/cat.png')\n\n# 执行预测\nresult = exe.run(program=infer_program,\n                 feed={feeded_var_names[0]: img},\n                 fetch_list=target_var)\n\n# 显示图片并输出结果最大的label\nlab = np.argsort(result)[0][0][-1]\n\nnames = ['飞机', '汽车', '鸟', '猫', '鹿', '狗', '青蛙', '马', '船', '卡车']\n\nprint('预测结果标签为：%d， 名称为：%s， 概率为：%f' % (lab, names[lab], result[0][0][lab]))\n"
  },
  {
    "path": "note9/README.md",
    "content": "﻿@[TOC]\n\n# 前言\n在深度学习训练中，例如图像识别训练，每次从零开始训练都要消耗大量的时间和资源。而且当数据集比较少时，模型也难以拟合的情况。基于这种情况下，就出现了迁移学习，通过使用已经训练好的模型来初始化即将训练的网络，可以加快模型的收敛速度，而且还能提高模型的准确率。这个用于初始化训练网络的模型是使用大型数据集训练得到的一个模型，而且模型已经完全收敛。最好训练的模型和预训练的模型是同一个网络，这样可以最大限度地初始化全部层。\n\n\n# 初步训练模型\n本章使用的预训练模型是PaddlePaddle官方提供的ResNet50网络模型，训练的数据集是ImageNet，它的下载地址为：http://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_pretrained.zip ，读者可以下载其他更多的模型，可以在[这里下载](https://github.com/PaddlePaddle/models/blob/develop/fluid/PaddleCV/image_classification/README_cn.md#%E5%B7%B2%E6%9C%89%E6%A8%A1%E5%9E%8B%E5%8F%8A%E5%85%B6%E6%80%A7%E8%83%BD)。下载之后解压到models目录下。\n\n编写一个`pretrain_model.py`的Python程序，用于初步训练模型。首先导入相关的依赖包。\n```python\nimport os\nimport shutil\nimport paddle as paddle\nimport paddle.dataset.flowers as flowers\nimport paddle.fluid as fluid\nfrom paddle.fluid.param_attr import ParamAttr\n```\n\n定义一个残差神经网络，这个网络是PaddlePaddle官方提供的，模型地址为[models_name](https://github.com/PaddlePaddle/models/tree/develop/fluid/PaddleCV/image_classification/models_name)。这个网络是在每一个层都由指定参数名字，这是为了方便初始化网络模型，如果网络的结构发生变化了，但是名字没有变化，之后使用预训练模型初始化时，就可以根据每个参数的名字初始化对应的层。\n```python\n# 定义残差神经网络（ResNet）\ndef resnet50(input):\n    def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1, act=None, name=None):\n        conv = fluid.layers.conv2d(input=input,\n                                   num_filters=num_filters,\n                                   filter_size=filter_size,\n                                   stride=stride,\n                                   padding=(filter_size - 1) // 2,\n                                   groups=groups,\n                                   act=None,\n                                   param_attr=ParamAttr(name=name + \"_weights\"),\n                                   bias_attr=False,\n                                   name=name + '.conv2d.output.1')\n        if name == \"conv1\":\n            bn_name = \"bn_\" + name\n        else:\n            bn_name = \"bn\" + name[3:]\n        return 
fluid.layers.batch_norm(input=conv,\n                                       act=act,\n                                       name=bn_name + '.output.1',\n                                       param_attr=ParamAttr(name=bn_name + '_scale'),\n                                       bias_attr=ParamAttr(bn_name + '_offset'),\n                                       moving_mean_name=bn_name + '_mean',\n                                       moving_variance_name=bn_name + '_variance', )\n\n    def shortcut(input, ch_out, stride, name):\n        ch_in = input.shape[1]\n        if ch_in != ch_out or stride != 1:\n            return conv_bn_layer(input, ch_out, 1, stride, name=name)\n        else:\n            return input\n\n    def bottleneck_block(input, num_filters, stride, name):\n        conv0 = conv_bn_layer(input=input,\n                              num_filters=num_filters,\n                              filter_size=1,\n                              act='relu',\n                              name=name + \"_branch2a\")\n        conv1 = conv_bn_layer(input=conv0,\n                              num_filters=num_filters,\n                              filter_size=3,\n                              stride=stride,\n                              act='relu',\n                              name=name + \"_branch2b\")\n        conv2 = conv_bn_layer(input=conv1,\n                              num_filters=num_filters * 4,\n                              filter_size=1,\n                              act=None,\n                              name=name + \"_branch2c\")\n\n        short = shortcut(input, num_filters * 4, stride, name=name + \"_branch1\")\n\n        return fluid.layers.elementwise_add(x=short, y=conv2, act='relu', name=name + \".add.output.5\")\n\n    depth = [3, 4, 6, 3]\n    num_filters = [64, 128, 256, 512]\n\n    conv = conv_bn_layer(input=input, num_filters=64, filter_size=7, stride=2, act='relu', name=\"conv1\")\n    conv = fluid.layers.pool2d(input=conv, 
pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')\n\n    for block in range(len(depth)):\n        for i in range(depth[block]):\n            conv_name = \"res\" + str(block + 2) + chr(97 + i)\n            conv = bottleneck_block(input=conv,\n                                    num_filters=num_filters[block],\n                                    stride=2 if i == 0 and block != 0 else 1,\n                                    name=conv_name)\n\n    pool = fluid.layers.pool2d(input=conv, pool_size=7, pool_type='avg', global_pooling=True)\n    return pool\n```\n\n\n定义图片数据和标签数据的输入层，本章使用的图片数据集是[flowers](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/)。这个通过使用PaddlePaddle的接口得到的flowers数据集的图片是3通道宽高都是224的彩色图，总类别是102种。\n```python\n# 定义输入层\nimage = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n```\n\n获取一个基本的模型，并从主程序中克隆一个基本的程序，用于之后加载参数使用。\n```python\n# 获取分类器的上一层\npool = resnet50(image)\n# 停止梯度下降\npool.stop_gradient = True\n# 由这里创建一个基本的主程序\nbase_model_program = fluid.default_main_program().clone()\n```\n\n这里再加上网络的分类器，因为预训练模型的类别数量是1000，所以要重新修改分类器。这个也是训练新模型的最大不同点，通过分离分类器来解决两个数据集的不同类别的问题。\n```python\n# 这里再重新加载网络的分类器，大小为本项目的分类大小\nmodel = fluid.layers.fc(input=pool, size=102, act='softmax')\n```\n\n然后是获取损失函数，准确率函数和优化方法。\n```python\n# 获取损失函数和准确率函数\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-3)\nopts = optimizer.minimize(avg_cost)\n```\n\n\n获取flowers数据集，因为这里不需要使用测试，所以这里也不需要读取测试数据集。\n```python\n# 获取flowers数据\ntrain_reader = paddle.batch(flowers.train(), batch_size=16)\n```\n\n\n创建执行器，最好是使用GPU进行训练，因为数据集和网络都是比较大的。\n```python\n# 定义一个使用CPU的执行器\nplace = fluid.CUDAPlace(0)\n# place = fluid.CPUPlace()\nexe = fluid.Executor(place)\n# 
进行参数初始化\nexe.run(fluid.default_startup_program())\n```\n\n\n这里就是加载预训练模型的重点，通过if_exist函数判断网络所需的模型文件是否存在，然后再通过调用`fluid.io.load_vars`加载存在的模型文件。要留意的是这里使用的是之前克隆的基本程序。\n```python\n# 官方提供的原预训练模型\nsrc_pretrain_model_path = 'models/ResNet50_pretrained/'\n\n\n# 通过这个函数判断模型文件是否存在\ndef if_exist(var):\n    path = os.path.join(src_pretrain_model_path, var.name)\n    exist = os.path.exists(path)\n    if exist:\n        print('Load model: %s' % path)\n    return exist\n\n\n# 加载模型文件，只加载存在模型的模型文件\nfluid.io.load_vars(executor=exe, dirname=src_pretrain_model_path, predicate=if_exist, main_program=base_model_program)\n\n```\n\n然后使用这个预训练模型进行训练10个Pass。\n```python\n# 优化内存\noptimized = fluid.transpiler.memory_optimize(input_program=fluid.default_main_program(), print_log=False)\n\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n\n# 训练10次\nfor pass_id in range(10):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc = exe.run(program=fluid.default_main_program(),\n                                        feed=feeder.feed(data),\n                                        fetch_list=[avg_cost, acc])\n        # 每100个batch打印一次信息\n        if batch_id % 100 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %\n                  (pass_id, batch_id, train_cost[0], train_acc[0]))\n```\n\n执行训练输出的信息：\n```\nLoad model: models/ResNet50_pretrained/res5a_branch2a_weights\nLoad model: models/ResNet50_pretrained/res4c_branch2a_weights\nLoad model: models/ResNet50_pretrained/res4f_branch2b_weights\nLoad model: models/ResNet50_pretrained/bn2a_branch2b_variance\nLoad model: models/ResNet50_pretrained/bn4d_branch2b_variance\nLoad model: models/ResNet50_pretrained/bn4f_branch2b_variance\nLoad model: models/ResNet50_pretrained/bn4e_branch2a_offset\nLoad model: models/ResNet50_pretrained/res4f_branch2c_weights\nLoad model: models/ResNet50_pretrained/res5c_branch2b_weights\n......\nPass:0, Batch:0, Cost:6.92118, 
Accuracy:0.00000\nPass:0, Batch:100, Cost:3.31085, Accuracy:0.31250\nPass:0, Batch:200, Cost:3.32227, Accuracy:0.18750\nPass:0, Batch:300, Cost:3.85708, Accuracy:0.31250\nPass:1, Batch:0, Cost:3.36264, Accuracy:0.25000\n......\n```\n\n训练结束之后，使用`fluid.io.save_params`接口保存参数，这个是已经符合这个数据集类别数量的，所以之后会使用都这个模型直接初始化模型，不需要再分离分类器。\n```python\n# 保存参数模型\nsave_pretrain_model_path = 'models/pretrain_model/'\n# 删除旧的模型文件\nshutil.rmtree(save_pretrain_model_path, ignore_errors=True)\n# 创建保持模型文件目录\nos.makedirs(save_pretrain_model_path)\n# 保存参数模型\nfluid.io.save_params(executor=exe, dirname=save_pretrain_model_path)\n```\n\n到这里预训练的第一步处理原预训练模型算是完成了，接下来就是使用这个已经处理过的模型正式训练了。\n\n\n# 使用过的模型开始正式训练\n这一部分是使用已经处理过的模型开始正式训练，创建一个`train.py`正式训练。首先导入相关的依赖包。\n\n```python\nimport os\nimport shutil\nimport paddle as paddle\nimport paddle.dataset.flowers as flowers\nimport paddle.fluid as fluid\nfrom paddle.fluid.param_attr import ParamAttr\n```\n\n\n定义一个残差神经网络，这个残差神经网络跟上面的基本一样的，只是把分类器也加进去了，这是一个完整的神经网络。\n```python\n# 定义残差神经网络（ResNet）\ndef resnet50(input, class_dim):\n    def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1, act=None, name=None):\n        conv = fluid.layers.conv2d(input=input,\n                                   num_filters=num_filters,\n                                   filter_size=filter_size,\n                                   stride=stride,\n                                   padding=(filter_size - 1) // 2,\n                                   groups=groups,\n                                   act=None,\n                                   param_attr=ParamAttr(name=name + \"_weights\"),\n                                   bias_attr=False,\n                                   name=name + '.conv2d.output.1')\n        if name == \"conv1\":\n            bn_name = \"bn_\" + name\n        else:\n            bn_name = \"bn\" + name[3:]\n        return fluid.layers.batch_norm(input=conv,\n                                       act=act,\n                                       
name=bn_name + '.output.1',\n                                       param_attr=ParamAttr(name=bn_name + '_scale'),\n                                       bias_attr=ParamAttr(bn_name + '_offset'),\n                                       moving_mean_name=bn_name + '_mean',\n                                       moving_variance_name=bn_name + '_variance', )\n\n    def shortcut(input, ch_out, stride, name):\n        ch_in = input.shape[1]\n        if ch_in != ch_out or stride != 1:\n            return conv_bn_layer(input, ch_out, 1, stride, name=name)\n        else:\n            return input\n\n    def bottleneck_block(input, num_filters, stride, name):\n        conv0 = conv_bn_layer(input=input,\n                              num_filters=num_filters,\n                              filter_size=1,\n                              act='relu',\n                              name=name + \"_branch2a\")\n        conv1 = conv_bn_layer(input=conv0,\n                              num_filters=num_filters,\n                              filter_size=3,\n                              stride=stride,\n                              act='relu',\n                              name=name + \"_branch2b\")\n        conv2 = conv_bn_layer(input=conv1,\n                              num_filters=num_filters * 4,\n                              filter_size=1,\n                              act=None,\n                              name=name + \"_branch2c\")\n\n        short = shortcut(input, num_filters * 4, stride, name=name + \"_branch1\")\n\n        return fluid.layers.elementwise_add(x=short, y=conv2, act='relu', name=name + \".add.output.5\")\n\n    depth = [3, 4, 6, 3]\n    num_filters = [64, 128, 256, 512]\n\n    conv = conv_bn_layer(input=input, num_filters=64, filter_size=7, stride=2, act='relu', name=\"conv1\")\n    conv = fluid.layers.pool2d(input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')\n\n    for block in range(len(depth)):\n        for i in 
range(depth[block]):\n            conv_name = \"res\" + str(block + 2) + chr(97 + i)\n            conv = bottleneck_block(input=conv,\n                                    num_filters=num_filters[block],\n                                    stride=2 if i == 0 and block != 0 else 1,\n                                    name=conv_name)\n\n    pool = fluid.layers.pool2d(input=conv, pool_size=7, pool_type='avg', global_pooling=True)\n    output = fluid.layers.fc(input=pool, size=class_dim, act='softmax')\n    return output\n```\n\n\n然后定义一系列所需的函数，输入层，神经网络的分类器，损失函数，准确率函数，优化方法，获取flowers训练数据和测试数据，并创建一个执行器。\n```python\n# 定义输入层\nimage = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n\n# 获取分类器\nmodel = resnet50(image, 102)\n\n# 获取损失函数和准确率函数\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 获取训练和测试程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-3)\nopts = optimizer.minimize(avg_cost)\n\n# 获取MNIST数据\ntrain_reader = paddle.batch(flowers.train(), batch_size=16)\ntest_reader = paddle.batch(flowers.test(), batch_size=16)\n\n# 定义一个使用GPU的执行器\nplace = fluid.CUDAPlace(0)\n# place = fluid.CPUPlace()\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n```\n\n这里可以使用`fluid.io.load_params`接口加载已经处理过的预训练模型文件。\n```python\n# 经过处理的预训练预训练模型\npretrained_model_path = 'models/pretrain_model/'\n\n# 加载经过处理的模型\nfluid.io.load_params(executor=exe, dirname=pretrained_model_path)\n```\n\n之后就可以正常训练了，从训练输出的日志可以看出，模型收敛得非常快，而且准确率还非常高，如果没有使用预训练模型是很难达到这种准确率的。\n```python\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n\n# 训练10次\nfor pass_id in range(10):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc = 
exe.run(program=fluid.default_main_program(),\n                                        feed=feeder.feed(data),\n                                        fetch_list=[avg_cost, acc])\n        # 每100个batch打印一次信息\n        if batch_id % 100 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %\n                  (pass_id, batch_id, train_cost[0], train_acc[0]))\n\n    # 进行测试\n    test_accs = []\n    test_costs = []\n    for batch_id, data in enumerate(test_reader()):\n        test_cost, test_acc = exe.run(program=test_program,\n                                      feed=feeder.feed(data),\n                                      fetch_list=[avg_cost, acc])\n        test_accs.append(test_acc[0])\n        test_costs.append(test_cost[0])\n    # 求测试结果的平均值\n    test_cost = (sum(test_costs) / len(test_costs))\n    test_acc = (sum(test_accs) / len(test_accs))\n    print('Test:%d, Cost:%0.5f, Accuracy:%0.5f' % (pass_id, test_cost, test_acc))\n```\n\n执行训练输出的信息：\n```\nPass:0, Batch:0, Cost:0.11896, Accuracy:1.00000\nPass:0, Batch:100, Cost:1.73780, Accuracy:0.68750\nPass:0, Batch:200, Cost:1.32758, Accuracy:0.68750\nPass:0, Batch:300, Cost:1.56638, Accuracy:0.56250\nTest:0, Cost:1.82441, Accuracy:0.53841\nPass:1, Batch:0, Cost:0.71874, Accuracy:0.87500\n......\n```\n\n训练结束之后，可以保存预测模型用于之后的预测使用。\n```python\n# 保存预测模型\nsave_path = 'models/infer_model/'\n# 删除旧的模型文件\nshutil.rmtree(save_path, ignore_errors=True)\n# 创建保持模型文件目录\nos.makedirs(save_path)\n# 保存预测模型\nfluid.io.save_inference_model(save_path, feeded_var_names=[image.name], target_vars=[model], executor=exe)\n```\n\n同步到百度AI Studio平台：http://aistudio.baidu.com/aistudio/#/projectdetail/38853\n同步到科赛网K-Lab平台：https://www.kesci.com/home/project/5c3f495589f4aa002b845d6b\n项目代码GitHub地址：https://github.com/yeyupiaoling/LearnPaddle2/tree/master/note9\n\n**注意：** 最新代码以GitHub上的为准\n\n# 参考资料\n1. https://github.com/oraoto/learn_ml/blob/master/paddle/pretrained.ipynb\n2. 
http://www.paddlepaddle.org/documentation/docs/zh/1.2/api_cn/io_cn.html\n\n"
  },
  {
    "path": "note9/pretrain_model.py",
    "content": "import os\nimport shutil\nimport paddle as paddle\nimport paddle.dataset.flowers as flowers\nimport paddle.fluid as fluid\nfrom paddle.fluid.param_attr import ParamAttr\n\n\n# 定义残差神经网络（ResNet）\ndef resnet50(input):\n    def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1, act=None, name=None):\n        conv = fluid.layers.conv2d(input=input,\n                                   num_filters=num_filters,\n                                   filter_size=filter_size,\n                                   stride=stride,\n                                   padding=(filter_size - 1) // 2,\n                                   groups=groups,\n                                   act=None,\n                                   param_attr=ParamAttr(name=name + \"_weights\"),\n                                   bias_attr=False,\n                                   name=name + '.conv2d.output.1')\n        if name == \"conv1\":\n            bn_name = \"bn_\" + name\n        else:\n            bn_name = \"bn\" + name[3:]\n        return fluid.layers.batch_norm(input=conv,\n                                       act=act,\n                                       name=bn_name + '.output.1',\n                                       param_attr=ParamAttr(name=bn_name + '_scale'),\n                                       bias_attr=ParamAttr(bn_name + '_offset'),\n                                       moving_mean_name=bn_name + '_mean',\n                                       moving_variance_name=bn_name + '_variance', )\n\n    def shortcut(input, ch_out, stride, name):\n        ch_in = input.shape[1]\n        if ch_in != ch_out or stride != 1:\n            return conv_bn_layer(input, ch_out, 1, stride, name=name)\n        else:\n            return input\n\n    def bottleneck_block(input, num_filters, stride, name):\n        conv0 = conv_bn_layer(input=input,\n                              num_filters=num_filters,\n                              filter_size=1,\n      
                        act='relu',\n                              name=name + \"_branch2a\")\n        conv1 = conv_bn_layer(input=conv0,\n                              num_filters=num_filters,\n                              filter_size=3,\n                              stride=stride,\n                              act='relu',\n                              name=name + \"_branch2b\")\n        conv2 = conv_bn_layer(input=conv1,\n                              num_filters=num_filters * 4,\n                              filter_size=1,\n                              act=None,\n                              name=name + \"_branch2c\")\n\n        short = shortcut(input, num_filters * 4, stride, name=name + \"_branch1\")\n\n        return fluid.layers.elementwise_add(x=short, y=conv2, act='relu', name=name + \".add.output.5\")\n\n    depth = [3, 4, 6, 3]\n    num_filters = [64, 128, 256, 512]\n\n    conv = conv_bn_layer(input=input, num_filters=64, filter_size=7, stride=2, act='relu', name=\"conv1\")\n    conv = fluid.layers.pool2d(input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')\n\n    for block in range(len(depth)):\n        for i in range(depth[block]):\n            conv_name = \"res\" + str(block + 2) + chr(97 + i)\n            conv = bottleneck_block(input=conv,\n                                    num_filters=num_filters[block],\n                                    stride=2 if i == 0 and block != 0 else 1,\n                                    name=conv_name)\n\n    pool = fluid.layers.pool2d(input=conv, pool_size=7, pool_type='avg', global_pooling=True)\n    return pool\n\n\n# 定义输入层\nimage = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n\n# 获取分类器\npool = resnet50(image)\n# 停止梯度下降\npool.stop_gradient = True\n# 由这里创建一个基本的主程序\nbase_model_program = fluid.default_main_program().clone()\n\n# 这里再重新加载网络的分类器，大小为本项目的分类大小\nmodel = fluid.layers.fc(input=pool, 
size=102, act='softmax')\n\n# 获取损失函数和准确率函数\ncost = fluid.layers.cross_entropy(input=model, label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-3)\nopts = optimizer.minimize(avg_cost)\n\n# 获取flowers数据\ntrain_reader = paddle.batch(flowers.train(), batch_size=16)\n\n# 定义一个使用GPU的执行器\nplace = fluid.CUDAPlace(0)\n# place = fluid.CPUPlace()\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 官方提供的原预训练模型\nsrc_pretrain_model_path = 'models/ResNet50_pretrained/'\n\n\n# 通过这个函数判断模型文件是否存在\ndef if_exist(var):\n    path = os.path.join(src_pretrain_model_path, var.name)\n    exist = os.path.exists(path)\n    if exist:\n        print('Load model: %s' % path)\n    return exist\n\n\n# 加载模型文件，只加载存在模型的模型文件\nfluid.io.load_vars(executor=exe, dirname=src_pretrain_model_path, predicate=if_exist, main_program=base_model_program)\n\n# 优化内存\noptimized = fluid.transpiler.memory_optimize(input_program=fluid.default_main_program(), print_log=False)\n\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n\n# 训练10次\nfor pass_id in range(10):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc = exe.run(program=fluid.default_main_program(),\n                                        feed=feeder.feed(data),\n                                        fetch_list=[avg_cost, acc])\n        # 每100个batch打印一次信息\n        if batch_id % 100 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %\n                  (pass_id, batch_id, train_cost[0], train_acc[0]))\n\n# 保存参数模型\nsave_pretrain_model_path = 'models/pretrain_model/'\n# 删除旧的模型文件\nshutil.rmtree(save_pretrain_model_path, ignore_errors=True)\n# 创建保持模型文件目录\nos.makedirs(save_pretrain_model_path)\n# 保存参数模型\nfluid.io.save_params(executor=exe, dirname=save_pretrain_model_path)\n"
  },
  {
    "path": "note9/train.py",
    "content": "import os\nimport shutil\nimport paddle as paddle\nimport paddle.dataset.flowers as flowers\nimport paddle.fluid as fluid\nfrom paddle.fluid.param_attr import ParamAttr\n\n\n# 定义残差神经网络（ResNet）\ndef resnet50(input, class_dim):\n    def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1, act=None, name=None):\n        conv = fluid.layers.conv2d(input=input,\n                                   num_filters=num_filters,\n                                   filter_size=filter_size,\n                                   stride=stride,\n                                   padding=(filter_size - 1) // 2,\n                                   groups=groups,\n                                   act=None,\n                                   param_attr=ParamAttr(name=name + \"_weights\"),\n                                   bias_attr=False,\n                                   name=name + '.conv2d.output.1')\n        if name == \"conv1\":\n            bn_name = \"bn_\" + name\n        else:\n            bn_name = \"bn\" + name[3:]\n        return fluid.layers.batch_norm(input=conv,\n                                       act=act,\n                                       name=bn_name + '.output.1',\n                                       param_attr=ParamAttr(name=bn_name + '_scale'),\n                                       bias_attr=ParamAttr(bn_name + '_offset'),\n                                       moving_mean_name=bn_name + '_mean',\n                                       moving_variance_name=bn_name + '_variance', )\n\n    def shortcut(input, ch_out, stride, name):\n        ch_in = input.shape[1]\n        if ch_in != ch_out or stride != 1:\n            return conv_bn_layer(input, ch_out, 1, stride, name=name)\n        else:\n            return input\n\n    def bottleneck_block(input, num_filters, stride, name):\n        conv0 = conv_bn_layer(input=input,\n                              num_filters=num_filters,\n                              
filter_size=1,\n                              act='relu',\n                              name=name + \"_branch2a\")\n        conv1 = conv_bn_layer(input=conv0,\n                              num_filters=num_filters,\n                              filter_size=3,\n                              stride=stride,\n                              act='relu',\n                              name=name + \"_branch2b\")\n        conv2 = conv_bn_layer(input=conv1,\n                              num_filters=num_filters * 4,\n                              filter_size=1,\n                              act=None,\n                              name=name + \"_branch2c\")\n\n        short = shortcut(input, num_filters * 4, stride, name=name + \"_branch1\")\n\n        return fluid.layers.elementwise_add(x=short, y=conv2, act='relu', name=name + \".add.output.5\")\n\n    depth = [3, 4, 6, 3]\n    num_filters = [64, 128, 256, 512]\n\n    conv = conv_bn_layer(input=input, num_filters=64, filter_size=7, stride=2, act='relu', name=\"conv1\")\n    conv = fluid.layers.pool2d(input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')\n\n    for block in range(len(depth)):\n        for i in range(depth[block]):\n            conv_name = \"res\" + str(block + 2) + chr(97 + i)\n            conv = bottleneck_block(input=conv,\n                                    num_filters=num_filters[block],\n                                    stride=2 if i == 0 and block != 0 else 1,\n                                    name=conv_name)\n\n    pool = fluid.layers.pool2d(input=conv, pool_size=7, pool_type='avg', global_pooling=True)\n    output = fluid.layers.fc(input=pool, size=class_dim, act='softmax')\n    return output\n\n\n# 定义输入层\nimage = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')\nlabel = fluid.layers.data(name='label', shape=[1], dtype='int64')\n\n# 获取分类器\nmodel = resnet50(image, 102)\n\n# 获取损失函数和准确率函数\ncost = fluid.layers.cross_entropy(input=model, 
label=label)\navg_cost = fluid.layers.mean(cost)\nacc = fluid.layers.accuracy(input=model, label=label)\n\n# 获取训练和测试程序\ntest_program = fluid.default_main_program().clone(for_test=True)\n\n# 定义优化方法\noptimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-3)\nopts = optimizer.minimize(avg_cost)\n\n# 获取花卉数据\ntrain_reader = paddle.batch(flowers.train(), batch_size=16)\ntest_reader = paddle.batch(flowers.test(), batch_size=16)\n\n# 定义一个使用GPU的执行器\nplace = fluid.CUDAPlace(0)\n# place = fluid.CPUPlace()\nexe = fluid.Executor(place)\n# 进行参数初始化\nexe.run(fluid.default_startup_program())\n\n# 经过处理的预训练预训练模型\npretrained_model_path = 'models/pretrain_model/'\n\n# 加载经过处理的模型\nfluid.io.load_params(executor=exe, dirname=pretrained_model_path)\n\n# 定义输入数据维度\nfeeder = fluid.DataFeeder(place=place, feed_list=[image, label])\n\n# 训练10次\nfor pass_id in range(10):\n    # 进行训练\n    for batch_id, data in enumerate(train_reader()):\n        train_cost, train_acc = exe.run(program=fluid.default_main_program(),\n                                        feed=feeder.feed(data),\n                                        fetch_list=[avg_cost, acc])\n        # 每100个batch打印一次信息\n        if batch_id % 100 == 0:\n            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %\n                  (pass_id, batch_id, train_cost[0], train_acc[0]))\n\n    # 进行测试\n    test_accs = []\n    test_costs = []\n    for batch_id, data in enumerate(test_reader()):\n        test_cost, test_acc = exe.run(program=test_program,\n                                      feed=feeder.feed(data),\n                                      fetch_list=[avg_cost, acc])\n        test_accs.append(test_acc[0])\n        test_costs.append(test_cost[0])\n    # 求测试结果的平均值\n    test_cost = (sum(test_costs) / len(test_costs))\n    test_acc = (sum(test_accs) / len(test_accs))\n    print('Test:%d, Cost:%0.5f, Accuracy:%0.5f' % (pass_id, test_cost, test_acc))\n\n    # 保存预测模型\n    save_path = 'models/infer_model/'\n    # 删除旧的模型文件\n    
shutil.rmtree(save_path, ignore_errors=True)\n    # 创建保存模型文件目录\n    os.makedirs(save_path)\n    # 保存预测模型\n    fluid.io.save_inference_model(save_path, feeded_var_names=[image.name], target_vars=[model], executor=exe)\n"
  },
  {
    "path": "requirements.txt",
    "content": "paddlepaddle\n"
  }
]