[
  {
    "path": ".gitignore",
    "content": "creditcard*\noutputs*\nroc.pdf\nconfusion.pdf\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "<img align=\"left\" src=\"https://github.com/XanaduAI/quantum-neural-networks/blob/master/static/tetronimo.png\" width=300px>\n\n# Continuous-variable quantum neural networks\n\nThis repository contains the source code used to produce the results presented in [*\"Continuous-variable quantum neural networks\"*](https://doi.org/10.1103/PhysRevResearch.1.033063).\n\n<br/>\n\n## Requirements\n\nTo construct and optimize the variational quantum circuits, these scripts and notebooks use the TensorFlow backend of [Strawberry Fields](https://github.com/XanaduAI/strawberryfields). In addition, matplotlib is required for generating output plots.\n\n**Due to subsequent interface upgrades, these scripts will work only with the following\nconfiguration**\n \n- Strawberry Fields version 0.10.0\n- TensorFlow version 1.3\n- Python version 3.5 or 3.6\n\nYour version of Python can be checked by running `python --version`. The correct versions of\nStrawberryFields and TensorFlow can be installed by running `pip install -r requirements.txt`\nfrom the main directory of this repository. \n\n## Contents\n\n<!-- <p align=\"center\">\n\t<img src=\"https://github.com/XanaduAI/quantum-neural-networks/blob/master/static/function_fitting.png\">\n</p> -->\n\n* **Function fitting**: The folder `function_fitting` contains the Python script `function_fitting.py`, which automates the process of fitting classical functions using continuous-variable (CV) variational quantum circuits. Simply specify the function you would like to fit, along with other hyperparameters, and this script automatically constructs and optimizes the CV quantum neural network. In addition, training data is also provided.\n\n* **Quantum autoencoder**: coming soon.\n\n* **Quantum fraud detection**: The folder `fraud_detection` contains the Python script `fraud_detection.py`, which builds and trains a hybrid classical/quantum model for fraud detection. Additional scripts are provided for visualizing the results.\n\n* **Tetrominos learning**: The folder `tetrominos_learning` contains the Python script `tetrominos_learning.py`, which trains a continuous-variable (CV) quantum neural network. The task of the network is to encode 7 different 4X4 images, representing the (L,O,T,I,S,J,Z) [tetrominos](https://en.wikipedia.org/wiki/Tetromino), in the photon number distribution of two light modes. Once the training phase is completed, the script `plot_images.py` can be executed in order to generate a `.png` figure representing the final results.\n\n<img align='right' src=\"https://github.com/XanaduAI/quantum-neural-networks/blob/master/static/tetronimo_gif.gif\">\n\n## Using the scripts\n\nTo use the scripts, simply set the input data, output data, and hyperparametersby modifying the scripts directly - and then enter the subdirectory and run the script using Python 3:\n\n```bash\npython3 script_name.py\n```\n\nThe outputs of the simulations will be saved in the subdirectory.\n\nTo access any saved data, the file can be loaded using NumPy:\n\n```python\nresults = np.load('simulation_results.npz')\n```\n\n## Authors\n\nNathan Killoran, Thomas R. Bromley, Juan Miguel Arrazola, Maria Schuld, Nicolás Quesada, and Seth Lloyd.\n\nIf you are doing any research using this source code and Strawberry Fields, please cite the following two papers:\n\n> Nathan Killoran, Thomas R. Bromley, Juan Miguel Arrazola, Maria Schuld, Nicolás Quesada, and Seth Lloyd. Continuous-variable quantum neural networks. 
## Authors\n\nNathan Killoran, Thomas R. Bromley, Juan Miguel Arrazola, Maria Schuld, Nicolás Quesada, and Seth Lloyd.\n\nIf you are doing any research using this source code and Strawberry Fields, please cite the following two papers:\n\n> Nathan Killoran, Thomas R. Bromley, Juan Miguel Arrazola, Maria Schuld, Nicolás Quesada, and Seth Lloyd. Continuous-variable quantum neural networks. [Physical Review Research, 1(3), 033063](https://doi.org/10.1103/PhysRevResearch.1.033063) (2019).\n\n> Nathan Killoran, Josh Izaac, Nicolás Quesada, Ville Bergholm, Matthew Amy, and Christian Weedbrook. Strawberry Fields: A Software Platform for Photonic Quantum Computing. [Quantum, 3, 129](https://quantum-journal.org/papers/q-2019-03-11-129/) (2019).\n\n## License\n\nThis source code is free and open source, released under the Apache License, Version 2.0.\n"
  },
  {
    "path": "fraud_detection/README.md",
    "content": "<img align=\"left\" src=\"https://github.com/XanaduAI/quantum-neural-networks/blob/master/static/fraud_detection.png\" width=300px>\n\n# Fraud detection\n\nThis folder provides the source code used in Experiment B in *\"Continuous-variable quantum neural networks\"* [arXiv:1806.06871](https://arxiv.org/abs/1806.06871).\n\n## Getting the data\n\nThe raw data is sourced from the [Credit Card Fraud Detection](https://www.kaggle.com/mlg-ulb/creditcardfraud) dataset on Kaggle. The `creditcard.csv` file should be downloaded and placed in this folder. The user can then run:\n```bash\npython3 data_processor.py\n```\nThis script creates two datasets for training and testing.\n\n## Training and testing the model\n\nThe model is a hybrid classical-quantum classifier, with a number of input classical layers that control the parameters of an input layer in a two-mode CV quantum neural network. The model is trained so that it outputs a photon in one mode for a genuine credit card transaction, and outputs a photon in the other mode for a fraudulent transaction.\n\nTraining can be performed with:\n```bash\npython3 fraud_detection.py\n```\n| WARNING: this script can take a long time to run. On a typical PC, it may take hours to arrive at a well-trained model. |\n| --- |\n\nThe model is periodically saved during training, and progress can be monitored by launching TensorBoard in the terminal:\n```bash\ntensorboard --logdir=outputs/tensorboard/simulation_label\n```\nwhere `simulation_label` is the name used to refer to a particular run of the script `fraud_detection.py` (this is specified within the file itself; the default is `1`).\n\nTesting can be performed with:\n```bash\npython3 testing.py\n```\n| WARNING: this script can take a long time to run|\n| --- |\n\nHere, the user must edit `testing.py` to point to the simulation label and checkpoint of the model which is to be tested. These are specified under the variables `simulation_label` and `ckpt_val` in `testing.py`.\n\nThe output of testing is a confusion table, which can be found as a numpy array in `outputs/confusion/simulation_label`. The confusion table is given for multiple threshold probabilities for a transaction to be considered as genuine.\n\n## Visualizing the results\n\nThe performance of the trained model can be investigated with:\n```bash\npython3 roc.py\n```\nwhich outputs the receiver operating characteristic (ROC) curve and confusion matrix for the optimal threshold probability.\n\n"
## Visualizing the results\n\nThe performance of the trained model can be investigated with:\n```bash\npython3 roc.py\n```\nwhich outputs the receiver operating characteristic (ROC) curve and the confusion matrix at the optimal threshold probability.\n\n"
  },
  {
    "path": "fraud_detection/data_processor.py",
    "content": "# Copyright 2018 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#     http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"For processing data from https://www.kaggle.com/mlg-ulb/creditcardfraud\"\"\"\nimport csv\nimport numpy as np\nimport random\n\n# creditcard.csv downloaded from https://www.kaggle.com/mlg-ulb/creditcardfraud\nwith open('creditcard.csv', 'r') as csv_file:\n    csv_reader = csv.reader(csv_file, delimiter=',')\n\n    data = list(csv_reader)\n\ndata = data[1:]\ndata_genuine = []\ndata_fraudulent = []\n\n# Splitting genuine and fraudulent data\nfor i in range(len(data)):\n    if int(data[i][30]) == 0:\n        data_genuine.append([float(i) for i in data[i]])\n    if int(data[i][30]) == 1:\n        data_fraudulent.append([float(i) for i in data[i]])\n\nfraudulent_data_points = len(data_fraudulent)\n\n# We want the genuine data points to be 3x the fraudulent ones\nundersampling_ratio = 3\n\ngenuine_data_points = fraudulent_data_points * undersampling_ratio\n\nrandom.shuffle(data_genuine)\nrandom.shuffle(data_fraudulent)\n\n# Fraudulent and genuine transactions are split into two datasets for cross validation\n\ndata_fraudulent_1 = data_fraudulent[:int(fraudulent_data_points / 2)]\ndata_fraudulent_2 = data_fraudulent[int(fraudulent_data_points / 2):]\n\ndata_genuine_1 = data_genuine[:int(genuine_data_points / 2)]\ndata_genuine_2 = data_genuine[int(genuine_data_points / 2):genuine_data_points]\ndata_genuine_remaining = data_genuine[genuine_data_points:]\n\nrandom.shuffle(data_fraudulent_1)\nrandom.shuffle(data_fraudulent_2)\nrandom.shuffle(data_genuine_1)\nrandom.shuffle(data_genuine_2)\n\nnp.savetxt('creditcard_genuine_1.csv', data_genuine_1, delimiter=',')\nnp.savetxt('creditcard_genuine_2.csv', data_genuine_2, delimiter=',')\nnp.savetxt('creditcard_fraudulent_1.csv', data_fraudulent_1, delimiter=',')\nnp.savetxt('creditcard_fraudulent_2.csv', data_fraudulent_2, delimiter=',')\n# Larger datasets are used for testing, including genuine transactions unseen in training\nnp.savetxt('creditcard_combined_1_big.csv', data_fraudulent_1 + data_genuine_1 + data_genuine_remaining, delimiter=',')\nnp.savetxt('creditcard_combined_2_big.csv', data_fraudulent_2 + data_genuine_2 + data_genuine_remaining, delimiter=',')\n"
  },
  {
    "path": "fraud_detection/fraud_detection.py",
    "content": "# Copyright 2018 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#     http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Fraud detection fitting script\"\"\"\nimport numpy as np\nimport os\n\nimport tensorflow as tf\n\nimport strawberryfields as sf\nfrom strawberryfields.ops import Dgate, BSgate, Kgate, Sgate, Rgate\n\nimport sys\nsys.path.append(\"..\")\nimport version_check\n\n# ===================================================================================\n#                                   Hyperparameters\n# ===================================================================================\n\n# Two modes required: one for \"genuine\" transactions and one for \"fradulent\"\nmode_number = 2\n# Number of photonic quantum layers\ndepth = 4\n\n# Fock basis truncation\ncutoff = 10\n# Number of batches in optimization\nreps = 30000\n\n# Label for simulation\nsimulation_label = 1\n\n# Number of batches to use in the optimization\nbatch_size = 24\n\n# Random initialization of gate parameters\nsdev_photon = 0.1\nsdev = 1\n\n# Variable clipping values\ndisp_clip = 5\nsq_clip = 5\nkerr_clip = 1\n\n# If loading from checkpoint, previous batch number reached\nckpt_val = 0\n\n# Number of repetitions between each output to TensorBoard\ntb_reps = 100\n# Number of repetitions between each model save\nsavr_reps = 1000\n\nmodel_string = str(simulation_label)\n\n# Target location of output\nfolder_locator = './outputs/'\n\n# Locations of TensorBoard and model save outputs\nboard_string = folder_locator + 'tensorboard/' + model_string + '/'\ncheckpoint_string = folder_locator + 'models/' + model_string + '/'\n\n# ===================================================================================\n#                                   Loading the training data\n# ===================================================================================\n\n# Data outputted from data_processor.py\ndata_genuine = np.loadtxt('creditcard_genuine_1.csv', delimiter=',')\ndata_fraudulent = np.loadtxt('creditcard_fraudulent_1.csv', delimiter=',')\n\n# Combining genuine and fraudulent data\ndata_combined = np.append(data_genuine, data_fraudulent, axis=0)\ndata_points = len(data_combined)\n\n# ===================================================================================\n#                                   Setting up the classical NN input\n# ===================================================================================\n\n# Input neurons\ninput_neurons = 10\n# Widths of hidden layers\nnn_architecture = [10, 10]\n# Output neurons of classical part\noutput_neurons = 14\n\n# Defining classical network parameters\ninput_classical_layer = tf.placeholder(tf.float32, shape=[batch_size, input_neurons])\n\nlayer_matrix_1 = tf.Variable(tf.random_normal(shape=[input_neurons, nn_architecture[0]]))\noffset_1 = tf.Variable(tf.random_normal(shape=[nn_architecture[0]]))\n\nlayer_matrix_2 = tf.Variable(tf.random_normal(shape=[nn_architecture[0], nn_architecture[1]]))\noffset_2 = 
tf.Variable(tf.random_normal(shape=[nn_architecture[1]]))\n\nlayer_matrix_3 = tf.Variable(tf.random_normal(shape=[nn_architecture[1], output_neurons]))\noffset_3 = tf.Variable(tf.random_normal(shape=[output_neurons]))\n\n# Creating hidden layers and output\nlayer_1 = tf.nn.elu(tf.matmul(input_classical_layer, layer_matrix_1) + offset_1)\nlayer_2 = tf.nn.elu(tf.matmul(layer_1, layer_matrix_2) + offset_2)\n\noutput_layer = tf.nn.elu(tf.matmul(layer_2, layer_matrix_3) + offset_3)\n\n# ===================================================================================\n#                                   Defining QNN parameters\n# ===================================================================================\n\n# Number of beamsplitters in interferometer\nbs_in_interferometer = int(1.0 * mode_number * (mode_number - 1) / 2)\n\nwith tf.name_scope('variables'):\n    bs_variables = tf.Variable(tf.random_normal(shape=[depth, bs_in_interferometer, 2, 2]\n                                                , stddev=sdev))\n    phase_variables = tf.Variable(tf.random_normal(shape=[depth, mode_number, 2], stddev=sdev))\n\n    sq_magnitude_variables = tf.Variable(tf.random_normal(shape=[depth, mode_number]\n                                                          , stddev=sdev_photon))\n    sq_phase_variables = tf.Variable(tf.random_normal(shape=[depth, mode_number]\n                                                      , stddev=sdev))\n    disp_magnitude_variables = tf.Variable(tf.random_normal(shape=[depth, mode_number]\n                                                            , stddev=sdev_photon))\n    disp_phase_variables = tf.Variable(tf.random_normal(shape=[depth, mode_number]\n                                                        , stddev=sdev))\n    kerr_variables = tf.Variable(tf.random_normal(shape=[depth, mode_number], stddev=sdev_photon))\n\nparameters = [layer_matrix_1, offset_1, layer_matrix_2, offset_2, layer_matrix_3, offset_3, bs_variables,\n              phase_variables, sq_magnitude_variables, sq_phase_variables, disp_magnitude_variables,\n              disp_phase_variables, kerr_variables]\n\n\n# ===================================================================================\n#                                   Constructing quantum layers\n# ===================================================================================\n\n\n# Defining input QNN layer, whose parameters are set by the outputs of the classical network\ndef input_qnn_layer():\n    with tf.name_scope('inputlayer'):\n        Sgate(tf.clip_by_value(output_layer[:, 0], -sq_clip, sq_clip), output_layer[:, 1]) | q[0]\n        Sgate(tf.clip_by_value(output_layer[:, 2], -sq_clip, sq_clip), output_layer[:, 3]) | q[1]\n\n        BSgate(output_layer[:, 4], output_layer[:, 5]) | (q[0], q[1])\n\n        Rgate(output_layer[:, 6]) | q[0]\n        Rgate(output_layer[:, 7]) | q[1]\n\n        Dgate(tf.clip_by_value(output_layer[:, 8], -disp_clip, disp_clip), output_layer[:, 9]) \\\n        | q[0]\n        Dgate(tf.clip_by_value(output_layer[:, 10], -disp_clip, disp_clip), output_layer[:, 11]) \\\n        | q[1]\n\n        Kgate(tf.clip_by_value(output_layer[:, 12], -kerr_clip, kerr_clip)) | q[0]\n        Kgate(tf.clip_by_value(output_layer[:, 13], -kerr_clip, kerr_clip)) | q[1]\n\n\n# Defining standard QNN layers\ndef qnn_layer(layer_number):\n    with tf.name_scope('layer_{}'.format(layer_number)):\n        BSgate(bs_variables[layer_number, 0, 0, 0], bs_variables[layer_number, 0, 0, 1]) \\\n        | (q[0], q[1])\n\n    
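    # Together with the beamsplitter above, the phase rotations below implement\n        # an interferometer; squeezing, a second interferometer, displacement, and a\n        # non-Gaussian Kerr gate then complete one CV quantum neural network layer.\n    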
    for i in range(mode_number):\n            Rgate(phase_variables[layer_number, i, 0]) | q[i]\n\n        for i in range(mode_number):\n            Sgate(tf.clip_by_value(sq_magnitude_variables[layer_number, i], -sq_clip, sq_clip),\n                  sq_phase_variables[layer_number, i]) | q[i]\n\n        BSgate(bs_variables[layer_number, 0, 1, 0], bs_variables[layer_number, 0, 1, 1]) \\\n        | (q[0], q[1])\n\n        for i in range(mode_number):\n            Rgate(phase_variables[layer_number, i, 1]) | q[i]\n\n        for i in range(mode_number):\n            Dgate(tf.clip_by_value(disp_magnitude_variables[layer_number, i], -disp_clip,\n                                   disp_clip), disp_phase_variables[layer_number, i]) | q[i]\n\n        for i in range(mode_number):\n            Kgate(tf.clip_by_value(kerr_variables[layer_number, i], -kerr_clip, kerr_clip)) | q[i]\n\n\n# ===================================================================================\n#                                   Defining QNN\n# ===================================================================================\n\n# construct the two-mode Strawberry Fields engine\neng, q = sf.Engine(mode_number)\n\n# construct the circuit\nwith eng:\n    input_qnn_layer()\n\n    for i in range(depth):\n        qnn_layer(i)\n\n# run the engine (in batch mode)\nstate = eng.run(\"tf\", cutoff_dim=cutoff, eval=False, batch_size=batch_size)\n# extract the state\nket = state.ket()\n\n# ===================================================================================\n#                                   Setting up cost function\n# ===================================================================================\n\n# Classifications for whole batch: rows act as data points in the batch and columns\n# are the one-hot classifications\nclassification = tf.placeholder(shape=[batch_size, 2], dtype=tf.int32)\n\nfunc_to_minimise = 0\n\n# Building up the function to minimize by looping through batch\nfor i in range(batch_size):\n    # Probabilities corresponding to a single photon in either mode\n    prob = tf.abs(ket[i, classification[i, 0], classification[i, 1]]) ** 2\n    # These probabilities should be optimised to 1\n    func_to_minimise += (1.0 / batch_size) * (prob - 1) ** 2\n\n# Defining the cost function\ncost_func = func_to_minimise\ntf.summary.scalar('Cost', cost_func)\n\n# ===================================================================================\n#                                   Training\n# ===================================================================================\n\n# We choose the Adam optimizer\noptimiser = tf.train.AdamOptimizer()\ntraining = optimiser.minimize(cost_func)\n\n# Saver/Loader for outputting model\nsaver = tf.train.Saver(parameters)\n\nsession = tf.Session()\nsession.run(tf.global_variables_initializer())\n\n# Load previous model if non-zero ckpt_val is specified\nif ckpt_val != 0:\n    saver.restore(session, checkpoint_string + 'sess.ckpt-' + str(ckpt_val))\n\n# TensorBoard writer\nwriter = tf.summary.FileWriter(board_string)\nmerge = tf.summary.merge_all()\n\ncounter = ckpt_val\n\n# Tracks optimum value found (set high so first iteration encodes value)\nopt_val = 1e20\n# Batch number in which optimum value occurs\nopt_position = 0\n# Flag to detect if new optimum occurred in last batch\nnew_opt = False\n\nwhile counter <= reps:\n\n    # Shuffles data to create new epoch\n    np.random.shuffle(data_combined)\n\n    # Splits data into batches\n    split_data = np.split(data_combined, 
data_points // batch_size)\n\n    for batch in split_data:\n\n        if counter > reps:\n            break\n\n        # Input data (provided as principal components)\n        data_points_principal_components = batch[:, 1:input_neurons + 1]\n        # Data classes\n        classes = batch[:, -1]\n\n        # Encoding classes into one-hot form\n        one_hot_input = np.zeros((batch_size, 2))\n\n        for i in range(batch_size):\n            if int(classes[i]) == 0:\n                # Encoded such that genuine transactions should be outputted as a photon in the first mode\n                one_hot_input[i] = [1, 0]\n            else:\n                one_hot_input[i] = [0, 1]\n\n        # Output to TensorBoard\n        if counter % tb_reps == 0:\n            [summary, training_run, func_to_minimise_run] = session.run([merge, training, func_to_minimise],\n                                                                        feed_dict={\n                                                                            input_classical_layer:\n                                                                                data_points_principal_components,\n                                                                            classification: one_hot_input})\n            writer.add_summary(summary, counter)\n\n        else:\n            # Standard run of training\n            [training_run, func_to_minimise_run] = session.run([training, func_to_minimise], feed_dict={\n                input_classical_layer: data_points_principal_components, classification: one_hot_input})\n\n        # Ensures cost function is well behaved\n        if np.isnan(func_to_minimise_run):\n            compute_grads = session.run(optimiser.compute_gradients(cost_func),\n                                        feed_dict={input_classical_layer: data_points_principal_components,\n                                                   classification: one_hot_input})\n            if not os.path.exists(checkpoint_string):\n                os.makedirs(checkpoint_string)\n            # If cost function becomes NaN, output value of gradients for investigation\n            np.save(checkpoint_string + 'NaN.npy', compute_grads)\n            print('NaNs outputted - leaving at step ' + str(counter))\n            raise SystemExit\n\n        # Test to see if new optimum found in current batch\n        if func_to_minimise_run < opt_val:\n            opt_val = func_to_minimise_run\n            opt_position = counter\n            new_opt = True\n\n        # Save model every fixed number of batches, provided a new optimum value has occurred\n        if (counter % savr_reps == 0) and (counter != 0) and new_opt and (not np.isnan(func_to_minimise_run)):\n            if not os.path.exists(checkpoint_string):\n                os.makedirs(checkpoint_string)\n            saver.save(session, checkpoint_string + 'sess.ckpt', global_step=counter)\n            # Saves position of optimum and corresponding value of cost function\n            np.savetxt(checkpoint_string + 'optimum.txt', [opt_position, opt_val])\n            # Reset the flag so the next save again requires a new optimum\n            new_opt = False\n\n        counter += 1\n"
  },
  {
    "path": "fraud_detection/plot_confusion_matrix.py",
    "content": "# Code adapted from scikit-learn: https://scikit-learn.org/stable/_downloads/plot_confusion_matrix.py\n\"\"\"\nNew BSD License\n\nCopyright (c) 2007-2019 The scikit-learn developers.\nAll rights reserved.\n\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n  a. Redistributions of source code must retain the above copyright notice,\n     this list of conditions and the following disclaimer.\n  b. Redistributions in binary form must reproduce the above copyright\n     notice, this list of conditions and the following disclaimer in the\n     documentation and/or other materials provided with the distribution.\n  c. Neither the name of the Scikit-learn Developers  nor the names of\n     its contributors may be used to endorse or promote products\n     derived from this software without specific prior written\n     permission.\n\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\nLIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\nOUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGE.\n\n================\nConfusion matrix\n================\n\nExample of confusion matrix usage to evaluate the quality\nof the output of a classifier on the iris data set. The\ndiagonal elements represent the number of points for which\nthe predicted label is equal to the true label, while\noff-diagonal elements are those that are mislabeled by the\nclassifier. The higher the diagonal values of the confusion\nmatrix the better, indicating many correct predictions.\n\nThe figures show the confusion matrix with and without\nnormalization by class support size (number of elements\nin each class). This kind of normalization can be\ninteresting in case of class imbalance to have a more\nvisual interpretation of which class is being misclassified.\n\nHere the results are not as good as they could be as our\nchoice for the regularization parameter C was not the best.\nIn real life applications this parameter is usually chosen\nusing :ref:`grid_search`.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_confusion_matrix(cm, classes, title=None, cmap=plt.cm.Blues):\n    \"\"\"\n    This function prints and plots the confusion matrix.\n    Normalization can be applied by setting `normalize=True`.\n    \"\"\"\n\n    fig, ax = plt.subplots()\n    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n\n    # We want to show all ticks...\n    ax.set(xticks=np.arange(cm.shape[1]),\n           yticks=np.arange(cm.shape[0]),\n           # ... 
and label them with the respective list entries\n           xticklabels=classes, yticklabels=classes,\n           ylabel='True label',\n           xlabel='Predicted label')\n\n    if title:\n        ax.set_title(title)\n\n    # Rotate the tick labels and set their alignment.\n    plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n             rotation_mode=\"anchor\")\n\n    # Loop over data dimensions and create text annotations.\n    fmt = '.2f'\n    thresh = cm.max() / 2.\n    for i in range(cm.shape[0]):\n        for j in range(cm.shape[1]):\n            ax.text(j, i, format(cm[i, j], fmt),\n                    ha=\"center\", va=\"center\",\n                    color=\"white\" if cm[i, j] > thresh else \"black\")\n    fig.tight_layout()\n    return ax\n"
  },
  {
    "path": "fraud_detection/roc.py",
    "content": "# Copyright 2018 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#     http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Script for creating Plots\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plot_confusion_matrix\n\nplt.switch_backend('agg')\n\n# Label for simulation\nsimulation_label = 1\n\n# Loading confusion table\nconfusion_table = np.load('./outputs/confusion/' + str(simulation_label) + '/confusion_table.npy')\n\n# Defining array of thresholds from 0 to 1 to consider in the ROC curve\nthresholds_points = 101\nthresholds = np.linspace(0, 1, num=thresholds_points)\n\n# false/true positive/negative rates\nfp_rate = []\ntp_rate = []\nfn_rate = []\ntn_rate = []\n\n# Creating rates\nfor i in range(thresholds_points):\n    fp_rate.append(confusion_table[i, 0, 1] / (confusion_table[i, 0, 1] + confusion_table[i, 0, 0]))\n    tp_rate.append(confusion_table[i, 1, 1] / (confusion_table[i, 1, 1] + confusion_table[i, 1, 0]))\n\n    fn_rate.append(confusion_table[i, 1, 0] / (confusion_table[i, 1, 1] + confusion_table[i, 1, 0]))\n    tn_rate.append(confusion_table[i, 0, 0] / (confusion_table[i, 0, 0] + confusion_table[i, 0, 1]))\n\n# Distance of each threshold from ideal point at (0, 1)\ndistance_from_ideal = (np.array(tn_rate) - 1)**2 + (np.array(fn_rate) - 0)**2\n\n# Threshold closest to (0, 1)\nclosest_threshold = np.argmin(distance_from_ideal)\n\n# Area under ROC curve\narea_under_curve = np.trapz(np.sort(tn_rate), x=np.sort(fn_rate))\n\nprint(\"Area under ROC curve: \" + str(area_under_curve))\nprint(\"Closest threshold to optimal ROC: \" + str(thresholds[closest_threshold]))\n\n# Plotting ROC curve\nstraight_line = np.linspace(0, 1, 1001)\n\nplt.gcf().subplots_adjust(bottom=0.15)\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nplt.rc('font', serif='New Century Schoolbook')\nplt.gcf().subplots_adjust(bottom=0.15)\nplt.plot(fn_rate, tn_rate, color='#056eee', linewidth=2.2)\nplt.plot(straight_line, straight_line, color='#070d0d', linewidth=1.5, dashes=[6, 2])\nplt.plot(0.0, 1.0, 'ko')\nplt.plot(fn_rate[closest_threshold], tn_rate[closest_threshold], 'k^')\nplt.ylim(-0.05, 1.05)\nplt.xlim(-0.05, 1.05)\nplt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\nplt.xlabel('False negative rate', fontsize=15)\nplt.ylabel('True negative rate', fontsize=15)\nplt.tick_params(axis='both', which='major', labelsize=14, length=6, width=1)\nplt.tick_params(axis='both', which='minor', labelsize=14, length=6, width=1)\nplt.savefig('./roc.pdf')\nplt.close()\n\n# Selecting ideal confusion table and plotting\nconfusion_table_ideal = confusion_table[closest_threshold]\n\nplt.figure()\nplot_confusion_matrix.plot_confusion_matrix(confusion_table_ideal, classes=['Genuine', 'Fraudulent'], title='')\n\nplt.savefig('./confusion.pdf')\n"
  },
  {
    "path": "fraud_detection/testing.py",
    "content": "# Copyright 2018 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#     http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Fraud detection fitting script\"\"\"\nimport numpy as np\nimport os\n\nimport tensorflow as tf\n\nimport strawberryfields as sf\nfrom strawberryfields.ops import Dgate, BSgate, Kgate, Sgate, Rgate\n\nimport sys\nsys.path.append(\"..\")\nimport version_check\n\n# ===================================================================================\n#                                   Hyperparameters\n# ===================================================================================\n\n# Two modes required: one for \"genuine\" transactions and one for \"fradulent\"\nmode_number = 2\n# Number of photonic quantum layers\ndepth = 4\n\n# Fock basis truncation\ncutoff = 10\n\n# Label for simulation\nsimulation_label = 1\n\n# Random initialization of gate parameters\nsdev_photon = 0.1\nsdev = 1\n\n# Variable clipping values\ndisp_clip = 5\nsq_clip = 5\nkerr_clip = 1\n\n# If loading from checkpoint, previous batch number reached\nckpt_val = 30000\n\nmodel_string = str(simulation_label)\n\n# Target location of output\nfolder_locator = './outputs/'\n\n# Locations of model saves and where confusion matrix will be saved\ncheckpoint_string = folder_locator + 'models/' + model_string + '/'\nconfusion_string = folder_locator + 'confusion/' + model_string + '/'\n\n# ===================================================================================\n#                                   Loading the testing data\n# ===================================================================================\n\n# Loading combined dataset with extra genuine datapoints unseen in training\ndata_combined = np.loadtxt('./creditcard_combined_2_big.csv', delimiter=',')\n\n# Set to a size so that the data can be equally split up with no remainder\nbatch_size = 29\n\ndata_combined_points = len(data_combined)\n\n# ===================================================================================\n#                                   Setting up the classical NN input\n# ===================================================================================\n\n# Input neurons\ninput_neurons = 10\n# Widths of hidden layers\nnn_architecture = [10, 10]\n# Output neurons of classical part\noutput_neurons = 14\n\n# Defining classical network parameters\ninput_classical_layer = tf.placeholder(tf.float32, shape=[batch_size, input_neurons])\n\nlayer_matrix_1 = tf.Variable(tf.random_normal(shape=[input_neurons, nn_architecture[0]]))\noffset_1 = tf.Variable(tf.random_normal(shape=[nn_architecture[0]]))\n\nlayer_matrix_2 = tf.Variable(tf.random_normal(shape=[nn_architecture[0], nn_architecture[1]]))\noffset_2 = tf.Variable(tf.random_normal(shape=[nn_architecture[1]]))\n\nlayer_matrix_3 = tf.Variable(tf.random_normal(shape=[nn_architecture[1], output_neurons]))\noffset_3 = tf.Variable(tf.random_normal(shape=[output_neurons]))\n\n# Creating hidden layers and output\nlayer_1 = tf.nn.elu(tf.matmul(input_classical_layer, 
layer_matrix_1) + offset_1)\nlayer_2 = tf.nn.elu(tf.matmul(layer_1, layer_matrix_2) + offset_2)\n\noutput_layer = tf.nn.elu(tf.matmul(layer_2, layer_matrix_3) + offset_3)\n\n# ===================================================================================\n#                                   Defining QNN parameters\n# ===================================================================================\n\n# Number of beamsplitters in interferometer\nbs_in_interferometer = int(1.0 * mode_number * (mode_number - 1) / 2)\n\nwith tf.name_scope('variables'):\n    bs_variables = tf.Variable(tf.random_normal(shape=[depth, bs_in_interferometer, 2, 2]\n                                                , stddev=sdev))\n    phase_variables = tf.Variable(tf.random_normal(shape=[depth, mode_number, 2], stddev=sdev))\n\n    sq_magnitude_variables = tf.Variable(tf.random_normal(shape=[depth, mode_number]\n                                                          , stddev=sdev_photon))\n    sq_phase_variables = tf.Variable(tf.random_normal(shape=[depth, mode_number]\n                                                      , stddev=sdev))\n    disp_magnitude_variables = tf.Variable(tf.random_normal(shape=[depth, mode_number]\n                                                            , stddev=sdev_photon))\n    disp_phase_variables = tf.Variable(tf.random_normal(shape=[depth, mode_number]\n                                                        , stddev=sdev))\n    kerr_variables = tf.Variable(tf.random_normal(shape=[depth, mode_number], stddev=sdev_photon))\n\nparameters = [layer_matrix_1, offset_1, layer_matrix_2, offset_2, layer_matrix_3, offset_3, bs_variables,\n              phase_variables, sq_magnitude_variables, sq_phase_variables, disp_magnitude_variables,\n              disp_phase_variables, kerr_variables]\n\n\n# ===================================================================================\n#                                   Constructing quantum layers\n# ===================================================================================\n\n\n# Defining input QNN layer, whose parameters are set by the outputs of the classical network\ndef input_qnn_layer():\n    with tf.name_scope('inputlayer'):\n        Sgate(tf.clip_by_value(output_layer[:, 0], -sq_clip, sq_clip), output_layer[:, 1]) | q[0]\n        Sgate(tf.clip_by_value(output_layer[:, 2], -sq_clip, sq_clip), output_layer[:, 3]) | q[1]\n\n        BSgate(output_layer[:, 4], output_layer[:, 5]) | (q[0], q[1])\n\n        Rgate(output_layer[:, 6]) | q[0]\n        Rgate(output_layer[:, 7]) | q[1]\n\n        Dgate(tf.clip_by_value(output_layer[:, 8], -disp_clip, disp_clip), output_layer[:, 9]) \\\n        | q[0]\n        Dgate(tf.clip_by_value(output_layer[:, 10], -disp_clip, disp_clip), output_layer[:, 11]) \\\n        | q[1]\n\n        Kgate(tf.clip_by_value(output_layer[:, 12], -kerr_clip, kerr_clip)) | q[0]\n        Kgate(tf.clip_by_value(output_layer[:, 13], -kerr_clip, kerr_clip)) | q[1]\n\n\n# Defining standard QNN layers\ndef qnn_layer(layer_number):\n    with tf.name_scope('layer_{}'.format(layer_number)):\n        BSgate(bs_variables[layer_number, 0, 0, 0], bs_variables[layer_number, 0, 0, 1]) \\\n        | (q[0], q[1])\n\n        for i in range(mode_number):\n            Rgate(phase_variables[layer_number, i, 0]) | q[i]\n\n        for i in range(mode_number):\n            Sgate(tf.clip_by_value(sq_magnitude_variables[layer_number, i], -sq_clip, sq_clip),\n                  sq_phase_variables[layer_number, i]) | q[i]\n\n        
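# As in fraud_detection.py, a second interferometer, displacement, and Kerr\n        # gates complete the layer.\n        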
BSgate(bs_variables[layer_number, 0, 1, 0], bs_variables[layer_number, 0, 1, 1]) \\\n        | (q[0], q[1])\n\n        for i in range(mode_number):\n            Rgate(phase_variables[layer_number, i, 1]) | q[i]\n\n        for i in range(mode_number):\n            Dgate(tf.clip_by_value(disp_magnitude_variables[layer_number, i], -disp_clip,\n                                   disp_clip), disp_phase_variables[layer_number, i]) | q[i]\n\n        for i in range(mode_number):\n            Kgate(tf.clip_by_value(kerr_variables[layer_number, i], -kerr_clip, kerr_clip)) | q[i]\n\n\n# ===================================================================================\n#                                   Defining QNN\n# ===================================================================================\n\n# construct the two-mode Strawberry Fields engine\neng, q = sf.Engine(mode_number)\n\n# construct the circuit\nwith eng:\n    input_qnn_layer()\n\n    for i in range(depth):\n        qnn_layer(i)\n\n# run the engine (in batch mode)\nstate = eng.run(\"tf\", cutoff_dim=cutoff, eval=False, batch_size=batch_size)\n# extract the state\nket = state.ket()\n\n# ===================================================================================\n#                                   Extracting probabilities\n# ===================================================================================\n\nprob = []\n\nfor i in range(batch_size):\n    # Finds the probability of a photon being in either mode\n    prob.append([tf.abs(ket[i, 1, 0]) ** 2, tf.abs(ket[i, 0, 1]) ** 2])\n\n# ===================================================================================\n#                                   Testing performance\n# ===================================================================================\n\n# Defining array of thresholds from 0 to 1 to consider in the ROC curve\nthresholds_points = 101\nthresholds = np.linspace(0, 1, num=thresholds_points)\n\n# Saver/Loader for outputting model\nsaver = tf.train.Saver(parameters)\n\nsession = tf.Session()\nsession.run(tf.global_variables_initializer())\n\nsaver.restore(session, checkpoint_string + 'sess.ckpt-' + str(ckpt_val))\n\n# Split up data to process in batches\ndata_split = np.split(data_combined, data_combined_points // batch_size)\n\n# Defining confusion table\nconfusion_table = np.zeros((thresholds_points, 2, 2))\n\nfor batch in data_split:\n    # Input data (provided as principal components)\n    data_points_principal_components = batch[:, 1:input_neurons + 1]\n    # Data classes\n    classes = batch[:, -1]\n\n    # Probabilities outputted from circuit\n    prob_run = session.run(prob, feed_dict={input_classical_layer: data_points_principal_components})\n\n    for i in range(batch_size):\n        # Calculate probabilities of photon coming out of either mode\n        p = prob_run[i]\n        # Normalize to these two events (i.e. 
ignore all other outputs)\n        p = p / np.sum(p)\n\n        # Predicted class is a list corresponding to threshold probabilities\n        predicted_class = []\n\n        for j in range(thresholds_points):\n            # If the probability of a photon exiting the first mode is larger than the threshold, classify the transaction as genuine\n            if p[0] > thresholds[j]:\n                predicted_class.append(0)\n            else:\n                predicted_class.append(1)\n\n        actual_class = classes[i]\n\n        # Constructing confusion table\n        for j in range(2):\n            for k in range(2):\n                for l in range(thresholds_points):\n                    if actual_class == j and predicted_class[l] == k:\n                        confusion_table[l, j, k] += 1\n\n# Converting confusion table counts to percentages\nfor i in range(thresholds_points):\n    confusion_table[i] = confusion_table[i] / data_combined_points * 100\n\nif not os.path.exists(confusion_string):\n    os.makedirs(confusion_string)\n\n# Save as numpy array\nnp.save(confusion_string + 'confusion_table.npy', confusion_table)\n"
  },
  {
    "path": "function_fitting/function_fitting.py",
    "content": "# Copyright 2018 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#     http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Function fitting script\"\"\"\nimport os\nimport time\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\n\nimport tensorflow as tf\n\nimport strawberryfields as sf\nfrom strawberryfields.ops import *\n\nimport sys\nsys.path.append(\"..\")\nimport version_check\n\n# ===================================================================================\n#                                   Hyperparameters\n# ===================================================================================\n\n\n# Fock basis truncation\ncutoff = 10\n# domain [-xmax, xmax] to perform the function fitting over\nxmax = 1\n# Number of batches to use in the optimization\n# Each batch corresponds to a different input-output relation\nbatch_size = 50\n# Number of photonic quantum layers\ndepth = 6\n\n# variable clipping values\ndisp_clip = 100\nsq_clip = 50\nkerr_clip = 50\n\n# number of optimization steps\nreps = 1000\n\n# regularization\nregularization = 0.0\nreg_variance = 0.0\n\n\n# ===================================================================================\n#                                   Functions\n# ===================================================================================\n# This section contains various function we may wish to fit using our quantum\n# neural network.\n\n\ndef f1(x, eps=0.0):\n    \"\"\"The function f(x)=|x|+noise\"\"\"\n    return np.abs(x) + eps * np.random.normal(size=x.shape)\n\n\ndef f2(x, eps=0.0):\n    \"\"\"The function f(x)=sin(pi*x)/(pi*x)+noise\"\"\"\n    return np.sin(x*pi)/(pi*x) + eps * np.random.normal(size=x.shape)\n\n\ndef f3(x, eps=0.0):\n    \"\"\"The function f(x)=sin(pi*x)+noise\"\"\"\n    return 1.0*(np.sin(1.0 * x * np.pi) + eps * np.random.normal(size=x.shape))\n\n\ndef f4(x, eps=0.0):\n    \"\"\"The function f(x)=exp(x)+noise\"\"\"\n    return np.exp(x) + eps * np.random.normal(size=x.shape)\n\n\ndef f5(x, eps=0.0):\n    \"\"\"The function f(x)=tanh(4x)+noise\"\"\"\n    return np.tanh(4*x) + eps * np.random.normal(size=x.shape)\n\n\ndef f6(x, eps=0.0):\n    \"\"\"The function f(x)=x^3+noise\"\"\"\n    return x**3 + eps * np.random.normal(size=x.shape)\n\n\n# ===================================================================================\n#                                   Training data\n# ===================================================================================\n# load the training data from the provided files\n\ntrain_data = np.load('sine_train_data.npy')\ntest_data = np.load('sine_test_data.npy')\ndata_y = np.load('sine_outputs.npy')\n\n\n# ===================================================================================\n#                      Construct the quantum neural network\n# ===================================================================================\n\n# Random initialization of gate parameters\nsdev = 0.05\n\nwith tf.name_scope('variables'):\n    d_r = 
\ntrain_data = np.load('sine_train_data.npy')\ntest_data = np.load('sine_test_data.npy')\ndata_y = np.load('sine_outputs.npy')\n\n\n# ===================================================================================\n#                      Construct the quantum neural network\n# ===================================================================================\n\n# Random initialization of gate parameters\nsdev = 0.05\n\nwith tf.name_scope('variables'):\n    d_r = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n    d_phi = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n    r1 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n    sq_r = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n    sq_phi = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n    r2 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n    kappa1 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n\n\n# construct the one-mode Strawberry Fields engine\neng, q = sf.Engine(1)\n\n\ndef layer(i):\n    \"\"\"This function generates the ith layer of the quantum neural network.\n\n    Note: it must be executed within a Strawberry Fields engine context.\n\n    Args:\n        i (int): the layer number.\n    \"\"\"\n    with tf.name_scope('layer_{}'.format(i)):\n        # displacement gate\n        Dgate(tf.clip_by_value(d_r[i], -disp_clip, disp_clip), d_phi[i]) | q[0]\n        # rotation gate\n        Rgate(r1[i]) | q[0]\n        # squeeze gate\n        Sgate(tf.clip_by_value(sq_r[i], -sq_clip, sq_clip), sq_phi[i]) | q[0]\n        # rotation gate\n        Rgate(r2[i]) | q[0]\n        # Kerr gate\n        Kgate(tf.clip_by_value(kappa1[i], -kerr_clip, kerr_clip)) | q[0]\n\n\n# Use a TensorFlow placeholder to store the input data\ninput_data = tf.placeholder(tf.float32, shape=[batch_size])\n\n# construct the circuit\nwith eng:\n    # the input data is encoded as displacement in the phase space\n    Dgate(input_data) | q[0]\n\n    for k in range(depth):\n        # apply layers to the required depth\n        layer(k)\n\n# run the engine\nstate = eng.run('tf', cutoff_dim=cutoff, eval=False, batch_size=batch_size)\n\n\n# ===================================================================================\n#                      Define the loss function\n# ===================================================================================\n\n# First, we calculate the x-quadrature expectation value and variance\nket = state.ket()\nmean_x, var_x = state.quad_expectation(0)\nerrors_y = tf.sqrt(var_x)\n\n# the loss function is defined as mean(|<x>[batch_num] - data[batch_num]|^2)\noutput_data = tf.placeholder(tf.float32, shape=[batch_size])\nloss = tf.reduce_mean(tf.abs(mean_x - output_data) ** 2)\nvar = tf.reduce_mean(errors_y)\n\n# when constructing the cost function, we ensure that the norm of the state\n# remains close to 1, and that the variance of the error does not grow.\nstate_norm = tf.abs(tf.reduce_mean(state.trace()))\ncost = loss + regularization * (tf.abs(state_norm - 1) ** 2) + reg_variance * var\ntf.summary.scalar('cost', cost)\n\n\n# ===================================================================================\n#                      Perform the optimization\n# ===================================================================================\n\n# we choose the Adam optimizer\noptimiser = tf.train.AdamOptimizer()\nmin_op = optimiser.minimize(cost)\n\nsession = tf.Session()\nsession.run(tf.global_variables_initializer())\n\nprint('Beginning optimization')\n\nloss_vals = []\nerror_vals = []\n\n# start time\nstart_time = time.time()\n\nfor i in range(reps + 1):\n\n    loss_, predictions, errors, mean_error, ket_norm, _ = session.run(\n        [loss, mean_x, errors_y, var, state_norm, min_op],\n        feed_dict={input_data: train_data, output_data: data_y})\n\n    loss_vals.append(loss_)\n    error_vals.append(mean_error)\n\n    if i % 100 == 0:\n        print('Step: {} Loss: {}'.format(i, loss_))\n\nend_time = time.time()\n
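\n# loss_vals and error_vals are recorded above but not used again in this\n# script. As an optional sketch (not part of the original analysis), the\n# training history can be saved for later inspection; the file names here\n# are hypothetical.\nnp.save('sine_loss_history', np.array(loss_vals))\nnp.save('sine_error_history', np.array(error_vals))\n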
\n\n# ===================================================================================\n#                      Analyze the results\n# ===================================================================================\n\ntest_predictions = session.run(mean_x, feed_dict={input_data: test_data})\n\nnp.save('sine_test_predictions', test_predictions)\n\nprint(\"Elapsed time is {} seconds\".format(np.round(end_time - start_time)))\n\nx = np.linspace(-xmax, xmax, 200)\n\n# set plotting options\nrcParams['font.family'] = 'serif'\nrcParams['font.serif'] = ['Computer Modern Roman']\n\nfig, ax = plt.subplots(1, 1)\n\n# plot the function to be fitted, in green\nax.plot(x, f3(x), color='#3f9b0b', zorder=1, linewidth=2)\n\n# plot the training data, in red\nax.scatter(train_data, data_y, color='#fb2943', marker='o', zorder=2, s=75)\n\n# plot the test predictions, in blue\nax.scatter(test_data, test_predictions, color='#0165fc', marker='x', zorder=3, s=75)\n\nax.set_xlabel('Input', fontsize=18)\nax.set_ylabel('Output', fontsize=18)\nax.tick_params(axis='both', which='major', labelsize=16)\n\nfig.savefig('result.pdf', format='pdf', bbox_inches='tight')\n"
  },
  {
    "path": "requirements.txt",
    "content": "strawberryfields==0.10\ntensorflow==1.3\nmatplotlib\n"
  },
  {
    "path": "tetrominos_learning/plot_images.py",
    "content": "# Copyright 2018 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#     http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" This scripts converts Tetris numpy images into a .png figure.\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n##################### set local directories ########\n# Model name\nmodel_string = 'tetris'\n\n# Output folder\nfolder_locator = './outputs/'\n\n# Locations of saved data and output figure\nsave_string = folder_locator + 'models/' + model_string + '/'\n\n\n# Loading of images\nimages_out = np.load(save_string + 'images_out.npy')\nimages_out_big = np.load(save_string + 'images_out_big.npy')\n\nnum_labels = 7\nplot_scale = 1\n\n# Plotting of the final image.\nfig_images, axs = plt.subplots(\n    nrows=2, ncols=num_labels, figsize=(num_labels * plot_scale, 2 * plot_scale)\n)\n\nall_images = [images_out, images_out_big]\nfor i in range(2):\n    for lable in range(num_labels):\n        ax = axs[i][lable]\n        ax.imshow(all_images[i][lable], cmap='gray')\n        ax.axis('off')\n        ax.set_xticklabels([])\n        ax.set_yticklabels([])\nplt.tight_layout()\nfig_images.savefig(save_string + 'fig_images.png')\n"
  },
  {
    "path": "tetrominos_learning/tetrominos_learning.py",
    "content": "# Copyright 2018 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n#     http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" This script trains a quantum network for encoding Tetris images in the quantum state of two bosonic modes.\"\"\"\n\nimport strawberryfields as sf\nfrom strawberryfields.ops import Dgate, BSgate, Kgate, Sgate, Rgate\nimport tensorflow as tf\nimport numpy as np\nimport time\nimport os\n\nimport sys\nsys.path.append(\"..\")\nimport version_check\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nos.environ['OMP_NUM_THREADS'] = '1'\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\n# =============================================\n#   Settings and hyperparameters\n# =============================================\n\n# Model name\nmodel_string = 'tetris'\n\n# Output folder\nfolder_locator = './outputs/'\n\n# Locations of TensorBoard and model saving outputs\nboard_string = folder_locator + 'tensorboard/' + model_string + '/'\nsave_string = folder_locator + 'models/' + model_string + '/'\n\n\n# Record initial time\ninit_time = time.time()\n\n# Set seed for random generator\ntf.set_random_seed(1)\n\n# Depth of the quantum network (suggested: 25)\ndepth = 25\n\n# Fock basis truncation\ncutoff = 11  # suggested value: 11\n\n# Image size (im_dim X im_dim)\nim_dim = 4\n\n# Number of optimization steps (suggested: 50000)\nreps = 20000\n\n# Number of steps between data logging/saving.\npartial_reps = 1000\n\n# Number of images to encode (suggested: 7)\nnum_images = 7\n\n# Clipping of training parameters\ndisp_clip = 5\nsq_clip = 5\nkerr_clip = 1\n\n# Weight for quantum state normalization\nnorm_weight = 100.0\n\n# ====================================================\n#   Manual definition of target images\n# ====================================================\n\ntrain_images = np.zeros((num_images, im_dim, im_dim))\n\n# Target images: L,O,T,I,S,J,Z tetrominos.\nL, O, T, I, S, J, Z = np.zeros((num_images, im_dim, im_dim))\n\nL[0, 0] = L[1, 0] = L[2, 0] = L[2, 1] = 1 / np.sqrt(4)\nO[0, 0] = O[1, 1] = O[0, 1] = O[1, 0] = 1 / np.sqrt(4)\nT[0, 0] = T[0, 1] = T[0, 2] = T[1, 1] = 1 / np.sqrt(4)\nI[0, 0] = I[1, 0] = I[2, 0] = I[3, 0] = 1 / np.sqrt(4)\nS[1, 0] = S[1, 1] = S[0, 1] = S[0, 2] = 1 / np.sqrt(4)\nJ[0, 1] = J[1, 1] = J[2, 1] = J[2, 0] = 1 / np.sqrt(4)\nZ[0, 0] = Z[0, 1] = Z[1, 1] = Z[1, 2] = 1 / np.sqrt(4)\n\ntrain_images = [L, O, T, I, S, J, Z]\n\n# ====================================================\n#   Initialization of TensorFlow variables\n# ====================================================\n\nprint('Initializing TensorFlow graph...')\n\n# Initial standard deviation of parameters\nsdev = 0.1\n\n# Coherent state amplitude\nalpha = 1.4\n\n# Combinations of two-mode amplitudes corresponding to different final images\ndisps_alpha = tf.constant(\n    [alpha, -alpha, alpha, -alpha, 1.0j * alpha, -1.0j * alpha, 1.0j * alpha]\n)\ndisps_beta = tf.constant(\n    [alpha, alpha, -alpha, -alpha, 1.0j * alpha, 1.0j * alpha, -1.0j * alpha]\n)\n\n# Trainable weights of the quantum network.\nwith 
\n# ====================================================\n#   Initialization of TensorFlow variables\n# ====================================================\n\nprint('Initializing TensorFlow graph...')\n\n# Initial standard deviation of parameters\nsdev = 0.1\n\n# Coherent state amplitude\nalpha = 1.4\n\n# Combinations of two-mode amplitudes corresponding to different final images\ndisps_alpha = tf.constant(\n    [alpha, -alpha, alpha, -alpha, 1.0j * alpha, -1.0j * alpha, 1.0j * alpha]\n)\ndisps_beta = tf.constant(\n    [alpha, alpha, -alpha, -alpha, 1.0j * alpha, 1.0j * alpha, -1.0j * alpha]\n)\n\n# Trainable weights of the quantum network.\nwith tf.name_scope('variables'):\n    r1 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n    r2 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n\n    theta1 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n    phi1 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n\n    theta2 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n    phi2 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n\n    sqr1 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n    sqphi1 = tf.Variable(tf.random_normal(shape=[depth]))\n\n    sqr2 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n    sqphi2 = tf.Variable(tf.random_normal(shape=[depth]))\n\n    dr1 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n    dphi1 = tf.Variable(tf.random_normal(shape=[depth]))\n\n    dr2 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n    dphi2 = tf.Variable(tf.random_normal(shape=[depth]))\n\n    kappa1 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n    kappa2 = tf.Variable(tf.random_normal(shape=[depth], stddev=sdev))\n\n# List of all the weights\nparameters = [\n    r1,\n    r2,\n    theta1,\n    phi1,\n    theta2,\n    phi2,\n    sqr1,\n    sqphi1,\n    sqr2,\n    sqphi2,\n    dr1,\n    dphi1,\n    dr2,\n    dphi2,\n    kappa1,\n    kappa2,\n]\n\n# ====================================================\n#   Definition of the quantum neural network\n# ====================================================\n\n# Single quantum variational layer\n\n\ndef layer(l):\n    \"\"\"The l-th variational layer: a beamsplitter and rotation, squeezing on\n    both modes, a second beamsplitter and rotation, and displacement and Kerr\n    gates on both modes.\"\"\"\n    with tf.name_scope('layer_{}'.format(l)):\n        BSgate(theta1[l], phi1[l]) | (q[0], q[1])\n        Rgate(r1[l]) | q[0]\n        Sgate(tf.clip_by_value(sqr1[l], -sq_clip, sq_clip), sqphi1[l]) | q[0]\n        Sgate(tf.clip_by_value(sqr2[l], -sq_clip, sq_clip), sqphi2[l]) | q[1]\n        BSgate(theta2[l], phi2[l]) | (q[0], q[1])\n        Rgate(r2[l]) | q[0]\n        Dgate(tf.clip_by_value(dr1[l], -disp_clip, disp_clip), dphi1[l]) | q[0]\n        Dgate(tf.clip_by_value(dr2[l], -disp_clip, disp_clip), dphi2[l]) | q[1]\n        Kgate(tf.clip_by_value(kappa1[l], -kerr_clip, kerr_clip)) | q[0]\n        Kgate(tf.clip_by_value(kappa2[l], -kerr_clip, kerr_clip)) | q[1]\n\n\n# Strawberry Fields quantum simulator of 2 optical modes\nengine, q = sf.Engine(num_subsystems=2)\n\n# Definition of the CV quantum network\nwith engine:\n    # State preparation\n    Dgate(disps_alpha) | q[0]\n    Dgate(disps_beta) | q[1]\n    # Sequence of variational layers\n    for i in range(depth):\n        layer(i)\n\n# Symbolic evaluation of the output state\nstate = engine.run('tf', cutoff_dim=cutoff, eval=False, batch_size=num_images)\nket = state.ket()\ntrace = tf.abs(state.trace())\n\n# Projection on the subspace of up to im_dim-1 photons for each mode.\nket_reduced = ket[:, :im_dim, :im_dim]\nnorm = tf.sqrt(tf.abs(tf.reduce_sum(tf.conj(ket_reduced) * ket_reduced, axis=[1, 2])))\n# Since norm has shape [num_images] while ket_reduced has shape\n# [num_images, im_dim, im_dim], we need to add 2 extra dimensions to the norm\n# tensor before dividing.\nnorm_extended = tf.reshape(norm, [num_images, 1, 1])\nket_processed = ket_reduced / tf.cast(norm_extended, dtype=tf.complex64)\n\n# ====================================================\n#   Definition of the loss function\n# ====================================================\n\n# Target images\ndata_states = tf.placeholder(tf.complex64, shape=[num_images, im_dim, im_dim])\n
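\n# The figure of merit below is the fidelity |<target|psi>|^2 between each\n# projected, re-normalized output ket and its target image. As a plain-numpy\n# illustration of the same quantity (an aside, not used by the training\n# graph), the fidelity of a normalized state with itself is 1:\n_demo_ket = train_images[0].astype(np.complex64)\nassert np.isclose(np.abs(np.sum(np.conj(_demo_ket) * _demo_ket)) ** 2, 1.0)\n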
\n# Overlaps with target images\noverlaps = tf.abs(tf.reduce_sum(tf.conj(ket_processed) * data_states, axis=[1, 2])) ** 2\n\n# Overlap cost function\noverlap_cost = tf.reduce_mean((overlaps - 1) ** 2)\n\n# State norm cost function\nnorm_cost = tf.reduce_sum((trace - 1) ** 2)\n\ncost = overlap_cost + norm_weight * norm_cost\n\n# ====================================================\n#   TensorBoard logging of cost functions and images\n# ====================================================\n\ntf.summary.scalar('Cost', cost)\ntf.summary.scalar('Norm cost', norm_cost)\ntf.summary.scalar('Overlap cost', overlap_cost)\n\n# Output images with and without subspace projection.\nimages_out = tf.abs(ket_processed) ** 2\nimages_out_big = tf.abs(ket) ** 2\n\ntf.summary.image(\n    'image_out', tf.expand_dims(images_out, axis=3), max_outputs=num_images\n)\ntf.summary.image(\n    'image_out_big', tf.expand_dims(images_out_big, axis=3), max_outputs=num_images\n)\n\n# TensorBoard writer and summary\nwriter = tf.summary.FileWriter(board_string)\nmerge = tf.summary.merge_all()\n\n\n# ====================================================\n#   Training\n# ====================================================\n\n# Optimization algorithm (Adam optimizer)\noptim = tf.train.AdamOptimizer()\ntraining = optim.minimize(cost)\n\nprint('Graph building time: {:.3f}'.format(time.time() - init_time))\n\n# TensorFlow session\nwith tf.Session() as session:\n    session.run(tf.global_variables_initializer())\n    start_time = time.time()\n\n    for i in range(reps):\n        rep_time = time.time()\n        # make an optimization step\n        _training = session.run(training, feed_dict={data_states: train_images})\n\n        if (i + 1) % partial_reps == 0:\n            # evaluate tensors for saving and logging\n            [summary, params_numpy, _images_out, _images_out_big] = session.run(\n                [merge, tf.squeeze(parameters), images_out, images_out_big],\n                feed_dict={data_states: train_images},\n            )\n            # save tensorboard data\n            writer.add_summary(summary, i + 1)\n\n            # save trained weights\n            os.makedirs(save_string, exist_ok=True)\n            np.save(save_string + 'trained_params.npy', params_numpy)\n\n            # save output images as numpy arrays\n            np.save(save_string + 'images_out.npy', _images_out)\n            np.save(save_string + 'images_out_big.npy', _images_out_big)\n\n            print(\n                'Iteration: {:d} Single iteration time {:.3f}'.format(\n                    i + 1, time.time() - rep_time\n                )\n            )\n\nprint('Script completed. Total time: {:.3f}'.format(time.time() - init_time))\n"
  },
  {
    "path": "version_check.py",
    "content": "\"\"\"Script for checking the correct versions of Python, StrawberryFields and TensorFlow are being\nused.\"\"\"\nimport sys\n\nimport strawberryfields as sf\nimport tensorflow as tf\n\npython_version = sys.version_info\nsf_version = sf.__version__\ntf_version = tf.__version__.split(\".\")\n\nif python_version < (3, 5) or python_version > (3, 6):\n    raise SystemError(\"Your version of python is {}.{}. You must have Python 3.5 or 3.6 installed \"\n                      \"to run this script.\".format(python_version.major, python_version.minor))\n\nif sf_version != \"0.10.0\":\n    raise ImportError(\"An incompatible version of StrawberryFields is installed. You must have \"\n                      \"StrawberryFields version 0.10 to run this script. To install the correct \"\n                      \"version, run:\\n >>> pip install strawberryfields==0.10\")\n\nif not(tf_version[0] == \"1\" and tf_version[1] == \"3\"):\n    raise ImportError(\"An incompatible version of TensorFlow is installed. You must have \"\n                      \"TensorFlow version 1.3 to run this script. To install the correct \"\n                      \"version, run:\\n >>> pip install tensorflow==1.3\")\n"
  }
]