[
  {
    "path": ".gitignore",
    "content": "venv\n.DS_Store\n.idea"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "# SpecAugment [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\nThis is a implementation of SpecAugment that speech data augmentation method which directly process the spectrogram with Tensorflow & Pytorch, introduced by Google Brain[1]. This is currently under the Apache 2.0, Please feel free to use for your project. Enjoy!\n\n## How to use\n\nFirst, you need to have python 3 installed along with [Tensorflow](https://www.tensorflow.org/install/).\n\nNext, you need to install some audio libraries work properly. To install the requirement packages. Run the following command:\n\n```bash\npip3 install SpecAugment\n```\n\nAnd then, run the specAugment.py program. It modifies the spectrogram by warping it in the time direction, masking blocks of consecutive frequency channels, and masking blocks of utterances in time.\n\n#### *Try your audio file SpecAugment*\n\n```shell\n$ python3\n```\n\n```python\n>>> import librosa\n>>> from specAugment import spec_augment_tensorflow\n# If you are Pytorch, then import spec_augment_pytorch instead of spec_augment_tensorflow\n>>> audio, sampling_rate = librosa.load(audio_path)\n>>> mel_spectrogram = librosa.feature.melspectrogram(y=audio,\n                                                     sr=sampling_rate,\n                                                     n_mels=256,\n                                                     hop_length=128,\n                                                     fmax=8000)\n>>> warped_masked_spectrogram = spec_augment_tensorflow.spec_augment(mel_spectrogram=mel_spectrogram)\n>>> print(warped_masked_spectrogram)\n'\n[[1.54055389e-01 7.51822486e-01 7.29588015e-01 ... 1.03616300e-01\n  1.04682689e-01 1.05411769e-01]\n [2.21608739e-01 1.38559084e-01 1.01564167e-01 ... 4.19907116e-02\n  4.86430404e-02 5.27331798e-02]\n [3.62784019e-01 2.09934399e-01 1.79158230e-01 ... 2.42307431e-01\n  3.18662338e-01 3.67405599e-01]\n ...\n [6.36117335e-07 8.06897948e-07 8.55346431e-07 ... 2.84445018e-07\n  4.02975952e-07 5.57131738e-07]\n [6.27753429e-07 7.53681318e-07 8.13035033e-07 ... 1.35111146e-07\n  2.74058225e-07 4.56901031e-07]\n [0.00000000e+00 7.48416680e-07 5.51771037e-07 ... 1.13901361e-07\n  2.56365068e-07 4.43868592e-07]]\n'\n```\nLearn more examples about how to do specific tasks in SpecAugment at the test code.\n\n```bash\npython spec_augment_test.py\n```\nIn test code, we using one of the [LibriSpeech dataset](http://www.openslr.org/12/).\n\n<p align=\"center\">\n  <img src=\"https://github.com/shelling203/SpecAugment/blob/master/images/Figure_1.png\" alt=\"Example result of base spectrogram\"/ width=600>\n  <img src=\"https://github.com/shelling203/SpecAugment/blob/master/images/Figure_2.png\" alt=\"Example result of base spectrogram\"/ width=600>\n</p>\n\n\n# Reference\n\n1. https://arxiv.org/pdf/1904.08779.pdf\n"
  },
  {
    "path": "SpecAugment/__init__.py",
    "content": ""
  },
  {
    "path": "SpecAugment/sparse_image_warp_np.py",
    "content": "\"\"\"Image warping using sparse flow defined at control points.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport scipy as sp\nimport skimage\nfrom scipy.interpolate import interp2d\nfrom skimage.transform import warp\n\ndef _get_grid_locations(image_height, image_width):\n  \"\"\"Wrapper for np.meshgrid.\"\"\"\n\n  y_range = np.linspace(0, image_height - 1, image_height)\n  x_range = np.linspace(0, image_width - 1, image_width)\n  y_grid, x_grid = np.meshgrid(y_range, x_range, indexing='ij')\n  return np.stack((y_grid, x_grid), -1)\n\n\ndef _expand_to_minibatch(np_array, batch_size):\n  \"\"\"Tile arbitrarily-sized np_array to include new batch dimension.\"\"\"\n  tiles = [batch_size] + [1] * np_array.ndim\n  return np.tile(np.expand_dims(np_array, 0), tiles)\n\n\ndef _get_boundary_locations(image_height, image_width, num_points_per_edge):\n  \"\"\"Compute evenly-spaced indices along edge of image.\"\"\"\n  y_range = np.linspace(0, image_height - 1, num_points_per_edge + 2)\n  x_range = np.linspace(0, image_width - 1, num_points_per_edge + 2)\n  ys, xs = np.meshgrid(y_range, x_range, indexing='ij')\n  is_boundary = np.logical_or(\n      np.logical_or(xs == 0, xs == image_width - 1),\n      np.logical_or(ys == 0, ys == image_height - 1))\n  return np.stack([ys[is_boundary], xs[is_boundary]], axis=-1)\n\n\ndef _add_zero_flow_controls_at_boundary(control_point_locations,\n                                        control_point_flows, image_height,\n                                        image_width, boundary_points_per_edge):\n\n  # batch_size = tensor_shape.dimension_value(control_point_locations.shape[0])\n  batch_size = control_point_locations.shape[0]\n\n  boundary_point_locations = _get_boundary_locations(image_height, image_width,\n                                                     boundary_points_per_edge)\n\n  boundary_point_flows = np.zeros([boundary_point_locations.shape[0], 2])\n\n  type_to_use = control_point_locations.dtype\n  # boundary_point_locations = constant_op.constant(\n  #     _expand_to_minibatch(boundary_point_locations, batch_size),\n  #     dtype=type_to_use)\n  boundary_point_locations = _expand_to_minibatch(boundary_point_locations, batch_size)\n\n  # boundary_point_flows = constant_op.constant(\n  #     _expand_to_minibatch(boundary_point_flows, batch_size), dtype=type_to_use)\n  boundary_point_flows = _expand_to_minibatch(boundary_point_flows, batch_size)\n\n  # merged_control_point_locations = array_ops.concat(\n  #     [control_point_locations, boundary_point_locations], 1)\n\n  merged_control_point_locations = np.concatenate(\n      [control_point_locations, boundary_point_locations], 1)\n\n  # merged_control_point_flows = array_ops.concat(\n  #     [control_point_flows, boundary_point_flows], 1)\n\n  merged_control_point_flows = np.concatenate(\n      [control_point_flows, boundary_point_flows], 1)\n\n  return merged_control_point_locations, merged_control_point_flows\n\n\ndef sparse_image_warp_np(image,\n                      source_control_point_locations,\n                      dest_control_point_locations,\n                      interpolation_order=2,\n                      regularization_weight=0.0,\n                      num_boundary_points=0):\n\n  # image = ops.convert_to_tensor(image)\n  # source_control_point_locations = ops.convert_to_tensor(\n  #     source_control_point_locations)\n  # dest_control_point_locations = 
ops.convert_to_tensor(\n  #     dest_control_point_locations)\n\n  control_point_flows = (\n      dest_control_point_locations - source_control_point_locations)\n\n  clamp_boundaries = num_boundary_points > 0\n  boundary_points_per_edge = num_boundary_points - 1\n\n  # batch_size, image_height, image_width, _ = image.get_shape().as_list()\n  batch_size, image_height, image_width, _ = list(image.shape)\n\n  # This generates the dense locations where the interpolant\n  # will be evaluated.\n\n  grid_locations = _get_grid_locations(image_height, image_width)\n\n  flattened_grid_locations = np.reshape(grid_locations,\n                                          [image_height * image_width, 2])\n\n    # flattened_grid_locations = constant_op.constant(\n    #     _expand_to_minibatch(flattened_grid_locations, batch_size), image.dtype)\n\n  flattened_grid_locations = _expand_to_minibatch(flattened_grid_locations, batch_size)\n\n  if clamp_boundaries:\n    (dest_control_point_locations,\n     control_point_flows) = _add_zero_flow_controls_at_boundary(\n         dest_control_point_locations, control_point_flows, image_height,\n         image_width, boundary_points_per_edge)\n\n    # flattened_flows = interpolate_spline.interpolate_spline(\n    #     dest_control_point_locations, control_point_flows,\n    #     flattened_grid_locations, interpolation_order, regularization_weight)\n  flattened_flows = sp.interpolate.spline(\n        dest_control_point_locations, control_point_flows,\n        flattened_grid_locations, interpolation_order, regularization_weight)\n\n    # dense_flows = array_ops.reshape(flattened_flows,\n    #                                 [batch_size, image_height, image_width, 2])\n  dense_flows = np.reshape(flattened_flows,\n                                    [batch_size, image_height, image_width, 2])\n\n    # warped_image = dense_image_warp.dense_image_warp(image, dense_flows)\n  warped_image = warp(image, dense_flows)\n\n  return warped_image, dense_flows\n\n\ndef dense_image_warp(image, flow):\n    # batch_size, height, width, channels = (array_ops.shape(image)[0],\n    #                                        array_ops.shape(image)[1],\n    #                                        array_ops.shape(image)[2],\n    #                                        array_ops.shape(image)[3])\n    batch_size, height, width, channels = (np.shape(image)[0],\n                                           np.shape(image)[1],\n                                           np.shape(image)[2],\n                                           np.shape(image)[3])\n\n    # The flow is defined on the image grid. 
Turn the flow into a list of query\n    # points in the grid space.\n    # grid_x, grid_y = array_ops.meshgrid(\n    #     math_ops.range(width), math_ops.range(height))\n    # stacked_grid = math_ops.cast(\n    #     array_ops.stack([grid_y, grid_x], axis=2), flow.dtype)\n    # batched_grid = array_ops.expand_dims(stacked_grid, axis=0)\n    # query_points_on_grid = batched_grid - flow\n    # query_points_flattened = array_ops.reshape(query_points_on_grid,\n    #                                            [batch_size, height * width, 2])\n    grid_x, grid_y = np.meshgrid(\n        np.range(width), np.range(height))\n    stacked_grid = np.cast(\n        np.stack([grid_y, grid_x], axis=2), flow.dtype)\n    batched_grid = np.expand_dims(stacked_grid, axis=0)\n    query_points_on_grid = batched_grid - flow\n    query_points_flattened = np.reshape(query_points_on_grid,\n                                        [batch_size, height * width, 2])\n    # Compute values at the query points, then reshape the result back to the\n    # image grid.\n    interpolated = interp2d(image, query_points_flattened)\n    interpolated = np.reshape(interpolated,\n                              [batch_size, height, width, channels])\n    return interpolated\n\n"
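\n\nif __name__ == '__main__':\n  # Minimal smoke test, a sketch on synthetic data (not part of the\n  # original repo): nudge one control point of a random image and warp,\n  # pinning the boundary with zero-flow control points.\n  rng = np.random.default_rng(0)\n  img = rng.random((1, 32, 32, 1))\n  src = np.array([[[16.0, 16.0]]])\n  dst = np.array([[[16.0, 20.0]]])\n  warped, flows = sparse_image_warp_np(img, src, dst, num_boundary_points=2)\n  print(warped.shape, flows.shape)  # (1, 32, 32, 1) (1, 32, 32, 2)\n"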
  },
  {
    "path": "SpecAugment/sparse_image_warp_pytorch.py",
    "content": "# Copyright 2019 RnD at Spoon Radio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# import torch\n# import numpy as np\n# from torch.autograd import Variable\n# import librosa\nimport random\nimport numpy as np\n# import scipy.signal\nimport torch\n# import torchaudio\n# from torchaudio import transforms\n# import math\n# from torch.utils.data import DataLoader\n# from torch.utils.data import Dataset\n\n\ndef time_warp(spec, W=5):\n    spec = spec.view(1, spec.shape[0], spec.shape[1])\n    num_rows = spec.shape[1]\n    spec_len = spec.shape[2]\n\n    y = num_rows // 2\n    horizontal_line_at_ctr = spec[0][y]\n    assert len(horizontal_line_at_ctr) == spec_len\n\n    point_to_warp = horizontal_line_at_ctr[random.randrange(W, spec_len - W)]\n    assert isinstance(point_to_warp, torch.Tensor)\n\n    # Uniform distribution from (0,W) with chance to be up to W negative\n    dist_to_warp = random.randrange(-W, W)\n    src_pts, dest_pts = torch.tensor([[[y, point_to_warp]]]), torch.tensor([[[y, point_to_warp + dist_to_warp]]])\n    warped_spectro, dense_flows = SparseImageWarp.sparse_image_warp(spec, src_pts, dest_pts)\n    return warped_spectro.squeeze(3)\n\n\ndef freq_mask(spec, F=15, num_masks=1, replace_with_zero=False):\n    cloned = spec.clone()\n    num_mel_channels = cloned.shape[1]\n\n    for i in range(0, num_masks):\n        f = random.randrange(0, F)\n        f_zero = random.randrange(0, num_mel_channels - f)\n\n        # avoids randrange error if values are equal and range is empty\n        if (f_zero == f_zero + f): return cloned\n\n        mask_end = random.randrange(f_zero, f_zero + f)\n        if (replace_with_zero):\n            cloned[0][f_zero:mask_end] = 0\n        else:\n            cloned[0][f_zero:mask_end] = cloned.mean()\n\n    return cloned\n\n\ndef time_mask(spec, T=15, num_masks=1, replace_with_zero=False):\n    cloned = spec.clone()\n    len_spectro = cloned.shape[2]\n\n    for i in range(0, num_masks):\n        t = random.randrange(0, T)\n        t_zero = random.randrange(0, len_spectro - t)\n\n        # avoids randrange error if values are equal and range is empty\n        if (t_zero == t_zero + t): return cloned\n\n        mask_end = random.randrange(t_zero, t_zero + t)\n        if (replace_with_zero):\n            cloned[0][:, t_zero:mask_end] = 0\n        else:\n            cloned[0][:, t_zero:mask_end] = cloned.mean()\n    return cloned\n\n\ndef sparse_image_warp(img_tensor,\n                      source_control_point_locations,\n                      dest_control_point_locations,\n                      interpolation_order=2,\n                      regularization_weight=0.0,\n                      num_boundaries_points=0):\n    control_point_flows = (dest_control_point_locations - source_control_point_locations)\n\n    batch_size, image_height, image_width = img_tensor.shape\n    grid_locations = get_grid_locations(image_height, image_width)\n    
flattened_grid_locations = torch.tensor(flatten_grid_locations(grid_locations, image_height, image_width))\n\n    flattened_flows = interpolate_spline(\n        dest_control_point_locations,\n        control_point_flows,\n        flattened_grid_locations,\n        interpolation_order,\n        regularization_weight)\n\n    dense_flows = create_dense_flows(flattened_flows, batch_size, image_height, image_width)\n\n    warped_image = dense_image_warp(img_tensor, dense_flows)\n\n    return warped_image, dense_flows\n\n\ndef get_grid_locations(image_height, image_width):\n    \"\"\"Wrapper for np.meshgrid.\"\"\"\n\n    y_range = np.linspace(0, image_height - 1, image_height)\n    x_range = np.linspace(0, image_width - 1, image_width)\n    y_grid, x_grid = np.meshgrid(y_range, x_range, indexing='ij')\n    return np.stack((y_grid, x_grid), -1)\n\n\ndef flatten_grid_locations(grid_locations, image_height, image_width):\n    return np.reshape(grid_locations, [image_height * image_width, 2])\n\n\ndef create_dense_flows(flattened_flows, batch_size, image_height, image_width):\n    # possibly .view\n    return torch.reshape(flattened_flows, [batch_size, image_height, image_width, 2])\n\n\ndef interpolate_spline(train_points, train_values, query_points, order, regularization_weight=0.0, ):\n    # First, fit the spline to the observed data.\n    w, v = solve_interpolation(train_points, train_values, order, regularization_weight)\n    # Then, evaluate the spline at the query locations.\n    query_values = apply_interpolation(query_points, train_points, w, v, order)\n\n    return query_values\n\n\ndef solve_interpolation(train_points, train_values, order, regularization_weight):\n    b, n, d = train_points.shape\n    k = train_values.shape[-1]\n\n    # First, rename variables so that the notation (c, f, w, v, A, B, etc.)\n    # follows https://en.wikipedia.org/wiki/Polyharmonic_spline.\n    # To account for python style guidelines we use\n    # matrix_a for A and matrix_b for B.\n\n    c = train_points\n    f = train_values.float()\n\n    matrix_a = phi(cross_squared_distance_matrix(c, c), order).unsqueeze(0)  # [b, n, n]\n    #     if regularization_weight > 0:\n    #         batch_identity_matrix = array_ops.expand_dims(\n    #           linalg_ops.eye(n, dtype=c.dtype), 0)\n    #         matrix_a += regularization_weight * batch_identity_matrix\n\n    # Append ones to the feature values for the bias term in the linear model.\n    ones = torch.ones(1, dtype=train_points.dtype).view([-1, 1, 1])\n    matrix_b = torch.cat((c, ones), 2).float()  # [b, n, d + 1]\n\n    # [b, n + d + 1, n]\n    left_block = torch.cat((matrix_a, torch.transpose(matrix_b, 2, 1)), 1)\n\n    num_b_cols = matrix_b.shape[2]  # d + 1\n\n    # In Tensorflow, zeros are used here. 
Pytorch gesv fails with zeros for some reason we don't understand.\n    # So instead we use very tiny randn values (variance of one, zero mean) on one side of our multiplication.\n    lhs_zeros = torch.randn((b, num_b_cols, num_b_cols)) / 1e10\n    right_block = torch.cat((matrix_b, lhs_zeros),\n                            1)  # [b, n + d + 1, d + 1]\n    lhs = torch.cat((left_block, right_block),\n                    2)  # [b, n + d + 1, n + d + 1]\n\n    rhs_zeros = torch.zeros((b, d + 1, k), dtype=train_points.dtype).float()\n    rhs = torch.cat((f, rhs_zeros), 1)  # [b, n + d + 1, k]\n\n    # Then, solve the linear system and unpack the results.\n    X, LU = torch.solve(rhs, lhs)\n    w = X[:, :n, :]\n    v = X[:, n:, :]\n\n    return w, v\n\n\ndef cross_squared_distance_matrix(x, y):\n    \"\"\"Pairwise squared distance between two (batch) matrices' rows (2nd dim).\n        Computes the pairwise distances between rows of x and rows of y\n        Args:\n        x: [batch_size, n, d] float `Tensor`\n        y: [batch_size, m, d] float `Tensor`\n        Returns:\n        squared_dists: [batch_size, n, m] float `Tensor`, where\n        squared_dists[b,i,j] = ||x[b,i,:] - y[b,j,:]||^2\n    \"\"\"\n    x_norm_squared = torch.sum(torch.mul(x, x))\n    y_norm_squared = torch.sum(torch.mul(y, y))\n\n    x_y_transpose = torch.matmul(x.squeeze(0), y.squeeze(0).transpose(0, 1))\n\n    # squared_dists[b,i,j] = ||x_bi - y_bj||^2 = x_bi'x_bi- 2x_bi'x_bj + x_bj'x_bj\n    squared_dists = x_norm_squared - 2 * x_y_transpose + y_norm_squared\n\n    return squared_dists.float()\n\n\ndef phi(r, order):\n    \"\"\"Coordinate-wise nonlinearity used to define the order of the interpolation.\n    See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.\n    Args:\n    r: input op\n    order: interpolation order\n    Returns:\n    phi_k evaluated coordinate-wise on r, for k = r\n    \"\"\"\n    EPSILON = torch.tensor(1e-10)\n    # using EPSILON prevents log(0), sqrt0), etc.\n    # sqrt(0) is well-defined, but its gradient is not\n    if order == 1:\n        r = torch.max(r, EPSILON)\n        r = torch.sqrt(r)\n        return r\n    elif order == 2:\n        return 0.5 * r * torch.log(torch.max(r, EPSILON))\n    elif order == 4:\n        return 0.5 * torch.square(r) * torch.log(torch.max(r, EPSILON))\n    elif order % 2 == 0:\n        r = torch.max(r, EPSILON)\n        return 0.5 * torch.pow(r, 0.5 * order) * torch.log(r)\n    else:\n        r = torch.max(r, EPSILON)\n        return torch.pow(r, 0.5 * order)\n\n\ndef apply_interpolation(query_points, train_points, w, v, order):\n    \"\"\"Apply polyharmonic interpolation model to data.\n    Given coefficients w and v for the interpolation model, we evaluate\n    interpolated function values at query_points.\n    Args:\n    query_points: `[b, m, d]` x values to evaluate the interpolation at\n    train_points: `[b, n, d]` x values that act as the interpolation centers\n                    ( the c variables in the wikipedia article)\n    w: `[b, n, k]` weights on each interpolation center\n    v: `[b, d, k]` weights on each input dimension\n    order: order of the interpolation\n    Returns:\n    Polyharmonic interpolation evaluated at points defined in query_points.\n    \"\"\"\n    query_points = query_points.unsqueeze(0)\n    # First, compute the contribution from the rbf term.\n    pairwise_dists = cross_squared_distance_matrix(query_points.float(), train_points.float())\n    phi_pairwise_dists = phi(pairwise_dists, order)\n\n    rbf_term = 
torch.matmul(phi_pairwise_dists, w)\n\n    # Then, compute the contribution from the linear term.\n    # Pad query_points with ones, for the bias term in the linear model.\n    ones = torch.ones_like(query_points[..., :1])\n    query_points_pad = torch.cat((\n        query_points,\n        ones\n    ), 2).float()\n    linear_term = torch.matmul(query_points_pad, v)\n\n    return rbf_term + linear_term\n\n\ndef dense_image_warp(image, flow):\n    \"\"\"Image warping using per-pixel flow vectors.\n    Apply a non-linear warp to the image, where the warp is specified by a dense\n    flow field of offset vectors that define the correspondences of pixel values\n    in the output image back to locations in the  source image. Specifically, the\n    pixel value at output[b, j, i, c] is\n    images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c].\n    The locations specified by this formula do not necessarily map to an int\n    index. Therefore, the pixel value is obtained by bilinear\n    interpolation of the 4 nearest pixels around\n    (b, j - flow[b, j, i, 0], i - flow[b, j, i, 1]). For locations outside\n    of the image, we use the nearest pixel values at the image boundary.\n    Args:\n    image: 4-D float `Tensor` with shape `[batch, height, width, channels]`.\n    flow: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.\n    name: A name for the operation (optional).\n    Note that image and flow can be of type tf.half, tf.float32, or tf.float64,\n    and do not necessarily have to be the same type.\n    Returns:\n    A 4-D float `Tensor` with shape`[batch, height, width, channels]`\n    and same type as input image.\n    Raises:\n    ValueError: if height < 2 or width < 2 or the inputs have the wrong number\n    of dimensions.\n    \"\"\"\n    image = image.unsqueeze(3)  # add a single channel dimension to image tensor\n    batch_size, height, width, channels = image.shape\n\n    # The flow is defined on the image grid. 
Turn the flow into a list of query\n    # points in the grid space.\n    grid_x, grid_y = torch.meshgrid(\n        torch.arange(width), torch.arange(height))\n\n    stacked_grid = torch.stack((grid_y, grid_x), dim=2).float()\n\n    batched_grid = stacked_grid.unsqueeze(-1).permute(3, 1, 0, 2)\n\n    query_points_on_grid = batched_grid - flow\n    query_points_flattened = torch.reshape(query_points_on_grid,\n                                           [batch_size, height * width, 2])\n    # Compute values at the query points, then reshape the result back to the\n    # image grid.\n    interpolated = interpolate_bilinear(image, query_points_flattened)\n    interpolated = torch.reshape(interpolated,\n                                 [batch_size, height, width, channels])\n    return interpolated\n\n\ndef interpolate_bilinear(grid,\n                         query_points,\n                         name='interpolate_bilinear',\n                         indexing='ij'):\n    \"\"\"Similar to Matlab's interp2 function.\n    Finds values for query points on a grid using bilinear interpolation.\n    Args:\n    grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.\n    query_points: a 3-D float `Tensor` of N points with shape `[batch, N, 2]`.\n    name: a name for the operation (optional).\n    indexing: whether the query points are specified as row and column (ij),\n      or Cartesian coordinates (xy).\n    Returns:\n    values: a 3-D `Tensor` with shape `[batch, N, channels]`\n    Raises:\n    ValueError: if the indexing mode is invalid, or if the shape of the inputs\n      invalid.\n    \"\"\"\n    if indexing != 'ij' and indexing != 'xy':\n        raise ValueError('Indexing mode must be \\'ij\\' or \\'xy\\'')\n\n    shape = grid.shape\n    if len(shape) != 4:\n        msg = 'Grid must be 4 dimensional. 
Received size: '\n        raise ValueError(msg + str(grid.shape))\n\n    batch_size, height, width, channels = grid.shape\n\n    shape = [batch_size, height, width, channels]\n    query_type = query_points.dtype\n    grid_type = grid.dtype\n\n    num_queries = query_points.shape[1]\n\n    alphas = []\n    floors = []\n    ceils = []\n    index_order = [0, 1] if indexing == 'ij' else [1, 0]\n    unstacked_query_points = query_points.unbind(2)\n\n    for dim in index_order:\n        queries = unstacked_query_points[dim]\n\n        size_in_indexing_dimension = shape[dim + 1]\n\n        # max_floor is size_in_indexing_dimension - 2 so that max_floor + 1\n        # is still a valid index into the grid.\n        max_floor = torch.tensor(size_in_indexing_dimension - 2, dtype=query_type)\n        min_floor = torch.tensor(0.0, dtype=query_type)\n        maxx = torch.max(min_floor, torch.floor(queries))\n        floor = torch.min(maxx, max_floor)\n        int_floor = floor.long()\n        floors.append(int_floor)\n        ceil = int_floor + 1\n        ceils.append(ceil)\n\n        # alpha has the same type as the grid, as we will directly use alpha\n        # when taking linear combinations of pixel values from the image.\n        alpha = torch.tensor(queries - floor, dtype=grid_type)\n        min_alpha = torch.tensor(0.0, dtype=grid_type)\n        max_alpha = torch.tensor(1.0, dtype=grid_type)\n        alpha = torch.min(torch.max(min_alpha, alpha), max_alpha)\n\n        # Expand alpha to [b, n, 1] so we can use broadcasting\n        # (since the alpha values don't depend on the channel).\n        alpha = torch.unsqueeze(alpha, 2)\n        alphas.append(alpha)\n\n    flattened_grid = torch.reshape(\n        grid, [batch_size * height * width, channels])\n    batch_offsets = torch.reshape(\n        torch.arange(batch_size) * height * width, [batch_size, 1])\n\n    # This wraps array_ops.gather. We reshape the image data such that the\n    # batch, y, and x coordinates are pulled into the first dimension.\n    # Then we gather. Finally, we reshape the output back. It's possible this\n    # code would be made simpler by using array_ops.gather_nd.\n    def gather(y_coords, x_coords, name):\n        linear_coordinates = batch_offsets + y_coords * width + x_coords\n        gathered_values = torch.gather(flattened_grid.t(), 1, linear_coordinates)\n        return torch.reshape(gathered_values,\n                             [batch_size, num_queries, channels])\n\n    # grab the pixel values in the 4 corners around each query point\n    top_left = gather(floors[0], floors[1], 'top_left')\n    top_right = gather(floors[0], ceils[1], 'top_right')\n    bottom_left = gather(ceils[0], floors[1], 'bottom_left')\n    bottom_right = gather(ceils[0], ceils[1], 'bottom_right')\n\n    interp_top = alphas[1] * (top_right - top_left) + top_left\n    interp_bottom = alphas[1] * (bottom_right - bottom_left) + bottom_left\n    interp = alphas[0] * (interp_bottom - interp_top) + interp_top\n\n    return interp"
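\n\n\nif __name__ == '__main__':\n    # Minimal smoke test, a sketch on synthetic data (not part of the\n    # original repo): move one of three control points ten frames along\n    # the time axis and warp a random spectrogram-shaped tensor.\n    torch.manual_seed(0)\n    spec = torch.rand(1, 80, 200)  # [batch, n_mels, time]\n    src_pts = torch.tensor([[[30.0, 80.0], [40.0, 120.0], [50.0, 140.0]]])\n    dest_pts = torch.tensor([[[30.0, 80.0], [40.0, 130.0], [50.0, 140.0]]])\n    warped, flows = sparse_image_warp(spec, src_pts, dest_pts)\n    print(warped.shape, flows.shape)  # [1, 80, 200, 1], [1, 80, 200, 2]\n"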
  },
  {
    "path": "SpecAugment/spec_augment_pytorch.py",
    "content": "# Copyright 2019 RnD at Spoon Radio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"SpecAugment Implementation for Tensorflow.\nRelated paper : https://arxiv.org/pdf/1904.08779.pdf\nIn this paper, show summarized parameters by each open datasets in Tabel 1.\n-----------------------------------------\nPolicy | W  | F  | m_F |  T  |  p  | m_T\n-----------------------------------------\nNone   |  0 |  0 |  -  |  0  |  -  |  -\n-----------------------------------------\nLB     | 80 | 27 |  1  | 100 | 1.0 | 1\n-----------------------------------------\nLD     | 80 | 27 |  2  | 100 | 1.0 | 2\n-----------------------------------------\nSM     | 40 | 15 |  2  |  70 | 0.2 | 2\n-----------------------------------------\nSS     | 40 | 27 |  2  |  70 | 0.2 | 2\n-----------------------------------------\nLB : LibriSpeech basic\nLD : LibriSpeech double\nSM : Switchboard mild\nSS : Switchboard strong\n\"\"\"\n\nimport librosa\nimport librosa.display\nimport numpy as np\nimport random\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom SpecAugment.sparse_image_warp_pytorch import sparse_image_warp\nimport torch\n\n\ndef time_warp(spec, W=5):\n    num_rows = spec.shape[1]\n    spec_len = spec.shape[2]\n\n    y = num_rows // 2\n    horizontal_line_at_ctr = spec[0][y]\n    # assert len(horizontal_line_at_ctr) == spec_len\n\n    point_to_warp = horizontal_line_at_ctr[random.randrange(W, spec_len-W)]\n    # assert isinstance(point_to_warp, torch.Tensor)\n\n    # Uniform distribution from (0,W) with chance to be up to W negative\n    dist_to_warp = random.randrange(-W, W)\n    src_pts = torch.tensor([[[y, point_to_warp]]])\n    dest_pts = torch.tensor([[[y, point_to_warp + dist_to_warp]]])\n    warped_spectro, dense_flows = sparse_image_warp(spec, src_pts, dest_pts)\n    return warped_spectro.squeeze(3)\n\n\ndef spec_augment(mel_spectrogram, time_warping_para=80, frequency_masking_para=27,\n                 time_masking_para=100, frequency_mask_num=1, time_mask_num=1):\n    \"\"\"Spec augmentation Calculation Function.\n    'SpecAugment' have 3 steps for audio data augmentation.\n    first step is time warping using Tensorflow's image_sparse_warp function.\n    Second step is frequency masking, last step is time masking.\n    # Arguments:\n      mel_spectrogram(numpy array): audio file path of you want to warping and masking.\n      time_warping_para(float): Augmentation parameter, \"time warp parameter W\".\n        If none, default = 80 for LibriSpeech.\n      frequency_masking_para(float): Augmentation parameter, \"frequency mask parameter F\"\n        If none, default = 100 for LibriSpeech.\n      time_masking_para(float): Augmentation parameter, \"time mask parameter T\"\n        If none, default = 27 for LibriSpeech.\n      frequency_mask_num(float): number of frequency masking lines, \"m_F\".\n        If none, default = 1 for LibriSpeech.\n      time_mask_num(float): 
number of time masking lines, \"m_T\".\n        If none, default = 1 for LibriSpeech.\n    # Returns\n      mel_spectrogram(numpy array): warped and masked mel spectrogram.\n    \"\"\"\n    v = mel_spectrogram.shape[1]\n    tau = mel_spectrogram.shape[2]\n\n    # Step 1 : Time warping\n    warped_mel_spectrogram = time_warp(mel_spectrogram, W=time_warping_para)\n\n    # Step 2 : Frequency masking\n    for i in range(frequency_mask_num):\n        f = np.random.uniform(low=0.0, high=frequency_masking_para)\n        f = int(f)\n        f0 = random.randint(0, v-f)\n        warped_mel_spectrogram[:, f0:f0+f, :] = 0\n\n    # Step 3 : Time masking\n    for i in range(time_mask_num):\n        t = np.random.uniform(low=0.0, high=time_masking_para)\n        t = int(t)\n        t0 = random.randint(0, tau-t)\n        warped_mel_spectrogram[:, :, t0:t0+t] = 0\n\n    return warped_mel_spectrogram\n\n\ndef visualization_spectrogram(mel_spectrogram, title):\n    \"\"\"visualizing result of SpecAugment\n    # Arguments:\n      mel_spectrogram(ndarray): mel_spectrogram to visualize.\n      title(String): plot figure's title\n    \"\"\"\n    # Show mel-spectrogram using librosa's specshow.\n    plt.figure(figsize=(10, 4))\n    librosa.display.specshow(librosa.power_to_db(mel_spectrogram[0, :, :], ref=np.max), y_axis='mel', fmax=8000, x_axis='time')\n    # plt.colorbar(format='%+2.0f dB')\n    plt.title(title)\n    plt.tight_layout()\n    plt.show()\n"
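\n\nif __name__ == '__main__':\n    # Minimal usage sketch on synthetic data (not part of the original\n    # repo). These arguments are the LB policy from the table above\n    # (W=80, F=27, m_F=1, T=100, m_T=1).\n    torch.manual_seed(0)\n    mel = torch.rand(1, 128, 400)  # [batch, n_mels, time]\n    augmented = spec_augment(mel, time_warping_para=80,\n                             frequency_masking_para=27,\n                             time_masking_para=100,\n                             frequency_mask_num=1, time_mask_num=1)\n    print(augmented.shape)\n"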
  },
  {
    "path": "SpecAugment/spec_augment_tensorflow.py",
    "content": "# Copyright 2019 RnD at Spoon Radio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"SpecAugment Implementation for Tensorflow.\nRelated paper : https://arxiv.org/pdf/1904.08779.pdf\n\nIn this paper, show summarized parameters by each open datasets in Tabel 1.\n-----------------------------------------\nPolicy | W  | F  | m_F |  T  |  p  | m_T\n-----------------------------------------\nNone   |  0 |  0 |  -  |  0  |  -  |  -\n-----------------------------------------\nLB     | 80 | 27 |  1  | 100 | 1.0 | 1\n-----------------------------------------\nLD     | 80 | 27 |  2  | 100 | 1.0 | 2\n-----------------------------------------\nSM     | 40 | 15 |  2  |  70 | 0.2 | 2\n-----------------------------------------\nSS     | 40 | 27 |  2  |  70 | 0.2 | 2\n-----------------------------------------\nLB : LibriSpeech basic\nLD : LibriSpeech double\nSM : Switchboard mild\nSS : Switchboard strong\n\"\"\"\n\nimport librosa\nimport librosa.display\nimport tensorflow as tf\nfrom tensorflow_addons.image import sparse_image_warp\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef sparse_warp(mel_spectrogram, time_warping_para=80):\n    \"\"\"Spec augmentation Calculation Function.\n\n    'SpecAugment' have 3 steps for audio data augmentation.\n    first step is time warping using Tensorflow's image_sparse_warp function.\n    Second step is frequency masking, last step is time masking.\n\n    # Arguments:\n      mel_spectrogram(numpy array): audio file path of you want to warping and masking.\n      time_warping_para(float): Augmentation parameter, \"time warp parameter W\".\n        If none, default = 80 for LibriSpeech.\n\n    # Returns\n      mel_spectrogram(numpy array): warped and masked mel spectrogram.\n    \"\"\"\n\n    fbank_size = tf.shape(mel_spectrogram)\n    n, v = fbank_size[1], fbank_size[2]\n\n    # Step 1 : Time warping\n    # Image warping control point setting.\n    # Source\n    pt = tf.random.uniform([], time_warping_para, n-time_warping_para, tf.int32) # radnom point along the time axis\n    src_ctr_pt_freq = tf.range(v // 2)  # control points on freq-axis\n    src_ctr_pt_time = tf.ones_like(src_ctr_pt_freq) * pt  # control points on time-axis\n    src_ctr_pts = tf.stack((src_ctr_pt_time, src_ctr_pt_freq), -1)\n    src_ctr_pts = tf.cast(src_ctr_pts, dtype=tf.float32)\n\n    # Destination\n    w = tf.random.uniform([], -time_warping_para, time_warping_para, tf.int32)  # distance\n    dest_ctr_pt_freq = src_ctr_pt_freq\n    dest_ctr_pt_time = src_ctr_pt_time + w\n    dest_ctr_pts = tf.stack((dest_ctr_pt_time, dest_ctr_pt_freq), -1)\n    dest_ctr_pts = tf.cast(dest_ctr_pts, dtype=tf.float32)\n\n    # warp\n    source_control_point_locations = tf.expand_dims(src_ctr_pts, 0)  # (1, v//2, 2)\n    dest_control_point_locations = tf.expand_dims(dest_ctr_pts, 0)  # (1, v//2, 2)\n\n    warped_image, _ = sparse_image_warp(mel_spectrogram,\n                                       
 source_control_point_locations,\n                                        dest_control_point_locations)\n    return warped_image\n\n\ndef frequency_masking(mel_spectrogram, v, frequency_masking_para=27, frequency_mask_num=2):\n    \"\"\"Spec augmentation Calculation Function.\n\n    'SpecAugment' have 3 steps for audio data augmentation.\n    first step is time warping using Tensorflow's image_sparse_warp function.\n    Second step is frequency masking, last step is time masking.\n\n    # Arguments:\n      mel_spectrogram(numpy array): audio file path of you want to warping and masking.\n      frequency_masking_para(float): Augmentation parameter, \"frequency mask parameter F\"\n        If none, default = 100 for LibriSpeech.\n      frequency_mask_num(float): number of frequency masking lines, \"m_F\".\n        If none, default = 1 for LibriSpeech.\n\n    # Returns\n      mel_spectrogram(numpy array): warped and masked mel spectrogram.\n    \"\"\"\n    # Step 2 : Frequency masking\n    fbank_size = tf.shape(mel_spectrogram)\n    n, v = fbank_size[1], fbank_size[2]\n\n    for i in range(frequency_mask_num):\n        f = tf.random.uniform([], minval=0, maxval=frequency_masking_para, dtype=tf.int32)\n        v = tf.cast(v, dtype=tf.int32)\n        f0 = tf.random.uniform([], minval=0, maxval=v-f, dtype=tf.int32)\n\n        # warped_mel_spectrogram[f0:f0 + f, :] = 0\n        mask = tf.concat((tf.ones(shape=(1, n, v - f0 - f, 1)),\n                          tf.zeros(shape=(1, n, f, 1)),\n                          tf.ones(shape=(1, n, f0, 1)),\n                          ), 2)\n        mel_spectrogram = mel_spectrogram * mask\n    return tf.cast(mel_spectrogram, dtype=tf.float32)\n\n\ndef time_masking(mel_spectrogram, tau, time_masking_para=100, time_mask_num=2):\n    \"\"\"Spec augmentation Calculation Function.\n\n    'SpecAugment' have 3 steps for audio data augmentation.\n    first step is time warping using Tensorflow's image_sparse_warp function.\n    Second step is frequency masking, last step is time masking.\n\n    # Arguments:\n      mel_spectrogram(numpy array): audio file path of you want to warping and masking.\n      time_masking_para(float): Augmentation parameter, \"time mask parameter T\"\n        If none, default = 27 for LibriSpeech.\n      time_mask_num(float): number of time masking lines, \"m_T\".\n        If none, default = 1 for LibriSpeech.\n\n    # Returns\n      mel_spectrogram(numpy array): warped and masked mel spectrogram.\n    \"\"\"\n    fbank_size = tf.shape(mel_spectrogram)\n    n, v = fbank_size[1], fbank_size[2]\n\n    # Step 3 : Time masking\n    for i in range(time_mask_num):\n        t = tf.random.uniform([], minval=0, maxval=time_masking_para, dtype=tf.int32)\n        t0 = tf.random.uniform([], minval=0, maxval=tau-t, dtype=tf.int32)\n\n        # mel_spectrogram[:, t0:t0 + t] = 0\n        mask = tf.concat((tf.ones(shape=(1, n-t0-t, v, 1)),\n                          tf.zeros(shape=(1, t, v, 1)),\n                          tf.ones(shape=(1, t0, v, 1)),\n                          ), 1)\n        mel_spectrogram = mel_spectrogram * mask\n    return tf.cast(mel_spectrogram, dtype=tf.float32)\n\n\ndef spec_augment(mel_spectrogram):\n\n    v = mel_spectrogram.shape[0]\n    tau = mel_spectrogram.shape[1]\n\n    warped_mel_spectrogram = sparse_warp(mel_spectrogram)\n\n    warped_frequency_spectrogram = frequency_masking(warped_mel_spectrogram, v=v)\n\n    warped_frequency_time_sepctrogram = time_masking(warped_frequency_spectrogram, tau=tau)\n\n    return 
warped_frequency_time_sepctrogram\n\n\ndef visualization_spectrogram(mel_spectrogram, title):\n    \"\"\"visualizing first one result of SpecAugment\n\n    # Arguments:\n      mel_spectrogram(ndarray): mel_spectrogram to visualize.\n      title(String): plot figure's title\n    \"\"\"\n    # Show mel-spectrogram using librosa's specshow.\n    plt.figure(figsize=(10, 4))\n    librosa.display.specshow(librosa.power_to_db(mel_spectrogram[0, :, :, 0], ref=np.max), y_axis='mel', fmax=8000, x_axis='time')\n    plt.title(title)\n    plt.tight_layout()\n    plt.show()\n\n\ndef visualization_tensor_spectrogram(mel_spectrogram, title):\n    \"\"\"visualizing first one result of SpecAugment\n\n    # Arguments:\n      mel_spectrogram(ndarray): mel_spectrogram to visualize.\n      title(String): plot figure's title\n    \"\"\"\n\n    # Show mel-spectrogram using librosa's specshow.\n    plt.figure(figsize=(10, 4))\n    librosa.display.specshow(librosa.power_to_db(mel_spectrogram[0, :, :, 0], ref=np.max), y_axis='mel', fmax=8000, x_axis='time')\n    # plt.colorbar(format='%+2.0f dB')\n    plt.title(title)\n    plt.tight_layout()\n    plt.show()\n"
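\n\nif __name__ == '__main__':\n    # Minimal usage sketch on synthetic data (not part of the original\n    # repo); requires tensorflow-addons for sparse_image_warp.\n    mel = tf.random.uniform((1, 400, 128, 1))  # [batch, time, freq, 1]\n    augmented = spec_augment(mel)\n    print(augmented.shape)  # (1, 400, 128, 1)\n"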
  },
  {
    "path": "requirements.txt",
    "content": "librosa\nmatplotlib\nnumpy"
  },
  {
    "path": "setup.cfg",
    "content": "[metadata]\ndescription-file = README.md"
  },
  {
    "path": "setup.py",
    "content": "from setuptools import setup, find_packages\n\nsetup(\n   name='SpecAugment',\n   version='1.2.3',\n   description='A implementation of \"SpecAugment\"',\n   url              = 'https://github.com/shelling203/SpecAugment',\n   packages         = find_packages(exclude = ['docs', 'tests*']),\n   install_requires=['tensorflow', 'librosa', 'matplotlib', 'torch'], #external packages as dependencies\n)"
  },
  {
    "path": "tests/spec_augment_test_TF.py",
    "content": "# Copyright 2019 RnD at Spoon Radio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"SpecAugment test\"\"\"\n\nimport argparse\nimport librosa\nfrom SpecAugment import spec_augment_tensorflow\nimport os, sys\nimport numpy as np\n# sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\n\nparser = argparse.ArgumentParser(description='Spec Augment')\nparser.add_argument('--audio-path', default='../data/61-70968-0002.wav',\n                    help='The audio file.')\nparser.add_argument('--time-warp-para', default=80,\n                    help='time warp parameter W')\nparser.add_argument('--frequency-mask-para', default=100,\n                    help='frequency mask parameter F')\nparser.add_argument('--time-mask-para', default=27,\n                    help='time mask parameter T')\nparser.add_argument('--masking-line-number', default=1,\n                    help='masking line number')\n\nargs = parser.parse_args()\naudio_path = args.audio_path\ntime_warping_para = args.time_warp_para\ntime_masking_para = args.frequency_mask_para\nfrequency_masking_para = args.time_mask_para\nmasking_line_number = args.masking_line_number\n\nif __name__ == \"__main__\":\n\n    # Step 0 : load audio file, extract mel spectrogram\n    audio, sampling_rate = librosa.load(audio_path)\n    mel_spectrogram = librosa.feature.melspectrogram(y=audio,\n                                                     sr=sampling_rate,\n                                                     n_mels=256,\n                                                     hop_length=128,\n                                                     fmax=8000)\n\n    # reshape spectrogram shape to [batch_size, time, frequency, 1]\n    shape = mel_spectrogram.shape\n    mel_spectrogram = np.reshape(mel_spectrogram, (-1, shape[0], shape[1], 1))\n\n    # Show Raw mel-spectrogram\n    spec_augment_tensorflow.visualization_spectrogram(mel_spectrogram=mel_spectrogram,\n                                                      title=\"Raw Mel Spectrogram\")\n\n    # Show time warped & masked spectrogram\n    spec_augment_tensorflow.visualization_tensor_spectrogram(mel_spectrogram=spec_augment_tensorflow.spec_augment(mel_spectrogram),\n                                                      title=\"tensorflow Warped & Masked Mel Spectrogram\")\n"
  },
  {
    "path": "tests/spec_augment_test_pytorch.py",
    "content": "# Copyright 2019 RnD at Spoon Radio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"SpecAugment test\"\"\"\n\nimport argparse\nimport librosa\nimport numpy as np\nimport torch\nfrom SpecAugment import spec_augment_pytorch\nimport os, sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\n\nparser = argparse.ArgumentParser(description='Spec Augment')\nparser.add_argument('--audio-path', default='../data/61-70968-0002.wav',\n                    help='The audio file.')\nparser.add_argument('--time-warp-para', default=80,\n                    help='time warp parameter W')\nparser.add_argument('--frequency-mask-para', default=100,\n                    help='frequency mask parameter F')\nparser.add_argument('--time-mask-para', default=27,\n                    help='time mask parameter T')\nparser.add_argument('--masking-line-number', default=1,\n                    help='masking line number')\n\nargs = parser.parse_args()\naudio_path = args.audio_path\ntime_warping_para = args.time_warp_para\ntime_masking_para = args.frequency_mask_para\nfrequency_masking_para = args.time_mask_para\nmasking_line_number = args.masking_line_number\n\nif __name__ == \"__main__\":\n\n    # Step 0 : load audio file, extract mel spectrogram\n    audio, sampling_rate = librosa.load(audio_path)\n    mel_spectrogram = librosa.feature.melspectrogram(y=audio,\n                                                     sr=sampling_rate,\n                                                     n_mels=256,\n                                                     hop_length=128,\n                                                     fmax=8000)\n\n    # reshape spectrogram shape to [batch_size, time, frequency]\n    shape = mel_spectrogram.shape\n    mel_spectrogram = np.reshape(mel_spectrogram, (-1, shape[0], shape[1]))\n    mel_spectrogram = torch.from_numpy(mel_spectrogram)\n\n    # Show Raw mel-spectrogram\n    spec_augment_pytorch.visualization_spectrogram(mel_spectrogram=mel_spectrogram,\n                                                      title=\"Raw Mel Spectrogram\")\n\n    # Calculate SpecAugment pytorch\n    warped_masked_spectrogram = spec_augment_pytorch.spec_augment(mel_spectrogram=mel_spectrogram)\n\n    # Show time warped & masked spectrogram\n    spec_augment_pytorch.visualization_spectrogram(mel_spectrogram=warped_masked_spectrogram,\n                                                      title=\"pytorch Warped & Masked Mel Spectrogram\")\n\n\n"
  }
]