Full Code of tensorflow/estimator for AI

master 26a7dc2ef7d3 cached
187 files
3.9 MB
1.0M tokens
4635 symbols
1 requests
Download .txt
Showing preview only (4,112K chars total). Download the full file or copy to clipboard to get everything.
Repository: tensorflow/estimator
Branch: master
Commit: 26a7dc2ef7d3
Files: 187
Total size: 3.9 MB

Directory structure:
gitextract_a04upojl/

├── .bazelrc
├── .gitignore
├── BUILD
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── WORKSPACE
├── tensorflow_estimator/
│   ├── BUILD
│   ├── estimator.bzl
│   ├── python/
│   │   └── estimator/
│   │       ├── BUILD
│   │       ├── api/
│   │       │   ├── BUILD
│   │       │   ├── api_gen.bzl
│   │       │   ├── extractor_wrapper.py
│   │       │   └── generator_wrapper.py
│   │       ├── canned/
│   │       │   ├── __init__.py
│   │       │   ├── baseline.py
│   │       │   ├── baseline_estimator_test.py
│   │       │   ├── baseline_test.py
│   │       │   ├── canned_estimator_ds_integration_test.py
│   │       │   ├── dnn.py
│   │       │   ├── dnn_estimator_test.py
│   │       │   ├── dnn_linear_combined.py
│   │       │   ├── dnn_linear_combined_estimator_test.py
│   │       │   ├── dnn_linear_combined_test.py
│   │       │   ├── dnn_test_fc_v2.py
│   │       │   ├── dnn_testing_utils.py
│   │       │   ├── head.py
│   │       │   ├── head_test.py
│   │       │   ├── kmeans.py
│   │       │   ├── kmeans_test.py
│   │       │   ├── linear.py
│   │       │   ├── linear_estimator_test.py
│   │       │   ├── linear_model_test.py
│   │       │   ├── linear_optimizer/
│   │       │   │   ├── BUILD
│   │       │   │   ├── __init__.py
│   │       │   │   ├── doc/
│   │       │   │   │   └── sdca.ipynb
│   │       │   │   └── python/
│   │       │   │       ├── sdca_test.py
│   │       │   │       └── utils/
│   │       │   │           ├── sdca_ops.py
│   │       │   │           ├── sdca_ops_test.py
│   │       │   │           ├── sharded_mutable_dense_hashtable.py
│   │       │   │           └── sharded_mutable_dense_hashtable_test.py
│   │       │   ├── linear_test.py
│   │       │   ├── linear_testing_utils.py
│   │       │   ├── metric_keys.py
│   │       │   ├── optimizers.py
│   │       │   ├── optimizers_test.py
│   │       │   ├── optimizers_test_v2.py
│   │       │   ├── parsing_utils.py
│   │       │   ├── parsing_utils_test.py
│   │       │   ├── prediction_keys.py
│   │       │   ├── rnn.py
│   │       │   ├── rnn_test.py
│   │       │   ├── saved_model_estimator.py
│   │       │   ├── saved_model_estimator_test.py
│   │       │   ├── testdata/
│   │       │   │   └── wire_vocabulary.txt
│   │       │   ├── timeseries/
│   │       │   │   ├── BUILD
│   │       │   │   ├── ar_model.py
│   │       │   │   ├── ar_model_test.py
│   │       │   │   ├── ar_model_training_test.py
│   │       │   │   ├── estimators.py
│   │       │   │   ├── estimators_test.py
│   │       │   │   ├── feature_keys.py
│   │       │   │   ├── head.py
│   │       │   │   ├── head_test.py
│   │       │   │   ├── math_utils.py
│   │       │   │   ├── math_utils_test.py
│   │       │   │   ├── model.py
│   │       │   │   ├── model_utils.py
│   │       │   │   ├── saved_model_utils.py
│   │       │   │   └── state_management.py
│   │       │   └── v1/
│   │       │       ├── __init__.py
│   │       │       ├── baseline_estimator_test_v1.py
│   │       │       ├── baseline_test_v1.py
│   │       │       ├── dnn_estimator_test_v1.py
│   │       │       ├── dnn_linear_combined_estimator_test_v1.py
│   │       │       ├── dnn_linear_combined_test_v1.py
│   │       │       ├── dnn_test_fc_v1_v1.py
│   │       │       ├── dnn_test_fc_v2_v1.py
│   │       │       ├── dnn_testing_utils_v1.py
│   │       │       ├── linear_estimator_test_v1.py
│   │       │       ├── linear_test_v1.py
│   │       │       └── linear_testing_utils_v1.py
│   │       ├── distribute_strategy_estimator_integration_test.py
│   │       ├── distribute_strategy_estimator_training_test.py
│   │       ├── early_stopping.py
│   │       ├── early_stopping_test.py
│   │       ├── estimator.py
│   │       ├── estimator_export.py
│   │       ├── estimator_export_test.py
│   │       ├── estimator_lib.py
│   │       ├── estimator_test.py
│   │       ├── export/
│   │       │   ├── __init__.py
│   │       │   ├── export.py
│   │       │   ├── export_lib.py
│   │       │   ├── export_output.py
│   │       │   ├── export_test.py
│   │       │   ├── function.py
│   │       │   └── function_test.py
│   │       ├── exporter.py
│   │       ├── exporter_test.py
│   │       ├── extenders.py
│   │       ├── extenders_test.py
│   │       ├── gc.py
│   │       ├── gc_test.py
│   │       ├── head/
│   │       │   ├── __init__.py
│   │       │   ├── base_head.py
│   │       │   ├── base_head_test.py
│   │       │   ├── binary_class_head.py
│   │       │   ├── binary_class_head_test.py
│   │       │   ├── head_utils.py
│   │       │   ├── multi_class_head.py
│   │       │   ├── multi_class_head_test.py
│   │       │   ├── multi_head.py
│   │       │   ├── multi_head_test.py
│   │       │   ├── multi_label_head.py
│   │       │   ├── multi_label_head_test.py
│   │       │   ├── regression_head.py
│   │       │   ├── regression_head_test.py
│   │       │   ├── sequential_head.py
│   │       │   └── sequential_head_test.py
│   │       ├── hooks/
│   │       │   ├── __init__.py
│   │       │   ├── basic_session_run_hooks.py
│   │       │   ├── basic_session_run_hooks_test.py
│   │       │   ├── fake_summary_writer.py
│   │       │   ├── hooks.py
│   │       │   ├── hooks_test.py
│   │       │   └── session_run_hook.py
│   │       ├── inputs/
│   │       │   ├── __init__.py
│   │       │   ├── inputs.py
│   │       │   ├── numpy_io.py
│   │       │   ├── numpy_io_test.py
│   │       │   ├── pandas_io.py
│   │       │   ├── pandas_io_test.py
│   │       │   └── queues/
│   │       │       ├── __init__.py
│   │       │       ├── feeding_functions.py
│   │       │       ├── feeding_functions_test.py
│   │       │       ├── feeding_queue_runner.py
│   │       │       └── feeding_queue_runner_test.py
│   │       ├── keras_distribute_strategy_test.py
│   │       ├── keras_lib.py
│   │       ├── keras_premade_model_test.py
│   │       ├── keras_test.py
│   │       ├── mode_keys.py
│   │       ├── model_fn.py
│   │       ├── model_fn_test.py
│   │       ├── object_checkpointing_test.py
│   │       ├── run_config.py
│   │       ├── run_config_test.py
│   │       ├── tf_estimator_doctest.py
│   │       ├── tools/
│   │       │   ├── __init__.py
│   │       │   ├── analytics.py
│   │       │   ├── checkpoint_converter.py
│   │       │   └── checkpoint_converter_test.py
│   │       ├── tpu/
│   │       │   ├── BUILD
│   │       │   ├── __init__.py
│   │       │   ├── _tpu_estimator_embedding.py
│   │       │   ├── autotuning_iterations_per_loop_test.py
│   │       │   ├── error_handling.py
│   │       │   ├── error_handling_test.py
│   │       │   ├── iteration_count_estimator.py
│   │       │   ├── spatial_partitioning_api.md
│   │       │   ├── tpu_config.py
│   │       │   ├── tpu_config_test.py
│   │       │   ├── tpu_context.py
│   │       │   ├── tpu_enqueue_sequence_test.py
│   │       │   ├── tpu_estimator.py
│   │       │   ├── tpu_estimator_embedding_test.py
│   │       │   ├── tpu_estimator_evaluation_test.py
│   │       │   ├── tpu_estimator_export_test.py
│   │       │   ├── tpu_estimator_gradients_test.py
│   │       │   ├── tpu_estimator_input_v2_test.py
│   │       │   ├── tpu_estimator_integration_test.py
│   │       │   ├── tpu_estimator_model_parallelism_test.py
│   │       │   ├── tpu_estimator_signals_test.py
│   │       │   ├── tpu_estimator_test.py
│   │       │   └── util.py
│   │       ├── training.py
│   │       ├── training_test.py
│   │       ├── util.py
│   │       └── util_test.py
│   └── tools/
│       └── pip_package/
│           ├── BUILD
│           ├── build_pip_package.sh
│           ├── create_pip_helper.py
│           └── setup.py
└── third_party/
    └── py/
        ├── BUILD
        ├── BUILD.tpl
        └── python_configure.bzl

================================================
FILE CONTENTS
================================================

================================================
FILE: .bazelrc
================================================

# Default options should come above this line

# Put user-specific options in .bazelrc.user
try-import %workspace%/.bazelrc.user


================================================
FILE: .gitignore
================================================
# editor files
*.swp
*~
.vscode/
.DS_Store

# bazel
/.bazelrc.user
/bazel-*

# python
*.pyc
*.pyo
__pycache__
*.whl
.ipynb_checkpoints


================================================
FILE: BUILD
================================================
# Description: Tensorflow Estimator.

licenses(["notice"])  # Apache 2.0

exports_files(["LICENSE"])


================================================
FILE: CONTRIBUTING.md
================================================
Want to contribute? Great! First, read this page (including the small print at the end).

### Before you contribute

Before we can use your code, you must sign the
[Google Individual Contributor License Agreement](https://cla.developers.google.com/about/google-individual)
(CLA), which you can do online. The CLA is necessary mainly because you own the
copyright to your changes, even after your contribution becomes part of our
codebase, so we need your permission to use and distribute your code. We also
need to be sure of various other things—for instance that you'll tell us if you
know that your code infringes on other people's patents. You don't have to sign
the CLA until after you've submitted your code for review and a member has
approved it, but you must do it before we can put your code into our codebase.
Before you start working on a larger contribution, you should get in touch with
us first through the issue tracker with your idea so that we can help out and
possibly guide you. Coordinating up front makes it much easier to avoid
frustration later on.

### Code reviews

All submissions, including submissions by project members, require review. We
use Github pull requests for this purpose.

### The small print

Contributions made by corporations are covered by a different agreement than
the one above, the
[Software Grant and Corporate Contributor License Agreement](https://cla.developers.google.com/about/google-corporate).


================================================
FILE: LICENSE
================================================
Copyright 2018 The TensorFlow Authors.  All rights reserved.

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2017, The TensorFlow Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.


================================================
FILE: README.md
================================================
-----------------
| **`Documentation`** |
|-----------------|
| [![Documentation](https://img.shields.io/badge/api-reference-blue.svg)](https://www.tensorflow.org/api_docs/python/tf/estimator) |

TensorFlow Estimator is a high-level TensorFlow API that greatly simplifies machine learning programming.
Estimators encapsulate training, evaluation, prediction, and exporting for your model.

## Getting Started

See our Estimator
[getting started guide](https://www.tensorflow.org/guide/estimator) for an
introduction to the Estimator APIs.

## Installation

`tf.estimator` is installed when you install the TensorFlow pip package. See
[Installing TensorFlow](https://www.tensorflow.org/install) for instructions.

## Developing

If you want to build TensorFlow Estimator locally, you will need to
[install Bazel](https://docs.bazel.build/versions/master/install.html) and
[install TensorFlow](https://www.tensorflow.org/install/pip).

```sh
# To build TensorFlow Estimator whl file.
bazel build //tensorflow_estimator/tools/pip_package:build_pip_package
bazel-bin/tensorflow_estimator/tools/pip_package/build_pip_package /tmp/estimator_pip

# To run all Estimator tests
bazel test //tensorflow_estimator/...
```

## Contribution guidelines

If you want to contribute to TensorFlow Estimator, be sure to review the [contribution
guidelines](CONTRIBUTING.md).

**Note that this repository is included as a component of the main TensorFlow
package, and any issues encountered while using Estimators should be filed under
[TensorFlow GitHub Issues](https://github.com/tensorflow/tensorflow/issues),
as we do not separately track issues in this repository. You can link this
repository in any issues created as necessary.**

Please see
[TensorFlow Discuss](https://groups.google.com/a/tensorflow.org/forum/#!forum/discuss) for general questions
and discussion and please direct specific questions to
[Stack Overflow](https://stackoverflow.com/questions/tagged/tensorflow).

## License

[Apache License 2.0](LICENSE)


================================================
FILE: WORKSPACE
================================================
workspace(name = "org_tensorflow_estimator")

# Use a custom python toolchain to make sure we always use the python binary
# provided by PYTHON_BIN_PATH.
# This is required due to https://github.com/bazelbuild/bazel/issues/7899,
# because --python_path will not work since Bazel 0.27
load("//third_party/py:python_configure.bzl", "python_configure")

python_configure(name = "local_config_py_toolchain")

register_toolchains("@local_config_py_toolchain//:py_toolchain")


================================================
FILE: tensorflow_estimator/BUILD
================================================
# Placeholder: load py_library

# Description: Tensorflow Estimator.
load(
    "//tensorflow_estimator/python/estimator/api:api_gen.bzl",
    "ESTIMATOR_API_INIT_FILES_V1",
    "ESTIMATOR_API_INIT_FILES_V2",
    "generate_apis",
)

licenses(["notice"])

package(default_visibility = ["//tensorflow_estimator:internal"])

exports_files(["LICENSE"])

# TODO(mikecase): Clean up. Remove all non estimator packages.
package_group(
    name = "internal",
    packages = [
        "//learning/brain/...",
        "//learning/deepmind/research/...",
        "//learning/tfx/models/uplift/estimators/...",
        "//nlp/nlx/ads/expmatch/model/...",
        "//nlp/nlx/common/query_bert/...",
        "//nlp/nlx/i18n/pangloss/...",
        "//tensorflow_estimator/...",
        "//third_party/py/tensorflow_privacy/...",
        "//third_party/tensorflow/python/estimator/...",
    ],
)

# This flag specifies whether Estimator 2.0 API should be built instead
# of 1.* API. Note that Estimator 2.0 API is currently under development.
# Triggered with --define=estimator_api_version=2.
config_setting(
    name = "api_version_2",
    define_values = {"estimator_api_version": "2"},
)

# Triggered with --define=no_estimator_py_deps=true; consumed by the py_test
# macro in estimator.bzl to strip in-repo deps when testing against a wheel.
config_setting(
    name = "no_estimator_py_deps",
    define_values = {"no_estimator_py_deps": "true"},
    visibility = ["//visibility:public"],
)

# Top-level library bundling the generated API __init__ files (root, v1, v2)
# with the estimator implementation.
py_library(
    name = "tensorflow_estimator",
    srcs = [
        ":root_init_gen",
        ":estimator_python_api_gen_compat_v1",
        ":estimator_python_api_gen_compat_v2",
        # Old API files. Delete once TensorFlow is updated to import from new location.
        "//tensorflow_estimator/python/estimator/api:estimator_python_api_gen",
        "//tensorflow_estimator/python/estimator/api:estimator_python_api_gen_compat_v1",
        "//tensorflow_estimator/python/estimator/api:estimator_python_api_gen_compat_v2",
    ],
    srcs_version = "PY3",
    visibility = [
        "//tensorflow_estimator:internal",
        "//third_party/tensorflow/tools/docs/google:__subpackages__",
    ],
    deps = [
        "//tensorflow_estimator/python/estimator:estimator_py",
    ],
)

# Copies either the generated v1 or v2 root API file to the package's
# __init__.py, depending on the :api_version_2 setting above.
genrule(
    name = "root_init_gen",
    srcs = select({
        "api_version_2": ["_api/v2/v2.py"],
        "//conditions:default": ["_api/v1/v1.py"],
    }),
    outs = ["__init__.py"],
    cmd = select({
        "api_version_2": "cp $(location :_api/v2/v2.py) $(OUTS)",
        "//conditions:default": "cp $(location :_api/v1/v1.py) $(OUTS)",
    }),
)

# Generates the v1-compatible API __init__ files under _api/v1/.
generate_apis(
    name = "estimator_python_api_gen_compat_v1",
    api_version = 1,
    output_dir = "_api/v1/",
    output_files = ESTIMATOR_API_INIT_FILES_V1,
    output_package = "tensorflow_estimator._api.v1",
    root_file_name = "v1.py",
)

# Generates the v2-compatible API __init__ files under _api/v2/.
generate_apis(
    name = "estimator_python_api_gen_compat_v2",
    api_version = 2,
    output_dir = "_api/v2/",
    output_files = ESTIMATOR_API_INIT_FILES_V2,
    output_package = "tensorflow_estimator._api.v2",
    root_file_name = "v2.py",
)


================================================
FILE: tensorflow_estimator/estimator.bzl
================================================
"""Estimator common skylark macros."""

# Wrapper around native py_test used when running Estimator tests against a
# pip installation of the package.
def py_test(deps = [], **kwargs):
    # When --define=no_estimator_py_deps=true is set, drop the in-repo deps so
    # the test resolves against the installed wheel rather than source targets.
    selected_deps = select({
        "//tensorflow_estimator:no_estimator_py_deps": [],
        "//conditions:default": deps,
    })
    native.py_test(deps = selected_deps, **kwargs)

def tpu_py_test(**kwargs):
    # Skip the tpu test for Estimator oss.
    # Deliberate no-op: TPU tests are not runnable in the open-source build,
    # so tpu_py_test targets declared in BUILD files expand to nothing here.
    # All arguments are accepted (and ignored) to keep BUILD files portable.
    pass

# We are never indexing generated code in the OSS build, but still
# return a select() for consistency.
def if_indexing_source_code(
        if_true,  # @unused
        if_false):
    """Return a select() on whether or not we are building for source code indexing."""
    # The OSS build never indexes generated code, so the answer is always
    # `if_false`; a select() is still returned for consistency with builds
    # where this macro has a real configuration branch.
    branches = {"//conditions:default": if_false}
    return select(branches)


================================================
FILE: tensorflow_estimator/python/estimator/BUILD
================================================
# Placeholder: load py_library
load("//tensorflow_estimator:estimator.bzl", "py_test")

package(default_visibility = ["//tensorflow_estimator:internal"])

licenses(["notice"])

py_test(
    name = "tf_estimator_doctest",
    srcs = ["tf_estimator_doctest.py"],
    python_version = "PY3",
    tags = [
        "no_oss_py2",
        "noasan",
        "nomsan",
        "notsan",
    ],
    deps = [
        ":estimator_py",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
    ],
)

# Public umbrella library for the Estimator package: estimator_lib.py plus
# every canned estimator, head, hook, and export helper in this package.
# This is the only py_library here with public visibility; external code
# should depend on this target rather than on the fine-grained ones below.
py_library(
    name = "estimator_py",
    srcs = [
        "estimator_lib.py",
    ],
    srcs_version = "PY3",
    visibility = ["//visibility:public"],
    deps = [
        ":base_head",
        ":baseline",
        ":basic_session_run_hooks",
        ":binary_class_head",
        ":checkpoint_converter",
        ":dnn",
        ":dnn_linear_combined",
        ":early_stopping",
        ":estimator",
        ":export",
        ":exporter",
        ":extenders",
        ":fake_summary_writer",
        ":function",
        ":hooks",
        ":inputs",
        ":keras",
        ":kmeans",
        ":linear",
        ":mode_keys",
        ":model_fn",
        ":multi_class_head",
        ":multi_head",
        ":multi_label_head",
        ":parsing_utils",
        ":regression_head",
        ":rnn",
        ":run_config",
        ":saved_model_estimator",
        ":sequential_head",
        ":session_run_hook",
        ":training",
        "//tensorflow_estimator/python/estimator:expect_tensorboard_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
        "//tensorflow_estimator/python/estimator/canned/timeseries:estimators",
        "//tensorflow_estimator/python/estimator/tpu:tpu_estimator",
    ],
)

py_library(
    name = "exporter",
    srcs = ["exporter.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator_export",
        ":gc",
        ":metric_keys",
        ":util",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "exporter_test",
    size = "medium",
    srcs = ["exporter_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":estimator",
        ":exporter",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "extenders",
    srcs = ["extenders.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator",
        ":estimator_export",
        ":mode_keys",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "extenders_test",
    size = "medium",
    srcs = ["extenders_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    tags = ["notsan"],  # b/62863147
    deps = [
        ":extenders",
        ":linear",
        ":run_config",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "gc",
    srcs = ["gc.py"],
    srcs_version = "PY3",
    deps = [
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "gc_test",
    size = "small",
    srcs = ["gc_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":gc",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "hooks",
    srcs = ["hooks/hooks.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator_export",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "hooks_test",
    srcs = ["hooks/hooks_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":estimator_py",
        ":hooks",
        "//tensorflow_estimator/python/estimator",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "model_fn",
    srcs = ["model_fn.py"],
    srcs_version = "PY3",
    visibility = [
        "//tensorflow_estimator:internal",
        "//third_party/tensorflow/python/tpu:__pkg__",
    ],
    deps = [
        ":estimator_export",
        ":mode_keys",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "model_fn_test",
    size = "small",
    srcs = ["model_fn_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":export_output",
        ":model_fn",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "mode_keys",
    srcs = ["mode_keys.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator_export",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "training",
    srcs = ["training.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator",
        ":estimator_export",
        ":exporter",
        ":run_config",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "training_test",
    size = "medium",
    srcs = ["training_test.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    tags = [
        "notap",  # TODO(b/170896944): flaky, broken
        "notsan",
    ],
    deps = [
        ":dnn",
        ":estimator",
        ":exporter",
        ":inputs",
        ":run_config",
        ":training",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "run_config",
    srcs = ["run_config.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator_export",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "run_config_test",
    size = "small",
    srcs = ["run_config_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":run_config",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "baseline",
    srcs = ["canned/baseline.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator",
        ":estimator_export",
        ":head",
        ":head_utils",
        ":model_fn",
        ":optimizers",
        ":regression_head",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "baseline_test",
    size = "medium",
    srcs = ["canned/baseline_test.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "noasan",  # test flakily times out in asan mode.
        "notsan",  # b/67510291
        "optonly",  # flakily times out in fastbuild
    ],
    deps = [
        ":baseline",
        ":estimator",
        ":export_export",
        ":metric_keys",
        ":numpy_io",
        ":pandas_io",
        ":run_config",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_pandas_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "baseline_test_v1",
    size = "medium",
    srcs = ["canned/v1/baseline_test_v1.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "noasan",  # test flakily times out in asan mode.
        "notsan",  # b/67510291
        "optonly",  # flakily times out in fastbuild
    ],
    deps = [
        ":baseline",
        ":estimator",
        ":export_export",
        ":metric_keys",
        ":numpy_io",
        ":pandas_io",
        ":run_config",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_pandas_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "baseline_estimator_test",
    size = "medium",
    srcs = ["canned/baseline_estimator_test.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "noasan",  # test flakily times out in asan mode.
        "notsan",  # b/67510291
        "optonly",  # flakily times out in fastbuild
    ],
    deps = [
        ":baseline",
        ":estimator",
        ":export_export",
        ":metric_keys",
        ":numpy_io",
        ":regression_head",
        ":run_config",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "baseline_estimator_test_v1",
    size = "medium",
    srcs = ["canned/v1/baseline_estimator_test_v1.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "noasan",  # test flakily times out in asan mode.
        "notsan",  # b/67510291
        "optonly",  # flakily times out in fastbuild
    ],
    deps = [
        ":baseline",
        ":estimator",
        ":export_export",
        ":metric_keys",
        ":numpy_io",
        ":run_config",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "kmeans",
    srcs = ["canned/kmeans.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator",
        ":estimator_export",
        ":head",
        ":model_fn",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_test(
    name = "kmeans_test",
    size = "medium",
    srcs = ["canned/kmeans_test.py"],
    python_version = "PY3",
    shard_count = 8,
    srcs_version = "PY3",
    tags = [
        "notap",  # TODO(b/170974352): Flaky timeout
    ],
    deps = [
        ":inputs",
        ":kmeans",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_library(
    name = "dnn",
    srcs = ["canned/dnn.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator",
        ":estimator_export",
        ":head",
        ":head_utils",
        ":mode_keys",
        ":optimizers",
        ":regression_head",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "dnn_testing_utils",
    srcs = ["canned/dnn_testing_utils.py"],
    srcs_version = "PY3",
    visibility = ["//visibility:public"],
    deps = [
        ":estimator",
        ":head",
        ":metric_keys",
        ":mode_keys",
        ":model_fn",
        ":numpy_io",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_absl_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "dnn_testing_utils_v1",
    srcs = ["canned/v1/dnn_testing_utils_v1.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator",
        ":head",
        ":metric_keys",
        ":mode_keys",
        ":model_fn",
        ":numpy_io",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_absl_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "dnn_test_fc_v1_v1",
    size = "medium",
    srcs = ["canned/v1/dnn_test_fc_v1_v1.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "notsan",  # b/67510291
    ],
    deps = [
        ":dnn",
        ":dnn_testing_utils_v1",
        ":export_export",
        ":numpy_io",
        ":pandas_io",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_absl_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_pandas_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "dnn_test_fc_v2",
    size = "medium",
    srcs = ["canned/dnn_test_fc_v2.py"],
    python_version = "PY3",
    shard_count = 8,
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "notsan",  # b/67510291
    ],
    deps = [
        ":dnn",
        ":dnn_testing_utils",
        ":export_export",
        ":numpy_io",
        ":pandas_io",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_absl_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_pandas_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "dnn_test_fc_v2_v1",
    size = "medium",
    srcs = ["canned/v1/dnn_test_fc_v2_v1.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "notsan",  # b/67510291
    ],
    deps = [
        ":dnn",
        ":dnn_testing_utils_v1",
        ":export_export",
        ":numpy_io",
        ":pandas_io",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_absl_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_pandas_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "dnn_estimator_test",
    size = "medium",
    srcs = ["canned/dnn_estimator_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "notsan",
        "optonly",  # times out http://b/79220679
    ],
    deps = [
        ":dnn",
        ":dnn_testing_utils",
        ":export_export",
        ":multi_class_head",
        ":numpy_io",
        ":prediction_keys",
        ":regression_head",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "dnn_estimator_test_v1",
    size = "medium",
    srcs = ["canned/v1/dnn_estimator_test_v1.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "notsan",
        "optonly",  # times out http://b/79220679
    ],
    deps = [
        ":dnn",
        ":dnn_testing_utils_v1",
        ":export_export",
        ":head",
        ":numpy_io",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "dnn_linear_combined",
    srcs = ["canned/dnn_linear_combined.py"],
    srcs_version = "PY3",
    deps = [
        ":dnn",
        ":estimator",
        ":estimator_export",
        ":head",
        ":head_utils",
        ":linear",
        ":model_fn",
        ":optimizers",
        ":regression_head",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "dnn_linear_combined_estimator_test",
    size = "medium",
    srcs = ["canned/dnn_linear_combined_estimator_test.py"],
    python_version = "PY3",
    shard_count = 3,
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "notsan",
    ],
    deps = [
        ":dnn_linear_combined",
        ":dnn_testing_utils",
        ":export_export",
        ":linear_testing_utils",
        ":numpy_io",
        ":prediction_keys",
        ":regression_head",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "dnn_linear_combined_estimator_test_v1",
    size = "medium",
    srcs = ["canned/v1/dnn_linear_combined_estimator_test_v1.py"],
    python_version = "PY3",
    shard_count = 3,
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "notsan",
    ],
    deps = [
        ":dnn_linear_combined",
        ":dnn_testing_utils_v1",
        ":export_export",
        ":head",
        ":linear_testing_utils_v1",
        ":numpy_io",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "dnn_linear_combined_test",
    size = "medium",
    srcs = ["canned/dnn_linear_combined_test.py"],
    python_version = "PY3",
    shard_count = 32,
    srcs_version = "PY3",
    tags = [
        "no_oss",  # TODO(b/143323557)
        "no_pip",
        "notsan",  # TODO(b/67510291)
    ],
    deps = [
        ":dnn_linear_combined",
        ":dnn_testing_utils",
        ":export_export",
        ":linear_testing_utils",
        ":numpy_io",
        ":pandas_io",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_absl_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_pandas_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "dnn_linear_combined_test_v1",
    size = "medium",
    srcs = ["canned/v1/dnn_linear_combined_test_v1.py"],
    python_version = "PY3",
    shard_count = 16,
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "notsan",  # TODO(b/67510291)
    ],
    deps = [
        ":dnn_linear_combined",
        ":dnn_testing_utils_v1",
        ":export_export",
        ":linear_testing_utils_v1",
        ":numpy_io",
        ":pandas_io",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_absl_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_pandas_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "checkpoint_converter",
    srcs = ["tools/checkpoint_converter.py"],
    srcs_version = "PY3",
    deps = [
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "analytics_tools",
    srcs = ["tools/analytics.py"],
    srcs_version = "PY3",
    deps = ["//tensorflow_estimator/python/estimator:expect_tensorflow_installed"],
)

py_test(
    name = "checkpoint_converter_test",
    srcs = ["tools/checkpoint_converter_test.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    deps = [
        ":checkpoint_converter",
        ":dnn",
        ":dnn_linear_combined",
        ":head",
        ":linear",
        ":numpy_io",
        ":regression_head",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_library(
    name = "util",
    srcs = [
        "util.py",
    ],
    srcs_version = "PY3",
    deps = [
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "util_test",
    srcs = ["util_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    tags = ["notsan"],  # b/67510291
    deps = [
        ":util",
        "//tensorflow_estimator/python/estimator:expect_absl_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "early_stopping",
    srcs = [
        "early_stopping.py",
    ],
    srcs_version = "PY3",
    deps = [
        ":estimator_export",
        ":export_export",
        ":model_fn",
        ":run_config",
        ":util",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "early_stopping_test",
    srcs = [
        "early_stopping_test.py",
    ],
    python_version = "PY3",
    srcs_version = "PY3",
    tags = [
        "notap",  # TODO(b/134928532): Reenable this test.
    ],
    deps = [
        ":early_stopping",
        "//tensorflow_estimator/python/estimator",
        "//tensorflow_estimator/python/estimator:expect_absl_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
    ],
)

# Core Estimator class library (estimator.py). Visible to the internal
# package group and to TF's TPU package, which layers TPUEstimator on top.
py_library(
    name = "estimator",
    srcs = [
        "estimator.py",
    ],
    srcs_version = "PY3",
    visibility = [
        "//tensorflow_estimator:internal",
        "//third_party/tensorflow/python/tpu:__pkg__",
    ],
    deps = [
        ":estimator_export",
        ":export",
        ":mode_keys",
        ":model_fn",
        ":run_config",
        ":util",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

# Tests for the core Estimator class.
py_test(
    name = "estimator_test",
    srcs = ["estimator_test.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    tags = ["notsan"],  # b/67510291
    deps = [
        ":estimator",
        ":estimator_py",
        ":export",
        ":mode_keys",
        ":model_fn",
        ":numpy_io",
        ":run_config",
        # Placeholder for an internal build dep disabling tf2 behavior
        # Deps below kept lexicographically sorted (buildifier convention),
        # matching every other rule in this file.
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "parsing_utils",
    srcs = [
        "canned/parsing_utils.py",
    ],
    srcs_version = "PY3",
    deps = [
        ":estimator_export",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "parsing_utils_test",
    srcs = ["canned/parsing_utils_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":parsing_utils",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "export_output",
    srcs = ["export/export_output.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator_export",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_library(
    name = "export",
    srcs = [
        "export/export_lib.py",
    ],
    srcs_version = "PY3",
    deps = [
        ":export_export",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_library(
    name = "export_export",
    srcs = [
        "export/export.py",
    ],
    srcs_version = "PY3",
    deps = [
        ":estimator_export",
        ":util",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_test(
    name = "export_test",
    size = "small",
    srcs = ["export/export_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":export_export",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "function",
    srcs = [
        "export/function.py",
    ],
    srcs_version = "PY3",
    deps = [
        ":mode_keys",
        ":model_fn",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_test(
    name = "function_test",
    size = "small",
    srcs = ["export/function_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":export",
        ":function",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_library(
    name = "head",
    srcs = ["canned/head.py"],
    srcs_version = "PY3",
    deps = [
        ":export_output",
        ":metric_keys",
        ":model_fn",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "head_test",
    size = "medium",
    srcs = ["canned/head_test.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    tags = [
        "manual",
        "no_pip",
        "notap",  # b/148804861
    ],
    deps = [
        ":dnn_testing_utils_v1",
        ":head",
        ":metric_keys",
        ":mode_keys",
        ":model_fn",
        ":numpy_io",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "head_utils",
    srcs = ["head/head_utils.py"],
    srcs_version = "PY3",
    deps = [
        ":binary_class_head",
        ":multi_class_head",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "base_head",
    srcs = ["head/base_head.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator_export",
        ":export_output",
        ":head",
        ":metric_keys",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "base_head_test",
    size = "small",
    srcs = ["head/base_head_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":base_head_test_lib",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
    ],
)

py_library(
    name = "base_head_test_lib",
    testonly = True,
    srcs = ["head/base_head_test.py"],
    srcs_version = "PY3",
    deps = [
        ":base_head",
        ":binary_class_head",
        ":head_utils",
        ":mode_keys",
        ":model_fn",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "binary_class_head",
    srcs = ["head/binary_class_head.py"],
    srcs_version = "PY3",
    deps = [
        ":base_head",
        ":estimator_export",
        ":export_output",
        ":metric_keys",
        ":mode_keys",
        ":model_fn",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "binary_class_head_test",
    size = "medium",
    srcs = ["head/binary_class_head_test.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    tags = [
        "manual",
        "no_pip",
        "notap",  # b/148804861
    ],
    deps = [
        ":binary_class_head",
        ":dnn",
        ":dnn_testing_utils",
        ":head_utils",
        ":metric_keys",
        ":mode_keys",
        ":model_fn",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "multi_head",
    srcs = ["head/multi_head.py"],
    srcs_version = "PY3",
    deps = [
        ":base_head",
        ":estimator_export",
        ":export_output",
        ":metric_keys",
        ":mode_keys",
        ":model_fn",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "multi_head_test",
    size = "medium",
    srcs = ["head/multi_head_test.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    deps = [
        ":head_utils",
        ":metric_keys",
        ":mode_keys",
        ":multi_head",
        ":multi_label_head",
        ":prediction_keys",
        ":regression_head",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "multi_class_head",
    srcs = ["head/multi_class_head.py"],
    srcs_version = "PY3",
    deps = [
        ":base_head",
        ":estimator_export",
        ":export_output",
        ":metric_keys",
        ":mode_keys",
        ":model_fn",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "multi_class_head_test",
    size = "medium",
    srcs = ["head/multi_class_head_test.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    tags = [
        "no_oss",  # TODO(b/202525254): broken on TF 2.7
    ],
    deps = [
        ":dnn",
        ":head_utils",
        ":metric_keys",
        ":mode_keys",
        ":model_fn",
        ":multi_class_head",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "multi_label_head",
    srcs = ["head/multi_label_head.py"],
    srcs_version = "PY3",
    deps = [
        ":base_head",
        ":estimator_export",
        ":export_output",
        ":metric_keys",
        ":mode_keys",
        ":model_fn",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "multi_label_head_test",
    size = "medium",
    srcs = ["head/multi_label_head_test.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    deps = [
        ":dnn",
        ":head_utils",
        ":metric_keys",
        ":mode_keys",
        ":model_fn",
        ":multi_label_head",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "regression_head",
    srcs = ["head/regression_head.py"],
    srcs_version = "PY3",
    deps = [
        ":base_head",
        ":estimator_export",
        ":export_output",
        ":metric_keys",
        ":mode_keys",
        ":model_fn",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "regression_head_test",
    size = "medium",
    srcs = ["head/regression_head_test.py"],
    python_version = "PY3",
    shard_count = 4,
    srcs_version = "PY3",
    tags = [
        "manual",
        "notap",  # b/148804861
    ],
    deps = [
        ":head_utils",
        ":metric_keys",
        ":mode_keys",
        ":model_fn",
        ":numpy_io",
        ":prediction_keys",
        ":regression_head",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "sequential_head",
    srcs = ["head/sequential_head.py"],
    srcs_version = "PY3",
    deps = [
        ":base_head",
        ":mode_keys",
        ":multi_head",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_test(
    name = "sequential_head_test",
    size = "medium",
    srcs = ["head/sequential_head_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":binary_class_head",
        ":head_utils",
        ":metric_keys",
        ":mode_keys",
        ":model_fn",
        ":multi_class_head",
        ":multi_head",
        ":prediction_keys",
        ":sequential_head",
        "//tensorflow_estimator/python/estimator:expect_absl_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_library(
    name = "inputs",
    srcs = ["inputs/inputs.py"],
    srcs_version = "PY3",
    deps = [
        ":numpy_io",
        ":pandas_io",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "linear",
    srcs = ["canned/linear.py"],
    srcs_version = "PY3",
    deps = [
        ":binary_class_head",
        ":estimator",
        ":estimator_export",
        ":head",
        ":head_utils",
        ":optimizers",
        ":regression_head",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
        "//tensorflow_estimator/python/estimator/canned/linear_optimizer:sdca_ops_py",
    ],
)

py_library(
    name = "linear_testing_utils",
    srcs = ["canned/linear_testing_utils.py"],
    srcs_version = "PY3",
    visibility = ["//visibility:public"],
    deps = [
        ":estimator",
        ":export_export",
        ":linear",
        ":metric_keys",
        ":numpy_io",
        ":pandas_io",
        ":run_config",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_pandas_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "linear_testing_utils_v1",
    srcs = ["canned/v1/linear_testing_utils_v1.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator",
        ":export_export",
        ":linear",
        ":metric_keys",
        ":numpy_io",
        ":pandas_io",
        ":run_config",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_pandas_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "linear_estimator_test",
    size = "medium",
    srcs = ["canned/linear_estimator_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "notsan",
    ],
    deps = [
        ":export_export",
        ":linear",
        ":linear_testing_utils",
        ":numpy_io",
        ":prediction_keys",
        ":regression_head",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "linear_estimator_test_v1",
    size = "medium",
    srcs = ["canned/v1/linear_estimator_test_v1.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "notsan",
    ],
    deps = [
        ":export_export",
        ":head",
        ":linear",
        ":linear_testing_utils_v1",
        ":numpy_io",
        ":prediction_keys",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "linear_test",
    size = "medium",
    srcs = ["canned/linear_test.py"],
    python_version = "PY3",
    shard_count = 8,
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "notsan",  # b/67510291
    ],
    deps = [
        ":linear",
        ":linear_testing_utils",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

filegroup(
    name = "vocabulary_testdata",
    srcs = [
        "canned/testdata/wire_vocabulary.txt",
    ],
)

py_test(
    name = "linear_model_test",
    size = "medium",
    srcs = ["canned/linear_model_test.py"],
    data = [":vocabulary_testdata"],
    python_version = "PY3",
    shard_count = 8,
    srcs_version = "PY3",
    tags = [
        "no_cuda_on_cpu_tap",
        "no_pip",
        "no_rocm",
        "no_windows",
        "notsan",  # b/67510291
    ],
    deps = [
        ":linear",
        ":linear_testing_utils",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "linear_test_v1",
    size = "medium",
    srcs = ["canned/v1/linear_test_v1.py"],
    python_version = "PY3",
    shard_count = 8,
    srcs_version = "PY3",
    tags = [
        "no_pip",
        "notsan",  # b/67510291
    ],
    deps = [
        ":linear",
        ":linear_testing_utils_v1",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "metric_keys",
    srcs = ["canned/metric_keys.py"],
    srcs_version = "PY3",
    deps = [
        ":model_fn",
    ],
)

py_library(
    name = "numpy_io",
    srcs = ["inputs/numpy_io.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator_export",
        ":inputs_queues",
    ],
)

py_test(
    name = "numpy_io_test",
    size = "small",
    srcs = ["inputs/numpy_io_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":numpy_io",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "optimizers",
    srcs = ["canned/optimizers.py"],
    srcs_version = "PY3",
    deps = [
        ":util",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "optimizers_test",
    size = "small",
    srcs = ["canned/optimizers_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":optimizers",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "optimizers_test_v2",
    size = "small",
    srcs = ["canned/optimizers_test_v2.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":optimizers",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_test(
    name = "object_checkpointing_test",
    size = "medium",
    srcs = ["object_checkpointing_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":estimator",
        ":model_fn",
        ":optimizers",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "pandas_io",
    srcs = ["inputs/pandas_io.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator_export",
        ":inputs_queues",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_pandas_installed",
    ],
)

py_test(
    name = "pandas_io_test",
    size = "small",
    srcs = ["inputs/pandas_io_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":pandas_io",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_pandas_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "prediction_keys",
    srcs = ["canned/prediction_keys.py"],
    srcs_version = "PY3",
    visibility = ["//visibility:public"],
    deps = [],
)

py_library(
    name = "inputs_queues",
    srcs = [
        "inputs/queues/__init__.py",
        "inputs/queues/feeding_functions.py",
        "inputs/queues/feeding_queue_runner.py",
    ],
    srcs_version = "PY3",
    deps = [
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_pandas_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "feeding_functions_test",
    size = "small",
    srcs = [
        "inputs/queues/feeding_functions_test.py",
    ],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":inputs_queues",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_pandas_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "feeding_queue_runner_test",
    size = "small",
    srcs = ["inputs/queues/feeding_queue_runner_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":inputs_queues",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_pandas_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "keras",
    srcs = ["keras_lib.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator",
        ":export",
        ":mode_keys",
        ":model_fn",
        ":run_config",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "keras_test",
    size = "medium",
    srcs = ["keras_test.py"],
    python_version = "PY3",
    shard_count = 8,
    srcs_version = "PY3",
    tags = [
        "no_windows",
        "notsan",  # b/67510291
    ],
    deps = [
        ":export",
        ":keras",
        ":mode_keys",
        ":numpy_io",
        ":run_config",
        "//tensorflow_estimator/python/estimator:expect_h5py_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "keras_premade_model_test",
    size = "medium",
    srcs = ["keras_premade_model_test.py"],
    python_version = "PY3",
    shard_count = 4,
    # srcs_version added for consistency: every other py_test in this file
    # declares PY3 sources explicitly.
    srcs_version = "PY3",
    deps = [
        ":export",
        ":keras",
        ":mode_keys",
        ":numpy_io",
        ":run_config",
        "//tensorflow_estimator/python/estimator:expect_h5py_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "keras_distribute_strategy_test",
    srcs = ["keras_distribute_strategy_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    tags = ["notsan"],
    deps = [
        ":keras",
        ":run_config",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_library(
    name = "saved_model_estimator",
    srcs = ["canned/saved_model_estimator.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator",
        ":estimator_export",
        ":export",
        ":mode_keys",
        ":model_fn",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "saved_model_estimator_test",
    size = "medium",
    srcs = ["canned/saved_model_estimator_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    tags = [
        "notsan",
    ],
    deps = [
        ":estimator",
        ":export",
        ":mode_keys",
        ":model_fn",
        ":saved_model_estimator",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "basic_session_run_hooks",
    srcs = ["hooks/basic_session_run_hooks.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator_export",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "basic_session_run_hooks_test",
    size = "medium",
    srcs = ["hooks/basic_session_run_hooks_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":estimator_py",
        ":fake_summary_writer",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
    ],
)

py_library(
    name = "session_run_hook",
    srcs = ["hooks/session_run_hook.py"],
    srcs_version = "PY3",
    deps = [
        ":estimator_export",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "fake_summary_writer",
    srcs = [
        "hooks/fake_summary_writer.py",
    ],
    srcs_version = "PY3",
    deps = [
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "rnn",
    srcs = ["canned/rnn.py"],
    srcs_version = "PY3",
    deps = [
        ":binary_class_head",
        ":estimator",
        ":estimator_export",
        ":multi_class_head",
        ":optimizers",
        ":sequential_head",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_test(
    name = "rnn_test",
    size = "medium",
    srcs = ["canned/rnn_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    tags = [
        "no_oss",  # b/140934549
        "no_pip",
        "noasan",  # times out
        "notsan",
        "optonly",  # times out http://b/79220679
    ],
    deps = [
        ":export",
        ":head",
        ":metric_keys",
        ":multi_class_head",
        ":numpy_io",
        ":parsing_utils",
        ":prediction_keys",
        ":rnn",
        ":sequential_head",
        "//tensorflow_estimator/python/estimator:expect_absl_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_six_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_keras_installed",
    ],
)

py_library(
    name = "estimator_export",
    srcs = ["estimator_export.py"],
    srcs_version = "PY3",
    visibility = ["//tensorflow_estimator:internal"],
    deps = [
        ":expect_tensorflow_installed",
        ":util",
    ],
)

py_test(
    name = "estimator_export_test",
    srcs = ["estimator_export_test.py"],
    # python_version added for consistency: every other py_test in this file
    # pins PY3 explicitly.
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":estimator_export",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_library(
    name = "expect_absl_installed",
    srcs_version = "PY3",
    # This is a dummy rule used as an absl dependency in open-source.
    # We expect absl to already be installed on the system, e.g. via
    # `pip install absl-py`
    visibility = ["//visibility:public"],
)

py_library(
    name = "expect_numpy_installed",
    srcs_version = "PY3",
    # This is a dummy rule used as a numpy dependency in open-source.
    # We expect numpy to already be installed on the system, e.g. via
    # `pip install numpy`
    visibility = ["//visibility:public"],
)

py_library(
    name = "expect_pandas_installed",
    # srcs_version added for consistency: every other expect_* stub rule in
    # this file declares PY3.
    srcs_version = "PY3",
    # This is a dummy rule used as a pandas dependency in open-source.
    # We expect pandas to already be installed on the system, e.g. via
    # `pip install pandas`
    visibility = ["//visibility:public"],
)

py_library(
    name = "expect_h5py_installed",
    srcs_version = "PY3",
    # This is a dummy rule used as an h5py dependency in open-source.
    # We expect h5py to already be installed on the system, e.g. via
    # `pip install h5py`
    visibility = ["//visibility:public"],
)

py_library(
    name = "expect_six_installed",
    srcs_version = "PY3",
    # This is a dummy rule used as a six dependency in open-source.
    # We expect six to already be installed on the system, e.g. via
    # `pip install six`
    visibility = ["//visibility:public"],
)

py_library(
    name = "expect_tensorboard_installed",
    srcs_version = "PY3",
    # This is a dummy rule used as a tensorboard dependency in open-source.
    # We expect tensorboard to already be installed on the system, e.g. via
    # `pip install tensorboard`.
    visibility = ["//visibility:public"],
)

py_library(
    name = "expect_tensorflow_installed",
    srcs_version = "PY3",
    # This is a dummy rule used as a tensorflow dependency in open-source.
    # We expect tensorflow to already be installed on the system, e.g. via
    # `pip install tensorflow` or `pip install tensorflow_gpu`
    visibility = ["//visibility:public"],
)

py_library(
    name = "expect_tensorflow_keras_installed",
    srcs_version = "PY3",
    # This is a dummy rule used as a tensorflow keras dependency in
    # open-source. We expect keras to be provided by the installed tensorflow,
    # e.g. via `pip install tensorflow` or `pip install tensorflow_gpu`
    visibility = ["//visibility:public"],
)

py_library(
    name = "expect_proto_cpp_installed",
    srcs_version = "PY3",
    # This is a dummy rule used as a protobuf dependency in open-source.
    # We expect protobuf cpp python to already be installed on the system.
    visibility = ["//visibility:public"],
)

# The following targets are emulating cuda_py_test from //third_party/tensorflow:tensorflow.google.bzl
# cuda_py_test cannot be used directly because the bzl file cannot be imported into tensorflow_estimator

py_test(
    name = "distribute_strategy_estimator_integration_test",
    size = "medium",
    srcs = ["distribute_strategy_estimator_integration_test.py"],
    main = "distribute_strategy_estimator_integration_test.py",
    python_version = "PY3",
    srcs_version = "PY3",
    tags = [
        "tf_integration_test",
    ],
    deps = [
        ":estimator_py",
        "//tensorflow_estimator/python/estimator:expect_absl_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_test(
    name = "distribute_strategy_estimator_integration_test_gpu",
    size = "medium",
    srcs = ["distribute_strategy_estimator_integration_test.py"],
    main = "distribute_strategy_estimator_integration_test.py",
    python_version = "PY3",
    srcs_version = "PY3",
    tags = [
        "cuda",
        "gpu",
        "multi_and_single_gpu",
        "requires-gpu-nvidia",
        "tf_integration_test",
    ],
    deps = [
        ":estimator_py",
        "//tensorflow_estimator/python/estimator:expect_absl_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

py_test(
    name = "distribute_strategy_estimator_training_test",
    size = "medium",
    srcs = ["distribute_strategy_estimator_training_test.py"],
    main = "distribute_strategy_estimator_training_test.py",
    python_version = "PY3",
    shard_count = 48,
    srcs_version = "PY3",
    tags = [
        "no_oss",  # b/140933379
        # TODO(b/118768923): Re-enable {a,m,t}san test.
        "noasan",
        "nomsan",
        "notsan",
    ],
    deps = [
        ":estimator_py",
        # Placeholder for an internal build dep disabling tf2 behavior
        # deps sorted lexically, matching buildifier convention used elsewhere
        # in this file (e.g. the sibling integration tests).
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

# GPU variant of distribute_strategy_estimator_training_test: same sources,
# tagged so it is scheduled on CUDA-capable machines.
py_test(
    name = "distribute_strategy_estimator_training_test_gpu",
    size = "medium",
    srcs = ["distribute_strategy_estimator_training_test.py"],
    main = "distribute_strategy_estimator_training_test.py",
    python_version = "PY3",
    shard_count = 48,
    srcs_version = "PY3",
    # tags and deps kept alphabetically sorted (buildifier convention),
    # matching the other *_gpu targets in this file.
    tags = [
        "cuda",
        "gpu",
        "multi_and_single_gpu",
        # TODO(b/118768923): Re-enable {a,m,t}san test.
        "noasan",
        "nomsan",
        "notsan",
        "requires-gpu-nvidia",
    ],
    deps = [
        ":estimator_py",
        # Placeholder for an internal build dep disabling tf2 behavior
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

# CPU variant of the canned-estimator DistributionStrategy integration test.
# `main` is set explicitly because the source lives under canned/, matching
# the *_gpu twin below.
py_test(
    name = "canned_estimator_ds_integration_test",
    size = "medium",
    srcs = ["canned/canned_estimator_ds_integration_test.py"],
    main = "canned/canned_estimator_ds_integration_test.py",
    python_version = "PY3",
    srcs_version = "PY3",
    tags = [
        "notap",  # TODO(b/161835009): Re-enable.
    ],
    deps = [
        ":estimator_py",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)

# GPU variant of canned_estimator_ds_integration_test: same sources, tagged
# so it is scheduled on CUDA-capable machines.
py_test(
    name = "canned_estimator_ds_integration_test_gpu",
    size = "medium",
    srcs = ["canned/canned_estimator_ds_integration_test.py"],
    main = "canned/canned_estimator_ds_integration_test.py",
    python_version = "PY3",
    srcs_version = "PY3",
    tags = [
        "cuda",
        "gpu",
        "multi_and_single_gpu",
        "requires-gpu-nvidia",
        "tf_integration_test",
    ],
    deps = [
        ":estimator_py",
        "//tensorflow_estimator/python/estimator:expect_absl_installed",
        "//tensorflow_estimator/python/estimator:expect_numpy_installed",
        "//tensorflow_estimator/python/estimator:expect_proto_cpp_installed",
        "//tensorflow_estimator/python/estimator:expect_tensorflow_installed",
    ],
)


================================================
FILE: tensorflow_estimator/python/estimator/api/BUILD
================================================
# Placeholder: load aliased py_binary
load("//tensorflow_estimator/python/estimator/api:api_gen.bzl", "ESTIMATOR_API_INIT_FILES_V1", "ESTIMATOR_API_INIT_FILES_V2", "generate_apis")

package(default_visibility = ["//tensorflow_estimator:internal"])

licenses(["notice"])

# This flag specifies whether Estimator 2.0 API should be built instead
# of 1.* API. Note that Estimator 2.0 API is currently under development.
# Matched when Bazel is invoked with --define=estimator_api_version=2.
config_setting(
    name = "api_version_2",
    define_values = {"estimator_api_version": "2"},
)

# Thin CLI wrapper around TF's API extractor; invoked by the api_extractor
# aspect in api_gen.bzl (_extractor_bin).
py_binary(
    name = "extractor_wrapper",
    srcs = ["extractor_wrapper.py"],
    visibility = ["//visibility:public"],
    deps = [
        "//tensorflow_estimator/python/estimator:expect_absl_installed",  # absl:app
    ],
)

# Thin CLI wrapper around TF's API generator; invoked by the generate_api
# rule in api_gen.bzl (_generator_bin).
py_binary(
    name = "generator_wrapper",
    srcs = ["generator_wrapper.py"],
    visibility = ["//visibility:public"],
    deps = [
        "//tensorflow_estimator/python/estimator:expect_absl_installed",  # absl:app
    ],
)

# Copies either the generated v2 or v1 root API file to __init__.py,
# depending on the api_version_2 config_setting above.
genrule(
    name = "estimator_python_api_gen",
    srcs = select({
        # Canonical same-package label form (":name") for select keys.
        ":api_version_2": ["_v2/v2.py"],
        "//conditions:default": ["_v1/v1.py"],
    }),
    outs = ["__init__.py"],
    cmd = select({
        ":api_version_2": "cp $(location :_v2/v2.py) $(OUTS)",
        "//conditions:default": "cp $(location :_v1/v1.py) $(OUTS)",
    }),
)

# Generates the v1 estimator API __init__ files under _v1/ (root: v1.py).
generate_apis(
    name = "estimator_python_api_gen_compat_v1",
    api_version = 1,
    output_dir = "_v1/",
    output_files = ESTIMATOR_API_INIT_FILES_V1,
    output_package = "tensorflow_estimator.python.estimator.api._v1",
    root_file_name = "v1.py",
    visibility = ["//visibility:public"],
)

# Generates the v2 estimator API __init__ files under _v2/ (root: v2.py).
generate_apis(
    name = "estimator_python_api_gen_compat_v2",
    api_version = 2,
    output_dir = "_v2/",
    output_files = ESTIMATOR_API_INIT_FILES_V2,
    output_package = "tensorflow_estimator.python.estimator.api._v2",
    root_file_name = "v2.py",
    visibility = ["//visibility:public"],
)


================================================
FILE: tensorflow_estimator/python/estimator/api/api_gen.bzl
================================================
"""Targets for generating TensorFlow Estimator Python API __init__.py files.

This bzl file is copied with slight modifications from
tensorflow/python/tools/api/generator2/generate_api.bzl
so that we can avoid needing to depend on TF source code in Bazel build.

It should be noted that because this file is executed during the build,
and it imports TensorFlow code, installing the TensorFlow Python package
is required in order to build Estimator with Bazel.
"""

# Placeholder: load PyInfo
load("//tensorflow_estimator:estimator.bzl", "if_indexing_source_code")

# Only targets whose labels start with one of these prefixes have their API
# extracted (i.e. everything under //tensorflow_estimator).
_TARGET_PATTERNS = [
    "//tensorflow_estimator:",
    "//tensorflow_estimator/",
]

# Fully-qualified name of the decorator that marks exported API symbols.
_DECORATOR = "tensorflow_estimator.python.estimator.estimator_export.estimator_export"

# Prefix prepended to generated module names; empty for this repository.
_MODULE_PREFIX = ""

# __init__.py files generated for the v1 API surface.
ESTIMATOR_API_INIT_FILES_V1 = [
    "__init__.py",
    "estimator/__init__.py",
    "estimator/experimental/__init__.py",
    "estimator/export/__init__.py",
    "estimator/inputs/__init__.py",
    "estimator/tpu/__init__.py",
    "estimator/tpu/experimental/__init__.py",
]

# __init__.py files generated for the v2 API surface (no tpu/ modules).
ESTIMATOR_API_INIT_FILES_V2 = [
    "__init__.py",
    "estimator/__init__.py",
    "estimator/experimental/__init__.py",
    "estimator/export/__init__.py",
    "estimator/inputs/__init__.py",
]

def _any_match(label):
    """Returns True if `label` falls under one of _TARGET_PATTERNS."""
    full_target = "//{}:{}".format(label.package, label.name)
    return any([pattern in full_target for pattern in _TARGET_PATTERNS])

def _join(path, *others):
    """Joins path components with "/", like a minimal os.path.join."""
    out = path
    for part in others:
        # Insert a separator only when the accumulator is non-empty and
        # does not already end with one.
        if out and not out.endswith("/"):
            out += "/"
        out += part
    return out

def _api_info_init(*, transitive_api):
    # Provider init hook: validates that transitive_api is a depset before
    # the ApiInfo instance is constructed.
    if type(transitive_api) != type(depset()):
        fail("ApiInfo.transitive_api must be a depset")
    return {"transitive_api": transitive_api}

# Carries the extracted-API JSON files through the dependency graph.
ApiInfo, _new_api_info = provider(
    doc = "Provider for API symbols and docstrings extracted from Python files.",
    fields = {
        "transitive_api": "depset of files with extracted API.",
    },
    init = _api_info_init,
)

def _py_files(f):
    """Maps a file to its path if it is a Python source (.py/.py3), else None.

    Used as `map_each` for Args.add_all; returning None drops the file.
    """
    is_py = f.basename.endswith(".py") or f.basename.endswith(".py3")
    return f.path if is_py else None

def _merge_py_info(
        deps,
        direct_sources = None,
        direct_imports = None,
        has_py2_only_sources = False,
        has_py3_only_sources = False,
        uses_shared_libraries = False):
    """Folds the PyInfo providers of `deps` into one merged PyInfo."""
    src_depsets = []
    import_depsets = []
    for dep in deps:
        if PyInfo not in dep:
            continue
        info = dep[PyInfo]
        src_depsets.append(info.transitive_sources)
        import_depsets.append(info.imports)
        # Boolean flags are OR-ed across all dependencies.
        has_py2_only_sources = has_py2_only_sources or info.has_py2_only_sources
        has_py3_only_sources = has_py3_only_sources or info.has_py3_only_sources
        uses_shared_libraries = uses_shared_libraries or info.uses_shared_libraries

    return PyInfo(
        transitive_sources = depset(direct = direct_sources, transitive = src_depsets),
        imports = depset(direct = direct_imports, transitive = import_depsets),
        has_py2_only_sources = has_py2_only_sources,
        has_py3_only_sources = has_py3_only_sources,
        uses_shared_libraries = uses_shared_libraries,
    )

def _merge_api_info(
        deps,
        direct_api = None):
    """Combines the ApiInfo providers of `deps` (plus optional direct files)."""
    collected = [dep[ApiInfo].transitive_api for dep in deps if ApiInfo in dep]
    return ApiInfo(transitive_api = depset(direct = direct_api, transitive = collected))

def _api_extractor_impl(target, ctx):
    """Aspect implementation: extracts the exported API of one target.

    For any target under //tensorflow_estimator with a non-empty `srcs`
    attribute, runs the extractor binary over its Python sources to produce
    a JSON file of exported symbols, then merges it with the ApiInfo
    collected from the target's deps.
    """
    direct_api = []

    # Make sure the rule has a non-empty srcs attribute.
    if (
        _any_match(target.label) and
        hasattr(ctx.rule.attr, "srcs") and
        ctx.rule.attr.srcs
    ):
        # One JSON output per target: "<name>_extracted_..._api.json".
        output = ctx.actions.declare_file("_".join([
            target.label.name,
            "extracted_tensorflow_estimator_api.json",
        ]))

        args = ctx.actions.args()
        # Long source lists spill into a params file read via --flagfile.
        args.set_param_file_format("multiline")
        args.use_param_file("--flagfile=%s")

        args.add("--output", output)
        args.add("--decorator", _DECORATOR)
        args.add("--api_name", "tensorflow_estimator")
        # _py_files drops everything that is not a .py/.py3 source.
        args.add_all(ctx.rule.files.srcs, expand_directories = True, map_each = _py_files)

        ctx.actions.run(
            mnemonic = "ExtractAPI",
            executable = ctx.executable._extractor_bin,
            inputs = ctx.rule.files.srcs,
            outputs = [output],
            arguments = [args],
            progress_message = "Extracting tensorflow_estimator APIs for %{label} to %{output}.",
        )

        direct_api.append(output)

    return [
        _merge_api_info(ctx.rule.attr.deps if hasattr(ctx.rule.attr, "deps") else [], direct_api = direct_api),
    ]

# Aspect that walks `deps` edges and, for each matching target, runs the
# extractor binary over its Python sources (see _api_extractor_impl).
api_extractor = aspect(
    doc = "Extracts the exported API for the given target and its dependencies.",
    implementation = _api_extractor_impl,
    attr_aspects = ["deps"],
    provides = [ApiInfo],
    # Currently the Python rules do not correctly advertise their providers.
    # required_providers = [PyInfo],
    attrs = {
        "_extractor_bin": attr.label(
            default = Label("//tensorflow_estimator/python/estimator/api:extractor_wrapper"),
            executable = True,
            cfg = "exec",
        ),
    },
)

def _extract_api_impl(ctx):
    # Merge both the extracted-API files and the standard Python providers
    # from all deps, so downstream rules see one ApiInfo and one PyInfo.
    return [
        _merge_api_info(ctx.attr.deps),
        _merge_py_info(ctx.attr.deps),
    ]

extract_api = rule(
    doc = "Extract Python API for all targets in transitive dependencies.",
    implementation = _extract_api_impl,
    attrs = {
        "deps": attr.label_list(
            doc = "Targets to extract API from.",
            allow_empty = False,
            # The aspect produces the per-target extracted-API JSON files.
            aspects = [api_extractor],
            providers = [PyInfo],
            mandatory = True,
        ),
    },
    provides = [ApiInfo, PyInfo],
)

def _generate_api_impl(ctx):
    """Rule implementation: runs the generator binary over extracted APIs.

    Assembles the generator's command line from the rule attributes, feeds
    it every extracted-API file collected from `deps`, and declares the
    generated __init__.py files as outputs.
    """
    args = ctx.actions.args()
    # Long argument lists spill into a params file read via --flagfile.
    args.set_param_file_format("multiline")
    args.use_param_file("--flagfile=%s")

    args.add_joined("--output_files", ctx.outputs.output_files, join_with = ",")
    args.add("--output_dir", _join(ctx.bin_dir.path, ctx.label.package, ctx.attr.output_dir))
    if ctx.file.root_init_template:
        args.add("--root_init_template", ctx.file.root_init_template)
    args.add("--apiversion", ctx.attr.api_version)
    args.add_joined("--compat_api_versions", ctx.attr.compat_api_versions, join_with = ",")
    args.add_joined("--compat_init_templates", ctx.files.compat_init_templates, join_with = ",")
    args.add("--output_package", ctx.attr.output_package)
    args.add_joined("--packages_to_ignore", ctx.attr.packages_to_ignore, join_with = ",")
    if _MODULE_PREFIX:
        args.add("--module_prefix", _MODULE_PREFIX)
    if ctx.attr.use_lazy_loading:
        args.add("--use_lazy_loading")
    else:
        args.add("--nouse_lazy_loading")
    if ctx.attr.proxy_module_root:
        args.add("--proxy_module_root", ctx.attr.proxy_module_root)
    args.add_joined("--file_prefixes_to_strip", [ctx.bin_dir.path, ctx.genfiles_dir.path], join_with = ",")
    if ctx.attr.root_file_name:
        args.add("--root_file_name", ctx.attr.root_file_name)

    # All extracted-API JSON files from the transitive closure of deps;
    # passed as positional arguments to the generator.
    inputs = depset(transitive = [
        dep[ApiInfo].transitive_api
        for dep in ctx.attr.deps
    ])
    args.add_all(
        inputs,
        expand_directories = True,
    )

    transitive_inputs = [inputs]
    if ctx.attr.root_init_template:
        transitive_inputs.append(ctx.attr.root_init_template.files)

    ctx.actions.run(
        mnemonic = "GenerateAPI",
        executable = ctx.executable._generator_bin,
        inputs = depset(
            direct = ctx.files.compat_init_templates,
            transitive = transitive_inputs,
        ),
        outputs = ctx.outputs.output_files,
        arguments = [args],
        progress_message = "Generating APIs for %{label} to %{output}.",
    )

# Rule wrapping _generate_api_impl; normally instantiated via the
# generate_apis() macro below rather than used directly.
generate_api = rule(
    doc = "Generate Python API for all targets in transitive dependencies.",
    implementation = _generate_api_impl,
    attrs = {
        "deps": attr.label_list(
            doc = "extract_api targets to generate API from.",
            allow_empty = True,
            providers = [ApiInfo, PyInfo],
            mandatory = True,
        ),
        "root_init_template": attr.label(
            doc = "Template for the top level __init__.py file",
            allow_single_file = True,
        ),
        "api_version": attr.int(
            doc = "The API version to generate (1 or 2)",
            values = [1, 2],
        ),
        "compat_api_versions": attr.int_list(
            doc = "Additional versions to generate in compat/ subdirectory.",
        ),
        "compat_init_templates": attr.label_list(
            doc = "Template for top-level __init__files under compat modules. This list must be " +
                  "in the same order as the list of versions in compat_api_versions",
            allow_files = True,
        ),
        "output_package": attr.string(
            doc = "Root output package.",
        ),
        "output_dir": attr.string(
            doc = "Subdirectory to output API to. If non-empty, must end with '/'.",
        ),
        "proxy_module_root": attr.string(
            doc = "Module root for proxy-import format. If specified, proxy files with " +
                  "`from proxy_module_root.proxy_module import *` will be created to enable " +
                  "import resolution under TensorFlow.",
        ),
        "output_files": attr.output_list(
            doc = "List of __init__.py files that should be generated. This list should include " +
                  "file name for every module exported using tf_export. For e.g. if an op is " +
                  "decorated with @tf_export('module1.module2', 'module3'). Then, output_files " +
                  "should include module1/module2/__init__.py and module3/__init__.py.",
        ),
        "use_lazy_loading": attr.bool(
            doc = "If true, lazy load imports in the generated API rather than importing them all statically.",
        ),
        "packages_to_ignore": attr.string_list(
            doc = "List of packages to ignore tf_exports from.",
        ),
        "root_file_name": attr.string(
            doc = "The file name that should be generated for the top level API.",
        ),
        "_generator_bin": attr.label(
            default = Label("//tensorflow_estimator/python/estimator/api:generator_wrapper"),
            executable = True,
            cfg = "exec",
        ),
    },
)

def generate_apis(
        name,
        deps = [
            "//tensorflow_estimator/python/estimator:estimator_py",
            # "//third_party/tensorflow/lite/python:analyzer",
            # "//third_party/tensorflow/lite/python:lite",
            # "//third_party/tensorflow/lite/python/authoring",
        ],
        output_files = ESTIMATOR_API_INIT_FILES_V2,
        root_init_template = None,
        api_version = 2,
        compat_api_versions = [],
        compat_init_templates = [],
        output_package = "tensorflow_estimator.python.estimator.api",
        output_dir = "",
        proxy_module_root = None,
        packages_to_ignore = [],
        root_file_name = "__init__.py",
        visibility = ["//visibility:private"]):
    """Generate TensorFlow APIs for a set of libraries.

    Creates an extract_api target ("<name>.extract-tensorflow-estimator")
    followed by a generate_api target named `name`.

    Args:
        name: name of generate_api target.
        deps: python_library targets to serve as roots for extracting APIs.
        output_files: The list of files that the API generator is expected to create.
        root_init_template: The template for the top level __init__.py file generated.
            "#API IMPORTS PLACEHOLDER" comment will be replaced with imports.
        api_version: The API version to generate (1 or 2).
        compat_api_versions: Additional versions to generate in compat/ subdirectory.
        compat_init_templates: Template for top level __init__.py files under the compat modules.
            The list must be in the same order as the list of versions in 'compat_api_versions'
        output_package: Root output package.
        output_dir: Directory where the generated output files are placed. This should be a prefix
            of every directory in 'output_files'
        proxy_module_root: Module root for proxy-import format. If specified, proxy files with
            `from proxy_module_root.proxy_module import *` will be created to enable import
            resolution under TensorFlow.
        packages_to_ignore: List of packages to ignore tf_exports from.
        root_file_name: The file name that should be generated for the top level API.
        visibility: Visibility of the target containing the generated files.
    """
    extract_name = name + ".extract-tensorflow-estimator"
    extract_api(
        name = extract_name,
        deps = deps,
        visibility = ["//visibility:private"],
    )

    if proxy_module_root != None:
        # Avoid conflicts between the __init__.py file of TensorFlow and proxy module.
        output_files = [f for f in output_files if f != "__init__.py"]

    if root_file_name != None:
        # The top-level __init__.py is emitted under the custom root name.
        output_files = [f if f != "__init__.py" else root_file_name for f in output_files]

    all_output_files = [_join(output_dir, f) for f in output_files]

    generate_api(
        name = name,
        deps = [":" + extract_name],
        output_files = all_output_files,
        output_dir = output_dir,
        root_init_template = root_init_template,
        compat_api_versions = compat_api_versions,
        compat_init_templates = compat_init_templates,
        api_version = api_version,
        proxy_module_root = proxy_module_root,
        visibility = visibility,
        packages_to_ignore = packages_to_ignore,
        use_lazy_loading = False,
        output_package = output_package,
        root_file_name = root_file_name,
    )


================================================
FILE: tensorflow_estimator/python/estimator/api/extractor_wrapper.py
================================================
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Thin wrapper to call TensorFlow's API extractor script."""
from absl import app

from tensorflow.python.tools.api.generator2.extractor import extractor

# Delegate directly to TF's extractor CLI entry point when run as a binary.
if __name__ == "__main__":
  app.run(extractor.main)


================================================
FILE: tensorflow_estimator/python/estimator/api/generator_wrapper.py
================================================
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Thin wrapper to call TensorFlow's API generator script."""
from absl import app
from tensorflow.python.tools.api.generator2.generator import generator

# Delegate directly to TF's generator CLI entry point when run as a binary.
if __name__ == "__main__":
  app.run(generator.main)


================================================
FILE: tensorflow_estimator/python/estimator/canned/__init__.py
================================================


================================================
FILE: tensorflow_estimator/python/estimator/canned/baseline.py
================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Baseline estimators.

Baseline estimators are bias-only estimators that can be used for debugging
and as simple baselines.

Example:

```
# Build BaselineClassifier
classifier = BaselineClassifier(n_classes=3)

# Input builders
def input_fn_train():
  # Returns tf.data.Dataset of (x, y) tuple where y represents label's class
  # index.
  pass

def input_fn_eval():
  # Returns tf.data.Dataset of (x, y) tuple where y represents label's class
  # index.
  pass

# Fit model.
classifier.train(input_fn=input_fn_train)

# Evaluate cross entropy between the test and train labels.
loss = classifier.evaluate(input_fn=input_fn_eval)["loss"]

# predict outputs the probability distribution of the classes as seen in
# training.
predictions = classifier.predict(new_samples)
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import six
import tensorflow as tf
from tensorflow.python.feature_column import feature_column as feature_column_v1
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.framework import ops
from tensorflow_estimator.python.estimator import estimator
from tensorflow_estimator.python.estimator.canned import head as head_lib
from tensorflow_estimator.python.estimator.canned import optimizers
from tensorflow_estimator.python.estimator.estimator_export import estimator_export
from tensorflow_estimator.python.estimator.head import head_utils
from tensorflow_estimator.python.estimator.head import regression_head
from tensorflow_estimator.python.estimator.mode_keys import ModeKeys

# The default learning rate of 0.3 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.3


def _get_weight_column_key(weight_column):
  if weight_column is None:
    return None
  if isinstance(weight_column, six.string_types):
    return weight_column
  if not isinstance(weight_column, feature_column_v1._NumericColumn):  # pylint: disable=protected-access
    raise TypeError('Weight column must be either a string or _NumericColumn.'
                    ' Given type: {}.'.format(type(weight_column)))
  return weight_column.key()


def _get_weight_column_key_v2(weight_column):
  if weight_column is None:
    return None
  if isinstance(weight_column, six.string_types):
    return weight_column
  if not isinstance(weight_column, feature_column_v2.NumericColumn):
    raise TypeError('Weight column must be either a string or NumericColumn. '
                    'Given type: {}.'.format(type(weight_column)))
  return weight_column.key()


def _get_batch_size_and_size_checks(features, weight_column_key):
  """Computes the batch size and consistency assertions for `features`.

  Args:
    features: Dict of feature name to `Tensor`.
    weight_column_key: Key of the weight column to skip, or `None`.

  Returns:
    A tuple `(size_checks, batch_size)`: `size_checks` is a list of assert
    ops checking every (non-weight) feature shares the same first dimension,
    and `batch_size` is the first dimension of the first such feature
    (`None` if there are no applicable features).
  """
  size_checks = []
  batch_size = None

  # The first dimension is assumed to be a batch size and must be consistent
  # among all of the features.
  for key, feature in features.items():
    # Skip weight_column to ensure we don't add size checks to it.
    # These would introduce a dependency on the weight at serving time.
    if key == weight_column_key:
      continue
    first_dim = tf.compat.v1.shape(feature)[0]
    if batch_size is None:
      batch_size = first_dim
    else:
      size_checks.append(
          tf.compat.v1.debugging.assert_equal(batch_size, first_dim))

  return size_checks, batch_size


def _baseline_logit_fn_builder(num_outputs, weight_column=None):
  """Function builder for a baseline logit_fn.

  Args:
    num_outputs: Number of outputs for the model.
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It will be multiplied by the loss of the example.

  Returns:
    A logit_fn (see below).
  """

  def baseline_logit_fn(features):
    """Baseline model logit_fn.

    The baseline model simply learns a bias, so the output logits are a
    `Variable` with one weight for each output that learns the bias for the
    corresponding output.

    Args:
      features: The first item returned from the `input_fn` passed to `train`,
        `evaluate`, and `predict`. This should be a single `Tensor` or dict with
        `Tensor` values.

    Returns:
      A `Tensor` representing the logits.
    """
    weight_column_key = _get_weight_column_key(weight_column)
    size_checks, batch_size = _get_batch_size_and_size_checks(
        features, weight_column_key)
    # Attach the batch-size consistency asserts so they execute whenever the
    # logits are computed.
    with tf.control_dependencies(size_checks):
      with tf.compat.v1.variable_scope('baseline'):
        # A single bias vector (one scalar per output) is the entire model.
        bias = tf.compat.v1.get_variable(
            'bias',
            shape=[num_outputs],
            initializer=tf.compat.v1.initializers.zeros)
        # Broadcast the bias across the batch: shape [batch_size, num_outputs].
        return tf.math.multiply(bias, tf.ones([batch_size, num_outputs]))

  return baseline_logit_fn


def _baseline_model_fn(features,
                       labels,
                       mode,
                       head,
                       optimizer,
                       weight_column=None,
                       config=None):
  """Model_fn for baseline models.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `train`).
    labels: `Tensor` of labels that are compatible with the `Head` instance.
    mode: Defines whether this is training, evaluation or prediction. See
      `ModeKeys`.
    head: A `Head` instance.
    optimizer: String, `tf.Optimizer` object, or callable that creates the
      optimizer to use for training. If not specified, will use `FtrlOptimizer`
      with a default learning rate of 0.3.
    weight_column: A string or a `_NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It will be multiplied by the loss of the example.
    config: `RunConfig` object to configure the runtime settings.

  Raises:
    KeyError: If weight column is specified but not present.
    ValueError: If features is an empty dictionary.

  Returns:
    An `EstimatorSpec` instance.
  """
  del config  # Unused.

  # The logits are just a learned per-output bias, broadcast over the batch.
  logit_fn = _baseline_logit_fn_builder(head.logits_dimension, weight_column)
  logits = logit_fn(features)

  def train_op_fn(loss):
    # The optimizer is constructed lazily here, so it is only created when
    # the head actually needs a train op.
    opt = optimizers.get_optimizer_instance(
        optimizer, learning_rate=_LEARNING_RATE)
    return opt.minimize(loss, global_step=tf.compat.v1.train.get_global_step())

  return head.create_estimator_spec(
      features=features,
      mode=mode,
      logits=logits,
      labels=labels,
      train_op_fn=train_op_fn)


def _baseline_model_fn_builder_v2(features, num_outputs, weight_column=None):
  """Function builder for a baseline logit_fn.

  Args:
    features: The first item returned from the `input_fn` passed to `train`,
      `evaluate`, and `predict`. This should be a single `Tensor` or dict with
      `Tensor` values.
    num_outputs: Number of outputs for the model.
    weight_column: A string or a `NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It will be multiplied by the loss of the example.

  Returns:
    A list of trainable variables and a `Tensor` representing the logits.
  """
  weight_column_key = _get_weight_column_key_v2(weight_column)
  size_checks, batch_size = _get_batch_size_and_size_checks(
      features, weight_column_key)
  # Attach the batch-size consistency asserts so they execute whenever the
  # logits are computed.
  with tf.control_dependencies(size_checks):
    with ops.name_scope('baseline'):
      # A single bias vector (one scalar per output) is the entire model.
      bias = tf.Variable(initial_value=tf.zeros([num_outputs]), name='bias')
      # Broadcast the bias across the batch: shape [batch_size, num_outputs].
      logits = tf.math.multiply(bias, tf.ones([batch_size, num_outputs]))
  return [bias], logits


def _baseline_model_fn_v2(
    features,
    labels,
    mode,
    head,
    optimizer,
    weight_column=None,
    config=None,
    loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE):
  """Model_fn for baseline models.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `train`).
    labels: `Tensor` of labels that are compatible with the `Head` instance.
    mode: Defines whether this is training, evaluation or prediction. See
      `ModeKeys`.
    head: A `Head` instance.
    optimizer: String, `tf.Optimizer` object, or callable that creates the
      optimizer to use for training. If not specified, will use `FtrlOptimizer`
      with a default learning rate of 0.3.
    weight_column: A string or a `NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It will be multiplied by the loss of the example.
    config: `RunConfig` object to configure the runtime settings.
    loss_reduction: One of `tf_keras.losses.Reduction` except `NONE`. Describes
      how to reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`.

  Raises:
    KeyError: If weight column is specified but not present.
    ValueError: If features is an empty dictionary.

  Returns:
    An `EstimatorSpec` instance.
  """
  del config  # Unused.

  trainable_variables, logits = _baseline_model_fn_builder_v2(
      features, head.logits_dimension, weight_column)

  # In TRAIN mode, create optimizer and assign global_step variable to
  # optimizer.iterations to make global_step increased correctly, as Hooks
  # relies on global step as step counter.
  if mode == ModeKeys.TRAIN:
    opt = optimizers.get_optimizer_instance_v2(
        optimizer, learning_rate=_LEARNING_RATE)
    opt.iterations = tf.compat.v1.train.get_or_create_global_step()

  def train_op_fn(loss):
    # NOTE(review): `opt` is only defined when mode == TRAIN above; this
    # closure is presumably only invoked by the head in TRAIN mode — confirm.
    # Scale loss by number of replicas.
    if loss_reduction == tf.losses.Reduction.SUM_OVER_BATCH_SIZE:
      num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
      if num_replicas > 1:
        loss *= (1. / num_replicas)
    return opt.get_updates(loss, trainable_variables)[0]

  return head.create_estimator_spec(
      features=features,
      mode=mode,
      logits=logits,
      labels=labels,
      train_op_fn=train_op_fn)


@estimator_export('estimator.BaselineClassifier', v1=[])
class BaselineClassifierV2(estimator.EstimatorV2):
  """A classifier that can establish a simple baseline.

  This classifier ignores feature values and will learn to predict the average
  value of each label. For single-label problems, this will predict the
  probability distribution of the classes as seen in the labels. For multi-label
  problems, this will predict the fraction of examples that are positive for
  each class.

  Example:

  ```python

  # Build BaselineClassifier
  classifier = tf.estimator.BaselineClassifier(n_classes=3)

  # Input builders
  def input_fn_train:
    # Returns tf.data.Dataset of (x, y) tuple where y represents label's class
    # index.
    pass

  def input_fn_eval:
    # Returns tf.data.Dataset of (x, y) tuple where y represents label's class
    # index.
    pass

  # Fit model.
  classifier.train(input_fn=input_fn_train)

  # Evaluate cross entropy between the test and train labels.
  loss = classifier.evaluate(input_fn=input_fn_eval)["loss"]

  # predict outputs the probability distribution of the classes as seen in
  # training.
  predictions = classifier.predict(new_samples)

  ```

  Input of `train` and `evaluate` should have following features,
    otherwise there will be a `KeyError`:

  * if `weight_column` is not `None`, a feature with
     `key=weight_column` whose value is a `Tensor`.

  @compatibility(eager)
  Estimators can be used while eager execution is enabled. Note that `input_fn`
  and all hooks are executed inside a graph context, so they have to be written
  to be compatible with graph mode. Note that `input_fn` code using `tf.data`
  generally works in both graph and eager modes.
  @end_compatibility
  """

  def __init__(self,
               model_dir=None,
               n_classes=2,
               weight_column=None,
               label_vocabulary=None,
               optimizer='Ftrl',
               config=None,
               loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE):
    """Initializes a BaselineClassifier instance.

    Args:
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      n_classes: number of label classes. Default is binary classification.
        It must be greater than 1. Note: Class labels are integers representing
          the class index (i.e. values from 0 to n_classes-1). For arbitrary
          label values (e.g. string labels), convert to class indices first.
      weight_column: A string or a `NumericColumn` created by
        `tf.feature_column.numeric_column` defining feature column representing
        weights. It will be multiplied by the loss of the example.
      label_vocabulary: Optional list of strings with size `[n_classes]`
        defining the label vocabulary. Only supported for `n_classes` > 2.
      optimizer: String, `tf_keras.optimizers.*` object, or callable that
        creates the optimizer to use for training. If not specified, will use
        `Ftrl` as the default optimizer.
      config: `RunConfig` object to configure the runtime settings.
      loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
        to reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`.

    Returns:
      A `BaselineClassifier` estimator.

    Raises:
      ValueError: If `n_classes` < 2.
    """
    # Binary vs. multi-class classification is selected from `n_classes`; the
    # head computes loss, predictions and eval metrics for that task.
    head = head_utils.binary_or_multi_class_head(
        n_classes,
        weight_column=weight_column,
        label_vocabulary=label_vocabulary,
        loss_reduction=loss_reduction)

    def _model_fn(features, labels, mode, config):
      # The baseline model ignores feature values and only fits a bias term;
      # the heavy lifting is delegated to the shared v2 model_fn.
      return _baseline_model_fn_v2(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          optimizer=optimizer,
          weight_column=weight_column,
          config=config,
          loss_reduction=loss_reduction)

    super(BaselineClassifierV2, self).__init__(
        model_fn=_model_fn, model_dir=model_dir, config=config)


@estimator_export(v1=['estimator.BaselineClassifier'])  # pylint: disable=missing-docstring
class BaselineClassifier(estimator.Estimator):
  __doc__ = BaselineClassifierV2.__doc__.replace('SUM_OVER_BATCH_SIZE', 'SUM')

  def __init__(self,
               model_dir=None,
               n_classes=2,
               weight_column=None,
               label_vocabulary=None,
               optimizer='Ftrl',
               config=None,
               loss_reduction=tf.compat.v1.losses.Reduction.SUM):
    """Builds a v1 baseline classifier; see the class docstring for details."""
    # Pick a binary-logistic or multi-class head based on `n_classes`.
    classification_head = head_lib._binary_logistic_or_multi_class_head(  # pylint: disable=protected-access
        n_classes, weight_column, label_vocabulary, loss_reduction)

    def _classifier_model_fn(features, labels, mode, config):
      # Baseline models ignore feature values and learn only a bias.
      return _baseline_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=classification_head,
          optimizer=optimizer,
          weight_column=weight_column,
          config=config)

    super(BaselineClassifier, self).__init__(
        model_fn=_classifier_model_fn, model_dir=model_dir, config=config)


@estimator_export('estimator.BaselineEstimator', v1=[])
class BaselineEstimatorV2(estimator.EstimatorV2):
  """An estimator that can establish a simple baseline.

  The estimator uses a user-specified head.

  This estimator ignores feature values and will learn to predict the average
  value of each label. E.g. for single-label classification problems, this will
  predict the probability distribution of the classes as seen in the labels.
  For multi-label classification problems, it will predict the ratio of examples
  that contain each class.

  Example:

  ```python

  # Build baseline multi-label classifier.
  estimator = tf.estimator.BaselineEstimator(
      head=tf.estimator.MultiLabelHead(n_classes=3))

  # Input builders
  def input_fn_train:
    # Returns tf.data.Dataset of (x, y) tuple where y represents label's class
    # index.
    pass

  def input_fn_eval:
    # Returns tf.data.Dataset of (x, y) tuple where y represents label's class
    # index.
    pass

  # Fit model.
  estimator.train(input_fn=input_fn_train)

  # Evaluates cross entropy between the test and train labels.
  loss = estimator.evaluate(input_fn=input_fn_eval)["loss"]

  # For each class, predicts the ratio of training examples that contain the
  # class.
  predictions = estimator.predict(new_samples)

  ```

  Input of `train` and `evaluate` should have following features,
    otherwise there will be a `KeyError`:

  * if `weight_column` is specified in the `head` constructor (and not None) for
    the head passed to BaselineEstimator's constructor, a feature with
    `key=weight_column` whose value is a `Tensor`.
  """

  def __init__(self, head, model_dir=None, optimizer='Ftrl', config=None):
    """Initializes a BaselineEstimator instance.

    Args:
      head: A `Head` instance constructed with a method such as
        `tf.estimator.MultiLabelHead`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      optimizer: String, `tf_keras.optimizers.*` object, or callable that
        creates the optimizer to use for training. If not specified, will use
        `Ftrl` as the default optimizer.
      config: `RunConfig` object to configure the runtime settings.
    """

    def _model_fn(features, labels, mode, config):
      # The baseline model ignores feature values and only fits a bias term;
      # loss/metrics/predictions come from the user-supplied `head`.
      return _baseline_model_fn_v2(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          optimizer=optimizer,
          config=config)

    super(BaselineEstimatorV2, self).__init__(
        model_fn=_model_fn, model_dir=model_dir, config=config)


@estimator_export(v1=['estimator.BaselineEstimator'])  # pylint: disable=missing-docstring
class BaselineEstimator(estimator.Estimator):
  __doc__ = BaselineEstimatorV2.__doc__

  def __init__(self, head, model_dir=None, optimizer='Ftrl', config=None):
    """Builds a v1 baseline estimator around the user-supplied `head`."""

    def _estimator_model_fn(features, labels, mode, config):
      # Baseline models ignore feature values and learn only a bias.
      return _baseline_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          optimizer=optimizer,
          config=config)

    super(BaselineEstimator, self).__init__(
        model_fn=_estimator_model_fn, model_dir=model_dir, config=config)


@estimator_export('estimator.BaselineRegressor', v1=[])
class BaselineRegressorV2(estimator.EstimatorV2):
  """A regressor that can establish a simple baseline.

  This regressor ignores feature values and will learn to predict the average
  value of each label.

  Example:

  ```python

  # Build BaselineRegressor
  regressor = tf.estimator.BaselineRegressor()

  # Input builders
  def input_fn_train:
    # Returns tf.data.Dataset of (x, y) tuple where y represents label's class
    # index.
    pass

  def input_fn_eval:
    # Returns tf.data.Dataset of (x, y) tuple where y represents label's class
    # index.
    pass

  # Fit model.
  regressor.train(input_fn=input_fn_train)

  # Evaluate squared-loss between the test and train targets.
  loss = regressor.evaluate(input_fn=input_fn_eval)["loss"]

  # predict outputs the mean value seen during training.
  predictions = regressor.predict(new_samples)
  ```

  Input of `train` and `evaluate` should have following features,
    otherwise there will be a `KeyError`:

  * if `weight_column` is not `None`, a feature with
     `key=weight_column` whose value is a `Tensor`.

  @compatibility(eager)
  Estimators can be used while eager execution is enabled. Note that `input_fn`
  and all hooks are executed inside a graph context, so they have to be written
  to be compatible with graph mode. Note that `input_fn` code using `tf.data`
  generally works in both graph and eager modes.
  @end_compatibility
  """

  def __init__(self,
               model_dir=None,
               label_dimension=1,
               weight_column=None,
               optimizer='Ftrl',
               config=None,
               loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE):
    """Initializes a BaselineRegressor instance.

    Args:
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      label_dimension: Number of regression targets per example. This is the
        size of the last dimension of the labels and logits `Tensor` objects
        (typically, these have shape `[batch_size, label_dimension]`).
      weight_column: A string or a `_NumericColumn` created by
        `tf.feature_column.numeric_column` defining feature column representing
        weights. It will be multiplied by the loss of the example.
      optimizer: String, `tf_keras.optimizers.*` object, or callable that
        creates the optimizer to use for training. If not specified, will use
        `Ftrl` as the default optimizer.
      config: `RunConfig` object to configure the runtime settings.
      loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
        to reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`.

    Returns:
      A `BaselineRegressor` estimator.
    """
    # Regression head producing `label_dimension` outputs per example.
    head = regression_head.RegressionHead(
        label_dimension=label_dimension,
        weight_column=weight_column,
        loss_reduction=loss_reduction)

    def _model_fn(features, labels, mode, config):
      # The baseline model ignores feature values and only fits a bias term;
      # the heavy lifting is delegated to the shared v2 model_fn.
      return _baseline_model_fn_v2(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          optimizer=optimizer,
          config=config)

    super(BaselineRegressorV2, self).__init__(
        model_fn=_model_fn, model_dir=model_dir, config=config)


@estimator_export(v1=['estimator.BaselineRegressor'])  # pylint: disable=missing-docstring
class BaselineRegressor(estimator.Estimator):
  __doc__ = BaselineRegressorV2.__doc__.replace('SUM_OVER_BATCH_SIZE', 'SUM')

  def __init__(self,
               model_dir=None,
               label_dimension=1,
               weight_column=None,
               optimizer='Ftrl',
               config=None,
               loss_reduction=tf.compat.v1.losses.Reduction.SUM):
    """Builds a v1 baseline regressor; see the class docstring for details."""
    # Regression head producing `label_dimension` outputs per example.
    reg_head = head_lib._regression_head(  # pylint: disable=protected-access
        label_dimension=label_dimension,
        weight_column=weight_column,
        loss_reduction=loss_reduction)

    def _regressor_model_fn(features, labels, mode, config):
      # Baseline models ignore feature values and learn only a bias.
      return _baseline_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=reg_head,
          optimizer=optimizer,
          config=config)

    super(BaselineRegressor, self).__init__(
        model_fn=_regressor_model_fn, model_dir=model_dir, config=config)


================================================
FILE: tensorflow_estimator/python/estimator/canned/baseline_estimator_test.py
================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BaselineEstimator."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import shutil
import tempfile

import numpy as np
import six
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow_estimator.python.estimator.util import tf_keras
from tensorflow_estimator.python.estimator.canned import baseline
from tensorflow_estimator.python.estimator.canned import metric_keys
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.head import regression_head
from tensorflow_estimator.python.estimator.inputs import numpy_io

# Names of variables created by model.
BIAS_NAME = 'baseline/bias'


def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  """Returns an op asserting `actual` is within `rtol` of `expected`."""
  with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
    expected = ops.convert_to_tensor(expected, name='expected')
    actual = ops.convert_to_tensor(actual, name='actual')
    rtol = ops.convert_to_tensor(rtol, name='rtol')
    # Element-wise relative difference |expected - actual| / |expected|.
    relative_diff = tf.math.abs(expected - actual, 'diff') / tf.math.abs(
        expected)
    return tf.compat.v1.debugging.assert_less(
        relative_diff,
        rtol,
        data=('Condition expected =~ actual did not hold element-wise:'
              'expected = ', expected, 'actual = ', actual, 'rdiff = ',
              relative_diff, 'rtol = ', rtol,),
        name=scope)


def save_variables_to_ckpt(model_dir):
  """Initializes all global variables and checkpoints them to `model_dir`."""
  with tf.compat.v1.Session() as session:
    session.run([tf.compat.v1.initializers.global_variables()])
    saver = tf.compat.v1.train.Saver()
    saver.save(session, os.path.join(model_dir, 'model.ckpt'))


def _baseline_estimator_fn(weight_column=None, label_dimension=1, **kwargs):
  """Builds a BaselineEstimatorV2 with a mean-squared-error regression head."""
  head = regression_head.RegressionHead(
      weight_column=weight_column,
      label_dimension=label_dimension,
      loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
  return baseline.BaselineEstimatorV2(head=head, **kwargs)


def mock_optimizer_v2(testcase, expected_loss=None):
  """Creates a mock optimizer to test the train method.

  The returned optimizer never updates any weights; it only verifies the
  trainable variables and (optionally) the loss, then advances `iterations`
  (which the baseline model_fn aliases to the global step).

  Args:
    testcase: A TestCase instance.
    expected_loss: If given, will assert the loss value.

  Returns:
    A mock Optimizer.
  """
  expected_var_names = ['%s:0' % BIAS_NAME]

  class _Optimizer(tf_keras.optimizers.legacy.Optimizer):

    def _increment_step_ops(self):
      # Advance the step counter without touching any weights.
      if self.iterations is not None:
        return [self.iterations.assign_add(1).op]
      return [tf.no_op()]

    def get_updates(self, loss, params):
      trainable_vars = params
      testcase.assertItemsEqual(expected_var_names,
                                [var.name for var in trainable_vars])

      # Verify loss. We can't check the value directly, so we add an assert op.
      # Note: `assertEqual`, not the deprecated `assertEquals` (removed in
      # Python 3.12).
      testcase.assertEqual(0, loss.shape.ndims)
      if expected_loss is None:
        return self._increment_step_ops()
      assert_loss = assert_close(
          tf.cast(expected_loss, name='expected', dtype=tf.dtypes.float32),
          loss,
          name='assert_loss')
      with tf.control_dependencies((assert_loss,)):
        return self._increment_step_ops()

    def get_config(self):
      # Nothing extra to serialize; delegate to the base class.
      return super(_Optimizer, self).get_config()

  return _Optimizer(name='my_optimizer')


class BaselineEstimatorEvaluationTest(tf.test.TestCase):
  """Tests `evaluate()` for BaselineEstimatorV2 with a regression head."""

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def test_evaluation_batch(self):
    """Tests evaluation for batch_size==2."""
    # Write a checkpoint with a known bias so evaluation is deterministic.
    with tf.Graph().as_default():
      tf.Variable([13.0], name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_estimator = _baseline_estimator_fn(model_dir=self._model_dir)
    eval_metrics = baseline_estimator.evaluate(
        input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)

    # Logit is bias = 13, while label is 10.
    # Loss per example is 3**2 = 9.
    # Training loss is the mean over the batch = (9 + 9) / 2 = 9.
    # Average loss is the average over batch = 9.
    self.assertDictEqual(
        {
            metric_keys.MetricKeys.LOSS: 9.,
            metric_keys.MetricKeys.LOSS_MEAN: 9.,
            metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
            metric_keys.MetricKeys.LABEL_MEAN: 10.,
            tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
        }, eval_metrics)

  def test_evaluation_weights(self):
    """Tests evaluation with weights."""
    # Write a checkpoint with a known bias so evaluation is deterministic.
    with tf.Graph().as_default():
      tf.Variable([13.0], name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    def _input_fn():
      features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
      labels = ((10.,), (10.,))
      return features, labels

    baseline_estimator = _baseline_estimator_fn(
        weight_column='weights', model_dir=self._model_dir)
    eval_metrics = baseline_estimator.evaluate(input_fn=_input_fn, steps=1)

    # Logit is bias = 13, while label is 10.
    # Loss per example is 3**2 = 9.
    # Training loss is the weighted sum over batch size = (9 + 2*9) / 2 = 13.5.
    # Average loss is the weighted average = (9 + 2*9) / (1 + 2) = 9.
    self.assertDictEqual(
        {
            metric_keys.MetricKeys.LOSS: 13.5,
            metric_keys.MetricKeys.LOSS_MEAN: 9.,
            metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
            metric_keys.MetricKeys.LABEL_MEAN: 10.,
            tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
        }, eval_metrics)

  def test_evaluation_for_multi_dimensions(self):
    """Tests evaluation when the label (and bias) has dimension > 1."""
    label_dim = 2
    with tf.Graph().as_default():
      tf.Variable([46.0, 58.0], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_estimator = _baseline_estimator_fn(
        label_dimension=label_dim, model_dir=self._model_dir)
    input_fn = numpy_io.numpy_input_fn(
        x={
            'age': np.array([[2., 4., 5.]]),
        },
        y=np.array([[46., 58.]]),
        batch_size=1,
        num_epochs=None,
        shuffle=False)
    eval_metrics = baseline_estimator.evaluate(input_fn=input_fn, steps=1)

    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
         metric_keys.MetricKeys.PREDICTION_MEAN,
         metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
        eval_metrics.keys())

    # Logit is bias which is [46, 58], matching the labels, so loss is 0.
    self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])


class BaselineEstimatorPredictTest(tf.test.TestCase):
  """Tests `predict()` for BaselineEstimatorV2 with a regression head."""

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def test_1d(self):
    """Tests predict when all variables are one-dimensional."""
    with tf.Graph().as_default():
      tf.Variable([.2], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_estimator = _baseline_estimator_fn(model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': np.array([[2.]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = baseline_estimator.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # The baseline model ignores feature values: prediction = bias = .2.
    self.assertAllClose([[.2]], predicted_scores)

  def testMultiDim(self):
    """Tests predict when all variables are multi-dimensional."""
    batch_size = 2
    label_dimension = 3
    with tf.Graph().as_default():
      tf.Variable(  # shape=[label_dimension]
          [.2, .4, .6], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_estimator = _baseline_estimator_fn(
        label_dimension=label_dimension, model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        # x shape=[batch_size, x_dim]
        x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predictions = baseline_estimator.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # score = bias, shape=[batch_size, label_dimension]
    self.assertAllClose([[0.2, 0.4, 0.6], [0.2, 0.4, 0.6]], predicted_scores)


class BaselineEstimatorIntegrationTest(tf.test.TestCase):
  """End-to-end train/evaluate/predict/export test for the baseline estimator."""

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
                          input_dimension, label_dimension, prediction_length):
    # `input_dimension` is only used to build the serving feature spec below.
    feature_columns = [
        tf.feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    est = _baseline_estimator_fn(
        label_dimension=label_dimension, model_dir=self._model_dir)

    # TRAIN
    # learn y = x
    est.train(train_input_fn, steps=200)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))

    # PREDICT
    predictions = np.array(
        [x['predictions'] for x in est.predict(predict_input_fn)])
    self.assertAllEqual((prediction_length, label_dimension), predictions.shape)

    # EXPORT
    feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_saved_model(tempfile.mkdtemp(),
                                        serving_input_receiver_fn)
    self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))

  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    label_dimension = 2
    input_dimension = label_dimension
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)

    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        label_dimension=label_dimension,
        prediction_length=prediction_length)


class BaselineEstimatorTrainingTest(tf.test.TestCase):
  """Tests `train()` for BaselineEstimatorV2 using a mock optimizer."""

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _assert_checkpoint(self,
                         label_dimension,
                         expected_global_step,
                         expected_bias=None):
    # Verifies shapes and (optionally) values in the latest checkpoint.
    shapes = {
        name: shape
        for (name, shape) in tf.train.list_variables(self._model_dir)
    }

    self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertEqual(
        expected_global_step,
        tf.train.load_variable(self._model_dir,
                               tf.compat.v1.GraphKeys.GLOBAL_STEP))

    self.assertEqual([label_dimension], shapes[BIAS_NAME])
    if expected_bias is not None:
      self.assertEqual(expected_bias,
                       tf.train.load_variable(self._model_dir, BIAS_NAME))

  def testFromScratch(self):
    # Create a baseline estimator (regression head).
    label = 5.
    age = 17
    # loss = (logits - label)^2 = (0 - 5.)^2 = 25.
    mock_optimizer = mock_optimizer_v2(self, expected_loss=25.)
    baseline_estimator = _baseline_estimator_fn(
        model_dir=self._model_dir, optimizer=mock_optimizer)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    baseline_estimator.train(
        input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
    self.assertEqual(
        num_steps,
        baseline_estimator.get_variable_value(mock_optimizer.iterations.name))
    # The mock optimizer never updates weights, so the bias stays at 0.
    self._assert_checkpoint(
        label_dimension=1, expected_global_step=num_steps, expected_bias=[0.])

  def testFromCheckpoint(self):
    # Create initial checkpoint.
    bias = 7.0
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # logits = bias = 7.
    # loss = (logits - label)^2 = (7 - 5)^2 = 4
    mock_optimizer = mock_optimizer_v2(self, expected_loss=4.)
    baseline_estimator = _baseline_estimator_fn(
        model_dir=self._model_dir, optimizer=mock_optimizer)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    baseline_estimator.train(
        input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
    self.assertEqual(
        initial_global_step + num_steps,
        baseline_estimator.get_variable_value(mock_optimizer.iterations.name))
    self._assert_checkpoint(
        label_dimension=1,
        expected_global_step=initial_global_step + num_steps,
        expected_bias=[bias])


# Discovers and runs all test cases in this module.
if __name__ == '__main__':
  tf.test.main()


================================================
FILE: tensorflow_estimator/python/estimator/canned/baseline_test.py
================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for baseline.py."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import os
import shutil
import tempfile

import numpy as np
import six
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import ops
from tensorflow_estimator.python.estimator.util import tf_keras
from tensorflow_estimator.python.estimator.canned import baseline
from tensorflow_estimator.python.estimator.canned import metric_keys
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.inputs import numpy_io
from tensorflow_estimator.python.estimator.inputs import pandas_io

try:
  # pylint: disable=g-import-not-at-top
  import pandas as pd
  HAS_PANDAS = True
except (IOError, ImportError):
  # Pandas writes a temporary file during import. If it fails, don't use pandas.
  HAS_PANDAS = False

# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring

# Names of variables created by model.
BIAS_NAME = 'baseline/bias'


def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  """Asserts |expected - actual| / |expected| < rtol element-wise."""
  with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
    expected = ops.convert_to_tensor(expected, name='expected')
    actual = ops.convert_to_tensor(actual, name='actual')
    rdiff = tf.math.abs(expected - actual, 'diff') / tf.math.abs(expected)
    rtol = ops.convert_to_tensor(rtol, name='rtol')
    # Values reported if the assertion fires at run time.
    error_data = ('Condition expected =~ actual did not hold element-wise:'
                  'expected = ', expected, 'actual = ', actual, 'rdiff = ',
                  rdiff, 'rtol = ', rtol,)
    return tf.compat.v1.debugging.assert_less(
        rdiff, rtol, data=error_data, name=scope)


def save_variables_to_ckpt(model_dir):
  """Runs global-variable initializers and writes a checkpoint to model_dir."""
  ckpt_path = os.path.join(model_dir, 'model.ckpt')
  with tf.compat.v1.Session() as sess:
    sess.run([tf.compat.v1.initializers.global_variables()])
    tf.compat.v1.train.Saver().save(sess, ckpt_path)


def queue_parsed_features(feature_map):
  """Pipes the tensors of `feature_map` through a FIFO queue.

  Args:
    feature_map: Dict mapping feature keys to tensors.

  Returns:
    A dict with the same keys whose values are the corresponding dequeued
    tensors.
  """
  tensors_to_enqueue = []
  keys = []
  for key, tensor in six.iteritems(feature_map):
    keys.append(key)
    tensors_to_enqueue.append(tensor)
  queue_dtypes = [x.dtype for x in tensors_to_enqueue]
  input_queue = tf.queue.FIFOQueue(capacity=100, dtypes=queue_dtypes)
  tf.compat.v1.train.queue_runner.add_queue_runner(
      tf.compat.v1.train.queue_runner.QueueRunner(
          input_queue, [input_queue.enqueue(tensors_to_enqueue)]))
  dequeued_tensors = input_queue.dequeue()
  # `dequeue` preserves enqueue order, so a positional zip re-associates each
  # tensor with its key (clearer than indexing parallel lists by range/len).
  return dict(zip(keys, dequeued_tensors))


def sorted_key_dict(unsorted_dict):
  """Returns a copy of `unsorted_dict` with items inserted in key order."""
  return {key: value for key, value in sorted(unsorted_dict.items())}


def sigmoid(x):
  """Logistic function 1 / (1 + e^-x), applied element-wise."""
  return 1.0 / (1.0 + np.exp(-x))


def _baseline_regressor_fn(*args, **kwargs):
  # Thin alias so the tests below target the v2 regressor implementation.
  return baseline.BaselineRegressorV2(*args, **kwargs)


def _baseline_classifier_fn(*args, **kwargs):
  """Factory for the V2 BaselineClassifier under test; forwards all args."""
  return baseline.BaselineClassifierV2(*args, **kwargs)


def mock_optimizer_v2(testcase, expected_loss=None):
  """Creates a mock optimizer to test the train method.

  The returned optimizer verifies that the only trainable variable is the
  baseline bias and (optionally) that the training loss matches
  `expected_loss`; it then only increments its `iterations` counter instead
  of applying real updates.

  Args:
    testcase: A TestCase instance.
    expected_loss: If given, will assert the loss value.

  Returns:
    A mock Optimizer.
  """
  expected_var_names = ['%s:0' % BIAS_NAME]

  class _Optimizer(tf_keras.optimizers.legacy.Optimizer):
    """Optimizer that checks variables/loss and no-ops the update."""

    def _increment_iterations(self):
      # Bump `iterations` so the estimator's global step advances; otherwise
      # training would never terminate.
      if self.iterations is not None:
        return [self.iterations.assign_add(1).op]
      return [tf.no_op()]

    def get_updates(self, loss, params):
      trainable_vars = params
      # `assertItemsEqual` is the Python 2 name; `assertCountEqual` is the
      # standard unittest name in Python 3.
      testcase.assertCountEqual(expected_var_names,
                                [var.name for var in trainable_vars])

      # Verify loss. We can't check the value directly, so we add an assert op.
      # `assertEquals` is a deprecated alias removed in Python 3.12.
      testcase.assertEqual(0, loss.shape.ndims)
      if expected_loss is None:
        return self._increment_iterations()
      assert_loss = assert_close(
          tf.cast(expected_loss, name='expected', dtype=tf.dtypes.float32),
          loss,
          name='assert_loss')
      with tf.control_dependencies((assert_loss,)):
        return self._increment_iterations()

    def get_config(self):
      config = super(_Optimizer, self).get_config()
      return config

  optimizer = _Optimizer(name='my_optimizer')

  return optimizer


# Tests for Baseline Regressor.

# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.


class BaselineRegressorEvaluationTest(tf.test.TestCase):
  """Tests BaselineRegressorV2.evaluate() against hand-computed metrics.

  Each test writes a checkpoint with a known bias so the model's predictions
  (which are just the bias) and the resulting losses are deterministic.
  """

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def test_evaluation_for_simple_data(self):
    # Seed the checkpoint with bias=13 and global_step=100.
    with tf.Graph().as_default():
      tf.Variable([13.0], name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
    eval_metrics = baseline_regressor.evaluate(
        input_fn=lambda: ({
            'age': ((1,),)
        }, ((10.,),)), steps=1)

    # Logit is bias = 13, while label is 10. Loss is 3**2 = 9.
    self.assertDictEqual(
        {
            metric_keys.MetricKeys.LOSS: 9.,
            metric_keys.MetricKeys.LOSS_MEAN: 9.,
            metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
            metric_keys.MetricKeys.LABEL_MEAN: 10.,
            tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
        }, eval_metrics)

  def test_evaluation_batch(self):
    """Tests evaluation for batch_size==2."""
    with tf.Graph().as_default():
      tf.Variable([13.0], name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
    eval_metrics = baseline_regressor.evaluate(
        input_fn=lambda: ({
            'age': ((1,), (1,))
        }, ((10.,), (10.,))), steps=1)

    # Logit is bias = 13, while label is 10.
    # Loss per example is 3**2 = 9.
    # Training loss is the sum over batch size = (9 + 9) / 2 = 9
    # Average loss is the average over batch = 9
    self.assertDictEqual(
        {
            metric_keys.MetricKeys.LOSS: 9.,
            metric_keys.MetricKeys.LOSS_MEAN: 9.,
            metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
            metric_keys.MetricKeys.LABEL_MEAN: 10.,
            tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
        }, eval_metrics)

  def test_evaluation_weights(self):
    """Tests evaluation with weights."""
    with tf.Graph().as_default():
      tf.Variable([13.0], name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    def _input_fn():
      # Second example carries weight 2, so it counts double in the loss.
      features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
      labels = ((10.,), (10.,))
      return features, labels

    baseline_regressor = _baseline_regressor_fn(
        weight_column='weights', model_dir=self._model_dir)
    eval_metrics = baseline_regressor.evaluate(input_fn=_input_fn, steps=1)

    # Logit is bias = 13, while label is 10.
    # Loss per example is 3**2 = 9.
    # Training loss is the weighted sum over batch size = (9 + 2*9) / 2 = 13.5
    # Average loss is the weighted average = (9 + 2*9) / (1 + 2) = 9
    self.assertDictEqual(
        {
            metric_keys.MetricKeys.LOSS: 13.5,
            metric_keys.MetricKeys.LOSS_MEAN: 9.,
            metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
            metric_keys.MetricKeys.LABEL_MEAN: 10.,
            tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
        }, eval_metrics)

  def test_evaluation_for_multi_dimensions(self):
    """Tests evaluation with label_dimension=2; the bias matches the labels."""
    label_dim = 2
    with tf.Graph().as_default():
      tf.Variable([46.0, 58.0], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_regressor = _baseline_regressor_fn(
        label_dimension=label_dim, model_dir=self._model_dir)
    input_fn = numpy_io.numpy_input_fn(
        x={
            'age': np.array([[2., 4., 5.]]),
        },
        y=np.array([[46., 58.]]),
        batch_size=1,
        num_epochs=None,
        shuffle=False)
    eval_metrics = baseline_regressor.evaluate(input_fn=input_fn, steps=1)

    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
         metric_keys.MetricKeys.PREDICTION_MEAN,
         metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
        eval_metrics.keys())

    # Logit is bias which is [46, 58]
    self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])


class BaselineRegressorPredictTest(tf.test.TestCase):
  """Tests BaselineRegressorV2.predict(): predictions equal the stored bias."""

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def test_1d(self):
    """Tests predict when all variables are one-dimensional."""
    with tf.Graph().as_default():
      tf.Variable([.2], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': np.array([[2.]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = baseline_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # Baseline ignores the features; the prediction is just the bias = .2
    self.assertAllClose([[.2]], predicted_scores)

  def testMultiDim(self):
    """Tests predict when all variables are multi-dimensional."""
    batch_size = 2
    label_dimension = 3
    with tf.Graph().as_default():
      tf.Variable(  # shape=[label_dimension]
          [.2, .4, .6], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_regressor = _baseline_regressor_fn(
        label_dimension=label_dimension, model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        # x shape=[batch_size, x_dim]
        x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predictions = baseline_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # score = bias, shape=[batch_size, label_dimension]
    self.assertAllClose([[0.2, 0.4, 0.6], [0.2, 0.4, 0.6]], predicted_scores)


class BaselineRegressorIntegrationTest(tf.test.TestCase):
  """End-to-end train/evaluate/predict/export tests for BaselineRegressorV2.

  The same flow is exercised with three input pipelines: numpy_input_fn,
  pandas_input_fn, and a hand-built parse_example-based input_fn.
  """

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
                          input_dimension, label_dimension, prediction_length):
    """Runs train -> evaluate -> predict -> export and sanity-checks each step."""
    feature_columns = [
        tf.feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    est = _baseline_regressor_fn(
        label_dimension=label_dimension, model_dir=self._model_dir)

    # TRAIN
    # learn y = x
    est.train(train_input_fn, steps=200)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))

    # PREDICT
    predictions = np.array(
        [x['predictions'] for x in est.predict(predict_input_fn)])
    self.assertAllEqual((prediction_length, label_dimension), predictions.shape)

    # EXPORT
    feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_saved_model(tempfile.mkdtemp(),
                                        serving_input_receiver_fn)
    self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))

  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    label_dimension = 2
    input_dimension = label_dimension
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)

    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        label_dimension=label_dimension,
        prediction_length=prediction_length)

  def test_pandas_input_fn(self):
    """Tests complete flow with pandas_input_fn."""
    # Skip silently when pandas is not installed (import guard at file top).
    if not HAS_PANDAS:
      return

    # Pandas DataFrame naturally supports 1 dim data only.
    label_dimension = 1
    input_dimension = label_dimension
    batch_size = 10
    data = np.array([1., 2., 3., 4.], dtype=np.float32)
    x = pd.DataFrame({'x': data})
    y = pd.Series(data)
    prediction_length = 4

    train_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
    eval_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, shuffle=False)
    predict_input_fn = pandas_io.pandas_input_fn(
        x=x, batch_size=batch_size, shuffle=False)

    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        label_dimension=label_dimension,
        prediction_length=prediction_length)

  def test_input_fn_from_parse_example(self):
    """Tests complete flow with input_fn constructed from parse_example."""
    label_dimension = 2
    input_dimension = label_dimension
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)

    # Serialize each row as a tf.Example with features 'x' and label 'y'.
    serialized_examples = []
    for datum in data:
      example = example_pb2.Example(
          features=feature_pb2.Features(
              feature={
                  'x':
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(value=datum)),
                  'y':
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(
                              value=datum[:label_dimension])),
              }))
      serialized_examples.append(example.SerializeToString())

    feature_spec = {
        'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
        'y': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
    }

    def _train_input_fn():
      feature_map = tf.compat.v1.io.parse_example(serialized_examples,
                                                  feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _eval_input_fn():
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _predict_input_fn():
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      features.pop('y')
      return features, None

    self._test_complete_flow(
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        input_dimension=input_dimension,
        label_dimension=label_dimension,
        prediction_length=prediction_length)


class BaselineRegressorTrainingTest(tf.test.TestCase):
  """Tests BaselineRegressorV2.train(): checkpoints, global step, and losses.

  Loss expectations are verified indirectly via `mock_optimizer_v2`, which
  inserts an assert op on the training loss instead of applying updates.
  """

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _assert_checkpoint(self,
                         label_dimension,
                         expected_global_step,
                         expected_bias=None):
    """Checks variable shapes, global step, and (optionally) the bias value."""
    shapes = {
        name: shape
        for (name, shape) in tf.train.list_variables(self._model_dir)
    }

    self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertEqual(
        expected_global_step,
        tf.train.load_variable(self._model_dir,
                               tf.compat.v1.GraphKeys.GLOBAL_STEP))

    self.assertEqual([label_dimension], shapes[BIAS_NAME])
    if expected_bias is not None:
      self.assertEqual(expected_bias,
                       tf.train.load_variable(self._model_dir, BIAS_NAME))

  def testFromScratchWithDefaultOptimizer(self):
    # Create BaselineRegressor.
    label = 5.
    age = 17
    baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)

    # Train for a few steps, and validate final checkpoint.
    num_steps = 10
    baseline_regressor.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self._assert_checkpoint(label_dimension=1, expected_global_step=num_steps)

  def testTrainWithOneDimLabel(self):
    label_dimension = 1
    batch_size = 20
    est = _baseline_regressor_fn(
        label_dimension=label_dimension, model_dir=self._model_dir)
    data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
    self.assertEqual((batch_size,), data_rank_1.shape)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1},
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(label_dimension=1, expected_global_step=200)

  def testTrainWithOneDimWeight(self):
    label_dimension = 1
    batch_size = 20
    est = _baseline_regressor_fn(
        label_dimension=label_dimension,
        weight_column='w',
        model_dir=self._model_dir)

    data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
    self.assertEqual((batch_size,), data_rank_1.shape)

    train_input_fn = numpy_io.numpy_input_fn(
        x={
            'age': data_rank_1,
            'w': data_rank_1
        },
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(label_dimension=1, expected_global_step=200)

  def testFromScratch(self):
    # Create BaselineRegressor.
    label = 5.
    age = 17
    # loss = (logits - label)^2 = (0 - 5.)^2 = 25.
    mock_optimizer = mock_optimizer_v2(self, expected_loss=25.)
    baseline_regressor = _baseline_regressor_fn(
        model_dir=self._model_dir, optimizer=mock_optimizer)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    baseline_regressor.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self.assertEqual(
        num_steps,
        baseline_regressor.get_variable_value(mock_optimizer.iterations.name))
    # Bias stays 0 because the mock optimizer never applies gradients.
    self._assert_checkpoint(
        label_dimension=1, expected_global_step=num_steps, expected_bias=[0.])

  def testFromCheckpoint(self):
    # Create initial checkpoint.
    bias = 7.0
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # logits = bias = 7.
    # loss = (logits - label)^2 = (7 - 5)^2 = 4
    mock_optimizer = mock_optimizer_v2(self, expected_loss=4.)
    baseline_regressor = _baseline_regressor_fn(
        model_dir=self._model_dir, optimizer=mock_optimizer)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    baseline_regressor.train(
        input_fn=lambda: ({
            'age': ((17,),)
        }, ((5.,),)), steps=num_steps)
    self.assertEqual(
        initial_global_step + num_steps,
        baseline_regressor.get_variable_value(mock_optimizer.iterations.name))
    self._assert_checkpoint(
        label_dimension=1,
        expected_global_step=initial_global_step + num_steps,
        expected_bias=[bias])

  def testFromCheckpointMultiBatch(self):
    # Create initial checkpoint.
    bias = 5.0
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # logits = bias
    # logits[0] = 5.
    # logits[1] = 5.
    # loss = (sum(logits - label)^2 = (5 - 5)^2 + (5 - 3)^2) / 2 (batch size)
    # loss = 2
    mock_optimizer = mock_optimizer_v2(self, expected_loss=2.)
    baseline_regressor = _baseline_regressor_fn(
        model_dir=self._model_dir, optimizer=mock_optimizer)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    baseline_regressor.train(
        input_fn=lambda: ({
            'age': ((17,), (15,))
        }, ((5.,), (3.,))),
        steps=num_steps)
    self.assertEqual(
        initial_global_step + num_steps,
        baseline_regressor.get_variable_value(mock_optimizer.iterations.name))
    self._assert_checkpoint(
        label_dimension=1,
        expected_global_step=initial_global_step + num_steps,
        expected_bias=bias)


# Tests for Baseline Classifier.


class BaselineClassifierTrainingTest(tf.test.TestCase):
  """Tests BaselineClassifierV2.train() for binary and multi-class setups.

  Each `_test*` helper is parameterized by `n_classes` and invoked twice,
  once with n_classes=2 (binary) and once with n_classes=4 (multi-class).
  Loss expectations are checked via `mock_optimizer_v2`'s assert op.
  """

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _assert_checkpoint(self,
                         n_classes,
                         expected_global_step,
                         expected_bias=None):
    """Checks bias shape, global step, and (optionally) the bias value."""
    # Binary classification uses a single logit; multi-class uses one per class.
    logits_dimension = n_classes if n_classes > 2 else 1

    shapes = {
        name: shape
        for (name, shape) in tf.train.list_variables(self._model_dir)
    }

    self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertEqual(
        expected_global_step,
        tf.train.load_variable(self._model_dir,
                               tf.compat.v1.GraphKeys.GLOBAL_STEP))

    self.assertEqual([logits_dimension], shapes[BIAS_NAME])
    if expected_bias is not None:
      self.assertAllEqual(expected_bias,
                          tf.train.load_variable(self._model_dir, BIAS_NAME))

  def _testFromScratchWithDefaultOptimizer(self, n_classes):
    label = 0
    age = 17
    est = baseline.BaselineClassifierV2(
        n_classes=n_classes, model_dir=self._model_dir)

    # Train for a few steps, and validate final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self._assert_checkpoint(n_classes, num_steps)

  def testBinaryClassesFromScratchWithDefaultOptimizer(self):
    self._testFromScratchWithDefaultOptimizer(n_classes=2)

  def testMultiClassesFromScratchWithDefaultOptimizer(self):
    self._testFromScratchWithDefaultOptimizer(n_classes=4)

  def _testTrainWithTwoDimsLabel(self, n_classes):
    batch_size = 20

    est = baseline.BaselineClassifierV2(
        n_classes=n_classes, model_dir=self._model_dir)
    data_rank_1 = np.array([0, 1])
    data_rank_2 = np.array([[0], [1]])
    self.assertEqual((2,), data_rank_1.shape)
    self.assertEqual((2, 1), data_rank_2.shape)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1},
        y=data_rank_2,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(n_classes, 200)

  def testBinaryClassesTrainWithTwoDimsLabel(self):
    self._testTrainWithTwoDimsLabel(n_classes=2)

  def testMultiClassesTrainWithTwoDimsLabel(self):
    self._testTrainWithTwoDimsLabel(n_classes=4)

  def _testTrainWithOneDimLabel(self, n_classes):
    batch_size = 20

    est = baseline.BaselineClassifierV2(
        n_classes=n_classes, model_dir=self._model_dir)
    data_rank_1 = np.array([0, 1])
    self.assertEqual((2,), data_rank_1.shape)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1},
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(n_classes, 200)

  def testBinaryClassesTrainWithOneDimLabel(self):
    self._testTrainWithOneDimLabel(n_classes=2)

  def testMultiClassesTrainWithOneDimLabel(self):
    self._testTrainWithOneDimLabel(n_classes=4)

  def _testTrainWithTwoDimsWeight(self, n_classes):
    batch_size = 20

    est = baseline.BaselineClassifierV2(
        weight_column='w', n_classes=n_classes, model_dir=self._model_dir)
    data_rank_1 = np.array([0, 1])
    data_rank_2 = np.array([[0], [1]])
    self.assertEqual((2,), data_rank_1.shape)
    self.assertEqual((2, 1), data_rank_2.shape)

    train_input_fn = numpy_io.numpy_input_fn(
        x={
            'age': data_rank_1,
            'w': data_rank_2
        },
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(n_classes, 200)

  def testBinaryClassesTrainWithTwoDimsWeight(self):
    self._testTrainWithTwoDimsWeight(n_classes=2)

  def testMultiClassesTrainWithTwoDimsWeight(self):
    self._testTrainWithTwoDimsWeight(n_classes=4)

  def _testTrainWithOneDimWeight(self, n_classes):
    batch_size = 20

    est = baseline.BaselineClassifierV2(
        weight_column='w', n_classes=n_classes, model_dir=self._model_dir)
    data_rank_1 = np.array([0, 1])
    self.assertEqual((2,), data_rank_1.shape)

    train_input_fn = numpy_io.numpy_input_fn(
        x={
            'age': data_rank_1,
            'w': data_rank_1
        },
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(n_classes, 200)

  def testBinaryClassesTrainWithOneDimWeight(self):
    self._testTrainWithOneDimWeight(n_classes=2)

  def testMultiClassesTrainWithOneDimWeight(self):
    self._testTrainWithOneDimWeight(n_classes=4)

  def _testFromScratch(self, n_classes):
    label = 1
    age = 17
    # For binary classifier:
    #   loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are
    #   all zero initially) and label = 1 so,
    #      loss = 1 * -log ( sigmoid(logits) ) = 0.69315
    # For multi class classifier:
    #   loss = cross_entropy(logits, label) where logits are all 0s (weights are
    #   all zero initially) and label = 1 so,
    #      loss = 1 * -log ( 1.0 / n_classes )
    # For this particular test case, as logits are same, the formula
    # 1 * -log ( 1.0 / n_classes ) covers both binary and multi class cases.
    mock_optimizer = mock_optimizer_v2(
        self, expected_loss=-1 * math.log(1.0 / n_classes))

    est = baseline.BaselineClassifierV2(
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self.assertEqual(num_steps,
                     est.get_variable_value(mock_optimizer.iterations.name))
    # Bias stays at zero because the mock optimizer never applies gradients.
    self._assert_checkpoint(
        n_classes,
        expected_global_step=num_steps,
        expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)

  def testBinaryClassesFromScratch(self):
    self._testFromScratch(n_classes=2)

  def testMultiClassesFromScratch(self):
    self._testFromScratch(n_classes=4)

  def _testFromCheckpoint(self, n_classes):
    # Create initial checkpoint.
    label = 1
    age = 17
    bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # For binary classifier:
    #   logits = bias = -1.
    #   loss = sigmoid_cross_entropy(logits, label)
    #   so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
    # For multi class classifier:
    #   loss = cross_entropy(logits, label)
    #   where logits = bias and label = 1
    #   so, loss = 1 * -log ( softmax(logits)[1] )
    if n_classes == 2:
      expected_loss = 1.3133
    else:
      logits = bias
      logits_exp = np.exp(logits)
      softmax = logits_exp / logits_exp.sum()
      expected_loss = -1 * math.log(softmax[label])

    mock_optimizer = mock_optimizer_v2(self, expected_loss=expected_loss)

    est = baseline.BaselineClassifierV2(
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self.assertEqual(initial_global_step + num_steps,
                     est.get_variable_value(mock_optimizer.iterations.name))
    self._assert_checkpoint(
        n_classes,
        expected_global_step=initial_global_step + num_steps,
        expected_bias=bias)

  def testBinaryClassesFromCheckpoint(self):
    self._testFromCheckpoint(n_classes=2)

  def testMultiClassesFromCheckpoint(self):
    self._testFromCheckpoint(n_classes=4)

  def _testFromCheckpointFloatLabels(self, n_classes):
    """Tests float labels for binary classification."""
    # Create initial checkpoint. Float labels only make sense for the binary
    # case, so this is a no-op for multi-class.
    if n_classes > 2:
      return
    label = 0.8
    age = 17
    bias = [-1.0]
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # logits = bias = -1.
    # loss = sigmoid_cross_entropy(logits, label)
    # => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
    mock_optimizer = mock_optimizer_v2(self, expected_loss=1.1132617)

    est = baseline.BaselineClassifierV2(
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self.assertEqual(initial_global_step + num_steps,
                     est.get_variable_value(mock_optimizer.iterations.name))

  def testBinaryClassesFromCheckpointFloatLabels(self):
    self._testFromCheckpointFloatLabels(n_classes=2)

  def testMultiClassesFromCheckpointFloatLabels(self):
    self._testFromCheckpointFloatLabels(n_classes=4)

  def _testFromCheckpointMultiBatch(self, n_classes):
    # Create initial checkpoint.
    label = [1, 0]
    age = [17, 18.5]
    batch_size = 2
    # The bias has a single entry for the binary case and n_classes entries
    # otherwise; initialize every entry to -1 so logits are known.
    bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # For binary classifier:
    #   logits = bias
    #   logits[0] = -1.
    #   logits[1] = -1.
    #   loss = sigmoid_cross_entropy(logits, label)
    #   so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
    #       loss[1] = (1 - 0) * -log ( 1- sigmoid(-1) ) = 0.3132
    # For multi class classifier:
    #   loss = cross_entropy(logits, label)
    #   where logits = bias and label = [1, 0]
    #   so, loss = 1 * -log ( softmax(logits)[label] )
    if n_classes == 2:
      expected_loss = (1.3133 + 0.3132) / 2
    else:
      # Expand logits since batch_size=2
      logits = bias * np.ones(shape=(2, 1))
      logits_exp = np.exp(logits)
      softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
      softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
      expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
      expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
      expected_loss = (expected_loss_0 + expected_loss_1) / 2

    mock_optimizer = mock_optimizer_v2(self, expected_loss=expected_loss)

    est = baseline.BaselineClassifierV2(
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(input_fn=lambda: ({'age': (age)}, (label)), steps=num_steps)
    self.assertEqual(initial_global_step + num_steps,
                     est.get_variable_value(mock_optimizer.iterations.name))
    self._assert_checkpoint(
        n_classes,
        expected_global_step=initial_global_step + num_steps,
        expected_bias=bias)

  def testBinaryClassesFromCheckpointMultiBatch(self):
    self._testFromCheckpointMultiBatch(n_classes=2)

  def testMultiClassesFromCheckpointMultiBatch(self):
    self._testFromCheckpointMultiBatch(n_classes=4)


class BaselineClassifierEvaluationTest(tf.test.TestCase):
  """Tests `BaselineClassifier.evaluate` against hand-computed metrics."""

  def setUp(self):
    # Fresh model directory per test; the checkpoint written there is the
    # only state `evaluate` reads.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _test_evaluation_for_simple_data(self, n_classes):
    """Evaluates a single example and checks all reported metrics.

    Args:
      n_classes: Number of label classes. 2 exercises the binary (sigmoid)
        head; larger values exercise the multi-class (softmax) head.
    """
    label = 1
    age = 1.

    # Binary classification uses a single logit; multi-class uses one logit
    # per class.
    bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
    initial_global_step = 100

    with tf.Graph().as_default():
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    est = _baseline_classifier_fn(
        n_classes=n_classes, model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=1)

    if n_classes == 2:
      # Binary classes: loss = -log(sigmoid(-1)) / batch size = 1.3133
      # Prediction = sigmoid(-1) = 0.2689
      expected_metrics = {
          metric_keys.MetricKeys.LOSS: 1.3133,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: initial_global_step,
          metric_keys.MetricKeys.LOSS_MEAN: 1.3133,
          metric_keys.MetricKeys.ACCURACY: 0.,
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: 0.2689,
          metric_keys.MetricKeys.LABEL_MEAN: 1.,
          metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
          metric_keys.MetricKeys.AUC: 0.,
          metric_keys.MetricKeys.AUC_PR: 1.,
      }
    else:
      # Multi classes: loss = 1 * -log ( softmax(logits)[label] )
      logits = bias
      logits_exp = np.exp(logits)
      softmax = logits_exp / logits_exp.sum()
      expected_loss = -1 * math.log(softmax[label])

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: initial_global_step,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
          metric_keys.MetricKeys.ACCURACY: 0.,
      }

    self.assertAllClose(
        sorted_key_dict(expected_metrics),
        sorted_key_dict(eval_metrics),
        rtol=1e-3)

  def test_binary_classes_evaluation_for_simple_data(self):
    self._test_evaluation_for_simple_data(n_classes=2)

  def test_multi_classes_evaluation_for_simple_data(self):
    self._test_evaluation_for_simple_data(n_classes=4)

  def _test_evaluation_batch(self, n_classes):
    """Tests evaluation for batch_size==2.

    Args:
      n_classes: Number of label classes.
    """
    label = [1, 0]
    age = [17., 18.]
    bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    est = _baseline_classifier_fn(
        n_classes=n_classes, model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({
            'age': (age)
        }, (label)), steps=1)

    if n_classes == 2:
      # Logits are (-1., -1.) labels are (1, 0).
      # Loss is
      #   loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
      #   loss for row 2: (1 - 0) * -log(1 - sigmoid(-1)) = 0.3132
      # Prediction = sigmoid(-1) = 0.2689
      expected_loss = (1.3133 + 0.3132) / 2  # batch size

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: initial_global_step,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
          metric_keys.MetricKeys.ACCURACY: 0.5,
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: 0.2689,
          metric_keys.MetricKeys.LABEL_MEAN: 0.5,
          metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
          metric_keys.MetricKeys.AUC: 0.5,
          metric_keys.MetricKeys.AUC_PR: 0.5,
      }
    else:
      # Expand logits since batch_size=2
      logits = bias * np.ones(shape=(2, 1))
      logits_exp = np.exp(logits)
      softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
      softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
      expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
      expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
      expected_loss = (expected_loss_0 + expected_loss_1) / 2  # batch size

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: initial_global_step,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
          metric_keys.MetricKeys.ACCURACY: 0.5,
      }

    self.assertAllClose(
        sorted_key_dict(expected_metrics),
        sorted_key_dict(eval_metrics),
        rtol=1e-3)

  def test_binary_classes_evaluation_batch(self):
    self._test_evaluation_batch(n_classes=2)

  def test_multi_classes_evaluation_batch(self):
    self._test_evaluation_batch(n_classes=4)

  def _test_evaluation_weights(self, n_classes):
    """Tests evaluation with per-example weights (batch_size==2).

    Args:
      n_classes: Number of label classes.
    """
    label = [1, 0]
    age = [17., 18.]
    # Per-example weights fed through the 'w' weight column; the weighted
    # loss / mean metrics below are derived from these values.
    weights = [1., 2.]
    bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    est = _baseline_classifier_fn(
        n_classes=n_classes, weight_column='w', model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({
            'age': (age),
            'w': (weights)
        }, (label)), steps=1)

    if n_classes == 2:
      # Logits are (-1., -1.) labels are (1, 0).
      # Loss is
      #   loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
      #   loss for row 2: (1 - 0) * -log(1 - sigmoid(-1)) = 0.3132
      #   weights = [1., 2.]
      expected_loss = (1.3133 * 1. + 0.3132 * 2.) / 2  # batch size
      loss_mean = (1.3133 * 1. + 0.3132 * 2.) / (1.0 + 2.0)
      label_mean = np.average(label, weights=weights)
      logits = [-1, -1]
      logistics = sigmoid(np.array(logits))
      predictions_mean = np.average(logistics, weights=weights)

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: initial_global_step,
          metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
          metric_keys.MetricKeys.ACCURACY: 2. / (1. + 2.),
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
          metric_keys.MetricKeys.LABEL_MEAN: label_mean,
          metric_keys.MetricKeys.ACCURACY_BASELINE:
              (max(label_mean, 1 - label_mean)),
          metric_keys.MetricKeys.AUC: 0.5,
          metric_keys.MetricKeys.AUC_PR: 0.33333,
      }
    else:
      # Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
      # Expand logits since batch_size=2
      logits = bias * np.ones(shape=(2, 1))
      logits_exp = np.exp(logits)
      softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
      softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
      expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
      expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
      loss_mean = np.average([expected_loss_0, expected_loss_1],
                             weights=weights)
      expected_loss = (loss_mean * np.sum(weights)) / 2  # batch size

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: initial_global_step,
          metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
          metric_keys.MetricKeys.ACCURACY: 2. / (1. + 2.),
      }

    self.assertAllClose(
        sorted_key_dict(expected_metrics),
        sorted_key_dict(eval_metrics),
        rtol=1e-3)

  def test_binary_classes_evaluation_weights(self):
    self._test_evaluation_weights(n_classes=2)

  def test_multi_classes_evaluation_weights(self):
    self._test_evaluation_weights(n_classes=4)


class BaselineClassifierPredictTest(tf.test.TestCase):
  """Tests the prediction dict produced by `BaselineClassifier.predict`."""

  def setUp(self):
    # Fresh model directory per test; the checkpoint written there is the
    # only state `predict` reads.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
    """Tests predict when all variables are one-dimensional.

    Args:
      n_classes: Number of label classes.
      label_vocabulary: Optional list of string class labels, forwarded to
        the classifier constructor.
      label_output_fn: Maps a class id to the byte string expected in the
        'classes'/'all_classes' entries of the prediction dict.
    """
    age = 1.

    # Binary classification uses a single logit; multi-class uses one logit
    # per class. Bias of 10 makes the expected argmax unambiguous.
    bias = [10.0] if n_classes == 2 else [10.0] * n_classes

    with tf.Graph().as_default():
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    est = _baseline_classifier_fn(
        label_vocabulary=label_vocabulary,
        n_classes=n_classes,
        model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        x={'age': np.array([[age]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = list(est.predict(input_fn=predict_input_fn))

    if n_classes == 2:
      # Binary head: expected probabilities are the softmax over the
      # two-class logits [0, logit].
      scalar_logits = bias[0]
      two_classes_logits = [0, scalar_logits]
      two_classes_logits_exp = np.exp(two_classes_logits)
      softmax = two_classes_logits_exp / two_classes_logits_exp.sum()

      expected_predictions = {
          'class_ids': [1],
          'all_class_ids': [0, 1],
          'classes': [label_output_fn(1)],
          'all_classes': [label_output_fn(0),
                          label_output_fn(1)],
          'logistic': [sigmoid(np.array(scalar_logits))],
          'logits': [scalar_logits],
          'probabilities': softmax,
      }
    else:
      # Multi-class head: the predicted class is the argmax of the bias
      # logits, and probabilities are the softmax over them.
      onedim_logits = np.array(bias)
      class_ids = onedim_logits.argmax()
      all_class_ids = list(range(len(onedim_logits)))
      logits_exp = np.exp(onedim_logits)
      softmax = logits_exp / logits_exp.sum()
      expected_predictions = {
          'class_ids': [class_ids],
          'all_class_ids': all_class_ids,
          'classes': [label_output_fn(class_ids)],
          'all_classes': [label_output_fn(i) for i in all_class_ids],
          'logits': onedim_logits,
          'probabilities': softmax,
      }

    self.assertEqual(1, len(predictions))
    # assertAllClose cannot handle byte type: compare the byte-string keys
    # with assertEqual/assertAllEqual, then pop them before the numeric
    # comparison below.
    self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
    expected_predictions.pop('classes')
    predictions[0].pop('classes')
    self.assertAllEqual(expected_predictions['all_classes'],
                        predictions[0]['all_classes'])
    expected_predictions.pop('all_classes')
    predictions[0].pop('all_classes')
    self.assertAllClose(
        sorted_key_dict(expected_predictions), sorted_key_dict(predictions[0]))

  def testBinaryClassesWithoutLabelVocabulary(self):
    n_classes = 2
    self._testPredictions(
        n_classes,
        label_vocabulary=None,
        label_output_fn=lambda x: ('%s' % x).encode())

  def testBinaryClassesWithLabelVocabulary(self):
    n_classes = 2
    self._testPredictions(
        n_classes,
        label_vocabulary=['class_vocab_{}'.format(i) for i in range(n_classes)],
        label_output_fn=lambda x: ('class_vocab_%s' % x).encode())

  def testMultiClassesWithoutLabelVocabulary(self):
    n_classes = 4
    self._testPredictions(
        n_classes,
        label_vocabulary=None,
        label_output_fn=lambda x: ('%s' % x).encode())

  def testMultiClassesWithLabelVocabulary(self):
    n_classes = 4
    self._testPredictions(
        n_classes,
        label_vocabulary=['class_vocab_{}'.format(i) for i in range(n_classes)],
        label_output_fn=lambda x: ('class_vocab_%s' % x).encode())


class BaselineClassifierIntegrationTest(tf.test.TestCase):
  """End-to-end train/evaluate/predict/export tests for BaselineClassifier."""

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
                          predict_input_fn, input_dimension, prediction_length):
    """Runs train -> evaluate -> predict -> export_saved_model once.

    Args:
      n_classes: Number of label classes for the classifier.
      train_input_fn: Input fn used for the 200 training steps.
      eval_input_fn: Input fn used for evaluation.
      predict_input_fn: Input fn used for prediction.
      input_dimension: Width of the 'x' feature column.
      prediction_length: Expected number of rows in the predictions.
    """
    feature_columns = [
        tf.feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    est = _baseline_classifier_fn(
        n_classes=n_classes, model_dir=self._model_dir)

    # TRAIN
    est.train(train_input_fn, steps=200)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))

    # PREDICT
    predictions = np.array(
        [x['classes'] for x in est.predict(predict_input_fn)])
    self.assertAllEqual((prediction_length, 1), predictions.shape)

    # EXPORT
    feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_saved_model(tempfile.mkdtemp(),
                                        serving_input_receiver_fn)
    self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))

  def _test_numpy_input_fn(self, n_classes):
    """Tests complete flow with numpy_input_fn."""
    input_dimension = 4
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
    data = data.reshape(batch_size, input_dimension)
    # All labels are 1, so the baseline has a constant target to fit.
    target = np.array([1] * batch_size)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=target,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=target,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)

    self._test_complete_flow(
        n_classes=n_classes,
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        prediction_length=prediction_length)

  def test_binary_classes_numpy_input_fn(self):
    self._test_numpy_input_fn(n_classes=2)

  def test_multi_classes_numpy_input_fn(self):
    self._test_numpy_input_fn(n_classes=4)

  def _test_pandas_input_fn(self, n_classes):
    """Tests complete flow with pandas_input_fn."""
    # Skip silently when pandas is not installed in the test environment.
    if not HAS_PANDAS:
      return

    # Pandas DataFrame naturally supports 1 dim data only.
    input_dimension = 1
    batch_size = 10
    data = np.array([1., 2., 3., 4.], dtype=np.float32)
    target = np.array([1, 0, 1, 0], dtype=np.int32)
    x = pd.DataFrame({'x': data})
    y = pd.Series(target)
    prediction_length = 4

    train_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
    eval_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, shuffle=False)
    predict_input_fn = pandas_io.pandas_input_fn(
        x=x, batch_size=batch_size, shuffle=False)

    self._test_complete_flow(
        n_classes=n_classes,
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        prediction_length=prediction_length)

  def test_binary_classes_pandas_input_fn(self):
    self._test_pandas_input_fn(n_classes=2)

  def test_multi_classes_pandas_input_fn(self):
    self._test_pandas_input_fn(n_classes=4)

  def _test_input_fn_from_parse_example(self, n_classes):
    """Tests complete flow with input_fn constructed from parse_example."""
    input_dimension = 2
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
    data = data.reshape(batch_size, input_dimension)
    target = np.array([1] * batch_size, dtype=np.int64)

    # Serialize each (features, label) pair as a tf.Example proto; the
    # input_fns below reparse these to exercise the parse_example path.
    serialized_examples = []
    for x, y in zip(data, target):
      example = example_pb2.Example(
          features=feature_pb2.Features(
              feature={
                  'x':
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(value=x)),
                  'y':
                      feature_pb2.Feature(
                          int64_list=feature_pb2.Int64List(value=[y])),
              }))
      serialized_examples.append(example.SerializeToString())

    feature_spec = {
        'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
        'y': tf.io.FixedLenFeature([1], tf.dtypes.int64),
    }

    def _train_input_fn():
      # Unbounded epochs: training stops via the `steps` argument.
      feature_map = tf.compat.v1.io.parse_example(serialized_examples,
                                                  feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _eval_input_fn():
      # Single epoch so evaluation terminates.
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _predict_input_fn():
      # Prediction takes no labels; 'y' is parsed but discarded.
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      features.pop('y')
      return features, None

    self._test_complete_flow(
        n_classes=n_classes,
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        input_dimension=input_dimension,
        prediction_length=prediction_length)

  def test_binary_classes_input_fn_from_parse_example(self):
    self._test_input_fn_from_parse_example(n_classes=2)

  def test_multi_classes_input_fn_from_parse_example(self):
    self._test_input_fn_from_parse_example(n_classes=4)


# Tests for Baseline logit_fn.


class BaselineLogitFnTest(tf.test.TestCase):
  """Checks the logits produced by the baseline model fn builder."""

  def test_basic_logit_correctness(self):
    """baseline_logit_fn simply returns the bias variable."""
    with tf.Graph().as_default():
      # Two examples in the batch, two output classes.
      batch_features = {'age': [[23.], [31.]]}
      bias_vars, logit_tensor = baseline._baseline_model_fn_builder_v2(
          features=batch_features, num_outputs=2)
      with tf.compat.v1.Session() as sess:
        sess.run([tf.compat.v1.initializers.global_variables()])
        # The bias initializes to zero, so every row of logits is zero.
        self.assertAllClose([[0., 0.], [0., 0.]], logit_tensor.eval())
        # After assigning a new bias, each example's logits mirror it.
        sess.run(bias_vars[0].assign([10., 5.]))
        self.assertAllClose([[10., 5.], [10., 5.]], logit_tensor.eval())


# Run every test case in this module when executed directly.
if __name__ == '__main__':
  tf.test.main()


================================================
FILE: tensorflow_estimator/python/estimator/canned/canned_estimator_ds_integration_test.py
================================================
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests canned estimators with distribution strategy."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import inspect
import tempfile

fro
Download .txt
gitextract_a04upojl/

├── .bazelrc
├── .gitignore
├── BUILD
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── WORKSPACE
├── tensorflow_estimator/
│   ├── BUILD
│   ├── estimator.bzl
│   ├── python/
│   │   └── estimator/
│   │       ├── BUILD
│   │       ├── api/
│   │       │   ├── BUILD
│   │       │   ├── api_gen.bzl
│   │       │   ├── extractor_wrapper.py
│   │       │   └── generator_wrapper.py
│   │       ├── canned/
│   │       │   ├── __init__.py
│   │       │   ├── baseline.py
│   │       │   ├── baseline_estimator_test.py
│   │       │   ├── baseline_test.py
│   │       │   ├── canned_estimator_ds_integration_test.py
│   │       │   ├── dnn.py
│   │       │   ├── dnn_estimator_test.py
│   │       │   ├── dnn_linear_combined.py
│   │       │   ├── dnn_linear_combined_estimator_test.py
│   │       │   ├── dnn_linear_combined_test.py
│   │       │   ├── dnn_test_fc_v2.py
│   │       │   ├── dnn_testing_utils.py
│   │       │   ├── head.py
│   │       │   ├── head_test.py
│   │       │   ├── kmeans.py
│   │       │   ├── kmeans_test.py
│   │       │   ├── linear.py
│   │       │   ├── linear_estimator_test.py
│   │       │   ├── linear_model_test.py
│   │       │   ├── linear_optimizer/
│   │       │   │   ├── BUILD
│   │       │   │   ├── __init__.py
│   │       │   │   ├── doc/
│   │       │   │   │   └── sdca.ipynb
│   │       │   │   └── python/
│   │       │   │       ├── sdca_test.py
│   │       │   │       └── utils/
│   │       │   │           ├── sdca_ops.py
│   │       │   │           ├── sdca_ops_test.py
│   │       │   │           ├── sharded_mutable_dense_hashtable.py
│   │       │   │           └── sharded_mutable_dense_hashtable_test.py
│   │       │   ├── linear_test.py
│   │       │   ├── linear_testing_utils.py
│   │       │   ├── metric_keys.py
│   │       │   ├── optimizers.py
│   │       │   ├── optimizers_test.py
│   │       │   ├── optimizers_test_v2.py
│   │       │   ├── parsing_utils.py
│   │       │   ├── parsing_utils_test.py
│   │       │   ├── prediction_keys.py
│   │       │   ├── rnn.py
│   │       │   ├── rnn_test.py
│   │       │   ├── saved_model_estimator.py
│   │       │   ├── saved_model_estimator_test.py
│   │       │   ├── testdata/
│   │       │   │   └── wire_vocabulary.txt
│   │       │   ├── timeseries/
│   │       │   │   ├── BUILD
│   │       │   │   ├── ar_model.py
│   │       │   │   ├── ar_model_test.py
│   │       │   │   ├── ar_model_training_test.py
│   │       │   │   ├── estimators.py
│   │       │   │   ├── estimators_test.py
│   │       │   │   ├── feature_keys.py
│   │       │   │   ├── head.py
│   │       │   │   ├── head_test.py
│   │       │   │   ├── math_utils.py
│   │       │   │   ├── math_utils_test.py
│   │       │   │   ├── model.py
│   │       │   │   ├── model_utils.py
│   │       │   │   ├── saved_model_utils.py
│   │       │   │   └── state_management.py
│   │       │   └── v1/
│   │       │       ├── __init__.py
│   │       │       ├── baseline_estimator_test_v1.py
│   │       │       ├── baseline_test_v1.py
│   │       │       ├── dnn_estimator_test_v1.py
│   │       │       ├── dnn_linear_combined_estimator_test_v1.py
│   │       │       ├── dnn_linear_combined_test_v1.py
│   │       │       ├── dnn_test_fc_v1_v1.py
│   │       │       ├── dnn_test_fc_v2_v1.py
│   │       │       ├── dnn_testing_utils_v1.py
│   │       │       ├── linear_estimator_test_v1.py
│   │       │       ├── linear_test_v1.py
│   │       │       └── linear_testing_utils_v1.py
│   │       ├── distribute_strategy_estimator_integration_test.py
│   │       ├── distribute_strategy_estimator_training_test.py
│   │       ├── early_stopping.py
│   │       ├── early_stopping_test.py
│   │       ├── estimator.py
│   │       ├── estimator_export.py
│   │       ├── estimator_export_test.py
│   │       ├── estimator_lib.py
│   │       ├── estimator_test.py
│   │       ├── export/
│   │       │   ├── __init__.py
│   │       │   ├── export.py
│   │       │   ├── export_lib.py
│   │       │   ├── export_output.py
│   │       │   ├── export_test.py
│   │       │   ├── function.py
│   │       │   └── function_test.py
│   │       ├── exporter.py
│   │       ├── exporter_test.py
│   │       ├── extenders.py
│   │       ├── extenders_test.py
│   │       ├── gc.py
│   │       ├── gc_test.py
│   │       ├── head/
│   │       │   ├── __init__.py
│   │       │   ├── base_head.py
│   │       │   ├── base_head_test.py
│   │       │   ├── binary_class_head.py
│   │       │   ├── binary_class_head_test.py
│   │       │   ├── head_utils.py
│   │       │   ├── multi_class_head.py
│   │       │   ├── multi_class_head_test.py
│   │       │   ├── multi_head.py
│   │       │   ├── multi_head_test.py
│   │       │   ├── multi_label_head.py
│   │       │   ├── multi_label_head_test.py
│   │       │   ├── regression_head.py
│   │       │   ├── regression_head_test.py
│   │       │   ├── sequential_head.py
│   │       │   └── sequential_head_test.py
│   │       ├── hooks/
│   │       │   ├── __init__.py
│   │       │   ├── basic_session_run_hooks.py
│   │       │   ├── basic_session_run_hooks_test.py
│   │       │   ├── fake_summary_writer.py
│   │       │   ├── hooks.py
│   │       │   ├── hooks_test.py
│   │       │   └── session_run_hook.py
│   │       ├── inputs/
│   │       │   ├── __init__.py
│   │       │   ├── inputs.py
│   │       │   ├── numpy_io.py
│   │       │   ├── numpy_io_test.py
│   │       │   ├── pandas_io.py
│   │       │   ├── pandas_io_test.py
│   │       │   └── queues/
│   │       │       ├── __init__.py
│   │       │       ├── feeding_functions.py
│   │       │       ├── feeding_functions_test.py
│   │       │       ├── feeding_queue_runner.py
│   │       │       └── feeding_queue_runner_test.py
│   │       ├── keras_distribute_strategy_test.py
│   │       ├── keras_lib.py
│   │       ├── keras_premade_model_test.py
│   │       ├── keras_test.py
│   │       ├── mode_keys.py
│   │       ├── model_fn.py
│   │       ├── model_fn_test.py
│   │       ├── object_checkpointing_test.py
│   │       ├── run_config.py
│   │       ├── run_config_test.py
│   │       ├── tf_estimator_doctest.py
│   │       ├── tools/
│   │       │   ├── __init__.py
│   │       │   ├── analytics.py
│   │       │   ├── checkpoint_converter.py
│   │       │   └── checkpoint_converter_test.py
│   │       ├── tpu/
│   │       │   ├── BUILD
│   │       │   ├── __init__.py
│   │       │   ├── _tpu_estimator_embedding.py
│   │       │   ├── autotuning_iterations_per_loop_test.py
│   │       │   ├── error_handling.py
│   │       │   ├── error_handling_test.py
│   │       │   ├── iteration_count_estimator.py
│   │       │   ├── spatial_partitioning_api.md
│   │       │   ├── tpu_config.py
│   │       │   ├── tpu_config_test.py
│   │       │   ├── tpu_context.py
│   │       │   ├── tpu_enqueue_sequence_test.py
│   │       │   ├── tpu_estimator.py
│   │       │   ├── tpu_estimator_embedding_test.py
│   │       │   ├── tpu_estimator_evaluation_test.py
│   │       │   ├── tpu_estimator_export_test.py
│   │       │   ├── tpu_estimator_gradients_test.py
│   │       │   ├── tpu_estimator_input_v2_test.py
│   │       │   ├── tpu_estimator_integration_test.py
│   │       │   ├── tpu_estimator_model_parallelism_test.py
│   │       │   ├── tpu_estimator_signals_test.py
│   │       │   ├── tpu_estimator_test.py
│   │       │   └── util.py
│   │       ├── training.py
│   │       ├── training_test.py
│   │       ├── util.py
│   │       └── util_test.py
│   └── tools/
│       └── pip_package/
│           ├── BUILD
│           ├── build_pip_package.sh
│           ├── create_pip_helper.py
│           └── setup.py
└── third_party/
    └── py/
        ├── BUILD
        ├── BUILD.tpl
        └── python_configure.bzl
Download .txt
Showing preview only (436K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (4635 symbols across 144 files)

FILE: tensorflow_estimator/python/estimator/canned/baseline.py
  function _get_weight_column_key (line 70) | def _get_weight_column_key(weight_column):
  function _get_weight_column_key_v2 (line 81) | def _get_weight_column_key_v2(weight_column):
  function _get_batch_size_and_size_checks (line 92) | def _get_batch_size_and_size_checks(features, weight_column_key):
  function _baseline_logit_fn_builder (line 114) | def _baseline_logit_fn_builder(num_outputs, weight_column=None):
  function _baseline_model_fn (line 156) | def _baseline_model_fn(features,
  function _baseline_model_fn_builder_v2 (line 204) | def _baseline_model_fn_builder_v2(features, num_outputs, weight_column=N...
  function _baseline_model_fn_v2 (line 229) | def _baseline_model_fn_v2(
  class BaselineClassifierV2 (line 293) | class BaselineClassifierV2(estimator.EstimatorV2):
    method __init__ (line 346) | def __init__(self,
  class BaselineClassifier (line 404) | class BaselineClassifier(estimator.Estimator):
    method __init__ (line 407) | def __init__(self,
  class BaselineEstimatorV2 (line 433) | class BaselineEstimatorV2(estimator.EstimatorV2):
    method __init__ (line 483) | def __init__(self, head, model_dir=None, optimizer='Ftrl', config=None):
  class BaselineEstimator (line 512) | class BaselineEstimator(estimator.Estimator):
    method __init__ (line 515) | def __init__(self, head, model_dir=None, optimizer='Ftrl', config=None):
  class BaselineRegressorV2 (line 531) | class BaselineRegressorV2(estimator.EstimatorV2):
    method __init__ (line 579) | def __init__(self,
  class BaselineRegressor (line 627) | class BaselineRegressor(estimator.Estimator):
    method __init__ (line 630) | def __init__(self,

FILE: tensorflow_estimator/python/estimator/canned/baseline_estimator_test.py
  function assert_close (line 40) | def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  function save_variables_to_ckpt (line 55) | def save_variables_to_ckpt(model_dir):
  function _baseline_estimator_fn (line 62) | def _baseline_estimator_fn(weight_column=None, label_dimension=1, **kwar...
  function mock_optimizer_v2 (line 71) | def mock_optimizer_v2(testcase, expected_loss=None):
  class BaselineEstimatorEvaluationTest (line 114) | class BaselineEstimatorEvaluationTest(tf.test.TestCase):
    method setUp (line 116) | def setUp(self):
    method tearDown (line 119) | def tearDown(self):
    method test_evaluation_batch (line 124) | def test_evaluation_batch(self):
    method test_evaluation_weights (line 149) | def test_evaluation_weights(self):
    method test_evaluation_for_multi_dimensions (line 179) | def test_evaluation_for_multi_dimensions(self):
  class BaselineEstimatorPredictTest (line 208) | class BaselineEstimatorPredictTest(tf.test.TestCase):
    method setUp (line 210) | def setUp(self):
    method tearDown (line 213) | def tearDown(self):
    method test_1d (line 218) | def test_1d(self):
    method testMultiDim (line 238) | def testMultiDim(self):
  class BaselineEstimatorIntegrationTest (line 264) | class BaselineEstimatorIntegrationTest(tf.test.TestCase):
    method setUp (line 266) | def setUp(self):
    method tearDown (line 269) | def tearDown(self):
    method _test_complete_flow (line 274) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 304) | def test_numpy_input_fn(self):
  class BaselineEstimatorTrainingTest (line 341) | class BaselineEstimatorTrainingTest(tf.test.TestCase):
    method setUp (line 343) | def setUp(self):
    method tearDown (line 346) | def tearDown(self):
    method _assert_checkpoint (line 351) | def _assert_checkpoint(self,
    method testFromScratch (line 371) | def testFromScratch(self):
    method testFromCheckpoint (line 390) | def testFromCheckpoint(self):

FILE: tensorflow_estimator/python/estimator/canned/baseline_test.py
  function assert_close (line 56) | def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  function save_variables_to_ckpt (line 71) | def save_variables_to_ckpt(model_dir):
  function queue_parsed_features (line 78) | def queue_parsed_features(feature_map):
  function sorted_key_dict (line 93) | def sorted_key_dict(unsorted_dict):
  function sigmoid (line 97) | def sigmoid(x):
  function _baseline_regressor_fn (line 101) | def _baseline_regressor_fn(*args, **kwargs):
  function _baseline_classifier_fn (line 105) | def _baseline_classifier_fn(*args, **kwargs):
  function mock_optimizer_v2 (line 109) | def mock_optimizer_v2(testcase, expected_loss=None):
  class BaselineRegressorEvaluationTest (line 157) | class BaselineRegressorEvaluationTest(tf.test.TestCase):
    method setUp (line 159) | def setUp(self):
    method tearDown (line 162) | def tearDown(self):
    method test_evaluation_for_simple_data (line 167) | def test_evaluation_for_simple_data(self):
    method test_evaluation_batch (line 190) | def test_evaluation_batch(self):
    method test_evaluation_weights (line 217) | def test_evaluation_weights(self):
    method test_evaluation_for_multi_dimensions (line 247) | def test_evaluation_for_multi_dimensions(self):
  class BaselineRegressorPredictTest (line 276) | class BaselineRegressorPredictTest(tf.test.TestCase):
    method setUp (line 278) | def setUp(self):
    method tearDown (line 281) | def tearDown(self):
    method test_1d (line 286) | def test_1d(self):
    method testMultiDim (line 306) | def testMultiDim(self):
  class BaselineRegressorIntegrationTest (line 332) | class BaselineRegressorIntegrationTest(tf.test.TestCase):
    method setUp (line 334) | def setUp(self):
    method tearDown (line 337) | def tearDown(self):
    method _test_complete_flow (line 342) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 372) | def test_numpy_input_fn(self):
    method test_pandas_input_fn (line 408) | def test_pandas_input_fn(self):
    method test_input_fn_from_parse_example (line 437) | def test_input_fn_from_parse_example(self):
  class BaselineRegressorTrainingTest (line 498) | class BaselineRegressorTrainingTest(tf.test.TestCase):
    method setUp (line 500) | def setUp(self):
    method tearDown (line 503) | def tearDown(self):
    method _assert_checkpoint (line 508) | def _assert_checkpoint(self,
    method testFromScratchWithDefaultOptimizer (line 528) | def testFromScratchWithDefaultOptimizer(self):
    method testTrainWithOneDimLabel (line 542) | def testTrainWithOneDimLabel(self):
    method testTrainWithOneDimWeight (line 559) | def testTrainWithOneDimWeight(self):
    method testFromScratch (line 582) | def testFromScratch(self):
    method testFromCheckpoint (line 603) | def testFromCheckpoint(self):
    method testFromCheckpointMultiBatch (line 635) | def testFromCheckpointMultiBatch(self):
  class BaselineClassifierTrainingTest (line 675) | class BaselineClassifierTrainingTest(tf.test.TestCase):
    method setUp (line 677) | def setUp(self):
    method tearDown (line 680) | def tearDown(self):
    method _assert_checkpoint (line 684) | def _assert_checkpoint(self,
    method _testFromScratchWithDefaultOptimizer (line 706) | def _testFromScratchWithDefaultOptimizer(self, n_classes):
    method testBinaryClassesFromScratchWithDefaultOptimizer (line 720) | def testBinaryClassesFromScratchWithDefaultOptimizer(self):
    method testMultiClassesFromScratchWithDefaultOptimizer (line 723) | def testMultiClassesFromScratchWithDefaultOptimizer(self):
    method _testTrainWithTwoDimsLabel (line 726) | def _testTrainWithTwoDimsLabel(self, n_classes):
    method testBinaryClassesTrainWithTwoDimsLabel (line 745) | def testBinaryClassesTrainWithTwoDimsLabel(self):
    method testMultiClassesTrainWithTwoDimsLabel (line 748) | def testMultiClassesTrainWithTwoDimsLabel(self):
    method _testTrainWithOneDimLabel (line 751) | def _testTrainWithOneDimLabel(self, n_classes):
    method testBinaryClassesTrainWithOneDimLabel (line 768) | def testBinaryClassesTrainWithOneDimLabel(self):
    method testMultiClassesTrainWithOneDimLabel (line 771) | def testMultiClassesTrainWithOneDimLabel(self):
    method _testTrainWithTwoDimsWeight (line 774) | def _testTrainWithTwoDimsWeight(self, n_classes):
    method testBinaryClassesTrainWithTwoDimsWeight (line 796) | def testBinaryClassesTrainWithTwoDimsWeight(self):
    method testMultiClassesTrainWithTwoDimsWeight (line 799) | def testMultiClassesTrainWithTwoDimsWeight(self):
    method _testTrainWithOneDimWeight (line 802) | def _testTrainWithOneDimWeight(self, n_classes):
    method testBinaryClassesTrainWithOneDimWeight (line 822) | def testBinaryClassesTrainWithOneDimWeight(self):
    method testMultiClassesTrainWithOneDimWeight (line 825) | def testMultiClassesTrainWithOneDimWeight(self):
    method _testFromScratch (line 828) | def _testFromScratch(self, n_classes):
    method testBinaryClassesFromScratch (line 862) | def testBinaryClassesFromScratch(self):
    method testMultiClassesFromScratch (line 865) | def testMultiClassesFromScratch(self):
    method _testFromCheckpoint (line 868) | def _testFromCheckpoint(self, n_classes):
    method testBinaryClassesFromCheckpoint (line 918) | def testBinaryClassesFromCheckpoint(self):
    method testMultiClassesFromCheckpoint (line 921) | def testMultiClassesFromCheckpoint(self):
    method _testFromCheckpointFloatLabels (line 924) | def _testFromCheckpointFloatLabels(self, n_classes):
    method testBinaryClassesFromCheckpointFloatLabels (line 960) | def testBinaryClassesFromCheckpointFloatLabels(self):
    method testMultiClassesFromCheckpointFloatLabels (line 963) | def testMultiClassesFromCheckpointFloatLabels(self):
    method _testFromCheckpointMultiBatch (line 966) | def _testFromCheckpointMultiBatch(self, n_classes):
    method testBinaryClassesFromCheckpointMultiBatch (line 1024) | def testBinaryClassesFromCheckpointMultiBatch(self):
    method testMultiClassesFromCheckpointMultiBatch (line 1027) | def testMultiClassesFromCheckpointMultiBatch(self):
  class BaselineClassifierEvaluationTest (line 1031) | class BaselineClassifierEvaluationTest(tf.test.TestCase):
    method setUp (line 1033) | def setUp(self):
    method tearDown (line 1036) | def tearDown(self):
    method _test_evaluation_for_simple_data (line 1040) | def _test_evaluation_for_simple_data(self, n_classes):
    method test_binary_classes_evaluation_for_simple_data (line 1094) | def test_binary_classes_evaluation_for_simple_data(self):
    method test_multi_classes_evaluation_for_simple_data (line 1097) | def test_multi_classes_evaluation_for_simple_data(self):
    method _test_evaluation_batch (line 1100) | def _test_evaluation_batch(self, n_classes):
    method test_binary_classes_evaluation_batch (line 1164) | def test_binary_classes_evaluation_batch(self):
    method test_multi_classes_evaluation_batch (line 1167) | def test_multi_classes_evaluation_batch(self):
    method _test_evaluation_weights (line 1170) | def _test_evaluation_weights(self, n_classes):
    method test_binary_classes_evaluation_weights (line 1249) | def test_binary_classes_evaluation_weights(self):
    method test_multi_classes_evaluation_weights (line 1252) | def test_multi_classes_evaluation_weights(self):
  class BaselineClassifierPredictTest (line 1256) | class BaselineClassifierPredictTest(tf.test.TestCase):
    method setUp (line 1258) | def setUp(self):
    method tearDown (line 1261) | def tearDown(self):
    method _testPredictions (line 1265) | def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
    method testBinaryClassesWithoutLabelVocabulary (line 1332) | def testBinaryClassesWithoutLabelVocabulary(self):
    method testBinaryClassesWithLabelVocabulary (line 1339) | def testBinaryClassesWithLabelVocabulary(self):
    method testMultiClassesWithoutLabelVocabulary (line 1346) | def testMultiClassesWithoutLabelVocabulary(self):
    method testMultiClassesWithLabelVocabulary (line 1353) | def testMultiClassesWithLabelVocabulary(self):
  class BaselineClassifierIntegrationTest (line 1361) | class BaselineClassifierIntegrationTest(tf.test.TestCase):
    method setUp (line 1363) | def setUp(self):
    method tearDown (line 1366) | def tearDown(self):
    method _test_complete_flow (line 1370) | def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
    method _test_numpy_input_fn (line 1400) | def _test_numpy_input_fn(self, n_classes):
    method test_binary_classes_numpy_input_fn (line 1436) | def test_binary_classes_numpy_input_fn(self):
    method test_multi_classes_numpy_input_fn (line 1439) | def test_multi_classes_numpy_input_fn(self):
    method _test_pandas_input_fn (line 1442) | def _test_pandas_input_fn(self, n_classes):
    method test_binary_classes_pandas_input_fn (line 1471) | def test_binary_classes_pandas_input_fn(self):
    method test_multi_classes_pandas_input_fn (line 1474) | def test_multi_classes_pandas_input_fn(self):
    method _test_input_fn_from_parse_example (line 1477) | def _test_input_fn_from_parse_example(self, n_classes):
    method test_binary_classes_input_fn_from_parse_example (line 1536) | def test_binary_classes_input_fn_from_parse_example(self):
    method test_multi_classes_input_fn_from_parse_example (line 1539) | def test_multi_classes_input_fn_from_parse_example(self):
  class BaselineLogitFnTest (line 1546) | class BaselineLogitFnTest(tf.test.TestCase):
    method test_basic_logit_correctness (line 1548) | def test_basic_logit_correctness(self):

FILE: tensorflow_estimator/python/estimator/canned/canned_estimator_ds_integration_test.py
  class CannedEstimatorDistributionStrategyTest (line 35) | class CannedEstimatorDistributionStrategyTest(tf.test.TestCase,
    method setUp (line 38) | def setUp(self):
    method dataset_input_fn (line 45) | def dataset_input_fn(self, x, y, batch_size, shuffle):
    method test_canned_estimator (line 69) | def test_canned_estimator(self, distribution, estimator_cls):

FILE: tensorflow_estimator/python/estimator/canned/dnn.py
  function _add_hidden_layer_summary (line 41) | def _add_hidden_layer_summary(value, tag):
  function dnn_logit_fn_builder (line 48) | def dnn_logit_fn_builder(units, hidden_units, feature_columns, activatio...
  function dnn_logit_fn_builder_v2 (line 101) | def dnn_logit_fn_builder_v2(units, hidden_units, feature_columns, activa...
  function _get_previous_name_scope (line 152) | def _get_previous_name_scope():
  class _DNNModel (line 157) | class _DNNModel(tf_keras.Model):
    method __init__ (line 160) | def __init__(self,
    method call (line 232) | def call(self, features, mode):
    method _add_layer (line 259) | def _add_layer(self, layer, layer_name):
  function _name_from_scope_name (line 266) | def _name_from_scope_name(name):
  class _DNNModelV2 (line 278) | class _DNNModelV2(tf_keras.Model):
    method __init__ (line 281) | def __init__(self,
    method call (line 349) | def call(self, features, mode):
  function _validate_features (line 368) | def _validate_features(features):
  function _get_dnn_estimator_spec (line 374) | def _get_dnn_estimator_spec(use_tpu, head, features, labels, mode, logits,
  function _dnn_model_fn (line 393) | def _dnn_model_fn(features,
  function _dnn_model_fn_builder_v2 (line 466) | def _dnn_model_fn_builder_v2(units, hidden_units, feature_columns,
  function dnn_model_fn_v2 (line 513) | def dnn_model_fn_v2(features,
  class DNNClassifierV2 (line 596) | class DNNClassifierV2(estimator.EstimatorV2):
    method __init__ (line 682) | def __init__(
  class DNNClassifier (line 773) | class DNNClassifier(estimator.Estimator):
    method __init__ (line 776) | def __init__(
  class DNNEstimatorV2 (line 821) | class DNNEstimatorV2(estimator.EstimatorV2):
    method __init__ (line 911) | def __init__(self,
  class DNNEstimator (line 976) | class DNNEstimator(estimator.Estimator):
    method __init__ (line 979) | def __init__(self,
  class DNNRegressorV2 (line 1017) | class DNNRegressorV2(estimator.EstimatorV2):
    method __init__ (line 1103) | def __init__(
  class DNNRegressor (line 1186) | class DNNRegressor(estimator.Estimator):
    method __init__ (line 1189) | def __init__(

FILE: tensorflow_estimator/python/estimator/canned/dnn_estimator_test.py
  function _dnn_estimator_fn (line 37) | def _dnn_estimator_fn(weight_column=None, label_dimension=1, **kwargs):
  function _dnn_estimator_classifier_fn (line 48) | def _dnn_estimator_classifier_fn(n_classes=3, **kwargs):
  class DNNLogitFnBuilderTest (line 53) | class DNNLogitFnBuilderTest(tf.test.TestCase):
    method testLongInPy2 (line 55) | def testLongInPy2(self):
  class DNNEstimatorEvaluateTest (line 62) | class DNNEstimatorEvaluateTest(dnn_testing_utils.BaseDNNRegressorEvaluat...
    method __init__ (line 65) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNEstimatorPredictTest (line 71) | class DNNEstimatorPredictTest(dnn_testing_utils.BaseDNNRegressorPredictT...
    method __init__ (line 74) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNEstimatorTrainTest (line 80) | class DNNEstimatorTrainTest(dnn_testing_utils.BaseDNNRegressorTrainTest,
    method __init__ (line 83) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNEstimatorWarmStartingTest (line 89) | class DNNEstimatorWarmStartingTest(dnn_testing_utils.BaseDNNWarmStarting...
    method __init__ (line 92) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNEstimatorIntegrationTest (line 98) | class DNNEstimatorIntegrationTest(tf.test.TestCase):
    method setUp (line 100) | def setUp(self):
    method tearDown (line 103) | def tearDown(self):
    method _test_complete_flow (line 108) | def _test_complete_flow(self,
    method _create_input_fn (line 150) | def _create_input_fn(self, label_dimension, batch_size):
    method test_numpy_input_fn (line 168) | def test_numpy_input_fn(self):
    method test_numpy_input_fn_with_optimizer_instance (line 183) | def test_numpy_input_fn_with_optimizer_instance(self):

FILE: tensorflow_estimator/python/estimator/canned/dnn_linear_combined.py
  function _check_no_sync_replicas_optimizer (line 41) | def _check_no_sync_replicas_optimizer(optimizer):
  function _linear_learning_rate (line 50) | def _linear_learning_rate(num_linear_feature_columns):
  function _add_layer_summary (line 67) | def _add_layer_summary(value, tag):
  function _validate_feature_columns (line 73) | def _validate_feature_columns(linear_feature_columns, dnn_feature_columns):
  function _dnn_linear_combined_model_fn_v2 (line 84) | def _dnn_linear_combined_model_fn_v2(
  function _dnn_linear_combined_model_fn (line 235) | def _dnn_linear_combined_model_fn(features,
  class DNNLinearCombinedClassifierV2 (line 393) | class DNNLinearCombinedClassifierV2(estimator.EstimatorV2):
    method __init__ (line 477) | def __init__(self,
  class DNNLinearCombinedClassifier (line 594) | class DNNLinearCombinedClassifier(estimator.Estimator):
    method __init__ (line 598) | def __init__(self,
  function _init_dnn_linear_combined_estimator (line 651) | def _init_dnn_linear_combined_estimator(head, linear_feature_columns,
  class DNNLinearCombinedEstimatorV2 (line 687) | class DNNLinearCombinedEstimatorV2(estimator.EstimatorV2):
    method __init__ (line 770) | def __init__(self,
  class DNNLinearCombinedEstimator (line 852) | class DNNLinearCombinedEstimator(estimator.Estimator):
    method __init__ (line 855) | def __init__(self,
  class DNNLinearCombinedRegressorV2 (line 899) | class DNNLinearCombinedRegressorV2(estimator.EstimatorV2):
    method __init__ (line 983) | def __init__(self,
  class DNNLinearCombinedRegressor (line 1091) | class DNNLinearCombinedRegressor(estimator.Estimator):
    method __init__ (line 1095) | def __init__(self,

FILE: tensorflow_estimator/python/estimator/canned/dnn_linear_combined_estimator_test.py
  function _dnn_only_estimator_fn (line 37) | def _dnn_only_estimator_fn(hidden_units,
  class DNNOnlyEstimatorEvaluateTest (line 61) | class DNNOnlyEstimatorEvaluateTest(
    method __init__ (line 64) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyEstimatorPredictTest (line 70) | class DNNOnlyEstimatorPredictTest(dnn_testing_utils.BaseDNNRegressorPred...
    method __init__ (line 73) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyEstimatorTrainTest (line 79) | class DNNOnlyEstimatorTrainTest(dnn_testing_utils.BaseDNNRegressorTrainT...
    method __init__ (line 82) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  function _linear_only_estimator_fn (line 88) | def _linear_only_estimator_fn(feature_columns,
  class LinearOnlyEstimatorEvaluateTest (line 108) | class LinearOnlyEstimatorEvaluateTest(
    method __init__ (line 111) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyEstimatorPredictTest (line 117) | class LinearOnlyEstimatorPredictTest(
    method __init__ (line 120) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyEstimatorTrainTest (line 126) | class LinearOnlyEstimatorTrainTest(
    method __init__ (line 129) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNLinearCombinedEstimatorIntegrationTest (line 135) | class DNNLinearCombinedEstimatorIntegrationTest(tf.test.TestCase):
    method setUp (line 137) | def setUp(self):
    method tearDown (line 140) | def tearDown(self):
    method _test_complete_flow (line 145) | def _test_complete_flow(self,
    method _create_input_fn (line 194) | def _create_input_fn(self, label_dimension, batch_size):
    method test_numpy_input_fn (line 212) | def test_numpy_input_fn(self):
    method test_numpy_input_fn_with_optimizer_instance (line 227) | def test_numpy_input_fn_with_optimizer_instance(self):

FILE: tensorflow_estimator/python/estimator/canned/dnn_linear_combined_test.py
  class DNNOnlyModelFnTest (line 53) | class DNNOnlyModelFnTest(dnn_testing_utils.BaseDNNModelFnTest,
    method __init__ (line 56) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    method _dnn_only_model_fn (line 60) | def _dnn_only_model_fn(self,
  function _linear_regressor_fn (line 86) | def _linear_regressor_fn(feature_columns,
  class LinearOnlyRegressorEvaluationV2Test (line 103) | class LinearOnlyRegressorEvaluationV2Test(
    method __init__ (line 106) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyRegressorPredictV2Test (line 112) | class LinearOnlyRegressorPredictV2Test(
    method __init__ (line 115) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyRegressorIntegrationV2Test (line 121) | class LinearOnlyRegressorIntegrationV2Test(
    method __init__ (line 124) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyRegressorTrainingV2Test (line 130) | class LinearOnlyRegressorTrainingV2Test(
    method __init__ (line 133) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  function _linear_classifier_fn (line 139) | def _linear_classifier_fn(feature_columns,
  class LinearOnlyClassifierTrainingV2Test (line 158) | class LinearOnlyClassifierTrainingV2Test(
    method __init__ (line 161) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyClassifierClassesEvaluationV2Test (line 169) | class LinearOnlyClassifierClassesEvaluationV2Test(
    method __init__ (line 172) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyClassifierPredictV2Test (line 180) | class LinearOnlyClassifierPredictV2Test(
    method __init__ (line 183) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyClassifierIntegrationV2Test (line 191) | class LinearOnlyClassifierIntegrationV2Test(
    method __init__ (line 194) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNLinearCombinedRegressorIntegrationTest (line 203) | class DNNLinearCombinedRegressorIntegrationTest(tf.test.TestCase):
    method setUp (line 205) | def setUp(self):
    method tearDown (line 208) | def tearDown(self):
    method _test_complete_flow_helper (line 213) | def _test_complete_flow_helper(self, linear_feature_columns,
    method _test_complete_flow (line 248) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method _test_complete_flow_dnn_fc_v1 (line 264) | def _test_complete_flow_dnn_fc_v1(self, train_input_fn, eval_input_fn,
    method _test_complete_flow_linear_fc_v1 (line 282) | def _test_complete_flow_linear_fc_v1(self, train_input_fn, eval_input_fn,
    method _test_numpy_input_fn_helper (line 300) | def _test_numpy_input_fn_helper(self, fc_impl, fn_to_run):
    method test_numpy_input_fn_basic (line 327) | def test_numpy_input_fn_basic(self, fc_impl):
    method test_numpy_input_fn_dnn_fc_v1 (line 330) | def test_numpy_input_fn_dnn_fc_v1(self, fc_impl):
    method test_numpy_input_fn_linear_fc_v1 (line 336) | def test_numpy_input_fn_linear_fc_v1(self, fc_impl):
    method _test_pandas_input_fn_helper (line 342) | def _test_pandas_input_fn_helper(self, fc_impl, fn_to_run):
    method test_pandas_input_fn_basic (line 367) | def test_pandas_input_fn_basic(self, fc_impl):
    method test_pandas_input_fn_dnn_fc_v1 (line 370) | def test_pandas_input_fn_dnn_fc_v1(self, fc_impl):
    method test_pandas_input_fn_linear_fc_v1 (line 376) | def test_pandas_input_fn_linear_fc_v1(self, fc_impl):
    method _test_input_fn_from_parse_example_helper (line 382) | def _test_input_fn_from_parse_example_helper(self, fc_impl, fn_to_run):
    method test_input_fn_from_parse_example_basic (line 440) | def test_input_fn_from_parse_example_basic(self, fc_impl):
    method test_input_fn_from_parse_example_dnn_fc_v1 (line 444) | def test_input_fn_from_parse_example_dnn_fc_v1(self, fc_impl):
    method test_input_fn_from_parse_example_linear_fc_v1 (line 450) | def test_input_fn_from_parse_example_linear_fc_v1(self, fc_impl):
  function _dnn_classifier_fn (line 458) | def _dnn_classifier_fn(hidden_units,
  class DNNOnlyClassifierEvaluateV2Test (line 477) | class DNNOnlyClassifierEvaluateV2Test(
    method __init__ (line 480) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyClassifierPredictV2Test (line 486) | class DNNOnlyClassifierPredictV2Test(
    method __init__ (line 489) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyClassifierTrainV2Test (line 495) | class DNNOnlyClassifierTrainV2Test(dnn_testing_utils.BaseDNNClassifierTr...
    method __init__ (line 498) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  function _dnn_regressor_fn (line 505) | def _dnn_regressor_fn(hidden_units,
  class DNNOnlyRegressorEvaluateV2Test (line 522) | class DNNOnlyRegressorEvaluateV2Test(
    method __init__ (line 525) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyRegressorPredictV2Test (line 531) | class DNNOnlyRegressorPredictV2Test(
    method __init__ (line 534) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyRegressorTrainV2Test (line 540) | class DNNOnlyRegressorTrainV2Test(dnn_testing_utils.BaseDNNRegressorTrai...
    method __init__ (line 543) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNLinearCombinedClassifierIntegrationTest (line 550) | class DNNLinearCombinedClassifierIntegrationTest(tf.test.TestCase):
    method setUp (line 552) | def setUp(self):
    method tearDown (line 555) | def tearDown(self):
    method _as_label (line 560) | def _as_label(self, data_in_float):
    method _test_complete_flow (line 563) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 603) | def test_numpy_input_fn(self, fc_impl):
    method test_pandas_input_fn (line 633) | def test_pandas_input_fn(self, fc_impl):
    method test_input_fn_from_parse_example (line 659) | def test_input_fn_from_parse_example(self, fc_impl):
  class DNNLinearCombinedTests (line 722) | class DNNLinearCombinedTests(tf.test.TestCase):
    method setUp (line 724) | def setUp(self):
    method tearDown (line 727) | def tearDown(self):
    method test_train_op_calls_both_dnn_and_linear (line 731) | def test_train_op_calls_both_dnn_and_linear(self, fc_impl):
    method test_dnn_and_linear_logits_are_added (line 757) | def test_dnn_and_linear_logits_are_added(self, fc_impl):
  class DNNLinearCombinedWarmStartingTest (line 785) | class DNNLinearCombinedWarmStartingTest(tf.test.TestCase):
    method setUp (line 787) | def setUp(self):
    method tearDown (line 801) | def tearDown(self):
    method test_classifier_basic_warm_starting (line 806) | def test_classifier_basic_warm_starting(self, fc_impl):
    method test_regressor_basic_warm_starting (line 856) | def test_regressor_basic_warm_starting(self, fc_impl):
    method test_warm_starting_selective_variables (line 904) | def test_warm_starting_selective_variables(self, fc_impl):

FILE: tensorflow_estimator/python/estimator/canned/dnn_test_fc_v2.py
  function _dnn_classifier_fn (line 52) | def _dnn_classifier_fn(*args, **kwargs):
  class DNNModelFnV2Test (line 56) | class DNNModelFnV2Test(dnn_testing_utils.BaseDNNModelFnTest, tf.test.Tes...
    method __init__ (line 58) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNLogitFnV2Test (line 64) | class DNNLogitFnV2Test(dnn_testing_utils.BaseDNNLogitFnTest, tf.test.Tes...
    method __init__ (line 66) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNWarmStartingV2Test (line 72) | class DNNWarmStartingV2Test(dnn_testing_utils.BaseDNNWarmStartingTest,
    method __init__ (line 75) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNClassifierEvaluateV2Test (line 81) | class DNNClassifierEvaluateV2Test(
    method __init__ (line 84) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNClassifierPredictV2Test (line 90) | class DNNClassifierPredictV2Test(dnn_testing_utils.BaseDNNClassifierPred...
    method __init__ (line 93) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNClassifierTrainV2Test (line 99) | class DNNClassifierTrainV2Test(dnn_testing_utils.BaseDNNClassifierTrainT...
    method __init__ (line 102) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  function _dnn_regressor_fn (line 108) | def _dnn_regressor_fn(*args, **kwargs):
  class DNNRegressorEvaluateV2Test (line 112) | class DNNRegressorEvaluateV2Test(dnn_testing_utils.BaseDNNRegressorEvalu...
    method __init__ (line 115) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNRegressorPredictV2Test (line 121) | class DNNRegressorPredictV2Test(dnn_testing_utils.BaseDNNRegressorPredic...
    method __init__ (line 124) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNRegressorTrainV2Test (line 130) | class DNNRegressorTrainV2Test(dnn_testing_utils.BaseDNNRegressorTrainTest,
    method __init__ (line 133) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  function _queue_parsed_features (line 139) | def _queue_parsed_features(feature_map):
  class DNNRegressorIntegrationTest (line 154) | class DNNRegressorIntegrationTest(tf.test.TestCase, parameterized.TestCa...
    method setUp (line 156) | def setUp(self):
    method tearDown (line 159) | def tearDown(self):
    method _test_complete_flow (line 164) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 200) | def test_numpy_input_fn(self):
    method test_pandas_input_fn (line 226) | def test_pandas_input_fn(self):
    method test_input_fn_from_parse_example (line 250) | def test_input_fn_from_parse_example(self):
  class DNNClassifierIntegrationTest (line 308) | class DNNClassifierIntegrationTest(tf.test.TestCase):
    method setUp (line 310) | def setUp(self):
    method tearDown (line 313) | def tearDown(self):
    method _as_label (line 318) | def _as_label(self, data_in_float):
    method _test_complete_flow (line 321) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 357) | def test_numpy_input_fn(self):
    method test_pandas_input_fn (line 386) | def test_pandas_input_fn(self):
    method test_input_fn_from_parse_example (line 411) | def test_input_fn_from_parse_example(self):
  class DNNTrainingMode (line 472) | class DNNTrainingMode(tf.test.TestCase):
    method setUp (line 475) | def setUp(self):
    method tearDown (line 480) | def tearDown(self):
    method _create_data (line 485) | def _create_data(self):
    method _get_estimator (line 490) | def _get_estimator(self):
    method test_train_vs_eval_mode (line 500) | def test_train_vs_eval_mode(self):

FILE: tensorflow_estimator/python/estimator/canned/dnn_testing_utils.py
  function assert_close (line 59) | def assert_close(expected, actual, rtol=1e-04, message='', name='assert_...
  function create_checkpoint (line 75) | def create_checkpoint(weights_and_biases,
  function mock_head (line 123) | def mock_head(testcase, hidden_units, logits_dimension, expected_logits):
  function mock_optimizer (line 191) | def mock_optimizer(testcase, hidden_units, expected_loss=None):
  class BaseDNNModelFnTest (line 242) | class BaseDNNModelFnTest(object):
    method __init__ (line 245) | def __init__(self, dnn_model_fn, fc_impl=feature_column_v2):
    method setUp (line 249) | def setUp(self):
    method tearDown (line 252) | def tearDown(self):
    method _test_logits (line 257) | def _test_logits(self, mode, hidden_units, logits_dimension, inputs,
    method test_one_dim_logits (line 289) | def test_one_dim_logits(self):
    method test_multi_dim_logits (line 313) | def test_multi_dim_logits(self):
    method test_multi_example_multi_dim_logits (line 338) | def test_multi_example_multi_dim_logits(self):
    method test_multi_dim_input_one_dim_logits (line 367) | def test_multi_dim_input_one_dim_logits(self):
    method test_multi_dim_input_multi_dim_logits (line 392) | def test_multi_dim_input_multi_dim_logits(self):
    method test_multi_feature_column_multi_dim_logits (line 417) | def test_multi_feature_column_multi_dim_logits(self):
    method test_multi_feature_column_mix_multi_dim_logits (line 468) | def test_multi_feature_column_mix_multi_dim_logits(self):
    method test_features_tensor_raises_value_error (line 519) | def test_features_tensor_raises_value_error(self):
  class BaseDNNLogitFnTest (line 547) | class BaseDNNLogitFnTest(object):
    method __init__ (line 550) | def __init__(self, dnn_logit_fn_builder, fc_impl=feature_column_v2):
    method setUp (line 554) | def setUp(self):
    method tearDown (line 557) | def tearDown(self):
    method _test_logits (line 562) | def _test_logits(self,
    method test_one_dim_logits (line 589) | def test_one_dim_logits(self):
    method test_one_dim_logits_with_batch_norm (line 612) | def test_one_dim_logits_with_batch_norm(self):
    method test_multi_dim_logits (line 677) | def test_multi_dim_logits(self):
    method test_multi_example_multi_dim_logits (line 701) | def test_multi_example_multi_dim_logits(self):
    method test_multi_dim_input_one_dim_logits (line 729) | def test_multi_dim_input_one_dim_logits(self):
    method test_multi_dim_input_multi_dim_logits (line 754) | def test_multi_dim_input_multi_dim_logits(self):
    method test_multi_feature_column_multi_dim_logits (line 778) | def test_multi_feature_column_multi_dim_logits(self):
    method test_multi_feature_column_mix_multi_dim_logits (line 822) | def test_multi_feature_column_mix_multi_dim_logits(self):
  class BaseDNNWarmStartingTest (line 867) | class BaseDNNWarmStartingTest(object):
    method __init__ (line 869) | def __init__(self,
    method setUp (line 877) | def setUp(self):
    method tearDown (line 895) | def tearDown(self):
    method assertAllNotClose (line 900) | def assertAllNotClose(self, t1, t2):
    method test_classifier_basic_warm_starting (line 911) | def test_classifier_basic_warm_starting(self):
    method test_regressor_basic_warm_starting (line 949) | def test_regressor_basic_warm_starting(self):
    method test_warm_starting_selective_variables (line 985) | def test_warm_starting_selective_variables(self):
    method test_warm_starting_with_vocab_remapping (line 1034) | def test_warm_starting_with_vocab_remapping(self):
    method test_warm_starting_with_naming_change (line 1128) | def test_warm_starting_with_naming_change(self):
  class BaseDNNClassifierEvaluateTest (line 1183) | class BaseDNNClassifierEvaluateTest(object):
    method __init__ (line 1185) | def __init__(self, dnn_classifier_fn, fc_impl=feature_column_v2):
    method setUp (line 1189) | def setUp(self):
    method tearDown (line 1192) | def tearDown(self):
    method test_one_dim (line 1197) | def test_one_dim(self):
    method test_multi_dim (line 1250) | def test_multi_dim(self):
    method test_float_labels (line 1287) | def test_float_labels(self):
    method test_multi_dim_weights (line 1315) | def test_multi_dim_weights(self):
  class BaseDNNRegressorEvaluateTest (line 1346) | class BaseDNNRegressorEvaluateTest(object):
    method __init__ (line 1348) | def __init__(self, dnn_regressor_fn, fc_impl=feature_column_v2):
    method setUp (line 1352) | def setUp(self):
    method tearDown (line 1355) | def tearDown(self):
    method test_one_dim (line 1360) | def test_one_dim(self):
    method test_multi_dim (line 1392) | def test_multi_dim(self):
    method test_multi_dim_weights (line 1428) | def test_multi_dim_weights(self):
  class BaseDNNClassifierPredictTest (line 1458) | class BaseDNNClassifierPredictTest(object):
    method __init__ (line 1460) | def __init__(self, dnn_classifier_fn, fc_impl=feature_column_v2):
    method setUp (line 1464) | def setUp(self):
    method tearDown (line 1467) | def tearDown(self):
    method _test_one_dim (line 1472) | def _test_one_dim(self, label_vocabulary, label_output_fn):
    method test_one_dim_without_label_vocabulary (line 1513) | def test_one_dim_without_label_vocabulary(self):
    method test_one_dim_with_label_vocabulary (line 1517) | def test_one_dim_with_label_vocabulary(self):
    method _test_multi_dim_with_3_classes (line 1523) | def _test_multi_dim_with_3_classes(self, label_vocabulary, label_outpu...
    method test_multi_dim_with_3_classes_but_no_label_vocab (line 1577) | def test_multi_dim_with_3_classes_but_no_label_vocab(self):
    method test_multi_dim_with_3_classes_and_label_vocab (line 1581) | def test_multi_dim_with_3_classes_and_label_vocab(self):
  class BaseDNNRegressorPredictTest (line 1588) | class BaseDNNRegressorPredictTest(object):
    method __init__ (line 1590) | def __init__(self, dnn_regressor_fn, fc_impl=feature_column_v2):
    method setUp (line 1594) | def setUp(self):
    method tearDown (line 1597) | def tearDown(self):
    method test_one_dim (line 1602) | def test_one_dim(self):
    method test_multi_dim (line 1626) | def test_multi_dim(self):
  class _SummaryHook (line 1655) | class _SummaryHook(tf.compat.v1.train.SessionRunHook):
    method __init__ (line 1658) | def __init__(self):
    method begin (line 1661) | def begin(self):
    method before_run (line 1664) | def before_run(self, run_context):
    method after_run (line 1667) | def after_run(self, run_context, run_values):
    method summaries (line 1672) | def summaries(self):
  function _assert_checkpoint (line 1676) | def _assert_checkpoint(testcase, global_step, input_units, hidden_units,
  function _assert_simple_summary (line 1712) | def _assert_simple_summary(testcase, expected_values, actual_summary):
  class BaseDNNClassifierTrainTest (line 1728) | class BaseDNNClassifierTrainTest(object):
    method __init__ (line 1730) | def __init__(self, dnn_classifier_fn, fc_impl=feature_column_v2):
    method setUp (line 1734) | def setUp(self):
    method tearDown (line 1737) | def tearDown(self):
    method test_from_scratch_with_default_optimizer_binary (line 1742) | def test_from_scratch_with_default_optimizer_binary(self):
    method test_from_scratch_with_default_optimizer_multi_class (line 1763) | def test_from_scratch_with_default_optimizer_multi_class(self):
    method test_from_scratch_validate_summary (line 1786) | def test_from_scratch_validate_summary(self):
    method test_binary_classification (line 1820) | def test_binary_classification(self):
    method test_binary_classification_float_labels (line 1872) | def test_binary_classification_float_labels(self):
    method test_multi_class (line 1904) | def test_multi_class(self):
  class BaseDNNRegressorTrainTest (line 1959) | class BaseDNNRegressorTrainTest(object):
    method __init__ (line 1961) | def __init__(self, dnn_regressor_fn, fc_impl=feature_column_v2):
    method setUp (line 1965) | def setUp(self):
    method tearDown (line 1968) | def tearDown(self):
    method test_from_scratch_with_default_optimizer (line 1973) | def test_from_scratch_with_default_optimizer(self):
    method test_from_scratch (line 1994) | def test_from_scratch(self):
    method test_one_dim (line 2028) | def test_one_dim(self):
    method test_multi_dim (line 2081) | def test_multi_dim(self):

FILE: tensorflow_estimator/python/estimator/canned/head.py
  function _summary_key (line 55) | def _summary_key(head_name, val):
  function _create_eval_metrics_tuple (line 59) | def _create_eval_metrics_tuple(fn, kwargs):
  class _Head (line 90) | class _Head(object):
    method name (line 147) | def name(self):
    method logits_dimension (line 156) | def logits_dimension(self):
    method create_loss (line 167) | def create_loss(self, features, mode, logits, labels):
    method create_estimator_spec (line 195) | def create_estimator_spec(self,
    method _create_tpu_estimator_spec (line 242) | def _create_tpu_estimator_spec(self,
  function _check_dense_labels_match_logits_and_reshape (line 276) | def _check_dense_labels_match_logits_and_reshape(labels, logits,
  function _get_weights_and_check_match_logits (line 355) | def _get_weights_and_check_match_logits(features,
  function _check_logits_final_dim (line 444) | def _check_logits_final_dim(logits, expected_logits_dimension):
  function _validate_loss_fn_args (line 473) | def _validate_loss_fn_args(loss_fn):
  function _validate_n_classes (line 495) | def _validate_n_classes(n_classes):
  function _call_loss_fn (line 521) | def _call_loss_fn(loss_fn, labels, logits, features, expected_loss_dim=1):
  function _indicator_labels_mean (line 560) | def _indicator_labels_mean(labels, weights=None, name=None):
  function _all_class_ids (line 568) | def _all_class_ids(logits, n_classes):
  function _all_classes (line 576) | def _all_classes(logits, n_classes, label_vocabulary=None):
  function _classification_output (line 587) | def _classification_output(scores, n_classes, label_vocabulary=None):
  function _accuracy_baseline (line 602) | def _accuracy_baseline(labels_mean):
  function _predictions_mean (line 619) | def _predictions_mean(predictions, weights=None, name=None):
  function _auc (line 629) | def _auc(labels, predictions, weights=None, curve='ROC', name=None):
  function _accuracy_at_threshold (line 643) | def _accuracy_at_threshold(labels, predictions, weights, threshold, name...
  function _precision_at_threshold (line 655) | def _precision_at_threshold(labels, predictions, weights, threshold, nam...
  function _recall_at_threshold (line 668) | def _recall_at_threshold(labels, predictions, weights, threshold, name=N...
  function _multi_class_head_with_softmax_cross_entropy_loss (line 681) | def _multi_class_head_with_softmax_cross_entropy_loss(
  class _MultiClassHeadWithSoftmaxCrossEntropyLoss (line 755) | class _MultiClassHeadWithSoftmaxCrossEntropyLoss(_Head):
    method __init__ (line 758) | def __init__(self,
    method name (line 775) | def name(self):
    method logits_dimension (line 779) | def logits_dimension(self):
    method _eval_metric_ops (line 782) | def _eval_metric_ops(self, labels, class_ids, weights, unreduced_loss,
    method _label_ids (line 808) | def _label_ids(self, labels):
    method create_loss (line 825) | def create_loss(self, features, mode, logits, labels):
    method _create_tpu_estimator_spec (line 856) | def _create_tpu_estimator_spec(self,
  function _binary_logistic_head_with_sigmoid_cross_entropy_loss (line 999) | def _binary_logistic_head_with_sigmoid_cross_entropy_loss(
  class _BinaryLogisticHeadWithSigmoidCrossEntropyLoss (line 1084) | class _BinaryLogisticHeadWithSigmoidCrossEntropyLoss(_Head):
    method __init__ (line 1087) | def __init__(self,
    method name (line 1102) | def name(self):
    method logits_dimension (line 1106) | def logits_dimension(self):
    method _eval_metric_ops (line 1109) | def _eval_metric_ops(self, labels, logits, logistic, class_ids, weights,
    method create_loss (line 1195) | def create_loss(self, features, mode, logits, labels):
    method _create_tpu_estimator_spec (line 1227) | def _create_tpu_estimator_spec(self,
  function _regression_head (line 1378) | def _regression_head(weight_column=None,
  class _RegressionHeadWithMeanSquaredErrorLoss (line 1447) | class _RegressionHeadWithMeanSquaredErrorLoss(_Head):
    method __init__ (line 1450) | def __init__(self,
    method name (line 1468) | def name(self):
    method logits_dimension (line 1472) | def logits_dimension(self):
    method create_loss (line 1475) | def create_loss(self, features, mode, logits, labels):
    method _eval_metric_ops (line 1509) | def _eval_metric_ops(self, predicted_value, labels, weights, unreduced...
    method _create_tpu_estimator_spec (line 1532) | def _create_tpu_estimator_spec(self,
  function _append_update_ops (line 1657) | def _append_update_ops(train_op):
  function _assert_range (line 1665) | def _assert_range(labels, n_classes, message=None):
  function _binary_logistic_or_multi_class_head (line 1677) | def _binary_logistic_or_multi_class_head(n_classes, weight_column,

FILE: tensorflow_estimator/python/estimator/canned/head_test.py
  function _initialize_variables (line 36) | def _initialize_variables(test_case, scaffold):
  function _assert_simple_summaries (line 47) | def _assert_simple_summaries(test_case,
  function _assert_no_hooks (line 67) | def _assert_no_hooks(test_case, spec):
  function _sigmoid (line 72) | def _sigmoid(logits):
  class CreateEstimatorSpecTest (line 77) | class CreateEstimatorSpecTest(tf.test.TestCase):
    class _HeadWithTPUSupport (line 79) | class _HeadWithTPUSupport(head_lib._Head):
      method name (line 82) | def name(self):
      method logits_dimension (line 85) | def logits_dimension(self):
      method create_loss (line 88) | def create_loss(self, features, mode, logits, labels):
      method _create_tpu_estimator_spec (line 91) | def _create_tpu_estimator_spec(self,
    class _HeadWithOutTPUSupport (line 102) | class _HeadWithOutTPUSupport(head_lib._Head):
      method name (line 105) | def name(self):
      method logits_dimension (line 108) | def logits_dimension(self):
      method create_loss (line 111) | def create_loss(self, features, mode, logits, labels):
      method create_estimator_spec (line 114) | def create_estimator_spec(self,
    class _InvalidHead (line 125) | class _InvalidHead(head_lib._Head):
      method name (line 128) | def name(self):
      method logits_dimension (line 131) | def logits_dimension(self):
      method create_loss (line 134) | def create_loss(self, features, mode, logits, labels):
    method test_head_override_tpu_estimator_spec (line 137) | def test_head_override_tpu_estimator_spec(self):
    method test_head_override_estimator_spec (line 147) | def test_head_override_estimator_spec(self):
    method test_invalid_head_class (line 158) | def test_invalid_head_class(self):
  class MultiClassHeadWithSoftmaxCrossEntropyLoss (line 173) | class MultiClassHeadWithSoftmaxCrossEntropyLoss(tf.test.TestCase):
    method setUp (line 175) | def setUp(self):
    method test_n_classes_is_none (line 178) | def test_n_classes_is_none(self):
    method test_n_classes_is_2 (line 182) | def test_n_classes_is_2(self):
    method test_invalid_loss_reduction (line 186) | def test_invalid_loss_reduction(self):
    method test_loss_fn_arg_labels_missing (line 195) | def test_loss_fn_arg_labels_missing(self):
    method test_loss_fn_arg_logits_missing (line 206) | def test_loss_fn_arg_logits_missing(self):
    method test_loss_fn_arg_features_ok (line 217) | def test_loss_fn_arg_features_ok(self):
    method test_loss_fn_arg_invalid (line 225) | def test_loss_fn_arg_invalid(self):
    method test_invalid_logits_shape (line 235) | def test_invalid_logits_shape(self):
    method test_invalid_labels_shape (line 261) | def test_invalid_labels_shape(self):
    method test_invalid_labels_type (line 297) | def test_invalid_labels_type(self):
    method test_invalid_labels_values (line 326) | def test_invalid_labels_values(self):
    method test_invalid_labels_sparse_tensor (line 356) | def test_invalid_labels_sparse_tensor(self):
    method test_incompatible_labels_shape (line 375) | def test_incompatible_labels_shape(self):
    method test_name (line 416) | def test_name(self):
    method test_predict (line 421) | def test_predict(self):
    method test_predict_with_tensor_n_classes (line 471) | def test_predict_with_tensor_n_classes(self):
    method test_predict_with_vocabulary_list (line 521) | def test_predict_with_vocabulary_list(self):
    method test_weight_should_not_impact_prediction (line 544) | def test_weight_should_not_impact_prediction(self):
    method test_eval_create_loss (line 570) | def test_eval_create_loss(self):
    method test_eval_create_loss_loss_fn (line 587) | def test_eval_create_loss_loss_fn(self):
    method test_eval_create_loss_loss_fn_wrong_shape (line 613) | def test_eval_create_loss_loss_fn_wrong_shape(self):
    method test_eval_labels_none (line 639) | def test_eval_labels_none(self):
    method test_eval (line 652) | def test_eval(self):
    method test_eval_metric_ops_with_head_name (line 693) | def test_eval_metric_ops_with_head_name(self):
    method test_eval_with_regularization_losses (line 710) | def test_eval_with_regularization_losses(self):
    method test_eval_with_label_vocabulary_create_loss (line 756) | def test_eval_with_label_vocabulary_create_loss(self):
    method test_eval_with_label_vocabulary (line 772) | def test_eval_with_label_vocabulary(self):
    method test_weighted_multi_example_eval (line 805) | def test_weighted_multi_example_eval(self):
    method test_train_create_loss (line 856) | def test_train_create_loss(self):
    method test_train_create_loss_loss_reduction (line 881) | def test_train_create_loss_loss_reduction(self):
    method test_train_labels_none (line 908) | def test_train_labels_none(self):
    method test_train (line 926) | def test_train(self):
    method test_train_with_optimizer (line 973) | def test_train_with_optimizer(self):
    method test_train_with_update_ops (line 1009) | def test_train_with_update_ops(self):
    method test_train_summaries_with_head_name (line 1040) | def test_train_summaries_with_head_name(self):
    method test_train_with_regularization_losses (line 1077) | def test_train_with_regularization_losses(self):
    method test_train_one_dim_create_loss (line 1127) | def test_train_one_dim_create_loss(self):
    method test_train_one_dim (line 1160) | def test_train_one_dim(self):
    method test_train_with_vocabulary_create_loss (line 1219) | def test_train_with_vocabulary_create_loss(self):
    method test_train_with_vocabulary (line 1236) | def test_train_with_vocabulary(self):
    method test_weighted_multi_example_train (line 1264) | def test_weighted_multi_example_train(self):
    method test_multi_dim_weighted_train_create_loss (line 1324) | def test_multi_dim_weighted_train_create_loss(self):
    method test_multi_dim_weighted_train (line 1354) | def test_multi_dim_weighted_train(self):
    method test_multi_dim_train_weights_wrong_inner_dim (line 1391) | def test_multi_dim_train_weights_wrong_inner_dim(self):
    method test_multi_dim_train_weights_wrong_outer_dim (line 1417) | def test_multi_dim_train_weights_wrong_outer_dim(self):
    method test_multi_dim_weighted_eval (line 1445) | def test_multi_dim_weighted_eval(self):
  class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest (line 1488) | class BinaryLogisticHeadWithSigmoidCrossEntropyLossTest(tf.test.TestCase):
    method setUp (line 1490) | def setUp(self):
    method test_threshold_too_small (line 1493) | def test_threshold_too_small(self):
    method test_threshold_too_large (line 1498) | def test_threshold_too_large(self):
    method test_invalid_loss_reduction (line 1503) | def test_invalid_loss_reduction(self):
    method test_loss_fn_arg_labels_missing (line 1512) | def test_loss_fn_arg_labels_missing(self):
    method test_loss_fn_arg_logits_missing (line 1523) | def test_loss_fn_arg_logits_missing(self):
    method test_loss_fn_arg_features_ok (line 1534) | def test_loss_fn_arg_features_ok(self):
    method test_loss_fn_arg_invalid (line 1541) | def test_loss_fn_arg_invalid(self):
    method test_invalid_logits_shape (line 1551) | def test_invalid_logits_shape(self):
    method test_invalid_labels_shape (line 1576) | def test_invalid_labels_shape(self):
    method test_incompatible_labels_shape (line 1609) | def test_incompatible_labels_shape(self):
    method test_name (line 1658) | def test_name(self):
    method test_predict (line 1663) | def test_predict(self):
    method test_predict_with_vocabulary_list (line 1721) | def test_predict_with_vocabulary_list(self):
    method test_eval_create_loss (line 1739) | def test_eval_create_loss(self):
    method test_eval_labels_none (line 1755) | def test_eval_labels_none(self):
    method test_eval (line 1767) | def test_eval(self):
    method test_eval_metric_ops_with_head_name (line 1812) | def test_eval_metric_ops_with_head_name(self):
    method test_eval_with_regularization_losses (line 1835) | def test_eval_with_regularization_losses(self):
    method test_eval_with_vocabulary_list_create_loss (line 1884) | def test_eval_with_vocabulary_list_create_loss(self):
    method test_eval_with_vocabulary_list (line 1897) | def test_eval_with_vocabulary_list(self):
    method test_eval_with_thresholds_create_loss (line 1916) | def test_eval_with_thresholds_create_loss(self):
    method test_eval_with_thresholds (line 1937) | def test_eval_with_thresholds(self):
    method test_train_create_loss (line 1990) | def test_train_create_loss(self):
    method test_train_create_loss_loss_reduction (line 2011) | def test_train_create_loss_loss_reduction(self):
    method test_eval_create_loss_loss_fn (line 2034) | def test_eval_create_loss_loss_fn(self):
    method test_eval_create_loss_loss_fn_wrong_shape (line 2060) | def test_eval_create_loss_loss_fn_wrong_shape(self):
    method test_train_labels_none (line 2086) | def test_train_labels_none(self):
    method test_train (line 2103) | def test_train(self):
    method test_train_with_optimizer (line 2152) | def test_train_with_optimizer(self):
    method test_train_with_update_ops (line 2186) | def test_train_with_update_ops(self):
    method test_train_summaries_with_head_name (line 2216) | def test_train_summaries_with_head_name(self):
    method test_train_with_regularization_losses (line 2253) | def test_train_with_regularization_losses(self):
    method test_float_labels_invalid_values (line 2300) | def test_float_labels_invalid_values(self):
    method test_float_labels_train_create_loss (line 2312) | def test_float_labels_train_create_loss(self):
    method test_float_labels_train (line 2333) | def test_float_labels_train(self):
    method test_float_labels_eval_create_loss (line 2368) | def test_float_labels_eval_create_loss(self):
    method test_float_labels_eval (line 2389) | def test_float_labels_eval(self):
    method test_weighted_multi_example_predict (line 2416) | def test_weighted_multi_example_predict(self):
    method test_weighted_multi_example_eval (line 2449) | def test_weighted_multi_example_eval(self):
    method test_train_one_dim_create_loss (line 2505) | def test_train_one_dim_create_loss(self):
    method test_train_one_dim (line 2538) | def test_train_one_dim(self):
    method test_weighted_multi_example_train (line 2595) | def test_weighted_multi_example_train(self):
    method test_multi_dim_weighted_train_create_loss (line 2649) | def test_multi_dim_weighted_train_create_loss(self):
    method test_multi_dim_weighted_train (line 2678) | def test_multi_dim_weighted_train(self):
    method test_multi_dim_train_weights_wrong_inner_dim (line 2715) | def test_multi_dim_train_weights_wrong_inner_dim(self):
    method test_multi_dim_train_weights_wrong_outer_dim (line 2741) | def test_multi_dim_train_weights_wrong_outer_dim(self):
    method test_multi_dim_weighted_eval (line 2770) | def test_multi_dim_weighted_eval(self):
  class RegressionHead (line 2824) | class RegressionHead(tf.test.TestCase):
    method setUp (line 2826) | def setUp(self):
    method test_invalid_label_dimension (line 2829) | def test_invalid_label_dimension(self):
    method test_invalid_loss_reduction (line 2835) | def test_invalid_loss_reduction(self):
    method test_loss_fn_arg_labels_missing (line 2843) | def test_loss_fn_arg_labels_missing(self):
    method test_loss_fn_arg_logits_missing (line 2853) | def test_loss_fn_arg_logits_missing(self):
    method test_loss_fn_arg_features_ok (line 2863) | def test_loss_fn_arg_features_ok(self):
    method test_loss_fn_arg_invalid (line 2869) | def test_loss_fn_arg_invalid(self):
    method test_invalid_logits (line 2878) | def test_invalid_logits(self):
    method test_incompatible_labels_eval (line 2901) | def test_incompatible_labels_eval(self):
    method test_incompatible_labels_train (line 2950) | def test_incompatible_labels_train(self):
    method test_name (line 3001) | def test_name(self):
    method test_predict (line 3005) | def test_predict(self):
    method test_predict_with_inverse_link_fn (line 3039) | def test_predict_with_inverse_link_fn(self):
    method test_eval_create_loss (line 3081) | def test_eval_create_loss(self):
    method test_eval_create_loss_loss_fn (line 3094) | def test_eval_create_loss_loss_fn(self):
    method test_eval_create_loss_loss_fn_wrong_shape (line 3119) | def test_eval_create_loss_loss_fn_wrong_shape(self):
    method test_eval_labels_none (line 3144) | def test_eval_labels_none(self):
    method test_eval (line 3156) | def test_eval(self):
    method test_eval_metric_ops_with_head_name_for_regression (line 3197) | def test_eval_metric_ops_with_head_name_for_regression(self):
    method test_eval_with_regularization_losses (line 3214) | def test_eval_with_regularization_losses(self):
    method test_train_create_loss (line 3261) | def test_train_create_loss(self):
    method test_train_create_loss_loss_reduction (line 3281) | def test_train_create_loss_loss_reduction(self):
    method test_train_labels_none (line 3303) | def test_train_labels_none(self):
    method test_train (line 3320) | def test_train(self):
    method test_train_with_optimizer (line 3375) | def test_train_with_optimizer(self):
    method test_train_with_update_ops (line 3410) | def test_train_with_update_ops(self):
    method test_train_summaries_with_head_name (line 3440) | def test_train_summaries_with_head_name(self):
    method test_train_with_regularization_losses (line 3479) | def test_train_with_regularization_losses(self):
    method test_weighted_multi_example_eval (line 3529) | def test_weighted_multi_example_eval(self):
    method test_weight_with_numeric_column (line 3575) | def test_weight_with_numeric_column(self):
    method test_weighted_multi_example_train (line 3601) | def test_weighted_multi_example_train(self):
    method test_train_one_dim_create_loss (line 3658) | def test_train_one_dim_create_loss(self):
    method test_train_one_dim (line 3684) | def test_train_one_dim(self):
    method test_weighted_multi_value_eval_create_loss (line 3746) | def test_weighted_multi_value_eval_create_loss(self):
    method test_weighted_multi_value_eval (line 3765) | def test_weighted_multi_value_eval(self):
    method test_weighted_multi_value_train_create_loss (line 3811) | def test_weighted_multi_value_train_create_loss(self):
    method test_weighted_multi_value_train (line 3830) | def test_weighted_multi_value_train(self):
    method test_weighted_multi_batch_eval (line 3890) | def test_weighted_multi_batch_eval(self):
    method test_weighted_multi_batch_train (line 3962) | def test_weighted_multi_batch_train(self):
    method test_multi_dim_weighted_train_create_loss (line 4009) | def test_multi_dim_weighted_train_create_loss(self):
    method test_multi_dim_weighted_train (line 4040) | def test_multi_dim_weighted_train(self):
    method test_multi_dim_train_weights_wrong_inner_dim (line 4073) | def test_multi_dim_train_weights_wrong_inner_dim(self):
    method test_multi_dim_train_weights_wrong_outer_dim (line 4102) | def test_multi_dim_train_weights_wrong_outer_dim(self):

FILE: tensorflow_estimator/python/estimator/canned/kmeans.py
  class _LossRelativeChangeHook (line 36) | class _LossRelativeChangeHook(tf.compat.v1.train.SessionRunHook):
    method __init__ (line 39) | def __init__(self, loss_tensor, tolerance):
    method before_run (line 50) | def before_run(self, run_context):
    method after_run (line 54) | def after_run(self, run_context, run_values):
  class _InitializeClustersHook (line 65) | class _InitializeClustersHook(tf.compat.v1.train.SessionRunHook):
    method __init__ (line 72) | def __init__(self, init_op, is_initialized_var, is_chief):
    method after_create_session (line 86) | def after_create_session(self, session, coord):
  function _parse_features_if_necessary (line 102) | def _parse_features_if_necessary(features, feature_columns):
  class _ModelFn (line 130) | class _ModelFn(object):
    method __init__ (line 133) | def __init__(self, num_clusters, initial_clusters, distance_metric, seed,
    method model_fn (line 147) | def model_fn(self, features, mode, config):
  class KMeansClustering (line 241) | class KMeansClustering(estimator.Estimator):
    method __init__ (line 315) | def __init__(self,
    method _predict_one_key (line 423) | def _predict_one_key(self, input_fn, predict_key):
    method predict_cluster_index (line 427) | def predict_cluster_index(self, input_fn):
    method score (line 440) | def score(self, input_fn):
    method transform (line 456) | def transform(self, input_fn):
    method cluster_centers (line 477) | def cluster_centers(self):

FILE: tensorflow_estimator/python/estimator/canned/kmeans_test.py
  function normalize (line 36) | def normalize(x):
  function cosine_similarity (line 40) | def cosine_similarity(x, y):
  function make_random_centers (line 44) | def make_random_centers(num_centers, num_dims, center_norm=500):
  function make_random_points (line 49) | def make_random_points(centers, num_points, max_offset=20):
  class KMeansTestBase (line 58) | class KMeansTestBase(tf.test.TestCase):
    method input_fn (line 60) | def input_fn(self,
    method config (line 106) | def config(tf_random_seed):
    method initial_clusters (line 110) | def initial_clusters(self):
    method batch_size (line 114) | def batch_size(self):
    method use_mini_batch (line 118) | def use_mini_batch(self):
    method mini_batch_steps_per_iteration (line 122) | def mini_batch_steps_per_iteration(self):
  class KMeansTest (line 127) | class KMeansTest(KMeansTestBase):
    method setUp (line 129) | def setUp(self):
    method _kmeans (line 139) | def _kmeans(self, relative_tolerance=None):
    method test_clusters (line 149) | def test_clusters(self):
    method test_fit (line 155) | def test_fit(self):
    method test_monitor (line 165) | def test_monitor(self):
    method _infer_helper (line 186) | def _infer_helper(self, kmeans, clusters, num_points):
    method test_infer (line 208) | def test_infer(self):
    method _parse_feature_dict_helper (line 219) | def _parse_feature_dict_helper(self, features, parsed_feature_dict):
    method test_parse_features (line 228) | def test_parse_features(self):
  class KMeansTestMultiStageInit (line 262) | class KMeansTestMultiStageInit(KMeansTestBase):
    method test_random (line 264) | def test_random(self):
    method test_kmeans_plus_plus_batch_just_right (line 281) | def test_kmeans_plus_plus_batch_just_right(self):
    method test_kmeans_plus_plus_batch_too_small (line 297) | def test_kmeans_plus_plus_batch_too_small(self):
  class MiniBatchKMeansTest (line 315) | class MiniBatchKMeansTest(KMeansTest):
    method batch_size (line 318) | def batch_size(self):
    method use_mini_batch (line 322) | def use_mini_batch(self):
  class FullBatchAsyncKMeansTest (line 327) | class FullBatchAsyncKMeansTest(KMeansTest):
    method batch_size (line 330) | def batch_size(self):
    method use_mini_batch (line 334) | def use_mini_batch(self):
    method mini_batch_steps_per_iteration (line 338) | def mini_batch_steps_per_iteration(self):
  class KMeansCosineDistanceTest (line 343) | class KMeansCosineDistanceTest(KMeansTestBase):
    method setUp (line 345) | def setUp(self):
    method test_fit (line 370) | def test_fit(self):
    method test_transform (line 378) | def test_transform(self):
    method test_predict (line 387) | def test_predict(self):
    method test_predict_kmeans_plus_plus (line 407) | def test_predict_kmeans_plus_plus(self):
  class MiniBatchKMeansCosineTest (line 449) | class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
    method batch_size (line 452) | def batch_size(self):
    method use_mini_batch (line 456) | def use_mini_batch(self):
  class FullBatchAsyncKMeansCosineTest (line 461) | class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
    method batch_size (line 464) | def batch_size(self):
    method use_mini_batch (line 468) | def use_mini_batch(self):
    method mini_batch_steps_per_iteration (line 472) | def mini_batch_steps_per_iteration(self):
  class KMeansBenchmark (line 476) | class KMeansBenchmark(benchmark.Benchmark):
    method SetUp (line 479) | def SetUp(self,
    method _report (line 494) | def _report(self, num_iters, start, end, scores):
    method _fit (line 504) | def _fit(self, num_iters=10):
    method benchmark_01_2dim_5center_500point (line 507) | def benchmark_01_2dim_5center_500point(self):
    method benchmark_02_20dim_20center_10kpoint (line 511) | def benchmark_02_20dim_20center_10kpoint(self):
    method benchmark_03_100dim_50center_50kpoint (line 515) | def benchmark_03_100dim_50center_50kpoint(self):
    method benchmark_03_100dim_50center_50kpoint_unseparated (line 519) | def benchmark_03_100dim_50center_50kpoint_unseparated(self):
    method benchmark_04_100dim_500center_500kpoint (line 527) | def benchmark_04_100dim_500center_500kpoint(self):
    method benchmark_05_100dim_500center_500kpoint_unseparated (line 531) | def benchmark_05_100dim_500center_500kpoint_unseparated(self):
  class TensorflowKMeansBenchmark (line 540) | class TensorflowKMeansBenchmark(KMeansBenchmark):
    method _fit (line 542) | def _fit(self, num_iters=10):
  class SklearnKMeansBenchmark (line 562) | class SklearnKMeansBenchmark(KMeansBenchmark):
    method _fit (line 564) | def _fit(self, num_iters=10):
  class KMeansTestQueues (line 582) | class KMeansTestQueues(tf.test.TestCase):
    method input_fn (line 584) | def input_fn(self):
    method test_queues (line 600) | def test_queues(self):

FILE: tensorflow_estimator/python/estimator/canned/linear.py
  class LinearSDCA (line 47) | class LinearSDCA(object):
    method __init__ (line 107) | def __init__(self,
    method _prune_and_unique_sparse_ids (line 135) | def _prune_and_unique_sparse_ids(self, id_weight_pair):
    method get_train_step (line 177) | def get_train_step(self, state_manager, weight_column_name, loss_type,
  function _get_default_optimizer_v2 (line 242) | def _get_default_optimizer_v2(feature_columns):
  function _get_default_optimizer (line 247) | def _get_default_optimizer(feature_columns):
  function _get_expanded_variable_list (line 252) | def _get_expanded_variable_list(var_list):
  function _compute_fraction_of_zero (line 274) | def _compute_fraction_of_zero(variables):
  function linear_logit_fn_builder_v2 (line 311) | def linear_logit_fn_builder_v2(units, feature_columns, sparse_combiner='...
  function linear_logit_fn_builder (line 379) | def linear_logit_fn_builder(units, feature_columns, sparse_combiner='sum'):
  function _sdca_model_fn (line 451) | def _sdca_model_fn(features, labels, mode, head, feature_columns, optimi...
  class _SDCAUpdateWeightsHook (line 543) | class _SDCAUpdateWeightsHook(tf.compat.v1.train.SessionRunHook):
    method __init__ (line 546) | def __init__(self, sdca_model, train_op):
    method begin (line 550) | def begin(self):
    method before_run (line 557) | def before_run(self, run_context):
  function _linear_model_fn_builder_v2 (line 562) | def _linear_model_fn_builder_v2(units,
  function _linear_model_fn_v2 (line 622) | def _linear_model_fn_v2(features,
  function _linear_model_fn (line 687) | def _linear_model_fn(features,
  function _validate_linear_sdca_optimizer_for_linear_classifier (line 756) | def _validate_linear_sdca_optimizer_for_linear_classifier(
  class LinearClassifierV2 (line 771) | class LinearClassifierV2(estimator.EstimatorV2):
    method __init__ (line 858) | def __init__(self,
  class LinearClassifier (line 953) | class LinearClassifier(estimator.Estimator):
    method __init__ (line 956) | def __init__(self,
  class LinearEstimatorV2 (line 999) | class LinearEstimatorV2(estimator.EstimatorV2):
    method __init__ (line 1078) | def __init__(self,
  class LinearEstimator (line 1131) | class LinearEstimator(estimator.Estimator):
    method __init__ (line 1134) | def __init__(self,
  function _validate_linear_sdca_optimizer_for_linear_regressor (line 1189) | def _validate_linear_sdca_optimizer_for_linear_regressor(
  class LinearRegressorV2 (line 1205) | class LinearRegressorV2(estimator.EstimatorV2):
    method __init__ (line 1292) | def __init__(self,
  class LinearRegressor (line 1371) | class LinearRegressor(estimator.Estimator):
    method __init__ (line 1374) | def __init__(self,
  class _LinearModelLayer (line 1417) | class _LinearModelLayer(tf_keras.layers.Layer):
    method __init__ (line 1420) | def __init__(self,
    method build (line 1443) | def build(self, _):
    method call (line 1483) | def call(self, features):
    method get_config (line 1513) | def get_config(self):
    method from_config (line 1529) | def from_config(cls, config, custom_objects=None):
  class LinearModel (line 1540) | class LinearModel(tf_keras.Model):
    method __init__ (line 1579) | def __init__(self,
    method call (line 1653) | def call(self, features):
    method bias (line 1672) | def bias(self):

FILE: tensorflow_estimator/python/estimator/canned/linear_estimator_test.py
  function _linear_estimator_fn (line 37) | def _linear_estimator_fn(weight_column=None, label_dimension=1, **kwargs):
  function _linear_estimator_classifier_fn (line 48) | def _linear_estimator_classifier_fn(n_classes=3, **kwargs):
  class LinearEstimatorEvaluateTest (line 53) | class LinearEstimatorEvaluateTest(
    method __init__ (line 56) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearEstimatorPredictTest (line 62) | class LinearEstimatorPredictTest(
    method __init__ (line 65) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearEstimatorTrainTest (line 71) | class LinearEstimatorTrainTest(
    method __init__ (line 74) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearEstimatorWarmStartingTest (line 80) | class LinearEstimatorWarmStartingTest(
    method __init__ (line 83) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearEstimatorIntegrationTest (line 91) | class LinearEstimatorIntegrationTest(tf.test.TestCase):
    method setUp (line 93) | def setUp(self):
    method tearDown (line 96) | def tearDown(self):
    method _test_complete_flow (line 101) | def _test_complete_flow(self,
    method _create_input_fn (line 143) | def _create_input_fn(self, label_dimension, batch_size):
    method test_numpy_input_fn (line 161) | def test_numpy_input_fn(self):
    method test_numpy_input_fn_with_optimizer_instance (line 175) | def test_numpy_input_fn_with_optimizer_instance(self):

FILE: tensorflow_estimator/python/estimator/canned/linear_model_test.py
  function _initialized_session (line 33) | def _initialized_session(config=None):
  function get_linear_model_bias (line 40) | def get_linear_model_bias(name='linear_model'):
  function get_linear_model_column_var (line 45) | def get_linear_model_column_var(column, name='linear_model'):
  class BaseFeatureColumnForTests (line 50) | class BaseFeatureColumnForTests(tf.compat.v2.__internal__.feature_column...
    method parents (line 58) | def parents(self):
    method from_config (line 62) | def from_config(cls, config, custom_objects=None, columns_by_name=None):
    method get_config (line 65) | def get_config(self):
  class SortableFeatureColumnTest (line 69) | class SortableFeatureColumnTest(tf.test.TestCase):
    method test_linear_model (line 72) | def test_linear_model(self):
    method test_linear_model_sanitizes_scope_names (line 87) | def test_linear_model_sanitizes_scope_names(self):
  class BucketizedColumnTest (line 102) | class BucketizedColumnTest(tf.test.TestCase):
    method test_linear_model_one_input_value (line 104) | def test_linear_model_one_input_value(self):
    method test_linear_model_two_input_values (line 132) | def test_linear_model_two_input_values(self):
  class HashedCategoricalColumnTest (line 162) | class HashedCategoricalColumnTest(tf.test.TestCase):
    method test_linear_model (line 165) | def test_linear_model(self):
  class CrossedColumnTest (line 191) | class CrossedColumnTest(tf.test.TestCase):
    method test_linear_model (line 194) | def test_linear_model(self):
    method test_linear_model_with_weights (line 225) | def test_linear_model_with_weights(self):
  class LinearModelTest (line 289) | class LinearModelTest(tf.test.TestCase):
    method test_raises_if_empty_feature_columns (line 291) | def test_raises_if_empty_feature_columns(self):
    method test_should_be_feature_column (line 296) | def test_should_be_feature_column(self):
    method test_should_be_dense_or_categorical_column (line 300) | def test_should_be_dense_or_categorical_column(self):
    method test_does_not_support_dict_columns (line 323) | def test_does_not_support_dict_columns(self):
    method test_raises_if_duplicate_name (line 328) | def test_raises_if_duplicate_name(self):
    method test_not_dict_input_features (line 335) | def test_not_dict_input_features(self):
    method test_dense_bias (line 343) | def test_dense_bias(self):
    method test_sparse_bias (line 356) | def test_sparse_bias(self):
    method test_dense_and_sparse_bias (line 375) | def test_dense_and_sparse_bias(self):
    method test_dense_and_sparse_column (line 393) | def test_dense_and_sparse_column(self):
    method test_dense_multi_output (line 449) | def test_dense_multi_output(self):
    method test_sparse_multi_output (line 464) | def test_sparse_multi_output(self):
    method test_dense_multi_dimension (line 486) | def test_dense_multi_dimension(self):
    method test_sparse_multi_rank (line 498) | def test_sparse_multi_rank(self):
    method test_sparse_combiner (line 520) | def test_sparse_combiner(self):
    method test_sparse_combiner_sqrtn (line 536) | def test_sparse_combiner_sqrtn(self):
    method test_sparse_combiner_with_negative_weights (line 552) | def test_sparse_combiner_with_negative_weights(self):
    method test_dense_multi_dimension_multi_output (line 573) | def test_dense_multi_dimension_multi_output(self):
    method test_raises_if_shape_mismatch (line 588) | def test_raises_if_shape_mismatch(self):
    method test_dense_reshaping (line 598) | def test_dense_reshaping(self):
    method test_dense_multi_column (line 612) | def test_dense_multi_column(self):
    method test_dense_trainable_default (line 630) | def test_dense_trainable_default(self):
    method test_sparse_trainable_default (line 641) | def test_sparse_trainable_default(self):
    method test_dense_trainable_false (line 654) | def test_dense_trainable_false(self):
    method test_sparse_trainable_false (line 663) | def test_sparse_trainable_false(self):
    method test_column_order (line 674) | def test_column_order(self):
    method test_variable_names (line 710) | def test_variable_names(self):
    method test_fit_and_predict (line 740) | def test_fit_and_predict(self):
    method test_static_batch_size_mismatch (line 756) | def test_static_batch_size_mismatch(self):
    method test_subset_of_static_batch_size_mismatch (line 770) | def test_subset_of_static_batch_size_mismatch(self):
    method test_runtime_batch_size_mismatch (line 786) | def test_runtime_batch_size_mismatch(self):
    method test_runtime_batch_size_matches (line 802) | def test_runtime_batch_size_matches(self):
    method test_with_1d_sparse_tensor (line 821) | def test_with_1d_sparse_tensor(self):
    method test_with_1d_unknown_shape_sparse_tensor (line 861) | def test_with_1d_unknown_shape_sparse_tensor(self):
    method test_with_rank_0_feature (line 906) | def test_with_rank_0_feature(self):
    method test_multiple_linear_models (line 929) | def test_multiple_linear_models(self):
  class VocabularyFileCategoricalColumnTest (line 951) | class VocabularyFileCategoricalColumnTest(tf.test.TestCase):
    method setUp (line 953) | def setUp(self):
    method DISABLED_test_linear_model (line 968) | def DISABLED_test_linear_model(self):
  class VocabularyListCategoricalColumnTest (line 998) | class VocabularyListCategoricalColumnTest(tf.test.TestCase):
    method test_linear_model (line 1001) | def test_linear_model(self):
  class IdentityCategoricalColumnTest (line 1030) | class IdentityCategoricalColumnTest(tf.test.TestCase):
    method test_linear_model (line 1033) | def test_linear_model(self):
  class IndicatorColumnTest (line 1059) | class IndicatorColumnTest(tf.test.TestCase):
    method test_linear_model (line 1062) | def test_linear_model(self):
  class EmbeddingColumnTest (line 1086) | class EmbeddingColumnTest(tf.test.TestCase, parameterized.TestCase):
    method test_linear_model (line 1089) | def test_linear_model(self):
  class SharedEmbeddingColumnTest (line 1171) | class SharedEmbeddingColumnTest(tf.test.TestCase, parameterized.TestCase):
    method test_linear_model (line 1174) | def test_linear_model(self):
  class WeightedCategoricalColumnTest (line 1272) | class WeightedCategoricalColumnTest(tf.test.TestCase):
    method test_linear_model (line 1275) | def test_linear_model(self):
    method test_linear_model_mismatched_shape (line 1308) | def test_linear_model_mismatched_shape(self):
    method test_linear_model_mismatched_dense_values (line 1330) | def test_linear_model_mismatched_dense_values(self):
    method test_linear_model_mismatched_dense_shape (line 1354) | def test_linear_model_mismatched_dense_shape(self):
  class LinearModelLayerSerializationTest (line 1385) | class LinearModelLayerSerializationTest(tf.test.TestCase, parameterized....
    method test_get_config (line 1390) | def test_get_config(self, units, sparse_combiner, trainable, name):
    method test_from_config (line 1411) | def test_from_config(self, units, sparse_combiner, trainable, name):

FILE: tensorflow_estimator/python/estimator/canned/linear_optimizer/python/sdca_test.py
  class SDCAClassifierTest (line 26) | class SDCAClassifierTest(tf.test.TestCase):
    method testRealValuedFeatures (line 28) | def testRealValuedFeatures(self):
    method testRealValuedFeatureWithHigherDimension (line 50) | def testRealValuedFeatureWithHigherDimension(self):
    method testBucketizedFeatures (line 70) | def testBucketizedFeatures(self):
    method testSparseFeatures (line 95) | def testSparseFeatures(self):
    method testWeightedSparseFeatures (line 121) | def testWeightedSparseFeatures(self):
    method testWeightedSparseFeaturesOOVWithNoOOVBuckets (line 152) | def testWeightedSparseFeaturesOOVWithNoOOVBuckets(self):
    method testCrossedFeatures (line 184) | def testCrossedFeatures(self):
    method testMixedFeatures (line 213) | def testMixedFeatures(self):
    method testPartitionedVariables (line 251) | def testPartitionedVariables(self):
  class SDCARegressorTest (line 293) | class SDCARegressorTest(tf.test.TestCase):
    method testRealValuedLinearFeatures (line 295) | def testRealValuedLinearFeatures(self):
    method testMixedFeaturesArbitraryWeights (line 326) | def testMixedFeaturesArbitraryWeights(self):
    method testPartitionedVariables (line 364) | def testPartitionedVariables(self):
    method testSparseFeaturesWithL1Reg (line 404) | def testSparseFeaturesWithL1Reg(self):
    method testBiasOnly (line 478) | def testBiasOnly(self):
    method testBiasAndOtherColumns (line 509) | def testBiasAndOtherColumns(self):
    method testBiasAndOtherColumnsFabricatedCentered (line 571) | def testBiasAndOtherColumnsFabricatedCentered(self):
    method testUnknownBatchSize (line 625) | def testUnknownBatchSize(self):

FILE: tensorflow_estimator/python/estimator/canned/linear_optimizer/python/utils/sdca_ops.py
  class _SparseFeatureColumn (line 34) | class _SparseFeatureColumn(object):
    method __init__ (line 81) | def __init__(self, example_indices, feature_indices, feature_values):
    method example_indices (line 108) | def example_indices(self):
    method feature_indices (line 117) | def feature_indices(self):
    method feature_values (line 126) | def feature_values(self):
  class _SDCAModel (line 135) | class _SDCAModel(object):
    method __init__ (line 197) | def __init__(self, examples, variables, options):
    method _symmetric_l1_regularization (line 253) | def _symmetric_l1_regularization(self):
    method _symmetric_l2_regularization (line 256) | def _symmetric_l2_regularization(self):
    method _num_loss_partitions (line 259) | def _num_loss_partitions(self):
    method _adaptive (line 263) | def _adaptive(self):
    method _num_table_shards (line 267) | def _num_table_shards(self):
    method _create_slots (line 273) | def _create_slots(self):
    method _assert_specified (line 312) | def _assert_specified(self, items, check_in):
    method _assert_list (line 317) | def _assert_list(self, items, check_in):
    method _var_to_list (line 322) | def _var_to_list(self, var):
    method _l1_loss (line 328) | def _l1_loss(self):
    method _l2_loss (line 343) | def _l2_loss(self):
    method _convert_n_to_tensor (line 358) | def _convert_n_to_tensor(self, input_list, as_ref=False):
    method _get_first_dimension_size_statically (line 374) | def _get_first_dimension_size_statically(self, w, num_partitions):
    method _linear_predictions (line 381) | def _linear_predictions(self, examples):
    method predictions (line 412) | def predictions(self, examples):
    method _get_partitioned_update_ops (line 443) | def _get_partitioned_update_ops(self, v_num, num_partitions_by_var,
    method minimize (line 459) | def minimize(self, global_step=None, name=None):
    method update_weights (line 639) | def update_weights(self, train_op):
    method approximate_duality_gap (line 680) | def approximate_duality_gap(self):
    method unregularized_loss (line 710) | def unregularized_loss(self, examples):
    method regularized_loss (line 771) | def regularized_loss(self, examples):

FILE: tensorflow_estimator/python/estimator/canned/linear_optimizer/python/utils/sdca_ops_test.py
  function make_example_proto (line 38) | def make_example_proto(feature_dict, target, value=1.0):
  function make_example_dict (line 52) | def make_example_dict(example_protos, example_weights):
  function make_random_examples_and_variables_dicts (line 98) | def make_random_examples_and_variables_dicts(num_examples, dim, num_non_...
  function make_variable_dict (line 124) | def make_variable_dict(max_age, max_gender, num_shards=None, partitioned...
  function make_dense_examples_and_variables_dicts (line 145) | def make_dense_examples_and_variables_dicts(dense_features_values, weights,
  function get_binary_predictions_for_logistic (line 190) | def get_binary_predictions_for_logistic(predictions, cutoff=0.5):
  function get_binary_predictions_for_hinge (line 197) | def get_binary_predictions_for_hinge(predictions):
  class _SDCAModelTest (line 206) | class _SDCAModelTest(TensorFlowTestCase):
    method _single_threaded_test_session (line 209) | def _single_threaded_test_session(self):
  class SdcaWithLogisticLossTest (line 217) | class SdcaWithLogisticLossTest(_SDCAModelTest):
    method testSimple (line 220) | def testSimple(self):
    method testPartitionedPrimals (line 268) | def testPartitionedPrimals(self):
    method testSomePartitionedPrimals (line 316) | def testSomePartitionedPrimals(self):
    method testSparseRandom (line 377) | def testSparseRandom(self):
    method testSparseDuplicate (line 401) | def testSparseDuplicate(self):
    method testDistributedSimple (line 428) | def testDistributedSimple(self):
    method testSimpleNoL2 (line 504) | def testSimpleNoL2(self):
    method testSomeUnweightedExamples (line 529) | def testSomeUnweightedExamples(self):
    method testFractionalExampleLabel (line 583) | def testFractionalExampleLabel(self):
    method testImbalanced (line 612) | def testImbalanced(self):
    method testImbalancedWithExampleWeights (line 661) | def testImbalancedWithExampleWeights(self):
    method testInstancesOfOneClassOnly (line 701) | def testInstancesOfOneClassOnly(self):
    method testOutOfRangeSparseFeatures (line 740) | def testOutOfRangeSparseFeatures(self):
    method testOutOfRangeDenseFeatures (line 767) | def testOutOfRangeDenseFeatures(self):
    method testMissingFeature (line 789) | def testMissingFeature(self):
  class SdcaWithLinearLossTest (line 832) | class SdcaWithLinearLossTest(_SDCAModelTest):
    method testSimple (line 835) | def testSimple(self):
    method testL2Regularization (line 874) | def testL2Regularization(self):
    method testL1Regularization (line 922) | def testL1Regularization(self):
    method testFeatureValues (line 960) | def testFeatureValues(self):
    method testDenseFeaturesWithDefaultWeights (line 1007) | def testDenseFeaturesWithDefaultWeights(self):
    method testDenseFeaturesWithArbitraryWeights (line 1037) | def testDenseFeaturesWithArbitraryWeights(self):
  class SdcaWithHingeLossTest (line 1072) | class SdcaWithHingeLossTest(_SDCAModelTest):
    method testSimple (line 1075) | def testSimple(self):
    method testDenseFeaturesPerfectlySeparable (line 1123) | def testDenseFeaturesPerfectlySeparable(self):
    method testDenseFeaturesSeparableWithinMargins (line 1155) | def testDenseFeaturesSeparableWithinMargins(self):
    method testDenseFeaturesWeightedExamples (line 1186) | def testDenseFeaturesWeightedExamples(self):
  class SdcaWithSmoothHingeLossTest (line 1222) | class SdcaWithSmoothHingeLossTest(_SDCAModelTest):
    method testSimple (line 1225) | def testSimple(self):
  class SdcaWithPoissonLossTest (line 1275) | class SdcaWithPoissonLossTest(_SDCAModelTest):
    method testSimple (line 1278) | def testSimple(self):
  class SdcaFprintTest (line 1327) | class SdcaFprintTest(_SDCAModelTest):
    method testFprint (line 1337) | def testFprint(self):
  class _SparseFeatureColumnTest (line 1347) | class _SparseFeatureColumnTest(TensorFlowTestCase):
    method testBasic (line 1350) | def testBasic(self):

FILE: tensorflow_estimator/python/estimator/canned/linear_optimizer/python/utils/sharded_mutable_dense_hashtable.py
  class _MutableDenseHashTable (line 33) | class _MutableDenseHashTable(lookup_ops.LookupInterface):
    method __init__ (line 37) | def __init__(self,
    method _create_resource (line 104) | def _create_resource(self):
    method name (line 125) | def name(self):
    method size (line 128) | def size(self, name=None):
    method lookup (line 143) | def lookup(self, keys, name=None):
    method insert (line 169) | def insert(self, keys, values, name=None):
    method export (line 196) | def export(self, name=None):
    method _serialize_to_tensors (line 214) | def _serialize_to_tensors(self):
    method _restore_from_tensors (line 218) | def _restore_from_tensors(self, restored_tensors):
    class _Saveable (line 224) | class _Saveable(BaseSaverBuilder.SaveableObject):
      method __init__ (line 227) | def __init__(self, table, name):
      method restore (line 236) | def restore(self, restored_tensors, restored_shapes):
  class _ShardedMutableDenseHashTable (line 247) | class _ShardedMutableDenseHashTable(object):
    method __init__ (line 258) | def __init__(self,
    method name (line 289) | def name(self):
    method _num_shards (line 293) | def _num_shards(self):
    method table_shards (line 297) | def table_shards(self):
    method size (line 300) | def size(self, name=None):
    method _shard_indices (line 305) | def _shard_indices(self, keys):
    method _check_keys (line 314) | def _check_keys(self, keys):
    method lookup (line 319) | def lookup(self, keys, name=None):
    method insert (line 342) | def insert(self, keys, values, name=None):
    method export_sharded (line 359) | def export_sharded(self, name=None):

FILE: tensorflow_estimator/python/estimator/canned/linear_optimizer/python/utils/sharded_mutable_dense_hashtable_test.py
  class _ShardedMutableDenseHashTableTest (line 26) | class _ShardedMutableDenseHashTableTest(tf.test.TestCase):
    method testShardedMutableHashTable (line 29) | def testShardedMutableHashTable(self):
    method testShardedMutableHashTableVectors (line 54) | def testShardedMutableHashTableVectors(self):
    method testExportSharded (line 82) | def testExportSharded(self):

FILE: tensorflow_estimator/python/estimator/canned/linear_test.py
  function _linear_regressor_fn (line 29) | def _linear_regressor_fn(*args, **kwargs):
  function _linear_classifier_fn (line 33) | def _linear_classifier_fn(*args, **kwargs):
  class LinearRegressorEvaluationV2Test (line 40) | class LinearRegressorEvaluationV2Test(
    method __init__ (line 43) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearRegressorPredictV2Test (line 49) | class LinearRegressorPredictV2Test(
    method __init__ (line 52) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearRegressorIntegrationV2Test (line 58) | class LinearRegressorIntegrationV2Test(
    method __init__ (line 61) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearRegressorTrainingV2Test (line 67) | class LinearRegressorTrainingV2Test(
    method __init__ (line 70) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearClassifierTrainingV2Test (line 79) | class LinearClassifierTrainingV2Test(
    method __init__ (line 82) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearClassifierEvaluationV2Test (line 90) | class LinearClassifierEvaluationV2Test(
    method __init__ (line 93) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearClassifierPredictV2Test (line 101) | class LinearClassifierPredictV2Test(
    method __init__ (line 104) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearClassifierIntegrationV2Test (line 112) | class LinearClassifierIntegrationV2Test(
    method __init__ (line 115) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearLogitFnV2Test (line 126) | class LinearLogitFnV2Test(linear_testing_utils.BaseLinearLogitFnTest,
    method __init__ (line 129) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearWarmStartingV2Test (line 138) | class LinearWarmStartingV2Test(linear_testing_utils.BaseLinearWarmStarti...
    method __init__ (line 141) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class ComputeFractionOfZeroTest (line 150) | class ComputeFractionOfZeroTest(tf.test.TestCase):
    method _assertSparsity (line 152) | def _assertSparsity(self, expected_sparsity, tensor):
    method test_small_float32 (line 156) | def test_small_float32(self):
    method test_small_int32 (line 162) | def test_small_int32(self):
    method test_small_float64 (line 166) | def test_small_float64(self):
    method test_small_int64 (line 170) | def test_small_int64(self):
    method test_nested (line 174) | def test_nested(self):
    method test_none (line 179) | def test_none(self):
    method test_empty (line 183) | def test_empty(self):
    method test_multiple_empty (line 189) | def test_multiple_empty(self):
    method test_some_empty (line 198) | def test_some_empty(self):
    method test_mixed_types (line 206) | def test_mixed_types(self):
    method test_2_27_zeros__using_512_MiB_of_ram (line 213) | def test_2_27_zeros__using_512_MiB_of_ram(self):
    method test_2_27_ones__using_512_MiB_of_ram (line 217) | def test_2_27_ones__using_512_MiB_of_ram(self):

FILE: tensorflow_estimator/python/estimator/canned/linear_testing_utils.py
  function assert_close (line 62) | def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  function save_variables_to_ckpt (line 77) | def save_variables_to_ckpt(model_dir):
  function queue_parsed_features (line 84) | def queue_parsed_features(feature_map):
  function sorted_key_dict (line 99) | def sorted_key_dict(unsorted_dict):
  function sigmoid (line 103) | def sigmoid(x):
  function mock_optimizer (line 107) | def mock_optimizer(testcase, expected_loss=None):
  class BaseLinearRegressorEvaluationTest (line 142) | class BaseLinearRegressorEvaluationTest(object):
    method __init__ (line 144) | def __init__(self, linear_regressor_fn, fc_lib=feature_column_v2):
    method setUp (line 148) | def setUp(self):
    method tearDown (line 151) | def tearDown(self):
    method test_evaluation_for_simple_data (line 156) | def test_evaluation_for_simple_data(self):
    method test_evaluation_batch (line 182) | def test_evaluation_batch(self):
    method test_evaluation_weights (line 212) | def test_evaluation_weights(self):
    method test_evaluation_for_multi_dimensions (line 246) | def test_evaluation_for_multi_dimensions(self):
    method test_evaluation_for_multiple_feature_columns (line 282) | def test_evaluation_for_multiple_feature_columns(self):
    method test_evaluation_for_multiple_feature_columns_mix (line 320) | def test_evaluation_for_multiple_feature_columns_mix(self):
  class BaseLinearRegressorPredictTest (line 360) | class BaseLinearRegressorPredictTest(object):
    method __init__ (line 362) | def __init__(self, linear_regressor_fn, fc_lib=feature_column_v2):
    method setUp (line 366) | def setUp(self):
    method tearDown (line 369) | def tearDown(self):
    method test_1d (line 374) | def test_1d(self):
    method testMultiDim (line 397) | def testMultiDim(self):
    method testTwoFeatureColumns (line 430) | def testTwoFeatureColumns(self):
    method testTwoFeatureColumnsMix (line 458) | def testTwoFeatureColumnsMix(self):
    method testSparseCombiner (line 483) | def testSparseCombiner(self):
  class BaseLinearRegressorIntegrationTest (line 546) | class BaseLinearRegressorIntegrationTest(object):
    method __init__ (line 548) | def __init__(self, linear_regressor_fn, fc_lib=feature_column_v2):
    method setUp (line 552) | def setUp(self):
    method tearDown (line 555) | def tearDown(self):
    method _test_complete_flow (line 560) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 592) | def test_numpy_input_fn(self):
    method test_pandas_input_fn (line 628) | def test_pandas_input_fn(self):
    method test_input_fn_from_parse_example (line 657) | def test_input_fn_from_parse_example(self):
  class BaseLinearRegressorTrainingTest (line 718) | class BaseLinearRegressorTrainingTest(object):
    method __init__ (line 720) | def __init__(self, linear_regressor_fn, fc_lib=feature_column_v2):
    method setUp (line 724) | def setUp(self):
    method tearDown (line 727) | def tearDown(self):
    method _assert_checkpoint (line 732) | def _assert_checkpoint(self,
    method testFromScratchWithDefaultOptimizer (line 757) | def testFromScratchWithDefaultOptimizer(self):
    method testTrainWithOneDimLabel (line 773) | def testTrainWithOneDimLabel(self):
    method testTrainWithOneDimWeight (line 793) | def testTrainWithOneDimWeight(self):
    method testFromScratch (line 818) | def testFromScratch(self):
    method testFromCheckpoint (line 843) | def testFromCheckpoint(self):
    method testFromCheckpointMultiBatch (line 879) | def testFromCheckpointMultiBatch(self):
  class BaseLinearClassifierTrainingTest (line 920) | class BaseLinearClassifierTrainingTest(object):
    method __init__ (line 922) | def __init__(self, linear_classifier_fn, fc_lib=feature_column_v2):
    method setUp (line 926) | def setUp(self):
    method tearDown (line 929) | def tearDown(self):
    method _assert_checkpoint (line 933) | def _assert_checkpoint(self,
    method _testFromScratchWithDefaultOptimizer (line 962) | def _testFromScratchWithDefaultOptimizer(self, n_classes):
    method testBinaryClassesFromScratchWithDefaultOptimizer (line 978) | def testBinaryClassesFromScratchWithDefaultOptimizer(self):
    method testMultiClassesFromScratchWithDefaultOptimizer (line 981) | def testMultiClassesFromScratchWithDefaultOptimizer(self):
    method _testTrainWithTwoDimsLabel (line 984) | def _testTrainWithTwoDimsLabel(self, n_classes):
    method testBinaryClassesTrainWithTwoDimsLabel (line 1005) | def testBinaryClassesTrainWithTwoDimsLabel(self):
    method testMultiClassesTrainWithTwoDimsLabel (line 1008) | def testMultiClassesTrainWithTwoDimsLabel(self):
    method _testTrainWithOneDimLabel (line 1011) | def _testTrainWithOneDimLabel(self, n_classes):
    method testBinaryClassesTrainWithOneDimLabel (line 1030) | def testBinaryClassesTrainWithOneDimLabel(self):
    method testMultiClassesTrainWithOneDimLabel (line 1033) | def testMultiClassesTrainWithOneDimLabel(self):
    method _testTrainWithTwoDimsWeight (line 1036) | def _testTrainWithTwoDimsWeight(self, n_classes):
    method testBinaryClassesTrainWithTwoDimsWeight (line 1061) | def testBinaryClassesTrainWithTwoDimsWeight(self):
    method testMultiClassesTrainWithTwoDimsWeight (line 1064) | def testMultiClassesTrainWithTwoDimsWeight(self):
    method _testTrainWithOneDimWeight (line 1067) | def _testTrainWithOneDimWeight(self, n_classes):
    method testBinaryClassesTrainWithOneDimWeight (line 1090) | def testBinaryClassesTrainWithOneDimWeight(self):
    method testMultiClassesTrainWithOneDimWeight (line 1093) | def testMultiClassesTrainWithOneDimWeight(self):
    method _testFromScratch (line 1096) | def _testFromScratch(self, n_classes):
    method testBinaryClassesFromScratch (line 1132) | def testBinaryClassesFromScratch(self):
    method testMultiClassesFromScratch (line 1135) | def testMultiClassesFromScratch(self):
    method _testFromCheckpoint (line 1138) | def _testFromCheckpoint(self, n_classes):
    method testBinaryClassesFromCheckpoint (line 1197) | def testBinaryClassesFromCheckpoint(self):
    method testMultiClassesFromCheckpoint (line 1200) | def testMultiClassesFromCheckpoint(self):
    method _testFromCheckpointFloatLabels (line 1203) | def _testFromCheckpointFloatLabels(self, n_classes):
    method testBinaryClassesFromCheckpointFloatLabels (line 1242) | def testBinaryClassesFromCheckpointFloatLabels(self):
    method testMultiClassesFromCheckpointFloatLabels (line 1245) | def testMultiClassesFromCheckpointFloatLabels(self):
    method _testFromCheckpointMultiBatch (line 1248) | def _testFromCheckpointMultiBatch(self, n_classes):
    method testBinaryClassesFromCheckpointMultiBatch (line 1312) | def testBinaryClassesFromCheckpointMultiBatch(self):
    method testMultiClassesFromCheckpointMultiBatch (line 1315) | def testMultiClassesFromCheckpointMultiBatch(self):
  class BaseLinearClassifierEvaluationTest (line 1319) | class BaseLinearClassifierEvaluationTest(object):
    method __init__ (line 1321) | def __init__(self, linear_classifier_fn, fc_lib=feature_column_v2):
    method setUp (line 1325) | def setUp(self):
    method tearDown (line 1328) | def tearDown(self):
    method _test_evaluation_for_simple_data (line 1332) | def _test_evaluation_for_simple_data(self, n_classes):
    method test_binary_classes_evaluation_for_simple_data (line 1394) | def test_binary_classes_evaluation_for_simple_data(self):
    method test_multi_classes_evaluation_for_simple_data (line 1397) | def test_multi_classes_evaluation_for_simple_data(self):
    method _test_evaluation_batch (line 1400) | def _test_evaluation_batch(self, n_classes):
    method test_binary_classes_evaluation_batch (line 1472) | def test_binary_classes_evaluation_batch(self):
    method test_multi_classes_evaluation_batch (line 1475) | def test_multi_classes_evaluation_batch(self):
    method _test_evaluation_weights (line 1478) | def _test_evaluation_weights(self, n_classes):
    method test_binary_classes_evaluation_weights (line 1563) | def test_binary_classes_evaluation_weights(self):
    method test_multi_classes_evaluation_weights (line 1566) | def test_multi_classes_evaluation_weights(self):
  class BaseLinearClassifierPredictTest (line 1570) | class BaseLinearClassifierPredictTest(object):
    method __init__ (line 1572) | def __init__(self, linear_classifier_fn, fc_lib=feature_column_v2):
    method setUp (line 1576) | def setUp(self):
    method tearDown (line 1579) | def tearDown(self):
    method _testPredictions (line 1583) | def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
    method testBinaryClassesWithoutLabelVocabulary (line 1658) | def testBinaryClassesWithoutLabelVocabulary(self):
    method testBinaryClassesWithLabelVocabulary (line 1665) | def testBinaryClassesWithLabelVocabulary(self):
    method testMultiClassesWithoutLabelVocabulary (line 1672) | def testMultiClassesWithoutLabelVocabulary(self):
    method testMultiClassesWithLabelVocabulary (line 1679) | def testMultiClassesWithLabelVocabulary(self):
    method testSparseCombiner (line 1686) | def testSparseCombiner(self):
  class BaseLinearClassifierIntegrationTest (line 1749) | class BaseLinearClassifierIntegrationTest(object):
    method __init__ (line 1751) | def __init__(self, linear_classifier_fn, fc_lib=feature_column_v2):
    method setUp (line 1755) | def setUp(self):
    method tearDown (line 1758) | def tearDown(self):
    method _test_complete_flow (line 1762) | def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
    method _test_numpy_input_fn (line 1794) | def _test_numpy_input_fn(self, n_classes):
    method test_binary_classes_numpy_input_fn (line 1830) | def test_binary_classes_numpy_input_fn(self):
    method test_multi_classes_numpy_input_fn (line 1833) | def test_multi_classes_numpy_input_fn(self):
    method _test_pandas_input_fn (line 1836) | def _test_pandas_input_fn(self, n_classes):
    method test_binary_classes_pandas_input_fn (line 1865) | def test_binary_classes_pandas_input_fn(self):
    method test_multi_classes_pandas_input_fn (line 1868) | def test_multi_classes_pandas_input_fn(self):
    method _test_input_fn_from_parse_example (line 1871) | def _test_input_fn_from_parse_example(self, n_classes):
    method test_binary_classes_input_fn_from_parse_example (line 1930) | def test_binary_classes_input_fn_from_parse_example(self):
    method test_multi_classes_input_fn_from_parse_example (line 1933) | def test_multi_classes_input_fn_from_parse_example(self):
  class BaseLinearLogitFnTest (line 1937) | class BaseLinearLogitFnTest(object):
    method __init__ (line 1939) | def __init__(self, fc_lib=feature_column_v2):
    method test_basic_logit_correctness (line 1942) | def test_basic_logit_correctness(self):
    method test_compute_fraction_of_zero_v2 (line 1963) | def test_compute_fraction_of_zero_v2(self):
  class BaseLinearWarmStartingTest (line 1995) | class BaseLinearWarmStartingTest(object):
    method __init__ (line 1997) | def __init__(self,
    method setUp (line 2005) | def setUp(self):
    method tearDown (line 2020) | def tearDown(self):
    method test_classifier_basic_warm_starting (line 2025) | def test_classifier_basic_warm_starting(self):
    method test_regressor_basic_warm_starting (line 2059) | def test_regressor_basic_warm_starting(self):
    method test_warm_starting_selective_variables (line 2091) | def test_warm_starting_selective_variables(self):
    method test_warm_starting_with_vocab_remapping_and_partitioning (line 2124) | def test_warm_starting_with_vocab_remapping_and_partitioning(self):
    method test_warm_starting_with_naming_change (line 2202) | def test_warm_starting_with_naming_change(self):

FILE: tensorflow_estimator/python/estimator/canned/metric_keys.py
  class MetricKeys (line 24) | class MetricKeys(object):

FILE: tensorflow_estimator/python/estimator/canned/optimizers.py
  function get_optimizer_instance (line 48) | def get_optimizer_instance(opt, learning_rate=None):
  function _optimizer_has_default_learning_rate (line 90) | def _optimizer_has_default_learning_rate(opt):
  function get_optimizer_instance_v2 (line 99) | def get_optimizer_instance_v2(opt, learning_rate=None):

FILE: tensorflow_estimator/python/estimator/canned/optimizers_test.py
  class _TestOptimizer (line 26) | class _TestOptimizer(tf.compat.v1.train.Optimizer):
    method __init__ (line 28) | def __init__(self):
  class GetOptimizerInstance (line 33) | class GetOptimizerInstance(tf.test.TestCase):
    method test_unsupported_name (line 35) | def test_unsupported_name(self):
    method test_supported_name_but_learning_rate_none (line 40) | def test_supported_name_but_learning_rate_none(self):
    method test_keras_optimizer_after_tf_2_11 (line 45) | def test_keras_optimizer_after_tf_2_11(self):
    method test_adagrad (line 61) | def test_adagrad(self):
    method test_adam (line 66) | def test_adam(self):
    method test_ftrl (line 71) | def test_ftrl(self):
    method test_rmsprop (line 76) | def test_rmsprop(self):
    method test_sgd (line 81) | def test_sgd(self):
    method test_object (line 86) | def test_object(self):
    method test_object_invalid (line 90) | def test_object_invalid(self):
    method test_callable (line 95) | def test_callable(self):
    method test_lambda (line 103) | def test_lambda(self):
    method test_callable_returns_invalid (line 107) | def test_callable_returns_invalid(self):

FILE: tensorflow_estimator/python/estimator/canned/optimizers_test_v2.py
  class _TestOptimizerV2 (line 26) | class _TestOptimizerV2(tf_keras.optimizers.legacy.Optimizer):
    method __init__ (line 28) | def __init__(self):
    method get_config (line 31) | def get_config(self):
  class GetOptimizerInstanceV2 (line 35) | class GetOptimizerInstanceV2(tf.test.TestCase):
    method test_unsupported_name (line 38) | def test_unsupported_name(self):
    method test_adagrad_but_no_learning_rate (line 44) | def test_adagrad_but_no_learning_rate(self):
    method test_adam_but_no_learning_rate (line 56) | def test_adam_but_no_learning_rate(self):
    method test_adagrad (line 65) | def test_adagrad(self):
    method test_adam (line 75) | def test_adam(self):
    method test_ftrl (line 84) | def test_ftrl(self):
    method test_rmsprop (line 93) | def test_rmsprop(self):
    method test_sgd (line 103) | def test_sgd(self):
    method test_object (line 112) | def test_object(self):
    method test_object_invalid (line 116) | def test_object_invalid(self):
    method test_callable (line 122) | def test_callable(self):
    method test_lambda (line 130) | def test_lambda(self):
    method test_callable_returns_invalid (line 134) | def test_callable_returns_invalid(self):

FILE: tensorflow_estimator/python/estimator/canned/parsing_utils.py
  function classifier_parse_example_spec_v2 (line 28) | def classifier_parse_example_spec_v2(feature_columns,
  function regressor_parse_example_spec_v2 (line 148) | def regressor_parse_example_spec_v2(feature_columns,
  function _add_label_and_weight_to_parsing_spec (line 264) | def _add_label_and_weight_to_parsing_spec(parsing_spec,
  function classifier_parse_example_spec (line 316) | def classifier_parse_example_spec(feature_columns,
  function regressor_parse_example_spec (line 335) | def regressor_parse_example_spec(

FILE: tensorflow_estimator/python/estimator/canned/parsing_utils_test.py
  class BaseClassifierParseExampleSpec (line 25) | class BaseClassifierParseExampleSpec(object):
    method __init__ (line 28) | def __init__(self, parse_example_fn):
    method test_defaults (line 31) | def test_defaults(self):
    method test_string (line 40) | def test_string(self):
    method test_label_default_value (line 52) | def test_label_default_value(self):
    method test_weight_column_as_string (line 65) | def test_weight_column_as_string(self):
    method test_weight_column_as_numeric_column (line 77) | def test_weight_column_as_numeric_column(self):
    method test_label_key_should_not_be_used_as_feature (line 89) | def test_label_key_should_not_be_used_as_feature(self):
    method test_weight_column_should_not_be_used_as_feature (line 96) | def test_weight_column_should_not_be_used_as_feature(self):
    method test_weight_column_should_be_a_numeric_column (line 104) | def test_weight_column_should_be_a_numeric_column(self):
  class ClassifierParseExampleSpecV2 (line 114) | class ClassifierParseExampleSpecV2(BaseClassifierParseExampleSpec,
    method __init__ (line 117) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    method test_non_v1_feature_column (line 122) | def test_non_v1_feature_column(self):
  class ClassifierParseExampleSpecV1 (line 133) | class ClassifierParseExampleSpecV1(BaseClassifierParseExampleSpec,
    method __init__ (line 136) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class BaseRegressorParseExampleSpec (line 142) | class BaseRegressorParseExampleSpec(object):
    method __init__ (line 145) | def __init__(self, parse_example_fn):
    method test_defaults (line 148) | def test_defaults(self):
    method test_int64 (line 157) | def test_int64(self):
    method test_label_default_value (line 168) | def test_label_default_value(self):
    method test_label_dimension (line 183) | def test_label_dimension(self):
    method test_weight_column_as_string (line 194) | def test_weight_column_as_string(self):
    method test_weight_column_as_numeric_column (line 206) | def test_weight_column_as_numeric_column(self):
    method test_label_key_should_not_be_used_as_feature (line 218) | def test_label_key_should_not_be_used_as_feature(self):
    method test_weight_column_should_not_be_used_as_feature (line 225) | def test_weight_column_should_not_be_used_as_feature(self):
    method test_weight_column_should_be_a_numeric_column (line 233) | def test_weight_column_should_be_a_numeric_column(self):
  class RegressorParseExampleSpecV2 (line 243) | class RegressorParseExampleSpecV2(BaseRegressorParseExampleSpec,
    method __init__ (line 246) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    method test_non_v1_feature_column (line 251) | def test_non_v1_feature_column(self):
  class RegressorParseExampleSpecV1 (line 262) | class RegressorParseExampleSpecV1(BaseRegressorParseExampleSpec,
    method __init__ (line 265) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name

FILE: tensorflow_estimator/python/estimator/canned/prediction_keys.py
  class PredictionKeys (line 22) | class PredictionKeys(object):

FILE: tensorflow_estimator/python/estimator/canned/rnn.py
  function _single_rnn_cell (line 63) | def _single_rnn_cell(units, cell_type):
  function _make_rnn_cell_fn (line 81) | def _make_rnn_cell_fn(units, cell_type=_SIMPLE_RNN_KEY):
  class RNNModel (line 105) | class RNNModel(tf_keras.models.Model):
    method __init__ (line 129) | def __init__(self,
    method call (line 180) | def call(self, inputs, training=None):
    method get_config (line 227) | def get_config(self):
    method from_config (line 245) | def from_config(cls, config, custom_objects=None):
  function _get_rnn_estimator_spec (line 274) | def _get_rnn_estimator_spec(features, labels, mode, head, rnn_model, opt...
  function _verify_rnn_cell_input (line 332) | def _verify_rnn_cell_input(rnn_cell_fn, units, cell_type):
  function _make_rnn_layer (line 338) | def _make_rnn_layer(rnn_cell_fn, units, cell_type, return_sequences):
  class RNNEstimator (line 366) | class RNNEstimator(estimator.Estimator):
    method __init__ (line 426) | def __init__(self,
  class RNNClassifier (line 519) | class RNNClassifier(RNNEstimator):
    method __init__ (line 571) | def __init__(self,

FILE: tensorflow_estimator/python/estimator/canned/rnn_test.py
  function _assert_close (line 51) | def _assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  function create_checkpoint (line 66) | def create_checkpoint(kernel, recurrent, bias, dense_kernel, dense_bias,
  function _make_rnn_layer (line 101) | def _make_rnn_layer(rnn_cell_fn=None,
  class RNNLayerFnTest (line 113) | class RNNLayerFnTest(tf.test.TestCase, parameterized.TestCase):
    method testWrongClassProvided (line 116) | def testWrongClassProvided(self):
    method testWrongStringProvided (line 122) | def testWrongStringProvided(self):
    method testDefaultCellProvided (line 130) | def testDefaultCellProvided(self, cell_type):
    method testSpecificLayerTypeProvided (line 139) | def testSpecificLayerTypeProvided(self, cell_type, layer_type):
    method testSpecificLayerTypeArguments (line 144) | def testSpecificLayerTypeArguments(self):
    method testCustomCellProvided (line 157) | def testCustomCellProvided(self, mock_rnn_layer_type):
    method testMultipleCellsProvided (line 167) | def testMultipleCellsProvided(self):
    method testCustomCellFnProvided (line 176) | def testCustomCellFnProvided(self, mock_rnn_layer_type):
  function _mock_logits_layer (line 185) | def _mock_logits_layer(kernel, bias):
  function _default_features_fn (line 205) | def _default_features_fn():
  function _get_mock_head (line 213) | def _get_mock_head():
  class RNNLogitFnTest (line 221) | class RNNLogitFnTest(tf.test.TestCase, parameterized.TestCase):
    method setUp (line 224) | def setUp(self):
    method _mock_logits_layer (line 237) | def _mock_logits_layer(self):
    method _test_logits (line 240) | def _test_logits(self,
    method testOneDimLogits (line 289) | def testOneDimLogits(self, return_sequences, expected_logits, training...
    method testMultiDimLogits (line 332) | def testMultiDimLogits(self, return_sequences, expected_logits):
    method testMultiExampleMultiDim (line 383) | def testMultiExampleMultiDim(self, return_sequences, expected_logits):
    method testMultiExamplesDifferentLength (line 450) | def testMultiExamplesDifferentLength(self, return_sequences, expected_...
    method testMultiExamplesWithContext (line 496) | def testMultiExamplesWithContext(self):
    method testMultiExamplesMultiFeatures (line 539) | def testMultiExamplesMultiFeatures(self):
    method testTrainingMode (line 592) | def testTrainingMode(self, mode, expected_training_mode):
  class RNNModelTest (line 621) | class RNNModelTest(tf.test.TestCase, parameterized.TestCase):
    method setUp (line 624) | def setUp(self):
    method _get_compiled_model (line 643) | def _get_compiled_model(self,
    method testModelWeights (line 669) | def testModelWeights(self):
    method _testModelConfig (line 705) | def _testModelConfig(self, **kwargs):
    method testModelConfig (line 737) | def testModelConfig(self):
    method testModelConfigWithActivation (line 750) | def testModelConfigWithActivation(self):
    method testModelConfigWithContextFeatures (line 755) | def testModelConfigWithContextFeatures(self):
    method DISABLED_testSaveModelWeights (line 762) | def DISABLED_testSaveModelWeights(self):  # See b/129842600.
    method DISABLED_testEvaluationMetrics (line 774) | def DISABLED_testEvaluationMetrics(self):  # See b/129842600.
    method DISABLED_testEvaluationSequential (line 782) | def DISABLED_testEvaluationSequential(self):  # See b/129842600.
    method DISABLED_testPredictions (line 789) | def DISABLED_testPredictions(self):  # See b/129842600.
    method DISABLED_testPredictionsSequential (line 796) | def DISABLED_testPredictionsSequential(self):  # See b/129842600.
    method DISABLED_testTraining (line 808) | def DISABLED_testTraining(self, optimizer):  # See b/129842600.
    method DISABLED_testTrainingSequential (line 819) | def DISABLED_testTrainingSequential(self):  # See b/129842600.
  class RNNEstimatorInitTest (line 828) | class RNNEstimatorInitTest(tf.test.TestCase):
    method setUp (line 830) | def setUp(self):
    method testConflictingRNNCellFn (line 839) | def testConflictingRNNCellFn(self):
    method testNonSequentialHeadProvided (line 856) | def testNonSequentialHeadProvided(self):
    method testWrongOptimizerTypeProvided (line 865) | def testWrongOptimizerTypeProvided(self):
  class RNNClassifierTrainingTest (line 876) | class RNNClassifierTrainingTest(tf.test.TestCase):
    method setUp (line 878) | def setUp(self):
    method _assert_checkpoint (line 889) | def _assert_checkpoint(self, n_classes, input_units, cell_units,
    method _mock_optimizer (line 919) | def _mock_optimizer(self, expected_loss=None):
    method _testFromScratchWithDefaultOptimizer (line 955) | def _testFromScratchWithDefaultOptimizer(self, n_classes):
    method testBinaryClassFromScratchWithDefaultOptimizer (line 983) | def testBinaryClassFromScratchWithDefaultOptimizer(self):
    method testMultiClassFromScratchWithDefaultOptimizer (line 986) | def testMultiClassFromScratchWithDefaultOptimizer(self):
    method testFromScratchWithCustomRNNCellFn (line 989) | def testFromScratchWithCustomRNNCellFn(self):
    method _testExampleWeight (line 1022) | def _testExampleWeight(self, n_classes):
    method testBinaryClassWithExampleWeight (line 1052) | def testBinaryClassWithExampleWeight(self):
    method testMultiClassWithExampleWeight (line 1055) | def testMultiClassWithExampleWeight(self):
    method _testFromCheckpoint (line 1058) | def _testFromCheckpoint(self, input_fn, expected_loss, **kwargs):
    method testBinaryClassFromCheckpoint (line 1081) | def testBinaryClassFromCheckpoint(self):
    method testMultiClassFromCheckpoint (line 1096) | def testMultiClassFromCheckpoint(self):
    method testBinaryClassFromCheckpointSequential (line 1114) | def testBinaryClassFromCheckpointSequential(self):
    method testBinaryClassFromCheckpointSequentialWithWeights (line 1142) | def testBinaryClassFromCheckpointSequentialWithWeights(self):
    method testDefaultGradientClipping (line 1176) | def testDefaultGradientClipping(self):
  function sorted_key_dict (line 1230) | def sorted_key_dict(unsorted_dict):
  class RNNClassifierEvaluationTest (line 1235) | class RNNClassifierEvaluationTest(tf.test.TestCase):
    method setUp (line 1237) | def setUp(self):
    method _testFromCheckpoint (line 1249) | def _testFromCheckpoint(self, input_fn, **kwargs):
    method testBinaryClassEvaluationMetrics (line 1266) | def testBinaryClassEvaluationMetrics(self):
    method testBinaryClassEvaluationMetricsSequential (line 1304) | def testBinaryClassEvaluationMetricsSequential(self):
    method testMultiClassEvaluationMetrics (line 1351) | def testMultiClassEvaluationMetrics(self):
  class RNNClassifierPredictionTest (line 1391) | class RNNClassifierPredictionTest(tf.test.TestCase):
    method setUp (line 1393) | def setUp(self):
    method _testFromCheckpoint (line 1404) | def _testFromCheckpoint(self, input_fn, **kwargs):
    method testBinaryClassPredictions (line 1430) | def testBinaryClassPredictions(self):
    method testMultiClassPredictions (line 1450) | def testMultiClassPredictions(self):
    method testBinaryClassPredictionsSequential (line 1471) | def testBinaryClassPredictionsSequential(self):
  class BaseRNNClassificationIntegrationTest (line 1505) | class BaseRNNClassificationIntegrationTest(object):
    method setUp (line 1507) | def setUp(self):
    method __init__ (line 1514) | def __init__(self, _create_estimator_fn):
    method _test_complete_flow (line 1517) | def _test_complete_flow(self,
    method _testNumpyInputFn (line 1557) | def _testNumpyInputFn(self, optimizer):
    method testNumpyInputFnStringOptimizer (line 1593) | def testNumpyInputFnStringOptimizer(self):
    method testNumpyInputFnOptimizerInstance (line 1596) | def testNumpyInputFnOptimizerInstance(self):
    method testParseExampleInputFn (line 1599) | def testParseExampleInputFn(self):
  function _rnn_classifier_fn (line 1655) | def _rnn_classifier_fn(feature_columns, n_classes, cell_units, model_dir,
  class RNNClassifierIntegrationTest (line 1666) | class RNNClassifierIntegrationTest(BaseRNNClassificationIntegrationTest,
    method __init__ (line 1669) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  function _rnn_classifier_dropout_fn (line 1674) | def _rnn_classifier_dropout_fn(feature_columns, n_classes, cell_units,
  class RNNClassifierDropoutIntegrationTest (line 1692) | class RNNClassifierDropoutIntegrationTest(BaseRNNClassificationIntegrati...
    method __init__ (line 1695) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  function _rnn_estimator_fn (line 1701) | def _rnn_estimator_fn(feature_columns, n_classes, cell_units, model_dir,
  class RNNEstimatorIntegrationTest (line 1712) | class RNNEstimatorIntegrationTest(BaseRNNClassificationIntegrationTest,
    method __init__ (line 1715) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class ModelFnTest (line 1721) | class ModelFnTest(tf.test.TestCase):
    method _test_sequential_mask_in_head (line 1724) | def _test_sequential_mask_in_head(self, mask=None):
    method testSequentialMaskInHead (line 1759) | def testSequentialMaskInHead(self):
    method testSequentialMaskInHeadWithMasks (line 1762) | def testSequentialMaskInHeadWithMasks(self):

FILE: tensorflow_estimator/python/estimator/canned/saved_model_estimator.py
  class SavedModelEstimator (line 35) | class SavedModelEstimator(estimator_lib.EstimatorV2):
    method __init__ (line 121) | def __init__(self, saved_model_dir, model_dir=None):
    method _extract_available_modes (line 148) | def _extract_available_modes(self):
    method _validate_mode (line 167) | def _validate_mode(self, mode):
    method _get_meta_graph_def_for_mode (line 174) | def _get_meta_graph_def_for_mode(self, mode):
    method _get_signature_def_for_mode (line 178) | def _get_signature_def_for_mode(self, mode):
    method _get_saver_def_from_mode (line 191) | def _get_saver_def_from_mode(self, mode):
    method _create_and_assert_global_step (line 195) | def _create_and_assert_global_step(self, graph):
    method _model_fn_from_saved_model (line 205) | def _model_fn_from_saved_model(self, features, labels, mode):
    method _restore_from_saver (line 290) | def _restore_from_saver(self, scaffold, session):
    method latest_checkpoint (line 294) | def latest_checkpoint(self):
  function _get_saved_model_ckpt (line 305) | def _get_saved_model_ckpt(saved_model_dir):
  function _clear_saved_model_collections (line 316) | def _clear_saved_model_collections():
  function _generate_input_map (line 330) | def _generate_input_map(signature_def, features, labels):
  function _check_same_dtype_and_shape (line 382) | def _check_same_dtype_and_shape(tensor, tensor_info, name):
  function _extract_eval_metrics (line 408) | def _extract_eval_metrics(output_dict):
  function _validate_and_extract_outputs (line 447) | def _validate_and_extract_outputs(mode, output_dict, method_name):

FILE: tensorflow_estimator/python/estimator/canned/saved_model_estimator_test.py
  function dummy_input_fn (line 40) | def dummy_input_fn():
  function _serving_feature_dict (line 46) | def _serving_feature_dict():
  function dummy_input_fn_features_only (line 50) | def dummy_input_fn_features_only():
  function dummy_supervised_receiver_fn (line 55) | def dummy_supervised_receiver_fn():
  function dummy_serving_receiver_fn (line 60) | def dummy_serving_receiver_fn():
  function model_fn_diff_modes (line 64) | def model_fn_diff_modes(features, labels, mode):
  function model_fn_with_trackable (line 93) | def model_fn_with_trackable(features, labels, mode):
  class SavedModelEstimatorTest (line 115) | class SavedModelEstimatorTest(tf.test.TestCase):
    method setUp (line 117) | def setUp(self):
    method tearDown (line 121) | def tearDown(self):
    method _get_tmp_dir (line 128) | def _get_tmp_dir(self):
    method _export_estimator (line 133) | def _export_estimator(self,
    method test_load_all_modes (line 154) | def test_load_all_modes(self):
    method test_load_all_modes_no_train (line 171) | def test_load_all_modes_no_train(self):
    method test_partial_exported_estimator (line 183) | def test_partial_exported_estimator(self):
    method test_with_incorrect_input (line 199) | def test_with_incorrect_input(self):
    method test_input_fn_with_global_step (line 219) | def test_input_fn_with_global_step(self):
    method test_re_export_saved_model_serving_only (line 233) | def test_re_export_saved_model_serving_only(self):
    method test_re_export_saved_model (line 256) | def test_re_export_saved_model(self):
    method test_re_export_saved_model_with_trackable (line 303) | def test_re_export_saved_model_with_trackable(self):
    method test_load_saved_model_from_serving_only (line 350) | def test_load_saved_model_from_serving_only(self):
    method test_with_local_init_op (line 384) | def test_with_local_init_op(self):
    method test_with_assets (line 411) | def test_with_assets(self):
    method test_with_working_input_fn (line 454) | def test_with_working_input_fn(self):
    method test_control_dependency (line 478) | def test_control_dependency(self):
    method test_saveable_resources (line 496) | def test_saveable_resources(self):

FILE: tensorflow_estimator/python/estimator/canned/timeseries/ar_model.py
  class LSTMPredictionModel (line 33) | class LSTMPredictionModel(tf_keras.models.Model):
    method __init__ (line 41) | def __init__(self,
    method call (line 65) | def call(self, input_window_features, output_window_features):
  class ARModel (line 77) | class ARModel(model.TimeSeriesModel):
    method __init__ (line 128) | def __init__(self,
    method initialize_graph (line 202) | def initialize_graph(self, input_statistics=None):
    method get_start_state (line 216) | def get_start_state(self):
    method random_model_parameters (line 227) | def random_model_parameters(self, seed=None):
    method generate (line 230) | def generate(self,
    method _predicted_covariance_op (line 237) | def _predicted_covariance_op(self, activations, num_values):
    method _predicted_mean_op (line 258) | def _predicted_mean_op(self, activations):
    method prediction_ops (line 269) | def prediction_ops(self, times, values, exogenous_regressors):
    method _output_window_predictions (line 342) | def _output_window_predictions(self, input_window_features,
    method loss_op (line 352) | def loss_op(self, targets, prediction_ops):
    method _process_exogenous_features (line 367) | def _process_exogenous_features(self, times, features):
    method predict (line 381) | def predict(self, features):
    method _process_window (line 555) | def _process_window(self, features, mode, exogenous_regressors):
    method get_batch_loss (line 617) | def get_batch_loss(self, features, mode, state):
    method _compute_time_features (line 788) | def _compute_time_features(self, time):

FILE: tensorflow_estimator/python/estimator/canned/timeseries/ar_model_test.py
  class ARModelTest (line 32) | class ARModelTest(tf.test.TestCase):
    method test_wrong_window_size (line 34) | def test_wrong_window_size(self):
    method test_predictions_direct_lstm (line 60) | def test_predictions_direct_lstm(self):
    method test_long_eval (line 77) | def test_long_eval(self):
    method test_long_eval_discard_indivisible (line 102) | def test_long_eval_discard_indivisible(self):

FILE: tensorflow_estimator/python/estimator/canned/timeseries/ar_model_training_test.py
  class InputFnBuilder (line 31) | class InputFnBuilder(object):
    method __init__ (line 33) | def __init__(self,
    method initialize_data_with_properties (line 46) | def initialize_data_with_properties(self, noise_stddev, periods, num_s...
    method train_or_test_input_fn (line 70) | def train_or_test_input_fn(self, time, data):
    method train_input_fn (line 86) | def train_input_fn(self):
    method test_input_fn (line 90) | def test_input_fn(self):
    method prediction_input_fn (line 94) | def prediction_input_fn(self):
    method true_values (line 125) | def true_values(self):
  class ARModelTrainingTest (line 134) | class ARModelTrainingTest(tf.test.TestCase):
    method train_helper (line 136) | def train_helper(self, input_window_size, loss, max_loss=None, periods...
    method test_autoregression_squared (line 199) | def test_autoregression_squared(self):
    method test_autoregression_short_input_window (line 202) | def test_autoregression_short_input_window(self):
    method test_autoregression_normal (line 205) | def test_autoregression_normal(self):
    method test_autoregression_normal_multiple_periods (line 211) | def test_autoregression_normal_multiple_periods(self):

FILE: tensorflow_estimator/python/estimator/canned/timeseries/estimators.py
  class TimeSeriesRegressor (line 33) | class TimeSeriesRegressor(estimator_lib.Estimator):
    method __init__ (line 36) | def __init__(self,
    method _model_start_state_placeholders (line 75) | def _model_start_state_placeholders(self,
    method build_one_shot_parsing_serving_input_receiver_fn (line 112) | def build_one_shot_parsing_serving_input_receiver_fn(self,
    method build_raw_serving_input_receiver_fn (line 210) | def build_raw_serving_input_receiver_fn(self,
  class LSTMAutoRegressor (line 293) | class LSTMAutoRegressor(TimeSeriesRegressor):
    method __init__ (line 373) | def __init__(self,

FILE: tensorflow_estimator/python/estimator/canned/timeseries/estimators_test.py
  class _SeedRunConfig (line 33) | class _SeedRunConfig(estimator_lib.RunConfig):
    method tf_random_seed (line 36) | def tf_random_seed(self):
  function _generate_data (line 40) | def _generate_data():
  function _build_input_fn_with_seed (line 47) | def _build_input_fn_with_seed(seed):
  class TimeSeriesRegressorTest (line 71) | class TimeSeriesRegressorTest(tf.test.TestCase):
    method _fit_restore_fit_test_template (line 73) | def _fit_restore_fit_test_template(self, estimator_fn, test_saved_model):
    method disabled_test_time_series_regressor (line 179) | def disabled_test_time_series_regressor(self):
    method test_ar_lstm_regressor (line 196) | def test_ar_lstm_regressor(self):

FILE: tensorflow_estimator/python/estimator/canned/timeseries/feature_keys.py
  class State (line 24) | class State(object):
  class Times (line 34) | class Times(object):
  class Values (line 40) | class Values(object):
  class TrainEvalFeatures (line 46) | class TrainEvalFeatures(Times, Values):
  class PredictionFeatures (line 51) | class PredictionFeatures(Times, State):
  class FilteringFeatures (line 56) | class FilteringFeatures(Times, Values, State):
  class PredictionResults (line 61) | class PredictionResults(Times):
  class FilteringResults (line 66) | class FilteringResults(Times, State):
  class SavedModelLabels (line 71) | class SavedModelLabels(object):

FILE: tensorflow_estimator/python/estimator/canned/timeseries/head.py
  class _NoStatePredictOutput (line 30) | class _NoStatePredictOutput(export_lib.PredictOutput):
    method as_signature_def (line 32) | def as_signature_def(self, receiver_tensors):
  class TimeSeriesRegressionHead (line 43) | class TimeSeriesRegressionHead(head_lib._Head):  # pylint:disable=protec...
    method __init__ (line 46) | def __init__(self,
    method name (line 68) | def name(self):
    method create_loss (line 73) | def create_loss(self, features, mode, logits=None, labels=None):
    method logits_dimension (line 82) | def logits_dimension(self):
    method _train_ops (line 86) | def _train_ops(self, features):
    method _evaluate_ops (line 100) | def _evaluate_ops(self, features):
    method _predict_ops (line 124) | def _predict_ops(self, features):
    method _serving_ops (line 133) | def _serving_ops(self, features):
    method _convert_feature_to_tensor (line 166) | def _convert_feature_to_tensor(self, name, value):
    method _gather_state (line 179) | def _gather_state(self, features):
    method _check_predict_features (line 199) | def _check_predict_features(self, features):
    method create_estimator_spec (line 222) | def create_estimator_spec(self, features, mode, labels=None):
  class OneShotPredictionHead (line 271) | class OneShotPredictionHead(TimeSeriesRegressionHead):
    method _check_predict_features (line 288) | def _check_predict_features(self, features):
    method _evaluate_ops (line 318) | def _evaluate_ops(self, features):
    method _serving_ops (line 326) | def _serving_ops(self, features):
  function _check_feature_shapes_compatible_with (line 360) | def _check_feature_shapes_compatible_with(features,
  function _check_train_eval_features (line 391) | def _check_train_eval_features(features, model):
  function _identity_metric_single (line 424) | def _identity_metric_single(name, input_tensor):
  function _identity_metric_nested (line 453) | def _identity_metric_nested(name, input_tensors):
  function state_to_dictionary (line 466) | def state_to_dictionary(state_tuple):

FILE: tensorflow_estimator/python/estimator/canned/timeseries/head_test.py
  class HeadTest (line 41) | class HeadTest(tf.test.TestCase):
    method test_labels_provided_error (line 43) | def test_labels_provided_error(self):
    method test_unknown_mode (line 55) | def test_unknown_mode(self):
  class _TickerModel (line 61) | class _TickerModel(object):
    method initialize_graph (line 65) | def initialize_graph(self, input_statistics):
    method define_loss (line 68) | def define_loss(self, features, mode):
  class EvaluationMetricsTests (line 78) | class EvaluationMetricsTests(tf.test.TestCase):
    method test_metrics_consistent (line 80) | def test_metrics_consistent(self):
    method test_custom_metrics (line 131) | def test_custom_metrics(self):
  class _StubModel (line 169) | class _StubModel(object):
    method initialize_graph (line 173) | def initialize_graph(self, input_statistics):
  function _stub_model_fn (line 177) | def _stub_model_fn():
  class TrainEvalFeatureCheckingTests (line 184) | class TrainEvalFeatureCheckingTests(tf.test.TestCase):
    method test_no_time_feature (line 186) | def test_no_time_feature(self):
    method test_no_value_feature (line 197) | def test_no_value_feature(self):
    method test_bad_time_rank (line 208) | def test_bad_time_rank(self):
    method test_bad_value_rank (line 222) | def test_bad_value_rank(self):
    method test_bad_value_num_features (line 236) | def test_bad_value_num_features(self):
    method test_bad_exogenous_shape (line 250) | def test_bad_exogenous_shape(self):
  class PredictFeatureCheckingTests (line 265) | class PredictFeatureCheckingTests(tf.test.TestCase):
    method test_no_time_feature (line 267) | def test_no_time_feature(self):
    method test_no_start_state_feature (line 279) | def test_no_start_state_feature(self):
    method test_bad_time_rank (line 289) | def test_bad_time_rank(self):
    method test_bad_exogenous_shape (line 302) | def test_bad_exogenous_shape(self):
  class OneShotTests (line 317) | class OneShotTests(parameterized.TestCase):
    method test_one_shot_prediction_head_export (line 319) | def test_one_shot_prediction_head_export(self):

FILE: tensorflow_estimator/python/estimator/canned/timeseries/math_utils.py
  function replicate_state (line 27) | def replicate_state(start_state, batch_size):
  class InputStatisticsFromMiniBatch (line 92) | class InputStatisticsFromMiniBatch(object):
    method __init__ (line 95) | def __init__(self, num_features, dtype, starting_variance_window_size=...
    method initialize_graph (line 108) | def initialize_graph(self, features, update_statistics=True):
    class _AdaptiveInputAuxiliaryStatistics (line 166) | class _AdaptiveInputAuxiliaryStatistics(
      method __new__ (line 189) | def __new__(cls, num_features, dtype):
    method _update_statistics_from_mini_batch (line 230) | def _update_statistics_from_mini_batch(self, statistics, auxiliary_var...
    method _create_variable_statistics_object (line 357) | def _create_variable_statistics_object(self):

FILE: tensorflow_estimator/python/estimator/canned/timeseries/math_utils_test.py
  class InputStatisticsTests (line 26) | class InputStatisticsTests(tf.test.TestCase):
    method _input_statistics_test_template (line 28) | def _input_statistics_test_template(self,
    method test_queue (line 84) | def test_queue(self):

FILE: tensorflow_estimator/python/estimator/canned/timeseries/model.py
  class TimeSeriesModel (line 44) | class TimeSeriesModel(object):
    method __init__ (line 47) | def __init__(self,
    method exogenous_feature_columns (line 74) | def exogenous_feature_columns(self):
    method generate (line 81) | def generate(self,
    method initialize_graph (line 111) | def initialize_graph(self, input_statistics=None):
    method _scale_data (line 126) | def _scale_data(self, data):
    method _scale_variance (line 133) | def _scale_variance(self, variance):
    method _scale_back_data (line 140) | def _scale_back_data(self, data):
    method _scale_back_variance (line 147) | def _scale_back_variance(self, variance):
    method _check_graph_initialized (line 154) | def _check_graph_initialized(self):
    method define_loss (line 161) | def define_loss(self, features, mode):
    method get_start_state (line 198) | def get_start_state(self):
    method get_batch_loss (line 208) | def get_batch_loss(self, features, mode, state):
    method predict (line 225) | def predict(self, features):
    method _get_exogenous_embedding_shape (line 251) | def _get_exogenous_embedding_shape(self):
    method _process_exogenous_features (line 272) | def _process_exogenous_features(self, times, features):

FILE: tensorflow_estimator/python/estimator/canned/timeseries/model_utils.py
  function fully_connected (line 27) | def fully_connected(inp,
  function canonicalize_times_or_steps_from_output (line 47) | def canonicalize_times_or_steps_from_output(times, steps,

FILE: tensorflow_estimator/python/estimator/canned/timeseries/saved_model_utils.py
  function _canonicalize_numpy_data (line 34) | def _canonicalize_numpy_data(data, require_single_batch):
  function _colate_features_to_feeds_and_fetches (line 120) | def _colate_features_to_feeds_and_fetches(signature,
  function predict_continuation (line 149) | def predict_continuation(continue_from,
  function cold_start_filter (line 207) | def cold_start_filter(signatures, session, features):
  function filter_continuation (line 251) | def filter_continuation(continue_from, signatures, session, features):

FILE: tensorflow_estimator/python/estimator/canned/timeseries/state_management.py
  class PassthroughStateManager (line 26) | class PassthroughStateManager(object):
    method __init__ (line 29) | def __init__(self):
    method initialize_graph (line 33) | def initialize_graph(self, model, input_statistics=None):
    method define_loss (line 39) | def define_loss(self, model, features, mode):
  class _OverridableStateManager (line 62) | class _OverridableStateManager(PassthroughStateManager):
    method _define_loss_with_saved_state (line 66) | def _define_loss_with_saved_state(self, model, features, mode):
    method define_loss (line 69) | def define_loss(self, model, features, mode):
  class FilteringOnlyStateManager (line 88) | class FilteringOnlyStateManager(_OverridableStateManager):
    method _define_loss_with_saved_state (line 97) | def _define_loss_with_saved_state(self, model, features, mode):

FILE: tensorflow_estimator/python/estimator/canned/v1/baseline_estimator_test_v1.py
  function assert_close (line 40) | def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  function save_variables_to_ckpt (line 55) | def save_variables_to_ckpt(model_dir):
  function _baseline_estimator_fn (line 62) | def _baseline_estimator_fn(weight_column=None, label_dimension=1, **kwar...
  class BaselineEstimatorEvaluationTest (line 72) | class BaselineEstimatorEvaluationTest(tf.test.TestCase):
    method setUp (line 74) | def setUp(self):
    method tearDown (line 77) | def tearDown(self):
    method test_evaluation_batch (line 82) | def test_evaluation_batch(self):
    method test_evaluation_weights (line 109) | def test_evaluation_weights(self):
    method test_evaluation_for_multi_dimensions (line 139) | def test_evaluation_for_multi_dimensions(self):
  class BaselineEstimatorPredictTest (line 169) | class BaselineEstimatorPredictTest(tf.test.TestCase):
    method setUp (line 171) | def setUp(self):
    method tearDown (line 174) | def tearDown(self):
    method test_1d (line 179) | def test_1d(self):
    method testMultiDim (line 199) | def testMultiDim(self):
  class BaselineEstimatorIntegrationTest (line 226) | class BaselineEstimatorIntegrationTest(tf.test.TestCase):
    method setUp (line 228) | def setUp(self):
    method tearDown (line 231) | def tearDown(self):
    method _test_complete_flow (line 236) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 267) | def test_numpy_input_fn(self):
  class BaselineEstimatorTrainingTest (line 305) | class BaselineEstimatorTrainingTest(tf.test.TestCase):
    method setUp (line 307) | def setUp(self):
    method tearDown (line 310) | def tearDown(self):
    method _mock_optimizer (line 315) | def _mock_optimizer(self, expected_loss=None):
    method _assert_checkpoint (line 350) | def _assert_checkpoint(self,
    method testFromScratch (line 370) | def testFromScratch(self):
    method testFromCheckpoint (line 388) | def testFromCheckpoint(self):

FILE: tensorflow_estimator/python/estimator/canned/v1/baseline_test_v1.py
  function assert_close (line 56) | def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  function save_variables_to_ckpt (line 71) | def save_variables_to_ckpt(model_dir):
  function queue_parsed_features (line 78) | def queue_parsed_features(feature_map):
  function sorted_key_dict (line 93) | def sorted_key_dict(unsorted_dict):
  function sigmoid (line 97) | def sigmoid(x):
  function _baseline_regressor_fn (line 101) | def _baseline_regressor_fn(*args, **kwargs):
  function _baseline_classifier_fn (line 105) | def _baseline_classifier_fn(*args, **kwargs):
  class BaselineRegressorEvaluationTest (line 114) | class BaselineRegressorEvaluationTest(tf.test.TestCase):
    method setUp (line 116) | def setUp(self):
    method tearDown (line 119) | def tearDown(self):
    method test_evaluation_for_simple_data (line 124) | def test_evaluation_for_simple_data(self):
    method test_evaluation_batch (line 147) | def test_evaluation_batch(self):
    method test_evaluation_weights (line 174) | def test_evaluation_weights(self):
    method test_evaluation_for_multi_dimensions (line 204) | def test_evaluation_for_multi_dimensions(self):
  class BaselineRegressorPredictTest (line 234) | class BaselineRegressorPredictTest(tf.test.TestCase):
    method setUp (line 236) | def setUp(self):
    method tearDown (line 239) | def tearDown(self):
    method test_1d (line 244) | def test_1d(self):
    method testMultiDim (line 264) | def testMultiDim(self):
  class BaselineRegressorIntegrationTest (line 291) | class BaselineRegressorIntegrationTest(tf.test.TestCase):
    method setUp (line 293) | def setUp(self):
    method tearDown (line 296) | def tearDown(self):
    method _test_complete_flow (line 301) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 332) | def test_numpy_input_fn(self):
    method test_pandas_input_fn (line 368) | def test_pandas_input_fn(self):
    method test_input_fn_from_parse_example (line 397) | def test_input_fn_from_parse_example(self):
  class BaselineRegressorTrainingTest (line 459) | class BaselineRegressorTrainingTest(tf.test.TestCase):
    method setUp (line 461) | def setUp(self):
    method tearDown (line 464) | def tearDown(self):
    method _mock_optimizer (line 469) | def _mock_optimizer(self, expected_loss=None):
    method _assert_checkpoint (line 504) | def _assert_checkpoint(self,
    method testFromScratchWithDefaultOptimizer (line 524) | def testFromScratchWithDefaultOptimizer(self):
    method testTrainWithOneDimLabel (line 536) | def testTrainWithOneDimLabel(self):
    method testTrainWithOneDimWeight (line 553) | def testTrainWithOneDimWeight(self):
    method testFromScratch (line 576) | def testFromScratch(self):
    method testFromCheckpoint (line 594) | def testFromCheckpoint(self):
    method testFromCheckpointMultiBatch (line 623) | def testFromCheckpointMultiBatch(self):
  class BaselineClassifierTrainingTest (line 660) | class BaselineClassifierTrainingTest(tf.test.TestCase):
    method setUp (line 662) | def setUp(self):
    method tearDown (line 665) | def tearDown(self):
    method _mock_optimizer (line 669) | def _mock_optimizer(self, expected_loss=None):
    method _assert_checkpoint (line 700) | def _assert_checkpoint(self,
    method _testFromScratchWithDefaultOptimizer (line 722) | def _testFromScratchWithDefaultOptimizer(self, n_classes):
    method testBinaryClassesFromScratchWithDefaultOptimizer (line 734) | def testBinaryClassesFromScratchWithDefaultOptimizer(self):
    method testMultiClassesFromScratchWithDefaultOptimizer (line 737) | def testMultiClassesFromScratchWithDefaultOptimizer(self):
    method _testTrainWithTwoDimsLabel (line 740) | def _testTrainWithTwoDimsLabel(self, n_classes):
    method testBinaryClassesTrainWithTwoDimsLabel (line 759) | def testBinaryClassesTrainWithTwoDimsLabel(self):
    method testMultiClassesTrainWithTwoDimsLabel (line 762) | def testMultiClassesTrainWithTwoDimsLabel(self):
    method _testTrainWithOneDimLabel (line 765) | def _testTrainWithOneDimLabel(self, n_classes):
    method testBinaryClassesTrainWithOneDimLabel (line 782) | def testBinaryClassesTrainWithOneDimLabel(self):
    method testMultiClassesTrainWithOneDimLabel (line 785) | def testMultiClassesTrainWithOneDimLabel(self):
    method _testTrainWithTwoDimsWeight (line 788) | def _testTrainWithTwoDimsWeight(self, n_classes):
    method testBinaryClassesTrainWithTwoDimsWeight (line 810) | def testBinaryClassesTrainWithTwoDimsWeight(self):
    method testMultiClassesTrainWithTwoDimsWeight (line 813) | def testMultiClassesTrainWithTwoDimsWeight(self):
    method _testTrainWithOneDimWeight (line 816) | def _testTrainWithOneDimWeight(self, n_classes):
    method testBinaryClassesTrainWithOneDimWeight (line 836) | def testBinaryClassesTrainWithOneDimWeight(self):
    method testMultiClassesTrainWithOneDimWeight (line 839) | def testMultiClassesTrainWithOneDimWeight(self):
    method _testFromScratch (line 842) | def _testFromScratch(self, n_classes):
    method testBinaryClassesFromScratch (line 874) | def testBinaryClassesFromScratch(self):
    method testMultiClassesFromScratch (line 877) | def testMultiClassesFromScratch(self):
    method _testFromCheckpoint (line 880) | def _testFromCheckpoint(self, n_classes):
    method testBinaryClassesFromCheckpoint (line 928) | def testBinaryClassesFromCheckpoint(self):
    method testMultiClassesFromCheckpoint (line 931) | def testMultiClassesFromCheckpoint(self):
    method _testFromCheckpointFloatLabels (line 934) | def _testFromCheckpointFloatLabels(self, n_classes):
    method testBinaryClassesFromCheckpointFloatLabels (line 968) | def testBinaryClassesFromCheckpointFloatLabels(self):
    method testMultiClassesFromCheckpointFloatLabels (line 971) | def testMultiClassesFromCheckpointFloatLabels(self):
    method _testFromCheckpointMultiBatch (line 974) | def _testFromCheckpointMultiBatch(self, n_classes):
    method testBinaryClassesFromCheckpointMultiBatch (line 1031) | def testBinaryClassesFromCheckpointMultiBatch(self):
    method testMultiClassesFromCheckpointMultiBatch (line 1034) | def testMultiClassesFromCheckpointMultiBatch(self):
  class BaselineClassifierEvaluationTest (line 1039) | class BaselineClassifierEvaluationTest(tf.test.TestCase):
    method setUp (line 1041) | def setUp(self):
    method tearDown (line 1044) | def tearDown(self):
    method _test_evaluation_for_simple_data (line 1048) | def _test_evaluation_for_simple_data(self, n_classes):
    method test_binary_classes_evaluation_for_simple_data (line 1100) | def test_binary_classes_evaluation_for_simple_data(self):
    method test_multi_classes_evaluation_for_simple_data (line 1103) | def test_multi_classes_evaluation_for_simple_data(self):
    method _test_evaluation_batch (line 1106) | def _test_evaluation_batch(self, n_classes):
    method test_binary_classes_evaluation_batch (line 1168) | def test_binary_classes_evaluation_batch(self):
    method test_multi_classes_evaluation_batch (line 1171) | def test_multi_classes_evaluation_batch(self):
    method _test_evaluation_weights (line 1174) | def _test_evaluation_weights(self, n_classes):
    method test_binary_classes_evaluation_weights (line 1250) | def test_binary_classes_evaluation_weights(self):
    method test_multi_classes_evaluation_weights (line 1253) | def test_multi_classes_evaluation_weights(self):
  class BaselineClassifierPredictTest (line 1258) | class BaselineClassifierPredictTest(tf.test.TestCase):
    method setUp (line 1260) | def setUp(self):
    method tearDown (line 1263) | def tearDown(self):
    method _testPredictions (line 1267) | def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
    method testBinaryClassesWithoutLabelVocabulary (line 1334) | def testBinaryClassesWithoutLabelVocabulary(self):
    method testBinaryClassesWithLabelVocabulary (line 1341) | def testBinaryClassesWithLabelVocabulary(self):
    method testMultiClassesWithoutLabelVocabulary (line 1348) | def testMultiClassesWithoutLabelVocabulary(self):
    method testMultiClassesWithLabelVocabulary (line 1355) | def testMultiClassesWithLabelVocabulary(self):
  class BaselineClassifierIntegrationTest (line 1364) | class BaselineClassifierIntegrationTest(tf.test.TestCase):
    method setUp (line 1366) | def setUp(self):
    method tearDown (line 1369) | def tearDown(self):
    method _test_complete_flow (line 1373) | def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
    method _test_numpy_input_fn (line 1404) | def _test_numpy_input_fn(self, n_classes):
    method test_binary_classes_numpy_input_fn (line 1440) | def test_binary_classes_numpy_input_fn(self):
    method test_multi_classes_numpy_input_fn (line 1443) | def test_multi_classes_numpy_input_fn(self):
    method _test_pandas_input_fn (line 1446) | def _test_pandas_input_fn(self, n_classes):
    method test_binary_classes_pandas_input_fn (line 1475) | def test_binary_classes_pandas_input_fn(self):
    method test_multi_classes_pandas_input_fn (line 1478) | def test_multi_classes_pandas_input_fn(self):
    method _test_input_fn_from_parse_example (line 1481) | def _test_input_fn_from_parse_example(self, n_classes):
    method test_binary_classes_input_fn_from_parse_example (line 1540) | def test_binary_classes_input_fn_from_parse_example(self):
    method test_multi_classes_input_fn_from_parse_example (line 1543) | def test_multi_classes_input_fn_from_parse_example(self):
  class BaselineLogitFnTest (line 1551) | class BaselineLogitFnTest(tf.test.TestCase):
    method test_basic_logit_correctness (line 1553) | def test_basic_logit_correctness(self):

FILE: tensorflow_estimator/python/estimator/canned/v1/dnn_estimator_test_v1.py
  function _dnn_estimator_fn (line 36) | def _dnn_estimator_fn(weight_column=None, label_dimension=1, **kwargs):
  function _dnn_estimator_classifier_fn (line 47) | def _dnn_estimator_classifier_fn(n_classes=3, **kwargs):
  class DNNEstimatorEvaluateTest (line 55) | class DNNEstimatorEvaluateTest(
    method __init__ (line 58) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNEstimatorPredictTest (line 65) | class DNNEstimatorPredictTest(dnn_testing_utils_v1.BaseDNNRegressorPredi...
    method __init__ (line 68) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNEstimatorTrainTest (line 75) | class DNNEstimatorTrainTest(dnn_testing_utils_v1.BaseDNNRegressorTrainTest,
    method __init__ (line 78) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNEstimatorWarmStartingTest (line 85) | class DNNEstimatorWarmStartingTest(dnn_testing_utils_v1.BaseDNNWarmStart...
    method __init__ (line 88) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNEstimatorIntegrationTest (line 95) | class DNNEstimatorIntegrationTest(tf.test.TestCase):
    method setUp (line 97) | def setUp(self):
    method tearDown (line 100) | def tearDown(self):
    method _test_complete_flow (line 105) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 141) | def test_numpy_input_fn(self):

FILE: tensorflow_estimator/python/estimator/canned/v1/dnn_linear_combined_estimator_test_v1.py
  function _dnn_only_estimator_fn (line 38) | def _dnn_only_estimator_fn(hidden_units,
  class DNNOnlyEstimatorEvaluateTest (line 65) | class DNNOnlyEstimatorEvaluateTest(
    method __init__ (line 68) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyEstimatorPredictTest (line 75) | class DNNOnlyEstimatorPredictTest(
    method __init__ (line 78) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyEstimatorTrainTest (line 85) | class DNNOnlyEstimatorTrainTest(dnn_testing_utils_v1.BaseDNNRegressorTra...
    method __init__ (line 88) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  function _linear_only_estimator_fn (line 94) | def _linear_only_estimator_fn(feature_columns,
  class LinearOnlyEstimatorEvaluateTest (line 117) | class LinearOnlyEstimatorEvaluateTest(
    method __init__ (line 121) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyEstimatorPredictTest (line 128) | class LinearOnlyEstimatorPredictTest(
    method __init__ (line 131) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyEstimatorTrainTest (line 138) | class LinearOnlyEstimatorTrainTest(
    method __init__ (line 141) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNLinearCombinedEstimatorIntegrationTest (line 148) | class DNNLinearCombinedEstimatorIntegrationTest(tf.test.TestCase):
    method setUp (line 150) | def setUp(self):
    method tearDown (line 153) | def tearDown(self):
    method _test_complete_flow (line 158) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 199) | def test_numpy_input_fn(self):

FILE: tensorflow_estimator/python/estimator/canned/v1/dnn_linear_combined_test_v1.py
  class DNNOnlyModelFnTest (line 65) | class DNNOnlyModelFnTest(dnn_testing_utils_v1.BaseDNNModelFnTest,
    method __init__ (line 68) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    method _dnn_only_model_fn (line 73) | def _dnn_only_model_fn(self,
  function _linear_regressor_fn (line 101) | def _linear_regressor_fn(feature_columns,
  class LinearOnlyRegressorPartitionerTest (line 121) | class LinearOnlyRegressorPartitionerTest(
    method __init__ (line 125) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyRegressorPartitionerV2Test (line 132) | class LinearOnlyRegressorPartitionerV2Test(
    method __init__ (line 136) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyRegressorEvaluationTest (line 143) | class LinearOnlyRegressorEvaluationTest(
    method __init__ (line 147) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyRegressorEvaluationV2Test (line 154) | class LinearOnlyRegressorEvaluationV2Test(
    method __init__ (line 158) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyRegressorPredictTest (line 165) | class LinearOnlyRegressorPredictTest(
    method __init__ (line 168) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyRegressorPredictV2Test (line 175) | class LinearOnlyRegressorPredictV2Test(
    method __init__ (line 178) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyRegressorIntegrationTest (line 185) | class LinearOnlyRegressorIntegrationTest(
    method __init__ (line 189) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyRegressorIntegrationV2Test (line 196) | class LinearOnlyRegressorIntegrationV2Test(
    method __init__ (line 200) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyRegressorTrainingTest (line 207) | class LinearOnlyRegressorTrainingTest(
    method __init__ (line 210) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyRegressorTrainingV2Test (line 217) | class LinearOnlyRegressorTrainingV2Test(
    method __init__ (line 220) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  function _linear_classifier_fn (line 226) | def _linear_classifier_fn(feature_columns,
  class LinearOnlyClassifierTrainingTest (line 248) | class LinearOnlyClassifierTrainingTest(
    method __init__ (line 251) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyClassifierTrainingV2Test (line 258) | class LinearOnlyClassifierTrainingV2Test(
    method __init__ (line 261) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyClassifierClassesEvaluationTest (line 270) | class LinearOnlyClassifierClassesEvaluationTest(
    method __init__ (line 274) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyClassifierClassesEvaluationV2Test (line 281) | class LinearOnlyClassifierClassesEvaluationV2Test(
    method __init__ (line 285) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyClassifierPredictTest (line 294) | class LinearOnlyClassifierPredictTest(
    method __init__ (line 297) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyClassifierPredictV2Test (line 304) | class LinearOnlyClassifierPredictV2Test(
    method __init__ (line 307) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyClassifierIntegrationTest (line 316) | class LinearOnlyClassifierIntegrationTest(
    method __init__ (line 320) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearOnlyClassifierIntegrationV2Test (line 327) | class LinearOnlyClassifierIntegrationV2Test(
    method __init__ (line 331) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNLinearCombinedRegressorIntegrationTest (line 341) | class DNNLinearCombinedRegressorIntegrationTest(tf.test.TestCase):
    method setUp (line 343) | def setUp(self):
    method tearDown (line 346) | def tearDown(self):
    method _test_complete_flow_helper (line 351) | def _test_complete_flow_helper(self, linear_feature_columns,
    method _test_complete_flow (line 386) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method _test_complete_flow_mix1 (line 403) | def _test_complete_flow_mix1(self, train_input_fn, eval_input_fn,
    method _test_complete_flow_mix2 (line 421) | def _test_complete_flow_mix2(self, train_input_fn, eval_input_fn,
    method _test_numpy_input_fn_helper (line 439) | def _test_numpy_input_fn_helper(self, fc_impl, fn_to_run):
    method test_numpy_input_fn_basic (line 466) | def test_numpy_input_fn_basic(self, fc_impl):
    method test_numpy_input_fn_mix1 (line 469) | def test_numpy_input_fn_mix1(self, fc_impl):
    method test_numpy_input_fn_mix2 (line 472) | def test_numpy_input_fn_mix2(self, fc_impl):
    method _test_pandas_input_fn_helper (line 475) | def _test_pandas_input_fn_helper(self, fc_impl, fn_to_run):
    method test_pandas_input_fn_basic (line 500) | def test_pandas_input_fn_basic(self, fc_impl):
    method test_pandas_input_fn_mix1 (line 503) | def test_pandas_input_fn_mix1(self, fc_impl):
    method test_pandas_input_fn_mix2 (line 506) | def test_pandas_input_fn_mix2(self, fc_impl):
    method _test_input_fn_from_parse_example_helper (line 509) | def _test_input_fn_from_parse_example_helper(self, fc_impl, fn_to_run):
    method test_input_fn_from_parse_example_basic (line 567) | def test_input_fn_from_parse_example_basic(self, fc_impl):
    method test_input_fn_from_parse_example_mix1 (line 571) | def test_input_fn_from_parse_example_mix1(self, fc_impl):
    method test_input_fn_from_parse_example_mix2 (line 575) | def test_input_fn_from_parse_example_mix2(self, fc_impl):
  function _dnn_classifier_fn (line 581) | def _dnn_classifier_fn(hidden_units,
  class DNNOnlyClassifierEvaluateTest (line 603) | class DNNOnlyClassifierEvaluateTest(
    method __init__ (line 606) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyClassifierEvaluateV2Test (line 613) | class DNNOnlyClassifierEvaluateV2Test(
    method __init__ (line 616) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyClassifierPredictTest (line 623) | class DNNOnlyClassifierPredictTest(
    method __init__ (line 626) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyClassifierPredictV2Test (line 633) | class DNNOnlyClassifierPredictV2Test(
    method __init__ (line 636) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyClassifierTrainTest (line 643) | class DNNOnlyClassifierTrainTest(
    method __init__ (line 646) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyClassifierTrainV2Test (line 653) | class DNNOnlyClassifierTrainV2Test(
    method __init__ (line 656) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  function _dnn_regressor_fn (line 663) | def _dnn_regressor_fn(hidden_units,
  class DNNOnlyRegressorEvaluateTest (line 683) | class DNNOnlyRegressorEvaluateTest(
    method __init__ (line 686) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyRegressorEvaluateV2Test (line 693) | class DNNOnlyRegressorEvaluateV2Test(
    method __init__ (line 696) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyRegressorPredictTest (line 703) | class DNNOnlyRegressorPredictTest(
    method __init__ (line 706) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyRegressorPredictV2Test (line 713) | class DNNOnlyRegressorPredictV2Test(
    method __init__ (line 716) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyRegressorTrainTest (line 723) | class DNNOnlyRegressorTrainTest(dnn_testing_utils_v1.BaseDNNRegressorTra...
    method __init__ (line 726) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNOnlyRegressorTrainV2Test (line 733) | class DNNOnlyRegressorTrainV2Test(
    method __init__ (line 736) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNLinearCombinedClassifierIntegrationTest (line 744) | class DNNLinearCombinedClassifierIntegrationTest(tf.test.TestCase):
    method setUp (line 746) | def setUp(self):
    method tearDown (line 749) | def tearDown(self):
    method _as_label (line 754) | def _as_label(self, data_in_float):
    method _test_complete_flow (line 757) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 798) | def test_numpy_input_fn(self, fc_impl):
    method test_pandas_input_fn (line 828) | def test_pandas_input_fn(self, fc_impl):
    method test_input_fn_from_parse_example (line 854) | def test_input_fn_from_parse_example(self, fc_impl):
  class DNNLinearCombinedTests (line 918) | class DNNLinearCombinedTests(tf.test.TestCase):
    method setUp (line 920) | def setUp(self):
    method tearDown (line 923) | def tearDown(self):
    method _mock_optimizer (line 927) | def _mock_optimizer(self, real_optimizer, var_name_prefix):
    method test_train_op_calls_both_dnn_and_linear (line 949) | def test_train_op_calls_both_dnn_and_linear(self, fc_impl):
    method test_dnn_and_linear_logits_are_added (line 974) | def test_dnn_and_linear_logits_are_added(self, fc_impl):
  class DNNLinearCombinedWarmStartingTest (line 1003) | class DNNLinearCombinedWarmStartingTest(tf.test.TestCase):
    method setUp (line 1005) | def setUp(self):
    method tearDown (line 1019) | def tearDown(self):
    method test_classifier_basic_warm_starting (line 1024) | def test_classifier_basic_warm_starting(self, fc_impl):
    method test_regressor_basic_warm_starting (line 1064) | def test_regressor_basic_warm_starting(self, fc_impl):
    method test_warm_starting_selective_variables (line 1102) | def test_warm_starting_selective_variables(self, fc_impl):

FILE: tensorflow_estimator/python/estimator/canned/v1/dnn_test_fc_v1_v1.py
  function _dnn_classifier_fn (line 53) | def _dnn_classifier_fn(*args, **kwargs):
  class DNNModelFnTest (line 58) | class DNNModelFnTest(dnn_testing_utils_v1.BaseDNNModelFnTest, tf.test.Te...
    method __init__ (line 60) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNLogitFnTest (line 67) | class DNNLogitFnTest(dnn_testing_utils_v1.BaseDNNLogitFnTest, tf.test.Te...
    method __init__ (line 69) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNWarmStartingTest (line 76) | class DNNWarmStartingTest(dnn_testing_utils_v1.BaseDNNWarmStartingTest,
    method __init__ (line 79) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNClassifierEvaluateTest (line 86) | class DNNClassifierEvaluateTest(
    method __init__ (line 89) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNClassifierPredictTest (line 96) | class DNNClassifierPredictTest(
    method __init__ (line 99) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNClassifierTrainTest (line 106) | class DNNClassifierTrainTest(dnn_testing_utils_v1.BaseDNNClassifierTrain...
    method __init__ (line 109) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  function _dnn_regressor_fn (line 115) | def _dnn_regressor_fn(*args, **kwargs):
  class DNNRegressorEvaluateTest (line 120) | class DNNRegressorEvaluateTest(
    method __init__ (line 123) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNRegressorPredictTest (line 130) | class DNNRegressorPredictTest(dnn_testing_utils_v1.BaseDNNRegressorPredi...
    method __init__ (line 133) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNRegressorTrainTest (line 140) | class DNNRegressorTrainTest(dnn_testing_utils_v1.BaseDNNRegressorTrainTest,
    method __init__ (line 143) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  function _queue_parsed_features (line 149) | def _queue_parsed_features(feature_map):
  class DNNRegressorIntegrationTest (line 165) | class DNNRegressorIntegrationTest(tf.test.TestCase, parameterized.TestCa...
    method setUp (line 167) | def setUp(self):
    method tearDown (line 170) | def tearDown(self):
    method _test_complete_flow (line 175) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 212) | def test_numpy_input_fn(self):
    method test_pandas_input_fn (line 238) | def test_pandas_input_fn(self):
    method test_input_fn_from_parse_example (line 262) | def test_input_fn_from_parse_example(self):
  class DNNClassifierIntegrationTest (line 321) | class DNNClassifierIntegrationTest(tf.test.TestCase):
    method setUp (line 323) | def setUp(self):
    method tearDown (line 326) | def tearDown(self):
    method _as_label (line 331) | def _as_label(self, data_in_float):
    method _test_complete_flow (line 334) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 371) | def test_numpy_input_fn(self):
    method test_pandas_input_fn (line 400) | def test_pandas_input_fn(self):
    method test_input_fn_from_parse_example (line 425) | def test_input_fn_from_parse_example(self):

FILE: tensorflow_estimator/python/estimator/canned/v1/dnn_test_fc_v2_v1.py
  function _dnn_classifier_fn (line 50) | def _dnn_classifier_fn(*args, **kwargs):
  class DNNModelFnV2Test (line 55) | class DNNModelFnV2Test(dnn_testing_utils_v1.BaseDNNModelFnTest,
    method __init__ (line 58) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNLogitFnV2Test (line 65) | class DNNLogitFnV2Test(dnn_testing_utils_v1.BaseDNNLogitFnTest,
    method __init__ (line 68) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNWarmStartingV2Test (line 75) | class DNNWarmStartingV2Test(dnn_testing_utils_v1.BaseDNNWarmStartingTest,
    method __init__ (line 78) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNClassifierEvaluateV2Test (line 85) | class DNNClassifierEvaluateV2Test(
    method __init__ (line 88) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNClassifierPredictV2Test (line 95) | class DNNClassifierPredictV2Test(
    method __init__ (line 98) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNClassifierTrainV2Test (line 105) | class DNNClassifierTrainV2Test(dnn_testing_utils_v1.BaseDNNClassifierTra...
    method __init__ (line 108) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  function _dnn_regressor_fn (line 114) | def _dnn_regressor_fn(*args, **kwargs):
  class DNNRegressorEvaluateV2Test (line 119) | class DNNRegressorEvaluateV2Test(
    method __init__ (line 122) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNRegressorPredictV2Test (line 129) | class DNNRegressorPredictV2Test(
    method __init__ (line 132) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class DNNRegressorTrainV2Test (line 139) | class DNNRegressorTrainV2Test(dnn_testing_utils_v1.BaseDNNRegressorTrain...
    method __init__ (line 142) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  function _queue_parsed_features (line 148) | def _queue_parsed_features(feature_map):
  class DNNRegressorIntegrationTest (line 164) | class DNNRegressorIntegrationTest(tf.test.TestCase, parameterized.TestCa...
    method setUp (line 166) | def setUp(self):
    method tearDown (line 169) | def tearDown(self):
    method _test_complete_flow (line 174) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 210) | def test_numpy_input_fn(self):
    method test_pandas_input_fn (line 236) | def test_pandas_input_fn(self):
    method test_input_fn_from_parse_example (line 260) | def test_input_fn_from_parse_example(self):
  class DNNClassifierIntegrationTest (line 319) | class DNNClassifierIntegrationTest(tf.test.TestCase):
    method setUp (line 321) | def setUp(self):
    method tearDown (line 324) | def tearDown(self):
    method _as_label (line 329) | def _as_label(self, data_in_float):
    method _test_complete_flow (line 332) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 368) | def test_numpy_input_fn(self):
    method test_pandas_input_fn (line 397) | def test_pandas_input_fn(self):
    method test_input_fn_from_parse_example (line 422) | def test_input_fn_from_parse_example(self):

FILE: tensorflow_estimator/python/estimator/canned/v1/dnn_testing_utils_v1.py
  function assert_close (line 66) | def assert_close(expected, actual, rtol=1e-04, message='', name='assert_...
  function create_checkpoint (line 82) | def create_checkpoint(weights_and_biases,
  function mock_head (line 130) | def mock_head(testcase, hidden_units, logits_dimension, expected_logits):
  function mock_optimizer (line 190) | def mock_optimizer(testcase, hidden_units, expected_loss=None):
  class BaseDNNModelFnTest (line 240) | class BaseDNNModelFnTest(object):
    method __init__ (line 243) | def __init__(self, dnn_model_fn, fc_impl=feature_column):
    method setUp (line 247) | def setUp(self):
    method tearDown (line 250) | def tearDown(self):
    method _test_logits (line 255) | def _test_logits(self, mode, hidden_units, logits_dimension, inputs,
    method test_one_dim_logits (line 287) | def test_one_dim_logits(self):
    method test_multi_dim_logits (line 311) | def test_multi_dim_logits(self):
    method test_multi_example_multi_dim_logits (line 336) | def test_multi_example_multi_dim_logits(self):
    method test_multi_dim_input_one_dim_logits (line 365) | def test_multi_dim_input_one_dim_logits(self):
    method test_multi_dim_input_multi_dim_logits (line 390) | def test_multi_dim_input_multi_dim_logits(self):
    method test_multi_feature_column_multi_dim_logits (line 415) | def test_multi_feature_column_multi_dim_logits(self):
    method test_multi_feature_column_mix_multi_dim_logits (line 466) | def test_multi_feature_column_mix_multi_dim_logits(self):
    method test_features_tensor_raises_value_error (line 517) | def test_features_tensor_raises_value_error(self):
  class BaseDNNLogitFnTest (line 545) | class BaseDNNLogitFnTest(object):
    method __init__ (line 548) | def __init__(self, dnn_logit_fn_builder, fc_impl=feature_column):
    method setUp (line 552) | def setUp(self):
    method tearDown (line 555) | def tearDown(self):
    method _test_logits (line 560) | def _test_logits(self,
    method test_one_dim_logits (line 594) | def test_one_dim_logits(self):
    method test_one_dim_logits_with_batch_norm (line 617) | def test_one_dim_logits_with_batch_norm(self):
    method test_multi_dim_logits (line 682) | def test_multi_dim_logits(self):
    method test_multi_example_multi_dim_logits (line 706) | def test_multi_example_multi_dim_logits(self):
    method test_multi_dim_input_one_dim_logits (line 734) | def test_multi_dim_input_one_dim_logits(self):
    method test_multi_dim_input_multi_dim_logits (line 759) | def test_multi_dim_input_multi_dim_logits(self):
    method test_multi_feature_column_multi_dim_logits (line 783) | def test_multi_feature_column_multi_dim_logits(self):
    method test_multi_feature_column_mix_multi_dim_logits (line 834) | def test_multi_feature_column_mix_multi_dim_logits(self):
  class BaseDNNWarmStartingTest (line 886) | class BaseDNNWarmStartingTest(object):
    method __init__ (line 888) | def __init__(self,
    method setUp (line 896) | def setUp(self):
    method tearDown (line 911) | def tearDown(self):
    method assertAllNotClose (line 916) | def assertAllNotClose(self, t1, t2):
    method test_classifier_basic_warm_starting (line 927) | def test_classifier_basic_warm_starting(self):
    method test_regressor_basic_warm_starting (line 960) | def test_regressor_basic_warm_starting(self):
    method test_warm_starting_selective_variables (line 991) | def test_warm_starting_selective_variables(self):
    method test_warm_starting_with_vocab_remapping_and_partitioning (line 1041) | def test_warm_starting_with_vocab_remapping_and_partitioning(self):
    method test_warm_starting_with_naming_change (line 1139) | def test_warm_starting_with_naming_change(self):
  class BaseDNNClassifierEvaluateTest (line 1190) | class BaseDNNClassifierEvaluateTest(object):
    method __init__ (line 1192) | def __init__(self, dnn_classifier_fn, fc_impl=feature_column):
    method setUp (line 1196) | def setUp(self):
    method tearDown (line 1199) | def tearDown(self):
    method test_one_dim (line 1204) | def test_one_dim(self):
    method test_multi_dim (line 1246) | def test_multi_dim(self):
    method test_float_labels (line 1283) | def test_float_labels(self):
    method test_multi_dim_weights (line 1310) | def test_multi_dim_weights(self):
  class BaseDNNRegressorEvaluateTest (line 1341) | class BaseDNNRegressorEvaluateTest(object):
    method __init__ (line 1343) | def __init__(self, dnn_regressor_fn, fc_impl=feature_column):
    method setUp (line 1347) | def setUp(self):
    method tearDown (line 1350) | def tearDown(self):
    method test_one_dim (line 1355) | def test_one_dim(self):
    method test_multi_dim (line 1387) | def test_multi_dim(self):
    method test_multi_dim_weights (line 1422) | def test_multi_dim_weights(self):
  class BaseDNNClassifierPredictTest (line 1452) | class BaseDNNClassifierPredictTest(object):
    method __init__ (line 1454) | def __init__(self, dnn_classifier_fn, fc_impl=feature_column):
    method setUp (line 1458) | def setUp(self):
    method tearDown (line 1461) | def tearDown(self):
    method _test_one_dim (line 1466) | def _test_one_dim(self, label_vocabulary, label_output_fn):
    method test_one_dim_without_label_vocabulary (line 1502) | def test_one_dim_without_label_vocabulary(self):
    method test_one_dim_with_label_vocabulary (line 1506) | def test_one_dim_with_label_vocabulary(self):
    method _test_multi_dim_with_3_classes (line 1512) | def _test_multi_dim_with_3_classes(self, label_vocabulary, label_outpu...
    method test_multi_dim_with_3_classes_but_no_label_vocab (line 1559) | def test_multi_dim_with_3_classes_but_no_label_vocab(self):
    method test_multi_dim_with_3_classes_and_label_vocab (line 1563) | def test_multi_dim_with_3_classes_and_label_vocab(self):
  class BaseDNNRegressorPredictTest (line 1570) | class BaseDNNRegressorPredictTest(object):
    method __init__ (line 1572) | def __init__(self, dnn_regressor_fn, fc_impl=feature_column):
    method setUp (line 1576) | def setUp(self):
    method tearDown (line 1579) | def tearDown(self):
    method test_one_dim (line 1584) | def test_one_dim(self):
    method test_multi_dim (line 1608) | def test_multi_dim(self):
  class _SummaryHook (line 1637) | class _SummaryHook(tf.compat.v1.train.SessionRunHook):
    method __init__ (line 1640) | def __init__(self):
    method begin (line 1643) | def begin(self):
    method before_run (line 1646) | def before_run(self, run_context):
    method after_run (line 1649) | def after_run(self, run_context, run_values):
    method summaries (line 1654) | def summaries(self):
  function _assert_checkpoint (line 1658) | def _assert_checkpoint(testcase, global_step, input_units, hidden_units,
  function _assert_simple_summary (line 1694) | def _assert_simple_summary(testcase, expected_values, actual_summary):
  class BaseDNNClassifierTrainTest (line 1710) | class BaseDNNClassifierTrainTest(object):
    method __init__ (line 1712) | def __init__(self, dnn_classifier_fn, fc_impl=feature_column):
    method setUp (line 1716) | def setUp(self):
    method tearDown (line 1719) | def tearDown(self):
    method test_from_scratch_with_default_optimizer_binary (line 1724) | def test_from_scratch_with_default_optimizer_binary(self):
    method test_from_scratch_with_default_optimizer_multi_class (line 1745) | def test_from_scratch_with_default_optimizer_multi_class(self):
    method test_from_scratch_validate_summary (line 1768) | def test_from_scratch_validate_summary(self):
    method test_binary_classification (line 1803) | def test_binary_classification(self):
    method test_binary_classification_float_labels (line 1856) | def test_binary_classification_float_labels(self):
    method test_multi_class (line 1888) | def test_multi_class(self):
  class BaseDNNRegressorTrainTest (line 1944) | class BaseDNNRegressorTrainTest(object):
    method __init__ (line 1946) | def __init__(self, dnn_regressor_fn, fc_impl=feature_column):
    method setUp (line 1950) | def setUp(self):
    method tearDown (line 1953) | def tearDown(self):
    method test_from_scratch_with_default_optimizer (line 1958) | def test_from_scratch_with_default_optimizer(self):
    method test_from_scratch (line 1979) | def test_from_scratch(self):
    method test_one_dim (line 2014) | def test_one_dim(self):
    method test_multi_dim (line 2068) | def test_multi_dim(self):

FILE: tensorflow_estimator/python/estimator/canned/v1/linear_estimator_test_v1.py
  function _linear_estimator_fn (line 36) | def _linear_estimator_fn(weight_column=None, label_dimension=1, **kwargs):
  class LinearEstimatorEvaluateTest (line 48) | class LinearEstimatorEvaluateTest(
    method __init__ (line 52) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearEstimatorPredictTest (line 59) | class LinearEstimatorPredictTest(
    method __init__ (line 62) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearEstimatorTrainTest (line 69) | class LinearEstimatorTrainTest(
    method __init__ (line 72) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearEstimatorIntegrationTest (line 79) | class LinearEstimatorIntegrationTest(tf.test.TestCase):
    method setUp (line 81) | def setUp(self):
    method tearDown (line 84) | def tearDown(self):
    method _test_complete_flow (line 89) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 124) | def test_numpy_input_fn(self):

FILE: tensorflow_estimator/python/estimator/canned/v1/linear_test_v1.py
  function _linear_regressor_fn (line 31) | def _linear_regressor_fn(*args, **kwargs):
  function _linear_classifier_fn (line 35) | def _linear_classifier_fn(*args, **kwargs):
  class LinearRegressorPartitionerTest (line 43) | class LinearRegressorPartitionerTest(
    method __init__ (line 47) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearRegressorPartitionerV2Test (line 54) | class LinearRegressorPartitionerV2Test(
    method __init__ (line 58) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearRegressorEvaluationTest (line 65) | class LinearRegressorEvaluationTest(
    method __init__ (line 69) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearRegressorEvaluationV2Test (line 76) | class LinearRegressorEvaluationV2Test(
    method __init__ (line 80) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearRegressorPredictTest (line 87) | class LinearRegressorPredictTest(
    method __init__ (line 90) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearRegressorPredictV2Test (line 97) | class LinearRegressorPredictV2Test(
    method __init__ (line 100) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearRegressorIntegrationTest (line 107) | class LinearRegressorIntegrationTest(
    method __init__ (line 111) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearRegressorIntegrationV2Test (line 118) | class LinearRegressorIntegrationV2Test(
    method __init__ (line 122) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearRegressorTrainingTest (line 129) | class LinearRegressorTrainingTest(
    method __init__ (line 132) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearRegressorTrainingV2Test (line 139) | class LinearRegressorTrainingV2Test(
    method __init__ (line 142) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearClassifierTrainingTest (line 150) | class LinearClassifierTrainingTest(
    method __init__ (line 153) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearClassifierTrainingV2Test (line 160) | class LinearClassifierTrainingV2Test(
    method __init__ (line 163) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearClassifierEvaluationTest (line 172) | class LinearClassifierEvaluationTest(
    method __init__ (line 176) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearClassifierEvaluationV2Test (line 183) | class LinearClassifierEvaluationV2Test(
    method __init__ (line 187) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearClassifierPredictTest (line 196) | class LinearClassifierPredictTest(
    method __init__ (line 199) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearClassifierPredictV2Test (line 206) | class LinearClassifierPredictV2Test(
    method __init__ (line 209) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearClassifierIntegrationTest (line 218) | class LinearClassifierIntegrationTest(
    method __init__ (line 222) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearClassifierIntegrationV2Test (line 229) | class LinearClassifierIntegrationV2Test(
    method __init__ (line 233) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearLogitFnTest (line 243) | class LinearLogitFnTest(linear_testing_utils_v1.BaseLinearLogitFnTest,
    method __init__ (line 246) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearLogitFnV2Test (line 253) | class LinearLogitFnV2Test(linear_testing_utils_v1.BaseLinearLogitFnTest,
    method __init__ (line 256) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearWarmStartingTest (line 264) | class LinearWarmStartingTest(linear_testing_utils_v1.BaseLinearWarmStart...
    method __init__ (line 267) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class LinearWarmStartingV2Test (line 277) | class LinearWarmStartingV2Test(
    method __init__ (line 280) | def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
  class ComputeFractionOfZeroTest (line 290) | class ComputeFractionOfZeroTest(tf.test.TestCase):
    method _assertSparsity (line 292) | def _assertSparsity(self, expected_sparsity, tensor):
    method test_small_float32 (line 297) | def test_small_float32(self):
    method test_small_int32 (line 303) | def test_small_int32(self):
    method test_small_float64 (line 307) | def test_small_float64(self):
    method test_small_int64 (line 311) | def test_small_int64(self):
    method test_nested (line 315) | def test_nested(self):
    method test_none (line 320) | def test_none(self):
    method test_empty (line 324) | def test_empty(self):
    method test_multiple_empty (line 331) | def test_multiple_empty(self):
    method test_some_empty (line 341) | def test_some_empty(self):
    method test_mixed_types (line 349) | def test_mixed_types(self):
    method test_2_27_zeros__using_512_MiB_of_ram (line 356) | def test_2_27_zeros__using_512_MiB_of_ram(self):
    method test_2_27_ones__using_512_MiB_of_ram (line 360) | def test_2_27_ones__using_512_MiB_of_ram(self):

FILE: tensorflow_estimator/python/estimator/canned/v1/linear_testing_utils_v1.py
  function assert_close (line 72) | def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  function save_variables_to_ckpt (line 87) | def save_variables_to_ckpt(model_dir):
  function queue_parsed_features (line 94) | def queue_parsed_features(feature_map):
  function sorted_key_dict (line 109) | def sorted_key_dict(unsorted_dict):
  function sigmoid (line 113) | def sigmoid(x):
  class CheckPartitionerVarHook (line 117) | class CheckPartitionerVarHook(tf.compat.v1.train.SessionRunHook):
    method __init__ (line 120) | def __init__(self, test_case, var_name, var_dim, partitions):
    method begin (line 126) | def begin(self):
  class BaseLinearRegressorPartitionerTest (line 139) | class BaseLinearRegressorPartitionerTest(object):
    method __init__ (line 141) | def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    method setUp (line 145) | def setUp(self):
    method tearDown (line 148) | def tearDown(self):
    method testPartitioner (line 153) | def testPartitioner(self):
    method testDefaultPartitionerWithMultiplePsReplicas (line 181) | def testDefaultPartitionerWithMultiplePsReplicas(self):
  class BaseLinearRegressorEvaluationTest (line 219) | class BaseLinearRegressorEvaluationTest(object):
    method __init__ (line 221) | def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    method setUp (line 225) | def setUp(self):
    method tearDown (line 228) | def tearDown(self):
    method test_evaluation_for_simple_data (line 233) | def test_evaluation_for_simple_data(self):
    method test_evaluation_batch (line 259) | def test_evaluation_batch(self):
    method test_evaluation_weights (line 289) | def test_evaluation_weights(self):
    method test_evaluation_for_multi_dimensions (line 322) | def test_evaluation_for_multi_dimensions(self):
    method test_evaluation_for_multiple_feature_columns (line 358) | def test_evaluation_for_multiple_feature_columns(self):
    method test_evaluation_for_multiple_feature_columns_mix (line 396) | def test_evaluation_for_multiple_feature_columns_mix(self):
  class BaseLinearRegressorPredictTest (line 436) | class BaseLinearRegressorPredictTest(object):
    method __init__ (line 438) | def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    method setUp (line 442) | def setUp(self):
    method tearDown (line 445) | def tearDown(self):
    method test_1d (line 450) | def test_1d(self):
    method testMultiDim (line 473) | def testMultiDim(self):
    method testTwoFeatureColumns (line 506) | def testTwoFeatureColumns(self):
    method testTwoFeatureColumnsMix (line 534) | def testTwoFeatureColumnsMix(self):
    method testSparseCombiner (line 559) | def testSparseCombiner(self):
  class BaseLinearRegressorIntegrationTest (line 622) | class BaseLinearRegressorIntegrationTest(object):
    method __init__ (line 624) | def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    method setUp (line 628) | def setUp(self):
    method tearDown (line 631) | def tearDown(self):
    method _test_complete_flow (line 636) | def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_i...
    method test_numpy_input_fn (line 669) | def test_numpy_input_fn(self):
    method test_pandas_input_fn (line 705) | def test_pandas_input_fn(self):
    method test_input_fn_from_parse_example (line 734) | def test_input_fn_from_parse_example(self):
  class BaseLinearRegressorTrainingTest (line 795) | class BaseLinearRegressorTrainingTest(object):
    method __init__ (line 797) | def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    method setUp (line 801) | def setUp(self):
    method tearDown (line 804) | def tearDown(self):
    method _mock_optimizer (line 809) | def _mock_optimizer(self, expected_loss=None):
    method _assert_checkpoint (line 847) | def _assert_checkpoint(self,
    method testFromScratchWithDefaultOptimizer (line 872) | def testFromScratchWithDefaultOptimizer(self):
    method testTrainWithOneDimLabel (line 888) | def testTrainWithOneDimLabel(self):
    method testTrainWithOneDimWeight (line 908) | def testTrainWithOneDimWeight(self):
    method testFromScratch (line 933) | def testFromScratch(self):
    method testFromCheckpoint (line 957) | def testFromCheckpoint(self):
    method testFromCheckpointMultiBatch (line 992) | def testFromCheckpointMultiBatch(self):
  class BaseLinearClassifierTrainingTest (line 1031) | class BaseLinearClassifierTrainingTest(object):
    method __init__ (line 1033) | def __init__(self, linear_classifier_fn, fc_lib=feature_column):
    method setUp (line 1037) | def setUp(self):
    method tearDown (line 1040) | def tearDown(self):
    method _mock_optimizer (line 1044) | def _mock_optimizer(self, expected_loss=None):
    method _assert_checkpoint (line 1078) | def _assert_checkpoint(self,
    method _testFromScratchWithDefaultOptimizer (line 1107) | def _testFromScratchWithDefaultOptimizer(self, n_classes):
    method testBinaryClassesFromScratchWithDefaultOptimizer (line 1123) | def testBinaryClassesFromScratchWithDefaultOptimizer(self):
    method testMultiClassesFromScratchWithDefaultOptimizer (line 1126) | def testMultiClassesFromScratchWithDefaultOptimizer(self):
    method _testTrainWithTwoDimsLabel (line 1129) | def _testTrainWithTwoDimsLabel(self, n_classes):
    method testBinaryClassesTrainWithTwoDimsLabel (line 1150) | def testBinaryClassesTrainWithTwoDimsLabel(self):
    method testMultiClassesTrainWithTwoDimsLabel (line 1153) | def testMultiClassesTrainWithTwoDimsLabel(self):
    method _testTrainWithOneDimLabel (line 1156) | def _testTrainWithOneDimLabel(self, n_classes):
    method testBinaryClassesTrainWithOneDimLabel (line 1175) | def testBinaryClassesTrainWithOneDimLabel(self):
    method testMultiClassesTrainWithOneDimLabel (line 1178) | def testMultiClassesTrainWithOneDimLabel(self):
    method _testTrainWithTwoDimsWeight (line 1181) | def _testTrainWithTwoDimsWeight(self, n_classes):
    method testBinaryClassesTrainWithTwoDimsWeight (line 1206) | def testBinaryClassesTrainWithTwoDimsWeight(self):
    method testMultiClassesTrainWithTwoDimsWeight (line 1209) | def testMultiClassesTrainWithTwoDimsWeight(self):
    method _testTrainWithOneDimWeight (line 1212) | def _testTrainWithOneDimWeight(self, n_classes):
    method testBinaryClassesTrainWithOneDimWeight (line 1235) | def testBinaryClassesTrainWithOneDimWeight(self):
    method testMultiClassesTrainWithOneDimWeight (line 1238) | def testMultiClassesTrainWithOneDimWeight(self):
    method _testFromScratch (line 1241) | def _testFromScratch(self, n_classes):
    method testBinaryClassesFromScratch (line 1277) | def testBinaryClassesFromScratch(self):
    method testMultiClassesFromScratch (line 1280) | def testMultiClassesFromScratch(self):
    method _testFromCheckpoint (line 1283) | def _testFromCheckpoint(self, n_classes):
    method testBinaryClassesFromCheckpoint (line 1342) | def testBinaryClassesFromCheckpoint(self):
    method testMultiClassesFromCheckpoint (line 1345) | def testMultiClassesFromCheckpoint(self):
    method _testFromCheckpointFloatLabels (line 1348) | def _testFromCheckpointFloatLabels(self, n_classes):
    method testBinaryClassesFromCheckpointFloatLabels (line 1387) | def testBinaryClassesFromCheckpointFloatLabels(self):
    method testMultiClassesFromCheckpointFloatLabels (line 1390) | def testMultiClassesFromCheckpointFloatLabels(self):
    method _testFromCheckpointMultiBatch (line 1393) | def _testFromCheckpointMultiBatch(self, n_classes):
    method testBinaryClassesFromCheckpointMultiBatch (line 1457) | def testBinaryClassesFromCheckpointMultiBatch(self):
    method testMultiClassesFromCheckpointMultiBatch (line 1460) | def testMultiClassesFromCheckpointMultiBatch(self):
  class BaseLinearClassifierEvaluationTest (line 1464) | class BaseLinearClassifierEvaluationTest(object):
    method __init__ (line 1466) | def __init__(self, linear_classifier_fn, fc_lib=feature_column):
    method setUp (line 1470) | def setUp(self):
    method tearDown (line 1473) | def tearDown(self):
    method _test_evaluation_for_simple_data (line 1477) | def _test_evaluation_for_simple_data(self, n_classes):
    method test_binary_classes_evaluation_for_simple_data (line 1539) | def test_binary_classes_evaluation_for_simple_data(self):
    method test_multi_classes_evaluation_for_simple_data (line 1542) | def test_multi_classes_evaluation_for_simple_data(self):
    method _test_evaluation_batch (line 1545) | def _test_evaluation_batch(self, n_classes):
    method test_binary_classes_evaluation_batch (line 1617) | def test_binary_classes_evaluation_batch(self):
    method test_multi_classes_evaluation_batch (line 1620) | def test_multi_classes_evaluation_batch(self):
    method _test_evaluation_weights (line 1623) | def _test_evaluation_weights(self, n_classes):
    method test_binary_classes_evaluation_weights (line 1708) | def test_binary_classes_evaluation_weights(self):
    method test_multi_classes_evaluation_weights (line 1711) | def test_multi_classes_evaluation_weights(self):
  class BaseLinearClassifierPredictTest (line 1715) | class BaseLinearClassifierPredictTest(object):
    method __init__ (line 1717) | def __init__(self, linear_classifier_fn, fc_lib=feature_column):
    method setUp (line 1721) | def setUp(self):
    method tearDown (line 1724) | def tearDown(self):
    method _testPredictions (line 1728) | def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
    method testBinaryClassesWithoutLabelVocabulary (line 1803) | def testBinaryClassesWithoutLabelVocabulary(self):
    method testBinaryClassesWithLabelVocabulary (line 1810) | def testBinaryClassesWithLabelVocabulary(self):
    method testMultiClassesWithoutLabelVocabulary (line 1817) | def testMultiClassesWithoutLabelVocabulary(self):
    method testMultiClassesWithLabelVocabulary (line 1824) | def testMultiClassesWithLabelVocabulary(self):
    method testSparseCombiner (line 1831) | def testSparseCombiner(self):
  class BaseLinearClassifierIntegrationTest (line 1894) | class BaseLinearClassifierIntegrationTest(object):
    method __init__ (line 
Condensed preview — 187 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (4,210K chars).
[
  {
    "path": ".bazelrc",
    "chars": 130,
    "preview": "\n# Default options should come above this line\n\n# Put user-specific options in .bazelrc.user\ntry-import %workspace%/.baz"
  },
  {
    "path": ".gitignore",
    "chars": 135,
    "preview": "# editor files\n*.swp\n*~\n.vscode/\n.DS_Store\n\n# bazel\n/.bazelrc.user\n/bazel-*\n\n# python\n*.pyc\n*.pyo\n__pycache__\n*.whl\n.ipy"
  },
  {
    "path": "BUILD",
    "chars": 101,
    "preview": "# Description: Tensorflow Estimator.\n\nlicenses([\"notice\"])  # Apache 2.0\n\nexports_files([\"LICENSE\"])\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "chars": 1453,
    "preview": "Want to contribute? Great! First, read this page (including the small print at the end).\n\n### Before you contribute\n\nBef"
  },
  {
    "path": "LICENSE",
    "chars": 11416,
    "preview": "Copyright 2018 The TensorFlow Authors.  All rights reserved.\n\n                                 Apache License\n          "
  },
  {
    "path": "README.md",
    "chars": 2010,
    "preview": "-----------------\n| **`Documentation`** |\n|-----------------|\n| [![Documentation](https://img.shields.io/badge/api-refer"
  },
  {
    "path": "WORKSPACE",
    "chars": 470,
    "preview": "workspace(name = \"org_tensorflow_estimator\")\n\n# Use a custom python toolchain to make sure we always use the python bina"
  },
  {
    "path": "tensorflow_estimator/BUILD",
    "chars": 2914,
    "preview": "# Placeholder: load py_library\n\n# Description: Tensorflow Estimator.\nload(\n    \"//tensorflow_estimator/python/estimator/"
  },
  {
    "path": "tensorflow_estimator/estimator.bzl",
    "chars": 736,
    "preview": "\"\"\"Estimator common skylark macros.\"\"\"\n\n# Macro to run Estimator py_tests against pip installation.\ndef py_test(deps = ["
  },
  {
    "path": "tensorflow_estimator/python/estimator/BUILD",
    "chars": 66530,
    "preview": "# Placeholder: load py_library\nload(\"//tensorflow_estimator:estimator.bzl\", \"py_test\")\n\npackage(default_visibility = [\"/"
  },
  {
    "path": "tensorflow_estimator/python/estimator/api/BUILD",
    "chars": 1933,
    "preview": "# Placeholder: load aliased py_binary\nload(\"//tensorflow_estimator/python/estimator/api:api_gen.bzl\", \"ESTIMATOR_API_INI"
  },
  {
    "path": "tensorflow_estimator/python/estimator/api/api_gen.bzl",
    "chars": 13914,
    "preview": "\"\"\"Targets for generating TensorFlow Estimator Python API __init__.py files.\n\nThis bzl file is copied with slight modifi"
  },
  {
    "path": "tensorflow_estimator/python/estimator/api/extractor_wrapper.py",
    "chars": 898,
    "preview": "# Copyright 2023 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/api/generator_wrapper.py",
    "chars": 897,
    "preview": "# Copyright 2023 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/baseline.py",
    "chars": 23473,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/baseline_estimator_test.py",
    "chars": 14944,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/baseline_test.py",
    "chars": 54404,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/canned_estimator_ds_integration_test.py",
    "chars": 5863,
    "preview": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/dnn.py",
    "chars": 48618,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/dnn_estimator_test.py",
    "chars": 7401,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/dnn_linear_combined.py",
    "chars": 48471,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/dnn_linear_combined_estimator_test.py",
    "chars": 9505,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/dnn_linear_combined_test.py",
    "chars": 38700,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/dnn_test_fc_v2.py",
    "chars": 19108,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/dnn_testing_utils.py",
    "chars": 82124,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/head.py",
    "chars": 72566,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/head_test.py",
    "chars": 166796,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/kmeans.py",
    "chars": 20395,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/kmeans_test.py",
    "chars": 21558,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/linear.py",
    "chars": 68128,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/linear_estimator_test.py",
    "chars": 7183,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/linear_model_test.py",
    "chars": 59598,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/linear_optimizer/BUILD",
    "chars": 2480,
    "preview": "# Placeholder: load py_library\nload(\"//tensorflow_estimator:estimator.bzl\", \"py_test\")\n\npackage(default_visibility = [\"/"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/linear_optimizer/__init__.py",
    "chars": 994,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/linear_optimizer/doc/sdca.ipynb",
    "chars": 11291,
    "preview": "{\n  \"cells\": [\n    {\n      \"cell_type\": \"markdown\",\n      \"metadata\": {\n        \"colab_type\": \"text\",\n        \"id\": \"DzJ"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/linear_optimizer/python/sdca_test.py",
    "chars": 26650,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/linear_optimizer/python/utils/sdca_ops.py",
    "chars": 32502,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/linear_optimizer/python/utils/sdca_ops_test.py",
    "chars": 54340,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/linear_optimizer/python/utils/sharded_mutable_dense_hashtable.py",
    "chars": 14506,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/linear_optimizer/python/utils/sharded_mutable_dense_hashtable_test.py",
    "chars": 4582,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/linear_test.py",
    "chars": 7789,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/linear_testing_utils.py",
    "chars": 84267,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/metric_keys.py",
    "chars": 2364,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/optimizers.py",
    "chars": 6396,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/optimizers_test.py",
    "chars": 4324,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/optimizers_test_v2.py",
    "chars": 5806,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/parsing_utils.py",
    "chars": 15354,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/parsing_utils_test.py",
    "chars": 10653,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/prediction_keys.py",
    "chars": 1275,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/rnn.py",
    "chars": 28806,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/rnn_test.py",
    "chars": 67981,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/saved_model_estimator.py",
    "chars": 20175,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/saved_model_estimator_test.py",
    "chars": 20627,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/testdata/wire_vocabulary.txt",
    "chars": 20,
    "preview": "omar\nstringer\nmarlo\n"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/BUILD",
    "chars": 5297,
    "preview": "# Placeholder: load py_library\nload(\"//tensorflow_estimator:estimator.bzl\", \"py_test\")\n\npackage(default_visibility = [\"/"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/ar_model.py",
    "chars": 38400,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/ar_model_test.py",
    "chars": 5182,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/ar_model_training_test.py",
    "chars": 8601,
    "preview": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/estimators.py",
    "chars": 20770,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/estimators_test.py",
    "chars": 8722,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/feature_keys.py",
    "chars": 2314,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/head.py",
    "chars": 21379,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/head_test.py",
    "chars": 17922,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/math_utils.py",
    "chars": 19761,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/math_utils_test.py",
    "chars": 4079,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/model.py",
    "chars": 14441,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/model_utils.py",
    "chars": 3212,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/saved_model_utils.py",
    "chars": 14941,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/timeseries/state_management.py",
    "chars": 4004,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/v1/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/v1/baseline_estimator_test_v1.py",
    "chars": 15279,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/v1/baseline_test_v1.py",
    "chars": 55981,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/v1/dnn_estimator_test_v1.py",
    "chars": 6245,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/v1/dnn_linear_combined_estimator_test_v1.py",
    "chars": 8836,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/v1/dnn_linear_combined_test_v1.py",
    "chars": 46937,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/v1/dnn_test_fc_v1_v1.py",
    "chars": 17817,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/v1/dnn_test_fc_v2_v1.py",
    "chars": 17722,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/v1/dnn_testing_utils_v1.py",
    "chars": 82598,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/v1/linear_estimator_test_v1.py",
    "chars": 5629,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/v1/linear_test_v1.py",
    "chars": 13879,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/canned/v1/linear_testing_utils_v1.py",
    "chars": 91091,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/distribute_strategy_estimator_integration_test.py",
    "chars": 7834,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/distribute_strategy_estimator_training_test.py",
    "chars": 24488,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/early_stopping.py",
    "chars": 23610,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/early_stopping_test.py",
    "chars": 9670,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/estimator.py",
    "chars": 103213,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/estimator_export.py",
    "chars": 2883,
    "preview": "# Copyright 2023 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/estimator_export_test.py",
    "chars": 2004,
    "preview": "# Copyright 2023 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/estimator_lib.py",
    "chars": 4886,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/estimator_test.py",
    "chars": 134806,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/export/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "tensorflow_estimator/python/estimator/export/export.py",
    "chars": 20652,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/export/export_lib.py",
    "chars": 3055,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/export/export_output.py",
    "chars": 1924,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/export/export_test.py",
    "chars": 23540,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/export/function.py",
    "chars": 13836,
    "preview": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/export/function_test.py",
    "chars": 9491,
    "preview": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/exporter.py",
    "chars": 19873,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/exporter_test.py",
    "chars": 17430,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/extenders.py",
    "chars": 4896,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/extenders_test.py",
    "chars": 5205,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/gc.py",
    "chars": 6327,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/gc_test.py",
    "chars": 6074,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/base_head.py",
    "chars": 39719,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/base_head_test.py",
    "chars": 10317,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/binary_class_head.py",
    "chars": 27000,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/binary_class_head_test.py",
    "chars": 64901,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/head_utils.py",
    "chars": 3986,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/multi_class_head.py",
    "chars": 21524,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/multi_class_head_test.py",
    "chars": 65188,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/multi_head.py",
    "chars": 23371,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/multi_head_test.py",
    "chars": 36820,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/multi_label_head.py",
    "chars": 26898,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/multi_label_head_test.py",
    "chars": 56975,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/regression_head.py",
    "chars": 23862,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/regression_head_test.py",
    "chars": 72196,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/sequential_head.py",
    "chars": 20736,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/head/sequential_head_test.py",
    "chars": 21047,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/hooks/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks.py",
    "chars": 2835,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/hooks/basic_session_run_hooks_test.py",
    "chars": 58286,
    "preview": "# pylint: disable=g-bad-file-header\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the"
  },
  {
    "path": "tensorflow_estimator/python/estimator/hooks/fake_summary_writer.py",
    "chars": 5595,
    "preview": "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/hooks/hooks.py",
    "chars": 11072,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/hooks/hooks_test.py",
    "chars": 15092,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/hooks/session_run_hook.py",
    "chars": 4204,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/inputs/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "tensorflow_estimator/python/estimator/inputs/inputs.py",
    "chars": 1106,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/inputs/numpy_io.py",
    "chars": 8018,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/inputs/numpy_io_test.py",
    "chars": 22399,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/inputs/pandas_io.py",
    "chars": 5859,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/inputs/pandas_io_test.py",
    "chars": 11028,
    "preview": "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/inputs/queues/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "tensorflow_estimator/python/estimator/inputs/queues/feeding_functions.py",
    "chars": 18804,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/inputs/queues/feeding_functions_test.py",
    "chars": 13543,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/inputs/queues/feeding_queue_runner.py",
    "chars": 6883,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/inputs/queues/feeding_queue_runner_test.py",
    "chars": 5162,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/keras_distribute_strategy_test.py",
    "chars": 11635,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/keras_lib.py",
    "chars": 33679,
    "preview": "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/keras_premade_model_test.py",
    "chars": 8192,
    "preview": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/keras_test.py",
    "chars": 48113,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/mode_keys.py",
    "chars": 1080,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/model_fn.py",
    "chars": 25073,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/model_fn_test.py",
    "chars": 26991,
    "preview": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/object_checkpointing_test.py",
    "chars": 5767,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/run_config.py",
    "chars": 38460,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/run_config_test.py",
    "chars": 46693,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tf_estimator_doctest.py",
    "chars": 5800,
    "preview": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tools/__init__.py",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "tensorflow_estimator/python/estimator/tools/analytics.py",
    "chars": 1265,
    "preview": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tools/checkpoint_converter.py",
    "chars": 14988,
    "preview": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tools/checkpoint_converter_test.py",
    "chars": 16018,
    "preview": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/BUILD",
    "chars": 6237,
    "preview": "# Description: TPUEstimator\n\n# Placeholder: load py_library\n\n# INTERNAL TEST RULE PLACEHOLDER\nload(\"//tensorflow_estimat"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/__init__.py",
    "chars": 818,
    "preview": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/_tpu_estimator_embedding.py",
    "chars": 28939,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/autotuning_iterations_per_loop_test.py",
    "chars": 18228,
    "preview": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/error_handling.py",
    "chars": 5327,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/error_handling_test.py",
    "chars": 1481,
    "preview": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/iteration_count_estimator.py",
    "chars": 7833,
    "preview": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/spatial_partitioning_api.md",
    "chars": 3578,
    "preview": "# Spatial partitioning\n\nSpatial partitioning allows us to run models with larger input images. Typically\nthese models wi"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/tpu_config.py",
    "chars": 15727,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/tpu_config_test.py",
    "chars": 8067,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/tpu_context.py",
    "chars": 34455,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/tpu_enqueue_sequence_test.py",
    "chars": 17168,
    "preview": "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/tpu_estimator.py",
    "chars": 184949,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/tpu_estimator_embedding_test.py",
    "chars": 57098,
    "preview": "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/tpu_estimator_evaluation_test.py",
    "chars": 15747,
    "preview": "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/tpu_estimator_export_test.py",
    "chars": 28157,
    "preview": "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/tpu_estimator_gradients_test.py",
    "chars": 32083,
    "preview": "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/tpu_estimator_input_v2_test.py",
    "chars": 6856,
    "preview": "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/tpu_estimator_integration_test.py",
    "chars": 21802,
    "preview": "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/tpu_estimator_model_parallelism_test.py",
    "chars": 13734,
    "preview": "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/tpu_estimator_signals_test.py",
    "chars": 13742,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/tpu_estimator_test.py",
    "chars": 104675,
    "preview": "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/tpu/util.py",
    "chars": 3607,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/training.py",
    "chars": 44016,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/training_test.py",
    "chars": 90338,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/util.py",
    "chars": 4635,
    "preview": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/python/estimator/util_test.py",
    "chars": 4017,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/tools/pip_package/BUILD",
    "chars": 873,
    "preview": "package(default_visibility = [\"//tensorflow_estimator:internal\"])\n\n# Description:\n#  Tools for building the TensorFlow p"
  },
  {
    "path": "tensorflow_estimator/tools/pip_package/build_pip_package.sh",
    "chars": 4038,
    "preview": "#!/usr/bin/env bash\n# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License,"
  },
  {
    "path": "tensorflow_estimator/tools/pip_package/create_pip_helper.py",
    "chars": 5275,
    "preview": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "tensorflow_estimator/tools/pip_package/setup.py",
    "chars": 2968,
    "preview": "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"L"
  },
  {
    "path": "third_party/py/BUILD",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "third_party/py/BUILD.tpl",
    "chars": 766,
    "preview": "licenses([\"restricted\"])\n\npackage(default_visibility = [\"//visibility:public\"])\n\n# Point both runtimes to the same pytho"
  },
  {
    "path": "third_party/py/python_configure.bzl",
    "chars": 2144,
    "preview": "\"\"\"Repository rule for Python autoconfiguration.\n\n`python_configure` depends on the following environment variables:\n\n  "
  }
]

About this extraction

This page contains the full source code of the tensorflow/estimator GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 187 files (3.9 MB), approximately 1.0M tokens, and a symbol index with 4635 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub-repository-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!